From d8cb4b96b0cd2e7c45305786cd7e50c913b80f79 Mon Sep 17 00:00:00 2001
From: Attila Uygun
Date: Tue, 14 Jul 2020 12:47:06 +0200
Subject: [PATCH] Add glslang, SPIRV-Reflect, VMA, Vulkan SDK.

---
 build/linux/Makefile | 67 +-
 src/third_party/BUILD.gn | 74 +-
 .../glslang/OGLCompilersDLL/InitializeDll.cpp | 165 +
 .../glslang/OGLCompilersDLL/InitializeDll.h | 49 +
 .../SPIRV/CInterface/spirv_c_interface.cpp | 110 +
 src/third_party/glslang/SPIRV/GLSL.ext.AMD.h | 108 +
 src/third_party/glslang/SPIRV/GLSL.ext.EXT.h | 40 +
 src/third_party/glslang/SPIRV/GLSL.ext.KHR.h | 51 +
 src/third_party/glslang/SPIRV/GLSL.ext.NV.h | 81 +
 src/third_party/glslang/SPIRV/GLSL.std.450.h | 131 +
 .../glslang/SPIRV/GlslangToSpv.cpp | 8784 ++
 src/third_party/glslang/SPIRV/GlslangToSpv.h | 61 +
 .../glslang/SPIRV/InReadableOrder.cpp | 131 +
 src/third_party/glslang/SPIRV/Logger.cpp | 72 +
 src/third_party/glslang/SPIRV/Logger.h | 83 +
 .../glslang/SPIRV/NonSemanticDebugPrintf.h | 50 +
 src/third_party/glslang/SPIRV/SPVRemapper.cpp | 1490 +
 src/third_party/glslang/SPIRV/SPVRemapper.h | 304 +
 src/third_party/glslang/SPIRV/SpvBuilder.cpp | 3228 +
 src/third_party/glslang/SPIRV/SpvBuilder.h | 854 +
 .../glslang/SPIRV/SpvPostProcess.cpp | 450 +
 src/third_party/glslang/SPIRV/SpvTools.cpp | 241 +
 src/third_party/glslang/SPIRV/SpvTools.h | 88 +
 src/third_party/glslang/SPIRV/bitutils.h | 81 +
 src/third_party/glslang/SPIRV/disassemble.cpp | 747 +
 src/third_party/glslang/SPIRV/disassemble.h | 53 +
 src/third_party/glslang/SPIRV/doc.cpp | 2923 +
 src/third_party/glslang/SPIRV/doc.h | 259 +
 src/third_party/glslang/SPIRV/hex_float.h | 1078 +
 src/third_party/glslang/SPIRV/spirv.hpp | 2171 +
 src/third_party/glslang/SPIRV/spvIR.h | 507 +
 .../glslang/StandAlone/DirStackFileIncluder.h | 141 +
 .../glslang/StandAlone/ResourceLimits.cpp | 496 +
 .../glslang/StandAlone/ResourceLimits.h | 57 +
 .../CInterface/glslang_c_interface.cpp | 428 +
 .../glslang/GenericCodeGen/CodeGen.cpp | 76 +
 .../glslang/glslang/GenericCodeGen/Link.cpp | 91 +
 .../glslang/glslang/Include/BaseTypes.h | 571 +
 .../glslang/glslang/Include/Common.h | 291 +
 .../glslang/glslang/Include/ConstantUnion.h | 974 +
 .../glslang/glslang/Include/InfoSink.h | 144 +
 .../glslang/Include/InitializeGlobals.h | 44 +
 .../glslang/glslang/Include/PoolAlloc.h | 316 +
 .../glslang/glslang/Include/ResourceLimits.h | 150 +
 .../glslang/glslang/Include/ShHandle.h | 176 +
 .../glslang/glslang/Include/Types.h | 2487 +
 .../glslang/glslang/Include/arrays.h | 341 +
 .../glslang/Include/glslang_c_interface.h | 249 +
 .../glslang/Include/glslang_c_shader_types.h | 185 +
 .../glslang/glslang/Include/intermediate.h | 1806 +
 .../glslang/MachineIndependent/Constant.cpp | 1428 +
 .../glslang/MachineIndependent/InfoSink.cpp | 113 +
 .../glslang/MachineIndependent/Initialize.cpp | 9290 ++
 .../glslang/MachineIndependent/Initialize.h | 112 +
 .../MachineIndependent/IntermTraverse.cpp | 302 +
 .../MachineIndependent/Intermediate.cpp | 3986 +
 .../MachineIndependent/LiveTraverser.h | 168 +
 .../MachineIndependent/ParseContextBase.cpp | 641 +
 .../MachineIndependent/ParseHelper.cpp | 8626 ++
 .../glslang/MachineIndependent/ParseHelper.h | 534 +
 .../glslang/MachineIndependent/PoolAlloc.cpp | 315 +
 .../glslang/MachineIndependent/RemoveTree.cpp | 118 +
 .../glslang/MachineIndependent/RemoveTree.h | 41 +
 .../glslang/MachineIndependent/Scan.cpp | 1827 +
 .../glslang/glslang/MachineIndependent/Scan.h | 276 +
 .../glslang/MachineIndependent/ScanContext.h | 93 +
 .../glslang/MachineIndependent/ShaderLang.cpp | 2146 +
 .../MachineIndependent/SymbolTable.cpp |
446 + .../glslang/MachineIndependent/SymbolTable.h | 885 + .../glslang/MachineIndependent/Versions.cpp | 1285 + .../glslang/MachineIndependent/Versions.h | 334 + .../glslang/MachineIndependent/attribute.cpp | 346 + .../glslang/MachineIndependent/attribute.h | 149 + .../glslang/MachineIndependent/gl_types.h | 218 + .../glslang/MachineIndependent/glslang.y | 3906 + .../MachineIndependent/glslang_tab.cpp | 10615 ++ .../MachineIndependent/glslang_tab.cpp.h | 521 + .../glslang/MachineIndependent/intermOut.cpp | 1565 + .../glslang/MachineIndependent/iomapper.cpp | 1291 + .../glslang/MachineIndependent/iomapper.h | 302 + .../glslang/MachineIndependent/limits.cpp | 200 + .../MachineIndependent/linkValidate.cpp | 1781 + .../MachineIndependent/localintermediate.h | 1052 + .../glslang/MachineIndependent/parseConst.cpp | 214 + .../MachineIndependent/parseVersions.h | 245 + .../glslang/glslang/MachineIndependent/pch.h | 49 + .../MachineIndependent/preprocessor/Pp.cpp | 1345 + .../preprocessor/PpAtom.cpp | 181 + .../preprocessor/PpContext.cpp | 120 + .../preprocessor/PpContext.h | 703 + .../preprocessor/PpScanner.cpp | 1315 + .../preprocessor/PpTokens.cpp | 221 + .../preprocessor/PpTokens.h | 179 + .../propagateNoContraction.cpp | 870 + .../propagateNoContraction.h | 55 + .../glslang/MachineIndependent/reflection.cpp | 1269 + .../glslang/MachineIndependent/reflection.h | 223 + .../glslang/OSDependent/Unix/ossource.cpp | 207 + .../glslang/OSDependent/Web/glslang.after.js | 26 + .../glslang/OSDependent/Web/glslang.js.cpp | 287 + .../glslang/OSDependent/Web/glslang.pre.js | 56 + .../glslang/OSDependent/Windows/main.cpp | 74 + .../glslang/OSDependent/Windows/ossource.cpp | 147 + .../glslang/glslang/OSDependent/osinclude.h | 63 + .../glslang/glslang/Public/ShaderLang.h | 948 + src/third_party/glslang/glslang/build_info.h | 62 + .../include/spirv/unified1/spirv.h | 1077 + src/third_party/spirv-reflect/spirv_reflect.c | 4442 + src/third_party/spirv-reflect/spirv_reflect.h | 2045 + src/third_party/vma/vk_mem_alloc.cpp | 3 + src/third_party/vma/vk_mem_alloc.h | 19229 ++++ src/third_party/vulkan/loader/adapters.h | 80 + src/third_party/vulkan/loader/asm_offset.c | 94 + src/third_party/vulkan/loader/cJSON.c | 1215 + src/third_party/vulkan/loader/cJSON.h | 174 + src/third_party/vulkan/loader/debug_utils.c | 996 + src/third_party/vulkan/loader/debug_utils.h | 101 + .../vulkan/loader/dev_ext_trampoline.c | 538 + .../vulkan/loader/dirent_on_windows.c | 128 + .../vulkan/loader/dirent_on_windows.h | 51 + src/third_party/vulkan/loader/dxgi_loader.c | 23 + src/third_party/vulkan/loader/dxgi_loader.h | 8 + .../vulkan/loader/extension_manual.c | 315 + .../vulkan/loader/extension_manual.h | 100 + src/third_party/vulkan/loader/gpa_helper.h | 249 + src/third_party/vulkan/loader/loader.c | 8751 ++ src/third_party/vulkan/loader/loader.h | 547 + src/third_party/vulkan/loader/murmurhash.c | 98 + src/third_party/vulkan/loader/murmurhash.h | 52 + src/third_party/vulkan/loader/phys_dev_ext.c | 1056 + src/third_party/vulkan/loader/trampoline.c | 2599 + .../vulkan/loader/unknown_ext_chain.c | 819 + .../vulkan/loader/vk_dispatch_table_helper.h | 951 + .../vulkan/loader/vk_layer_dispatch_table.h | 757 + .../vulkan/loader/vk_loader_extensions.c | 5483 + .../vulkan/loader/vk_loader_extensions.h | 462 + .../vulkan/loader/vk_loader_layer.h | 46 + .../vulkan/loader/vk_loader_platform.h | 432 + .../vulkan/loader/vk_object_types.h | 387 + src/third_party/vulkan/loader/wsi.c | 2526 + src/third_party/vulkan/loader/wsi.h | 219 + 
.../vulkan/vk_enum_string_helper.h | 7062 ++ src/third_party/vulkan/vk_icd.h | 236 + src/third_party/vulkan/vk_layer.h | 210 + src/third_party/vulkan/vk_platform.h | 82 + src/third_party/vulkan/vk_sdk_platform.h | 69 + src/third_party/vulkan/vulkan.h | 87 + src/third_party/vulkan/vulkan.hpp | 94292 ++++++++++++++++ src/third_party/vulkan/vulkan_android.h | 112 + src/third_party/vulkan/vulkan_beta.h | 56 + src/third_party/vulkan/vulkan_core.h | 11847 ++ src/third_party/vulkan/vulkan_directfb.h | 54 + src/third_party/vulkan/vulkan_fuchsia.h | 47 + src/third_party/vulkan/vulkan_ggp.h | 58 + src/third_party/vulkan/vulkan_ios.h | 47 + src/third_party/vulkan/vulkan_macos.h | 47 + src/third_party/vulkan/vulkan_metal.h | 54 + src/third_party/vulkan/vulkan_vi.h | 47 + src/third_party/vulkan/vulkan_wayland.h | 54 + src/third_party/vulkan/vulkan_win32.h | 315 + src/third_party/vulkan/vulkan_xcb.h | 55 + src/third_party/vulkan/vulkan_xlib.h | 55 + src/third_party/vulkan/vulkan_xlib_xrandr.h | 45 + 163 files changed, 268300 insertions(+), 9 deletions(-) create mode 100644 src/third_party/glslang/OGLCompilersDLL/InitializeDll.cpp create mode 100644 src/third_party/glslang/OGLCompilersDLL/InitializeDll.h create mode 100644 src/third_party/glslang/SPIRV/CInterface/spirv_c_interface.cpp create mode 100644 src/third_party/glslang/SPIRV/GLSL.ext.AMD.h create mode 100644 src/third_party/glslang/SPIRV/GLSL.ext.EXT.h create mode 100644 src/third_party/glslang/SPIRV/GLSL.ext.KHR.h create mode 100644 src/third_party/glslang/SPIRV/GLSL.ext.NV.h create mode 100644 src/third_party/glslang/SPIRV/GLSL.std.450.h create mode 100644 src/third_party/glslang/SPIRV/GlslangToSpv.cpp create mode 100644 src/third_party/glslang/SPIRV/GlslangToSpv.h create mode 100644 src/third_party/glslang/SPIRV/InReadableOrder.cpp create mode 100644 src/third_party/glslang/SPIRV/Logger.cpp create mode 100644 src/third_party/glslang/SPIRV/Logger.h create mode 100644 src/third_party/glslang/SPIRV/NonSemanticDebugPrintf.h create mode 100644 src/third_party/glslang/SPIRV/SPVRemapper.cpp create mode 100644 src/third_party/glslang/SPIRV/SPVRemapper.h create mode 100644 src/third_party/glslang/SPIRV/SpvBuilder.cpp create mode 100644 src/third_party/glslang/SPIRV/SpvBuilder.h create mode 100644 src/third_party/glslang/SPIRV/SpvPostProcess.cpp create mode 100644 src/third_party/glslang/SPIRV/SpvTools.cpp create mode 100644 src/third_party/glslang/SPIRV/SpvTools.h create mode 100644 src/third_party/glslang/SPIRV/bitutils.h create mode 100644 src/third_party/glslang/SPIRV/disassemble.cpp create mode 100644 src/third_party/glslang/SPIRV/disassemble.h create mode 100644 src/third_party/glslang/SPIRV/doc.cpp create mode 100644 src/third_party/glslang/SPIRV/doc.h create mode 100644 src/third_party/glslang/SPIRV/hex_float.h create mode 100644 src/third_party/glslang/SPIRV/spirv.hpp create mode 100644 src/third_party/glslang/SPIRV/spvIR.h create mode 100644 src/third_party/glslang/StandAlone/DirStackFileIncluder.h create mode 100644 src/third_party/glslang/StandAlone/ResourceLimits.cpp create mode 100644 src/third_party/glslang/StandAlone/ResourceLimits.h create mode 100644 src/third_party/glslang/glslang/CInterface/glslang_c_interface.cpp create mode 100644 src/third_party/glslang/glslang/GenericCodeGen/CodeGen.cpp create mode 100644 src/third_party/glslang/glslang/GenericCodeGen/Link.cpp create mode 100644 src/third_party/glslang/glslang/Include/BaseTypes.h create mode 100644 src/third_party/glslang/glslang/Include/Common.h create mode 100644 
src/third_party/glslang/glslang/Include/ConstantUnion.h create mode 100644 src/third_party/glslang/glslang/Include/InfoSink.h create mode 100644 src/third_party/glslang/glslang/Include/InitializeGlobals.h create mode 100644 src/third_party/glslang/glslang/Include/PoolAlloc.h create mode 100644 src/third_party/glslang/glslang/Include/ResourceLimits.h create mode 100644 src/third_party/glslang/glslang/Include/ShHandle.h create mode 100644 src/third_party/glslang/glslang/Include/Types.h create mode 100644 src/third_party/glslang/glslang/Include/arrays.h create mode 100644 src/third_party/glslang/glslang/Include/glslang_c_interface.h create mode 100644 src/third_party/glslang/glslang/Include/glslang_c_shader_types.h create mode 100644 src/third_party/glslang/glslang/Include/intermediate.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Constant.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/InfoSink.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Initialize.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Initialize.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/IntermTraverse.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Intermediate.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/LiveTraverser.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/ParseContextBase.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/ParseHelper.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/ParseHelper.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/PoolAlloc.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/RemoveTree.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/RemoveTree.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Scan.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Scan.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/ScanContext.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/ShaderLang.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/SymbolTable.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/SymbolTable.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Versions.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/Versions.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/attribute.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/attribute.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/gl_types.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/glslang.y create mode 100644 src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/intermOut.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/iomapper.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/iomapper.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/limits.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/linkValidate.cpp create mode 100644 
src/third_party/glslang/glslang/MachineIndependent/localintermediate.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/parseConst.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/parseVersions.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/pch.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.h create mode 100644 src/third_party/glslang/glslang/MachineIndependent/reflection.cpp create mode 100644 src/third_party/glslang/glslang/MachineIndependent/reflection.h create mode 100644 src/third_party/glslang/glslang/OSDependent/Unix/ossource.cpp create mode 100644 src/third_party/glslang/glslang/OSDependent/Web/glslang.after.js create mode 100644 src/third_party/glslang/glslang/OSDependent/Web/glslang.js.cpp create mode 100644 src/third_party/glslang/glslang/OSDependent/Web/glslang.pre.js create mode 100644 src/third_party/glslang/glslang/OSDependent/Windows/main.cpp create mode 100644 src/third_party/glslang/glslang/OSDependent/Windows/ossource.cpp create mode 100644 src/third_party/glslang/glslang/OSDependent/osinclude.h create mode 100644 src/third_party/glslang/glslang/Public/ShaderLang.h create mode 100644 src/third_party/glslang/glslang/build_info.h create mode 100644 src/third_party/spirv-reflect/include/spirv/unified1/spirv.h create mode 100644 src/third_party/spirv-reflect/spirv_reflect.c create mode 100644 src/third_party/spirv-reflect/spirv_reflect.h create mode 100644 src/third_party/vma/vk_mem_alloc.cpp create mode 100644 src/third_party/vma/vk_mem_alloc.h create mode 100644 src/third_party/vulkan/loader/adapters.h create mode 100644 src/third_party/vulkan/loader/asm_offset.c create mode 100644 src/third_party/vulkan/loader/cJSON.c create mode 100644 src/third_party/vulkan/loader/cJSON.h create mode 100644 src/third_party/vulkan/loader/debug_utils.c create mode 100644 src/third_party/vulkan/loader/debug_utils.h create mode 100644 src/third_party/vulkan/loader/dev_ext_trampoline.c create mode 100644 src/third_party/vulkan/loader/dirent_on_windows.c create mode 100644 src/third_party/vulkan/loader/dirent_on_windows.h create mode 100644 src/third_party/vulkan/loader/dxgi_loader.c create mode 100644 src/third_party/vulkan/loader/dxgi_loader.h create mode 100644 src/third_party/vulkan/loader/extension_manual.c create mode 100644 src/third_party/vulkan/loader/extension_manual.h create mode 100644 src/third_party/vulkan/loader/gpa_helper.h create mode 100644 src/third_party/vulkan/loader/loader.c create mode 100644 src/third_party/vulkan/loader/loader.h create mode 100644 src/third_party/vulkan/loader/murmurhash.c create mode 100644 src/third_party/vulkan/loader/murmurhash.h create mode 100644 
src/third_party/vulkan/loader/phys_dev_ext.c create mode 100644 src/third_party/vulkan/loader/trampoline.c create mode 100644 src/third_party/vulkan/loader/unknown_ext_chain.c create mode 100644 src/third_party/vulkan/loader/vk_dispatch_table_helper.h create mode 100644 src/third_party/vulkan/loader/vk_layer_dispatch_table.h create mode 100644 src/third_party/vulkan/loader/vk_loader_extensions.c create mode 100644 src/third_party/vulkan/loader/vk_loader_extensions.h create mode 100644 src/third_party/vulkan/loader/vk_loader_layer.h create mode 100644 src/third_party/vulkan/loader/vk_loader_platform.h create mode 100644 src/third_party/vulkan/loader/vk_object_types.h create mode 100644 src/third_party/vulkan/loader/wsi.c create mode 100644 src/third_party/vulkan/loader/wsi.h create mode 100644 src/third_party/vulkan/vk_enum_string_helper.h create mode 100644 src/third_party/vulkan/vk_icd.h create mode 100644 src/third_party/vulkan/vk_layer.h create mode 100644 src/third_party/vulkan/vk_platform.h create mode 100644 src/third_party/vulkan/vk_sdk_platform.h create mode 100644 src/third_party/vulkan/vulkan.h create mode 100644 src/third_party/vulkan/vulkan.hpp create mode 100644 src/third_party/vulkan/vulkan_android.h create mode 100644 src/third_party/vulkan/vulkan_beta.h create mode 100644 src/third_party/vulkan/vulkan_core.h create mode 100644 src/third_party/vulkan/vulkan_directfb.h create mode 100644 src/third_party/vulkan/vulkan_fuchsia.h create mode 100644 src/third_party/vulkan/vulkan_ggp.h create mode 100644 src/third_party/vulkan/vulkan_ios.h create mode 100644 src/third_party/vulkan/vulkan_macos.h create mode 100644 src/third_party/vulkan/vulkan_metal.h create mode 100644 src/third_party/vulkan/vulkan_vi.h create mode 100644 src/third_party/vulkan/vulkan_wayland.h create mode 100644 src/third_party/vulkan/vulkan_win32.h create mode 100644 src/third_party/vulkan/vulkan_xcb.h create mode 100644 src/third_party/vulkan/vulkan_xlib.h create mode 100644 src/third_party/vulkan/vulkan_xlib_xrandr.h diff --git a/build/linux/Makefile b/build/linux/Makefile index 681a1af..1e14d30 100644 --- a/build/linux/Makefile +++ b/build/linux/Makefile @@ -25,7 +25,7 @@ INTERMEDIATE_DIR := $(OUTPUT_DIR)/obj BUILD_DIR := $(INTERMEDIATE_DIR)/$(BUILD) ARFLAGS = r -LDFLAGS = -lX11 -lGL -pthread -lasound +LDFLAGS = -lX11 -lGL -pthread -lasound -ldl # Always enable debug information. 
CFLAGS += -g @@ -56,6 +56,14 @@ CXXFLAGS = $(CFLAGS) # Enable C++17 CXXFLAGS += -std=c++17 +# Vulkan config +CFLAGS += -DVK_USE_PLATFORM_XLIB_KHR +CFLAGS += -DVULKAN_NON_CMAKE_BUILD +CFLAGS += -DSYSCONFDIR='"/etc"' +CFLAGS += -DFALLBACK_DATA_DIRS='"/usr/local/share:/usr/share"' +CFLAGS += -DFALLBACK_CONFIG_DIRS='"/etc/xdg"' +CFLAGS += -DHAVE_SECURE_GETENV + # --- Internal functions --- app_exe = $(OUTPUT_DIR)/$(1)_$(ARCH)_$(BUILD) objs_from_src = $(patsubst $(SRC_ROOT)/%, $(BUILD_DIR)/%.o, $(basename $(1))) @@ -109,11 +117,66 @@ GLTEST_SRC := \ $(SRC_ROOT)/engine/sound_player.cc \ $(SRC_ROOT)/engine/sound.cc \ $(SRC_ROOT)/third_party/glew/glew.c \ + $(SRC_ROOT)/third_party/glslang/glslang/CInterface/glslang_c_interface.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/GenericCodeGen/CodeGen.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/GenericCodeGen/Link.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/attribute.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/Constant.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/InfoSink.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/Initialize.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/Intermediate.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/intermOut.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/IntermTraverse.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/iomapper.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/limits.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/linkValidate.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/parseConst.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/ParseContextBase.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/ParseHelper.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/PoolAlloc.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/reflection.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/RemoveTree.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/Scan.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/ShaderLang.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/SymbolTable.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/MachineIndependent/Versions.cpp \ + $(SRC_ROOT)/third_party/glslang/glslang/OSDependent/Unix/ossource.cpp \ + $(SRC_ROOT)/third_party/glslang/OGLCompilersDLL/InitializeDll.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/CInterface/spirv_c_interface.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/disassemble.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/doc.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/GlslangToSpv.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/InReadableOrder.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/Logger.cpp \ + 
$(SRC_ROOT)/third_party/glslang/SPIRV/SpvBuilder.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/SpvPostProcess.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/SPVRemapper.cpp \ + $(SRC_ROOT)/third_party/glslang/SPIRV/SpvTools.cpp \ + $(SRC_ROOT)/third_party/glslang/StandAlone/ResourceLimits.cpp \ $(SRC_ROOT)/third_party/jsoncpp/jsoncpp.cc \ + $(SRC_ROOT)/third_party/spirv-reflect/spirv_reflect.c \ $(SRC_ROOT)/third_party/texture_compressor/dxt_encoder_internals.cc \ $(SRC_ROOT)/third_party/texture_compressor/dxt_encoder.cc \ $(SRC_ROOT)/third_party/texture_compressor/texture_compressor_etc1.cc \ - $(SRC_ROOT)/third_party/texture_compressor/texture_compressor.cc + $(SRC_ROOT)/third_party/texture_compressor/texture_compressor.cc \ + $(SRC_ROOT)/third_party/vma/vk_mem_alloc.cpp \ + $(SRC_ROOT)/third_party/vulkan/loader/cJSON.c \ + $(SRC_ROOT)/third_party/vulkan/loader/debug_utils.c \ + $(SRC_ROOT)/third_party/vulkan/loader/dev_ext_trampoline.c \ + $(SRC_ROOT)/third_party/vulkan/loader/extension_manual.c \ + $(SRC_ROOT)/third_party/vulkan/loader/loader.c \ + $(SRC_ROOT)/third_party/vulkan/loader/murmurhash.c \ + $(SRC_ROOT)/third_party/vulkan/loader/phys_dev_ext.c \ + $(SRC_ROOT)/third_party/vulkan/loader/trampoline.c \ + $(SRC_ROOT)/third_party/vulkan/loader/unknown_ext_chain.c \ + $(SRC_ROOT)/third_party/vulkan/loader/wsi.c GLTEST_EXE := $(call app_exe,gltest) GLTEST_OBJS := $(call objs_from_src, $(GLTEST_SRC)) diff --git a/src/third_party/BUILD.gn b/src/third_party/BUILD.gn index f469035..c3982ff 100644 --- a/src/third_party/BUILD.gn +++ b/src/third_party/BUILD.gn @@ -2,6 +2,8 @@ source_set("third_party") { sources = [ "jsoncpp/json.h", "jsoncpp/jsoncpp.cc", + "minimp3/minimp3_ex.h", + "minimp3/minimp3.h", "stb/stb_image.h", "stb/stb_truetype.h", "texture_compressor/dxt_encoder_implementation_autogen.h", @@ -13,6 +15,60 @@ source_set("third_party") { "texture_compressor/texture_compressor_etc1.h", "texture_compressor/texture_compressor.cc", "texture_compressor/texture_compressor.h", + "glslang/glslang/CInterface/glslang_c_interface.cpp", + "glslang/glslang/GenericCodeGen/CodeGen.cpp", + "glslang/glslang/GenericCodeGen/Link.cpp", + "glslang/glslang/MachineIndependent/attribute.cpp", + "glslang/glslang/MachineIndependent/Constant.cpp", + "glslang/glslang/MachineIndependent/glslang_tab.cpp", + "glslang/glslang/MachineIndependent/InfoSink.cpp", + "glslang/glslang/MachineIndependent/Initialize.cpp", + "glslang/glslang/MachineIndependent/Intermediate.cpp", + "glslang/glslang/MachineIndependent/intermOut.cpp", + "glslang/glslang/MachineIndependent/IntermTraverse.cpp", + "glslang/glslang/MachineIndependent/iomapper.cpp", + "glslang/glslang/MachineIndependent/limits.cpp", + "glslang/glslang/MachineIndependent/linkValidate.cpp", + "glslang/glslang/MachineIndependent/parseConst.cpp", + "glslang/glslang/MachineIndependent/ParseContextBase.cpp", + "glslang/glslang/MachineIndependent/ParseHelper.cpp", + "glslang/glslang/MachineIndependent/PoolAlloc.cpp", + "glslang/glslang/MachineIndependent/preprocessor/Pp.cpp", + "glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp", + "glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp", + "glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp", + "glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp", + "glslang/glslang/MachineIndependent/propagateNoContraction.cpp", + "glslang/glslang/MachineIndependent/reflection.cpp", + "glslang/glslang/MachineIndependent/RemoveTree.cpp", + "glslang/glslang/MachineIndependent/Scan.cpp", + 
"glslang/glslang/MachineIndependent/ShaderLang.cpp", + "glslang/glslang/MachineIndependent/SymbolTable.cpp", + "glslang/glslang/MachineIndependent/Versions.cpp", + "glslang/glslang/OSDependent/Unix/ossource.cpp", + "glslang/OGLCompilersDLL/InitializeDll.cpp", + "glslang/SPIRV/CInterface/spirv_c_interface.cpp", + "glslang/SPIRV/disassemble.cpp", + "glslang/SPIRV/doc.cpp", + "glslang/SPIRV/GlslangToSpv.cpp", + "glslang/SPIRV/InReadableOrder.cpp", + "glslang/SPIRV/Logger.cpp", + "glslang/SPIRV/SpvBuilder.cpp", + "glslang/SPIRV/SpvPostProcess.cpp", + "glslang/SPIRV/SPVRemapper.cpp", + "glslang/SPIRV/SpvTools.cpp", + "glslang/StandAlone/ResourceLimits.cpp", + "spirv-reflect/spirv_reflect.c", + "vma/vk_mem_alloc.cpp", + ] + + defines = [ + "VK_USE_PLATFORM_XLIB_KHR", + "VULKAN_NON_CMAKE_BUILD", + "SYSCONFDIR=\"/etc\"", + "FALLBACK_DATA_DIRS=\"/usr/local/share:/usr/share\"", + "FALLBACK_CONFIG_DIRS=\"/etc/xdg\"", + "HAVE_SECURE_GETENV", ] # ldflags = [] @@ -23,11 +79,21 @@ source_set("third_party") { "glew/glew.c", "glew/glew.h", "glew/glxew.h", + "vulkan/loader/cJSON.c", + "vulkan/loader/debug_utils.c", + "vulkan/loader/dev_ext_trampoline.c", + "vulkan/loader/extension_manual.c", + "vulkan/loader/loader.c", + "vulkan/loader/murmurhash.c", + "vulkan/loader/phys_dev_ext.c", + "vulkan/loader/trampoline.c", + "vulkan/loader/unknown_ext_chain.c", + "vulkan/loader/wsi.c", ] # ldflags += [ "-L/usr/X11R6/lib" ] -# libs += [ "X11", "rt", "pthread" ] + libs = [ "dl" ] } if (target_os == "android") { @@ -36,8 +102,6 @@ source_set("third_party") { "android/gl3stub.h", "android/GLContext.cpp", "android/GLContext.h", - "minimp3/minimp3_ex.h", - "minimp3/minimp3.h", "minizip/ioapi.c", "minizip/ioapi.h", "minizip/unzip.c", @@ -155,10 +219,6 @@ source_set("third_party") { "texture_compressor/texture_compressor_etc1_neon.cc", "texture_compressor/texture_compressor_etc1_neon.h", ] - - ldflags += [ "-L/usr/X11R6/lib" ] - - libs += [ "X11", "rt", "pthread" ] } deps = [] diff --git a/src/third_party/glslang/OGLCompilersDLL/InitializeDll.cpp b/src/third_party/glslang/OGLCompilersDLL/InitializeDll.cpp new file mode 100644 index 0000000..abea910 --- /dev/null +++ b/src/third_party/glslang/OGLCompilersDLL/InitializeDll.cpp @@ -0,0 +1,165 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#define SH_EXPORTING + +#include + +#include "InitializeDll.h" +#include "../glslang/Include/InitializeGlobals.h" +#include "../glslang/Public/ShaderLang.h" +#include "../glslang/Include/PoolAlloc.h" + +namespace glslang { + +OS_TLSIndex ThreadInitializeIndex = OS_INVALID_TLS_INDEX; + +// Per-process initialization. +// Needs to be called at least once before parsing, etc. is done. +// Will also do thread initialization for the calling thread; other +// threads will need to do that explicitly. +bool InitProcess() +{ + glslang::GetGlobalLock(); + + if (ThreadInitializeIndex != OS_INVALID_TLS_INDEX) { + // + // Function is re-entrant. + // + + glslang::ReleaseGlobalLock(); + return true; + } + + ThreadInitializeIndex = OS_AllocTLSIndex(); + + if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) { + assert(0 && "InitProcess(): Failed to allocate TLS area for init flag"); + + glslang::ReleaseGlobalLock(); + return false; + } + + if (! InitializePoolIndex()) { + assert(0 && "InitProcess(): Failed to initialize global pool"); + + glslang::ReleaseGlobalLock(); + return false; + } + + if (! InitThread()) { + assert(0 && "InitProcess(): Failed to initialize thread"); + + glslang::ReleaseGlobalLock(); + return false; + } + + glslang::ReleaseGlobalLock(); + return true; +} + +// Per-thread scoped initialization. +// Must be called at least once by each new thread sharing the +// symbol tables, etc., needed to parse. +bool InitThread() +{ + // + // This function is re-entrant + // + if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) { + assert(0 && "InitThread(): Process hasn't been initalised."); + return false; + } + + if (OS_GetTLSValue(ThreadInitializeIndex) != 0) + return true; + + if (! OS_SetTLSValue(ThreadInitializeIndex, (void *)1)) { + assert(0 && "InitThread(): Unable to set init flag."); + return false; + } + + glslang::SetThreadPoolAllocator(nullptr); + + return true; +} + +// Not necessary to call this: InitThread() is reentrant, and the need +// to do per thread tear down has been removed. +// +// This is kept, with memory management removed, to satisfy any exiting +// calls to it that rely on it. +bool DetachThread() +{ + bool success = true; + + if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) + return true; + + // + // Function is re-entrant and this thread may not have been initialized. + // + if (OS_GetTLSValue(ThreadInitializeIndex) != 0) { + if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0)) { + assert(0 && "DetachThread(): Unable to clear init flag."); + success = false; + } + } + + return success; +} + +// Not necessary to call this: InitProcess() is reentrant. +// +// This is kept, with memory management removed, to satisfy any exiting +// calls to it that rely on it. +// +// Users of glslang should call shFinalize() or glslang::FinalizeProcess() for +// process-scoped memory tear down. 
+bool DetachProcess() +{ + bool success = true; + + if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) + return true; + + success = DetachThread(); + + OS_FreeTLSIndex(ThreadInitializeIndex); + ThreadInitializeIndex = OS_INVALID_TLS_INDEX; + + return success; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/OGLCompilersDLL/InitializeDll.h b/src/third_party/glslang/OGLCompilersDLL/InitializeDll.h new file mode 100644 index 0000000..661cee4 --- /dev/null +++ b/src/third_party/glslang/OGLCompilersDLL/InitializeDll.h @@ -0,0 +1,49 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#ifndef __INITIALIZEDLL_H +#define __INITIALIZEDLL_H + +#include "../glslang/OSDependent/osinclude.h" + +namespace glslang { + +bool InitProcess(); +bool InitThread(); +bool DetachThread(); // not called from standalone, perhaps other tools rely on parts of it +bool DetachProcess(); // not called from standalone, perhaps other tools rely on parts of it + +} // end namespace glslang + +#endif // __INITIALIZEDLL_H + diff --git a/src/third_party/glslang/SPIRV/CInterface/spirv_c_interface.cpp b/src/third_party/glslang/SPIRV/CInterface/spirv_c_interface.cpp new file mode 100644 index 0000000..0543b10 --- /dev/null +++ b/src/third_party/glslang/SPIRV/CInterface/spirv_c_interface.cpp @@ -0,0 +1,110 @@ +/** + This code is based on the glslang_c_interface implementation by Viktor Latypov +**/ + +/** +BSD 2-Clause License + +Copyright (c) 2019, Viktor Latypov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +**/ + +#include "../../glslang/Include/glslang_c_interface.h" + +#include "../GlslangToSpv.h" +#include "../Logger.h" +#include "../SpvTools.h" + +typedef struct glslang_program_s { + glslang::TProgram* program; + std::vector spirv; + std::string loggerMessages; +} glslang_program_t; + +static EShLanguage c_shader_stage(glslang_stage_t stage) +{ + switch (stage) { + case GLSLANG_STAGE_VERTEX: + return EShLangVertex; + case GLSLANG_STAGE_TESSCONTROL: + return EShLangTessControl; + case GLSLANG_STAGE_TESSEVALUATION: + return EShLangTessEvaluation; + case GLSLANG_STAGE_GEOMETRY: + return EShLangGeometry; + case GLSLANG_STAGE_FRAGMENT: + return EShLangFragment; + case GLSLANG_STAGE_COMPUTE: + return EShLangCompute; + case GLSLANG_STAGE_RAYGEN_NV: + return EShLangRayGen; + case GLSLANG_STAGE_INTERSECT_NV: + return EShLangIntersect; + case GLSLANG_STAGE_ANYHIT_NV: + return EShLangAnyHit; + case GLSLANG_STAGE_CLOSESTHIT_NV: + return EShLangClosestHit; + case GLSLANG_STAGE_MISS_NV: + return EShLangMiss; + case GLSLANG_STAGE_CALLABLE_NV: + return EShLangCallable; + case GLSLANG_STAGE_TASK_NV: + return EShLangTaskNV; + case GLSLANG_STAGE_MESH_NV: + return EShLangMeshNV; + default: + break; + } + return EShLangCount; +} + +GLSLANG_EXPORT void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage) +{ + spv::SpvBuildLogger logger; + glslang::SpvOptions spvOptions; + spvOptions.validate = true; + + const glslang::TIntermediate* intermediate = program->program->getIntermediate(c_shader_stage(stage)); + + glslang::GlslangToSpv(*intermediate, program->spirv, &logger, &spvOptions); + + program->loggerMessages = logger.getAllMessages(); +} + +GLSLANG_EXPORT size_t glslang_program_SPIRV_get_size(glslang_program_t* program) { return program->spirv.size(); } + +GLSLANG_EXPORT void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int* out) +{ + memcpy(out, program->spirv.data(), program->spirv.size() * sizeof(unsigned int)); +} + +GLSLANG_EXPORT unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program) +{ + return program->spirv.data(); +} + +GLSLANG_EXPORT const char* glslang_program_SPIRV_get_messages(glslang_program_t* program) +{ + return program->loggerMessages.empty() ? nullptr : program->loggerMessages.c_str(); +} diff --git a/src/third_party/glslang/SPIRV/GLSL.ext.AMD.h b/src/third_party/glslang/SPIRV/GLSL.ext.AMD.h new file mode 100644 index 0000000..009d2f1 --- /dev/null +++ b/src/third_party/glslang/SPIRV/GLSL.ext.AMD.h @@ -0,0 +1,108 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLextAMD_H +#define GLSLextAMD_H + +static const int GLSLextAMDVersion = 100; +static const int GLSLextAMDRevision = 7; + +// SPV_AMD_shader_ballot +static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot"; + +enum ShaderBallotAMD { + ShaderBallotBadAMD = 0, // Don't use + + SwizzleInvocationsAMD = 1, + SwizzleInvocationsMaskedAMD = 2, + WriteInvocationAMD = 3, + MbcntAMD = 4, + + ShaderBallotCountAMD +}; + +// SPV_AMD_shader_trinary_minmax +static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax"; + +enum ShaderTrinaryMinMaxAMD { + ShaderTrinaryMinMaxBadAMD = 0, // Don't use + + FMin3AMD = 1, + UMin3AMD = 2, + SMin3AMD = 3, + FMax3AMD = 4, + UMax3AMD = 5, + SMax3AMD = 6, + FMid3AMD = 7, + UMid3AMD = 8, + SMid3AMD = 9, + + ShaderTrinaryMinMaxCountAMD +}; + +// SPV_AMD_shader_explicit_vertex_parameter +static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter"; + +enum ShaderExplicitVertexParameterAMD { + ShaderExplicitVertexParameterBadAMD = 0, // Don't use + + InterpolateAtVertexAMD = 1, + + ShaderExplicitVertexParameterCountAMD +}; + +// SPV_AMD_gcn_shader +static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader"; + +enum GcnShaderAMD { + GcnShaderBadAMD = 0, // Don't use + + CubeFaceIndexAMD = 1, + CubeFaceCoordAMD = 2, + TimeAMD = 3, + + GcnShaderCountAMD +}; + +// SPV_AMD_gpu_shader_half_float +static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float"; + +// SPV_AMD_texture_gather_bias_lod +static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod"; + +// SPV_AMD_gpu_shader_int16 +static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16"; + +// SPV_AMD_shader_image_load_store_lod +static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod"; + +// SPV_AMD_shader_fragment_mask +static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask"; + +// SPV_AMD_gpu_shader_half_float_fetch +static const char* const E_SPV_AMD_gpu_shader_half_float_fetch = 
"SPV_AMD_gpu_shader_half_float_fetch"; + +#endif // #ifndef GLSLextAMD_H diff --git a/src/third_party/glslang/SPIRV/GLSL.ext.EXT.h b/src/third_party/glslang/SPIRV/GLSL.ext.EXT.h new file mode 100644 index 0000000..6eb0eee --- /dev/null +++ b/src/third_party/glslang/SPIRV/GLSL.ext.EXT.h @@ -0,0 +1,40 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLextEXT_H +#define GLSLextEXT_H + +static const int GLSLextEXTVersion = 100; +static const int GLSLextEXTRevision = 2; + +static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export"; +static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer"; +static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered"; +static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density"; +static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation"; +static const char* const E_SPV_EXT_shader_atomic_float_add = "SPV_EXT_shader_atomic_float_add"; + +#endif // #ifndef GLSLextEXT_H diff --git a/src/third_party/glslang/SPIRV/GLSL.ext.KHR.h b/src/third_party/glslang/SPIRV/GLSL.ext.KHR.h new file mode 100644 index 0000000..d783a8f --- /dev/null +++ b/src/third_party/glslang/SPIRV/GLSL.ext.KHR.h @@ -0,0 +1,51 @@ +/* +** Copyright (c) 2014-2020 The Khronos Group Inc. +** Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. 
+** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLextKHR_H +#define GLSLextKHR_H + +static const int GLSLextKHRVersion = 100; +static const int GLSLextKHRRevision = 2; + +static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot"; +static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote"; +static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group"; +static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview"; +static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters"; +static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage"; +static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage"; +static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class"; +static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage"; +static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model"; +static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer"; +static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer"; +static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock"; +static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock"; +static const char* const E_SPV_KHR_non_semantic_info = "SPV_KHR_non_semantic_info"; +static const char* const E_SPV_KHR_ray_tracing = "SPV_KHR_ray_tracing"; +static const char* const E_SPV_KHR_ray_query = "SPV_KHR_ray_query"; +#endif // #ifndef GLSLextKHR_H diff --git a/src/third_party/glslang/SPIRV/GLSL.ext.NV.h b/src/third_party/glslang/SPIRV/GLSL.ext.NV.h new file mode 100644 index 0000000..50146da --- /dev/null +++ b/src/third_party/glslang/SPIRV/GLSL.ext.NV.h @@ -0,0 +1,81 @@ +/* +** Copyright (c) 2014-2017 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. 
THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLextNV_H +#define GLSLextNV_H + +enum BuiltIn; +enum Decoration; +enum Op; +enum Capability; + +static const int GLSLextNVVersion = 100; +static const int GLSLextNVRevision = 11; + +//SPV_NV_sample_mask_override_coverage +const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage"; + +//SPV_NV_geometry_shader_passthrough +const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough"; + +//SPV_NV_viewport_array2 +const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2"; +const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array"; + +//SPV_NV_stereo_view_rendering +const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering"; + +//SPV_NVX_multiview_per_view_attributes +const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes"; + +//SPV_NV_shader_subgroup_partitioned +const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned"; + +//SPV_NV_fragment_shader_barycentric +const char* const E_SPV_NV_fragment_shader_barycentric = "SPV_NV_fragment_shader_barycentric"; + +//SPV_NV_compute_shader_derivatives +const char* const E_SPV_NV_compute_shader_derivatives = "SPV_NV_compute_shader_derivatives"; + +//SPV_NV_shader_image_footprint +const char* const E_SPV_NV_shader_image_footprint = "SPV_NV_shader_image_footprint"; + +//SPV_NV_mesh_shader +const char* const E_SPV_NV_mesh_shader = "SPV_NV_mesh_shader"; + +//SPV_NV_raytracing +const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing"; + +//SPV_NV_shading_rate +const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate"; + +//SPV_NV_cooperative_matrix +const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix"; + +//SPV_NV_shader_sm_builtins +const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins"; + +#endif // #ifndef GLSLextNV_H diff --git a/src/third_party/glslang/SPIRV/GLSL.std.450.h b/src/third_party/glslang/SPIRV/GLSL.std.450.h new file mode 100644 index 0000000..df31092 --- /dev/null +++ b/src/third_party/glslang/SPIRV/GLSL.std.450.h @@ -0,0 +1,131 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. 
+** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLstd450_H +#define GLSLstd450_H + +static const int GLSLstd450Version = 100; +static const int GLSLstd450Revision = 1; + +enum GLSLstd450 { + GLSLstd450Bad = 0, // Don't use + + GLSLstd450Round = 1, + GLSLstd450RoundEven = 2, + GLSLstd450Trunc = 3, + GLSLstd450FAbs = 4, + GLSLstd450SAbs = 5, + GLSLstd450FSign = 6, + GLSLstd450SSign = 7, + GLSLstd450Floor = 8, + GLSLstd450Ceil = 9, + GLSLstd450Fract = 10, + + GLSLstd450Radians = 11, + GLSLstd450Degrees = 12, + GLSLstd450Sin = 13, + GLSLstd450Cos = 14, + GLSLstd450Tan = 15, + GLSLstd450Asin = 16, + GLSLstd450Acos = 17, + GLSLstd450Atan = 18, + GLSLstd450Sinh = 19, + GLSLstd450Cosh = 20, + GLSLstd450Tanh = 21, + GLSLstd450Asinh = 22, + GLSLstd450Acosh = 23, + GLSLstd450Atanh = 24, + GLSLstd450Atan2 = 25, + + GLSLstd450Pow = 26, + GLSLstd450Exp = 27, + GLSLstd450Log = 28, + GLSLstd450Exp2 = 29, + GLSLstd450Log2 = 30, + GLSLstd450Sqrt = 31, + GLSLstd450InverseSqrt = 32, + + GLSLstd450Determinant = 33, + GLSLstd450MatrixInverse = 34, + + GLSLstd450Modf = 35, // second operand needs an OpVariable to write to + GLSLstd450ModfStruct = 36, // no OpVariable operand + GLSLstd450FMin = 37, + GLSLstd450UMin = 38, + GLSLstd450SMin = 39, + GLSLstd450FMax = 40, + GLSLstd450UMax = 41, + GLSLstd450SMax = 42, + GLSLstd450FClamp = 43, + GLSLstd450UClamp = 44, + GLSLstd450SClamp = 45, + GLSLstd450FMix = 46, + GLSLstd450IMix = 47, // Reserved + GLSLstd450Step = 48, + GLSLstd450SmoothStep = 49, + + GLSLstd450Fma = 50, + GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to + GLSLstd450FrexpStruct = 52, // no OpVariable operand + GLSLstd450Ldexp = 53, + + GLSLstd450PackSnorm4x8 = 54, + GLSLstd450PackUnorm4x8 = 55, + GLSLstd450PackSnorm2x16 = 56, + GLSLstd450PackUnorm2x16 = 57, + GLSLstd450PackHalf2x16 = 58, + GLSLstd450PackDouble2x32 = 59, + GLSLstd450UnpackSnorm2x16 = 60, + GLSLstd450UnpackUnorm2x16 = 61, + GLSLstd450UnpackHalf2x16 = 62, + GLSLstd450UnpackSnorm4x8 = 63, + GLSLstd450UnpackUnorm4x8 = 64, + GLSLstd450UnpackDouble2x32 = 65, + + GLSLstd450Length = 66, + GLSLstd450Distance = 67, + GLSLstd450Cross = 68, + GLSLstd450Normalize = 69, + GLSLstd450FaceForward = 70, + GLSLstd450Reflect = 71, + GLSLstd450Refract = 72, + + GLSLstd450FindILsb = 73, + GLSLstd450FindSMsb = 74, + GLSLstd450FindUMsb = 75, + + GLSLstd450InterpolateAtCentroid = 76, + GLSLstd450InterpolateAtSample = 77, + GLSLstd450InterpolateAtOffset = 78, + + GLSLstd450NMin = 79, + GLSLstd450NMax = 80, + GLSLstd450NClamp = 81, + + GLSLstd450Count +}; + +#endif // #ifndef GLSLstd450_H diff --git a/src/third_party/glslang/SPIRV/GlslangToSpv.cpp b/src/third_party/glslang/SPIRV/GlslangToSpv.cpp new file mode 100644 index 0000000..b86793e --- /dev/null +++ b/src/third_party/glslang/SPIRV/GlslangToSpv.cpp @@ -0,0 +1,8784 @@ +// +// Copyright (C) 
2014-2016 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Visit the nodes in the glslang intermediate tree representation to +// translate them to SPIR-V. +// + +#include "spirv.hpp" +#include "GlslangToSpv.h" +#include "SpvBuilder.h" +namespace spv { + #include "GLSL.std.450.h" + #include "GLSL.ext.KHR.h" + #include "GLSL.ext.EXT.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" + #include "NonSemanticDebugPrintf.h" +} + +// Glslang includes +#include "../glslang/MachineIndependent/localintermediate.h" +#include "../glslang/MachineIndependent/SymbolTable.h" +#include "../glslang/Include/Common.h" + +// Build-time generated includes +#include "../glslang/build_info.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace { + +namespace { +class SpecConstantOpModeGuard { +public: + SpecConstantOpModeGuard(spv::Builder* builder) + : builder_(builder) { + previous_flag_ = builder->isInSpecConstCodeGenMode(); + } + ~SpecConstantOpModeGuard() { + previous_flag_ ? 
builder_->setToSpecConstCodeGenMode() + : builder_->setToNormalCodeGenMode(); + } + void turnOnSpecConstantOpMode() { + builder_->setToSpecConstCodeGenMode(); + } + +private: + spv::Builder* builder_; + bool previous_flag_; +}; + +struct OpDecorations { + public: + OpDecorations(spv::Decoration precision, spv::Decoration noContraction, spv::Decoration nonUniform) : + precision(precision) +#ifndef GLSLANG_WEB + , + noContraction(noContraction), + nonUniform(nonUniform) +#endif + { } + + spv::Decoration precision; + +#ifdef GLSLANG_WEB + void addNoContraction(spv::Builder&, spv::Id) const { } + void addNonUniform(spv::Builder&, spv::Id) const { } +#else + void addNoContraction(spv::Builder& builder, spv::Id t) { builder.addDecoration(t, noContraction); } + void addNonUniform(spv::Builder& builder, spv::Id t) { builder.addDecoration(t, nonUniform); } + protected: + spv::Decoration noContraction; + spv::Decoration nonUniform; +#endif + +}; + +} // namespace + +// +// The main holder of information for translating glslang to SPIR-V. +// +// Derives from the AST walking base class. +// +class TGlslangToSpvTraverser : public glslang::TIntermTraverser { +public: + TGlslangToSpvTraverser(unsigned int spvVersion, const glslang::TIntermediate*, spv::SpvBuildLogger* logger, + glslang::SpvOptions& options); + virtual ~TGlslangToSpvTraverser() { } + + bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*); + bool visitBinary(glslang::TVisit, glslang::TIntermBinary*); + void visitConstantUnion(glslang::TIntermConstantUnion*); + bool visitSelection(glslang::TVisit, glslang::TIntermSelection*); + bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch*); + void visitSymbol(glslang::TIntermSymbol* symbol); + bool visitUnary(glslang::TVisit, glslang::TIntermUnary*); + bool visitLoop(glslang::TVisit, glslang::TIntermLoop*); + bool visitBranch(glslang::TVisit visit, glslang::TIntermBranch*); + + void finishSpv(); + void dumpSpv(std::vector& out); + +protected: + TGlslangToSpvTraverser(TGlslangToSpvTraverser&); + TGlslangToSpvTraverser& operator=(TGlslangToSpvTraverser&); + + spv::Decoration TranslateInterpolationDecoration(const glslang::TQualifier& qualifier); + spv::Decoration TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier); + spv::Decoration TranslateNonUniformDecoration(const glslang::TQualifier& qualifier); + spv::Builder::AccessChain::CoherentFlags TranslateCoherent(const glslang::TType& type); + spv::MemoryAccessMask TranslateMemoryAccess(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); + spv::ImageOperandsMask TranslateImageOperands(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); + spv::Scope TranslateMemoryScope(const spv::Builder::AccessChain::CoherentFlags &coherentFlags); + spv::BuiltIn TranslateBuiltInDecoration(glslang::TBuiltInVariable, bool memberDeclaration); + spv::ImageFormat TranslateImageFormat(const glslang::TType& type); + spv::SelectionControlMask TranslateSelectionControl(const glslang::TIntermSelection&) const; + spv::SelectionControlMask TranslateSwitchControl(const glslang::TIntermSwitch&) const; + spv::LoopControlMask TranslateLoopControl(const glslang::TIntermLoop&, std::vector& operands) const; + spv::StorageClass TranslateStorageClass(const glslang::TType&); + void addIndirectionIndexCapabilities(const glslang::TType& baseType, const glslang::TType& indexType); + spv::Id createSpvVariable(const glslang::TIntermSymbol*, spv::Id forcedType); + spv::Id getSampledType(const glslang::TSampler&); + spv::Id 
getInvertedSwizzleType(const glslang::TIntermTyped&); + spv::Id createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped&, spv::Id parentResult); + void convertSwizzle(const glslang::TIntermAggregate&, std::vector& swizzle); + spv::Id convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly = false); + spv::Id convertGlslangToSpvType(const glslang::TType& type, glslang::TLayoutPacking, const glslang::TQualifier&, + bool lastBufferBlockMember, bool forwardReferenceOnly = false); + bool filterMember(const glslang::TType& member); + spv::Id convertGlslangStructToSpvType(const glslang::TType&, const glslang::TTypeList* glslangStruct, + glslang::TLayoutPacking, const glslang::TQualifier&); + void decorateStructType(const glslang::TType&, const glslang::TTypeList* glslangStruct, glslang::TLayoutPacking, + const glslang::TQualifier&, spv::Id); + spv::Id makeArraySizeId(const glslang::TArraySizes&, int dim); + spv::Id accessChainLoad(const glslang::TType& type); + void accessChainStore(const glslang::TType& type, spv::Id rvalue); + void multiTypeStore(const glslang::TType&, spv::Id rValue); + glslang::TLayoutPacking getExplicitLayout(const glslang::TType& type) const; + int getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking, glslang::TLayoutMatrix); + int getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking, glslang::TLayoutMatrix); + void updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, int& currentOffset, + int& nextOffset, glslang::TLayoutPacking, glslang::TLayoutMatrix); + void declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember); + + bool isShaderEntryPoint(const glslang::TIntermAggregate* node); + bool writableParam(glslang::TStorageQualifier) const; + bool originalParam(glslang::TStorageQualifier, const glslang::TType&, bool implicitThisParam); + void makeFunctions(const glslang::TIntermSequence&); + void makeGlobalInitializers(const glslang::TIntermSequence&); + void visitFunctions(const glslang::TIntermSequence&); + void handleFunctionEntry(const glslang::TIntermAggregate* node); + void translateArguments(const glslang::TIntermAggregate& node, std::vector& arguments, + spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags); + void translateArguments(glslang::TIntermUnary& node, std::vector& arguments); + spv::Id createImageTextureFunctionCall(glslang::TIntermOperator* node); + spv::Id handleUserFunctionCall(const glslang::TIntermAggregate*); + + spv::Id createBinaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right, + glslang::TBasicType typeProxy, bool reduceComparison = true); + spv::Id createBinaryMatrixOperation(spv::Op, OpDecorations&, spv::Id typeId, spv::Id left, spv::Id right); + spv::Id createUnaryOperation(glslang::TOperator op, OpDecorations&, spv::Id typeId, spv::Id operand, + glslang::TBasicType typeProxy, + const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags); + spv::Id createUnaryMatrixOperation(spv::Op op, OpDecorations&, spv::Id typeId, spv::Id operand, + glslang::TBasicType typeProxy); + spv::Id createConversion(glslang::TOperator op, OpDecorations&, spv::Id destTypeId, spv::Id operand, + glslang::TBasicType typeProxy); + spv::Id createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize); + spv::Id makeSmearedConstant(spv::Id constant, int vectorSize); + spv::Id createAtomicOperation(glslang::TOperator op, spv::Decoration precision, spv::Id 
typeId, + std::vector& operands, glslang::TBasicType typeProxy, + const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags); + spv::Id createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector& operands, + glslang::TBasicType typeProxy); + spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation, + spv::Id typeId, std::vector& operands); + spv::Id createSubgroupOperation(glslang::TOperator op, spv::Id typeId, std::vector& operands, + glslang::TBasicType typeProxy); + spv::Id createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, + std::vector& operands, glslang::TBasicType typeProxy); + spv::Id createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId); + spv::Id getSymbolId(const glslang::TIntermSymbol* node); + void addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier & qualifier); + spv::Id createSpvConstant(const glslang::TIntermTyped&); + spv::Id createSpvConstantFromConstUnionArray(const glslang::TType& type, const glslang::TConstUnionArray&, + int& nextConst, bool specConstant); + bool isTrivialLeaf(const glslang::TIntermTyped* node); + bool isTrivial(const glslang::TIntermTyped* node); + spv::Id createShortCircuit(glslang::TOperator, glslang::TIntermTyped& left, glslang::TIntermTyped& right); + spv::Id getExtBuiltins(const char* name); + std::pair getForcedType(glslang::TBuiltInVariable builtIn, const glslang::TType&); + spv::Id translateForcedType(spv::Id object); + spv::Id createCompositeConstruct(spv::Id typeId, std::vector constituents); + + glslang::SpvOptions& options; + spv::Function* shaderEntry; + spv::Function* currentFunction; + spv::Instruction* entryPoint; + int sequenceDepth; + + spv::SpvBuildLogger* logger; + + // There is a 1:1 mapping between a spv builder and a module; this is thread safe + spv::Builder builder; + bool inEntryPoint; + bool entryPointTerminated; + bool linkageOnly; // true when visiting the set of objects in the AST present only for + // establishing interface, whether or not they were statically used + std::set iOSet; // all input/output variables from either static use or declaration of interface + const glslang::TIntermediate* glslangIntermediate; + bool nanMinMaxClamp; // true if use NMin/NMax/NClamp instead of FMin/FMax/FClamp + spv::Id stdBuiltins; + spv::Id nonSemanticDebugPrintf; + std::unordered_map extBuiltinMap; + + std::unordered_map symbolValues; + std::unordered_set rValueParameters; // set of formal function parameters passed as rValues, + // rather than a pointer + std::unordered_map functionMap; + std::unordered_map structMap[glslang::ElpCount][glslang::ElmCount]; + // for mapping glslang block indices to spv indices (e.g., due to hidden members): + std::unordered_map> memberRemapper; + // for mapping glslang symbol struct to symbol Id + std::unordered_map glslangTypeToIdMap; + std::stack breakForLoop; // false means break for switch + std::unordered_map counterOriginator; + // Map pointee types for EbtReference to their forward pointers + std::map forwardPointers; + // Type forcing, for when SPIR-V wants a different type than the AST, + // requiring local translation to and from SPIR-V type on every access. + // Maps AST-required-type-id> + std::unordered_map forceType; +}; + +// +// Helper functions for translating glslang representations to SPIR-V enumerants. +// + +// Translate glslang profile to SPIR-V source language. 
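// A compact view of the mapping implemented below (a sketch for reference;
// assumes the front end recorded the source language and profile normally):
//     EShSourceGlsl + core/compatibility/no profile -> spv::SourceLanguageGLSL
//     EShSourceGlsl + EEsProfile                    -> spv::SourceLanguageESSL
//     EShSourceHlsl                                 -> spv::SourceLanguageHLSL
// The chosen value feeds the module's OpSource instruction via
// builder.setSource() in the traverser constructor further down.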
+spv::SourceLanguage TranslateSourceLanguage(glslang::EShSource source, EProfile profile) +{ +#ifdef GLSLANG_WEB + return spv::SourceLanguageESSL; +#elif defined(GLSLANG_ANGLE) + return spv::SourceLanguageGLSL; +#endif + + switch (source) { + case glslang::EShSourceGlsl: + switch (profile) { + case ENoProfile: + case ECoreProfile: + case ECompatibilityProfile: + return spv::SourceLanguageGLSL; + case EEsProfile: + return spv::SourceLanguageESSL; + default: + return spv::SourceLanguageUnknown; + } + case glslang::EShSourceHlsl: + return spv::SourceLanguageHLSL; + default: + return spv::SourceLanguageUnknown; + } +} + +// Translate glslang language (stage) to SPIR-V execution model. +spv::ExecutionModel TranslateExecutionModel(EShLanguage stage) +{ + switch (stage) { + case EShLangVertex: return spv::ExecutionModelVertex; + case EShLangFragment: return spv::ExecutionModelFragment; + case EShLangCompute: return spv::ExecutionModelGLCompute; +#ifndef GLSLANG_WEB + case EShLangTessControl: return spv::ExecutionModelTessellationControl; + case EShLangTessEvaluation: return spv::ExecutionModelTessellationEvaluation; + case EShLangGeometry: return spv::ExecutionModelGeometry; + case EShLangRayGen: return spv::ExecutionModelRayGenerationKHR; + case EShLangIntersect: return spv::ExecutionModelIntersectionKHR; + case EShLangAnyHit: return spv::ExecutionModelAnyHitKHR; + case EShLangClosestHit: return spv::ExecutionModelClosestHitKHR; + case EShLangMiss: return spv::ExecutionModelMissKHR; + case EShLangCallable: return spv::ExecutionModelCallableKHR; + case EShLangTaskNV: return spv::ExecutionModelTaskNV; + case EShLangMeshNV: return spv::ExecutionModelMeshNV; +#endif + default: + assert(0); + return spv::ExecutionModelFragment; + } +} + +// Translate glslang sampler type to SPIR-V dimensionality. +spv::Dim TranslateDimensionality(const glslang::TSampler& sampler) +{ + switch (sampler.dim) { + case glslang::Esd1D: return spv::Dim1D; + case glslang::Esd2D: return spv::Dim2D; + case glslang::Esd3D: return spv::Dim3D; + case glslang::EsdCube: return spv::DimCube; + case glslang::EsdRect: return spv::DimRect; + case glslang::EsdBuffer: return spv::DimBuffer; + case glslang::EsdSubpass: return spv::DimSubpassData; + default: + assert(0); + return spv::Dim2D; + } +} + +// Translate glslang precision to SPIR-V precision decorations. +spv::Decoration TranslatePrecisionDecoration(glslang::TPrecisionQualifier glslangPrecision) +{ + switch (glslangPrecision) { + case glslang::EpqLow: return spv::DecorationRelaxedPrecision; + case glslang::EpqMedium: return spv::DecorationRelaxedPrecision; + default: + return spv::NoPrecision; + } +} + +// Translate glslang type to SPIR-V precision decorations. +spv::Decoration TranslatePrecisionDecoration(const glslang::TType& type) +{ + return TranslatePrecisionDecoration(type.getQualifier().precision); +} + +// Translate glslang type to SPIR-V block decorations. +spv::Decoration TranslateBlockDecoration(const glslang::TType& type, bool useStorageBuffer) +{ + if (type.getBasicType() == glslang::EbtBlock) { + switch (type.getQualifier().storage) { + case glslang::EvqUniform: return spv::DecorationBlock; + case glslang::EvqBuffer: return useStorageBuffer ? 
spv::DecorationBlock : spv::DecorationBufferBlock; + case glslang::EvqVaryingIn: return spv::DecorationBlock; + case glslang::EvqVaryingOut: return spv::DecorationBlock; +#ifndef GLSLANG_WEB + case glslang::EvqPayload: return spv::DecorationBlock; + case glslang::EvqPayloadIn: return spv::DecorationBlock; + case glslang::EvqHitAttr: return spv::DecorationBlock; + case glslang::EvqCallableData: return spv::DecorationBlock; + case glslang::EvqCallableDataIn: return spv::DecorationBlock; +#endif + default: + assert(0); + break; + } + } + + return spv::DecorationMax; +} + +// Translate glslang type to SPIR-V memory decorations. +void TranslateMemoryDecoration(const glslang::TQualifier& qualifier, std::vector& memory, + bool useVulkanMemoryModel) +{ + if (!useVulkanMemoryModel) { + if (qualifier.isCoherent()) + memory.push_back(spv::DecorationCoherent); + if (qualifier.isVolatile()) { + memory.push_back(spv::DecorationVolatile); + memory.push_back(spv::DecorationCoherent); + } + } + if (qualifier.isRestrict()) + memory.push_back(spv::DecorationRestrict); + if (qualifier.isReadOnly()) + memory.push_back(spv::DecorationNonWritable); + if (qualifier.isWriteOnly()) + memory.push_back(spv::DecorationNonReadable); +} + +// Translate glslang type to SPIR-V layout decorations. +spv::Decoration TranslateLayoutDecoration(const glslang::TType& type, glslang::TLayoutMatrix matrixLayout) +{ + if (type.isMatrix()) { + switch (matrixLayout) { + case glslang::ElmRowMajor: + return spv::DecorationRowMajor; + case glslang::ElmColumnMajor: + return spv::DecorationColMajor; + default: + // opaque layouts don't need a majorness + return spv::DecorationMax; + } + } else { + switch (type.getBasicType()) { + default: + return spv::DecorationMax; + break; + case glslang::EbtBlock: + switch (type.getQualifier().storage) { + case glslang::EvqUniform: + case glslang::EvqBuffer: + switch (type.getQualifier().layoutPacking) { + case glslang::ElpShared: return spv::DecorationGLSLShared; + case glslang::ElpPacked: return spv::DecorationGLSLPacked; + default: + return spv::DecorationMax; + } + case glslang::EvqVaryingIn: + case glslang::EvqVaryingOut: + if (type.getQualifier().isTaskMemory()) { + switch (type.getQualifier().layoutPacking) { + case glslang::ElpShared: return spv::DecorationGLSLShared; + case glslang::ElpPacked: return spv::DecorationGLSLPacked; + default: break; + } + } else { + assert(type.getQualifier().layoutPacking == glslang::ElpNone); + } + return spv::DecorationMax; +#ifndef GLSLANG_WEB + case glslang::EvqPayload: + case glslang::EvqPayloadIn: + case glslang::EvqHitAttr: + case glslang::EvqCallableData: + case glslang::EvqCallableDataIn: + return spv::DecorationMax; +#endif + default: + assert(0); + return spv::DecorationMax; + } + } + } +} + +// Translate glslang type to SPIR-V interpolation decorations. +// Returns spv::DecorationMax when no decoration +// should be applied. +spv::Decoration TGlslangToSpvTraverser::TranslateInterpolationDecoration(const glslang::TQualifier& qualifier) +{ + if (qualifier.smooth) + // Smooth decoration doesn't exist in SPIR-V 1.0 + return spv::DecorationMax; + else if (qualifier.isNonPerspective()) + return spv::DecorationNoPerspective; + else if (qualifier.flat) + return spv::DecorationFlat; + else if (qualifier.isExplicitInterpolation()) { + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::DecorationExplicitInterpAMD; + } + else + return spv::DecorationMax; +} + +// Translate glslang type to SPIR-V auxiliary storage decorations. 
+// Returns spv::DecorationMax when no decoration +// should be applied. +spv::Decoration TGlslangToSpvTraverser::TranslateAuxiliaryStorageDecoration(const glslang::TQualifier& qualifier) +{ + if (qualifier.centroid) + return spv::DecorationCentroid; +#ifndef GLSLANG_WEB + else if (qualifier.patch) + return spv::DecorationPatch; + else if (qualifier.sample) { + builder.addCapability(spv::CapabilitySampleRateShading); + return spv::DecorationSample; + } +#endif + + return spv::DecorationMax; +} + +// If glslang type is invariant, return SPIR-V invariant decoration. +spv::Decoration TranslateInvariantDecoration(const glslang::TQualifier& qualifier) +{ + if (qualifier.invariant) + return spv::DecorationInvariant; + else + return spv::DecorationMax; +} + +// If glslang type is noContraction, return SPIR-V NoContraction decoration. +spv::Decoration TranslateNoContractionDecoration(const glslang::TQualifier& qualifier) +{ +#ifndef GLSLANG_WEB + if (qualifier.isNoContraction()) + return spv::DecorationNoContraction; + else +#endif + return spv::DecorationMax; +} + +// If glslang type is nonUniform, return SPIR-V NonUniform decoration. +spv::Decoration TGlslangToSpvTraverser::TranslateNonUniformDecoration(const glslang::TQualifier& qualifier) +{ +#ifndef GLSLANG_WEB + if (qualifier.isNonUniform()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderNonUniformEXT); + return spv::DecorationNonUniformEXT; + } else +#endif + return spv::DecorationMax; +} + +spv::MemoryAccessMask TGlslangToSpvTraverser::TranslateMemoryAccess( + const spv::Builder::AccessChain::CoherentFlags &coherentFlags) +{ + spv::MemoryAccessMask mask = spv::MemoryAccessMaskNone; + +#ifndef GLSLANG_WEB + if (!glslangIntermediate->usingVulkanMemoryModel() || coherentFlags.isImage) + return mask; + + if (coherentFlags.isVolatile() || coherentFlags.anyCoherent()) { + mask = mask | spv::MemoryAccessMakePointerAvailableKHRMask | + spv::MemoryAccessMakePointerVisibleKHRMask; + } + + if (coherentFlags.nonprivate) { + mask = mask | spv::MemoryAccessNonPrivatePointerKHRMask; + } + if (coherentFlags.volatil) { + mask = mask | spv::MemoryAccessVolatileMask; + } + if (mask != spv::MemoryAccessMaskNone) { + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } +#endif + + return mask; +} + +spv::ImageOperandsMask TGlslangToSpvTraverser::TranslateImageOperands( + const spv::Builder::AccessChain::CoherentFlags &coherentFlags) +{ + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + +#ifndef GLSLANG_WEB + if (!glslangIntermediate->usingVulkanMemoryModel()) + return mask; + + if (coherentFlags.volatil || + coherentFlags.anyCoherent()) { + mask = mask | spv::ImageOperandsMakeTexelAvailableKHRMask | + spv::ImageOperandsMakeTexelVisibleKHRMask; + } + if (coherentFlags.nonprivate) { + mask = mask | spv::ImageOperandsNonPrivateTexelKHRMask; + } + if (coherentFlags.volatil) { + mask = mask | spv::ImageOperandsVolatileTexelKHRMask; + } + if (mask != spv::ImageOperandsMaskNone) { + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } +#endif + + return mask; +} + +spv::Builder::AccessChain::CoherentFlags TGlslangToSpvTraverser::TranslateCoherent(const glslang::TType& type) +{ + spv::Builder::AccessChain::CoherentFlags flags = {}; +#ifndef GLSLANG_WEB + flags.coherent = type.getQualifier().coherent; + flags.devicecoherent = type.getQualifier().devicecoherent; + flags.queuefamilycoherent = type.getQualifier().queuefamilycoherent; + // shared variables are 
implicitly workgroupcoherent in GLSL. + flags.workgroupcoherent = type.getQualifier().workgroupcoherent || + type.getQualifier().storage == glslang::EvqShared; + flags.subgroupcoherent = type.getQualifier().subgroupcoherent; + flags.shadercallcoherent = type.getQualifier().shadercallcoherent; + flags.volatil = type.getQualifier().volatil; + // *coherent variables are implicitly nonprivate in GLSL + flags.nonprivate = type.getQualifier().nonprivate || + flags.anyCoherent() || + flags.volatil; + flags.isImage = type.getBasicType() == glslang::EbtSampler; +#endif + return flags; +} + +spv::Scope TGlslangToSpvTraverser::TranslateMemoryScope( + const spv::Builder::AccessChain::CoherentFlags &coherentFlags) +{ + spv::Scope scope = spv::ScopeMax; + +#ifndef GLSLANG_WEB + if (coherentFlags.volatil || coherentFlags.coherent) { + // coherent defaults to Device scope in the old model, QueueFamilyKHR scope in the new model + scope = glslangIntermediate->usingVulkanMemoryModel() ? spv::ScopeQueueFamilyKHR : spv::ScopeDevice; + } else if (coherentFlags.devicecoherent) { + scope = spv::ScopeDevice; + } else if (coherentFlags.queuefamilycoherent) { + scope = spv::ScopeQueueFamilyKHR; + } else if (coherentFlags.workgroupcoherent) { + scope = spv::ScopeWorkgroup; + } else if (coherentFlags.subgroupcoherent) { + scope = spv::ScopeSubgroup; + } else if (coherentFlags.shadercallcoherent) { + scope = spv::ScopeShaderCallKHR; + } + if (glslangIntermediate->usingVulkanMemoryModel() && scope == spv::ScopeDevice) { + builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR); + } +#endif + + return scope; +} + +// Translate a glslang built-in variable to a SPIR-V built in decoration. Also generate +// associated capabilities when required. For some built-in variables, a capability +// is generated only when using the variable in an executable instruction, but not when +// just declaring a struct member variable with it. This is true for PointSize, +// ClipDistance, and CullDistance. +spv::BuiltIn TGlslangToSpvTraverser::TranslateBuiltInDecoration(glslang::TBuiltInVariable builtIn, + bool memberDeclaration) +{ + switch (builtIn) { + case glslang::EbvPointSize: +#ifndef GLSLANG_WEB + // Defer adding the capability until the built-in is actually used. + if (! 
memberDeclaration) { + switch (glslangIntermediate->getStage()) { + case EShLangGeometry: + builder.addCapability(spv::CapabilityGeometryPointSize); + break; + case EShLangTessControl: + case EShLangTessEvaluation: + builder.addCapability(spv::CapabilityTessellationPointSize); + break; + default: + break; + } + } +#endif + return spv::BuiltInPointSize; + + case glslang::EbvPosition: return spv::BuiltInPosition; + case glslang::EbvVertexId: return spv::BuiltInVertexId; + case glslang::EbvInstanceId: return spv::BuiltInInstanceId; + case glslang::EbvVertexIndex: return spv::BuiltInVertexIndex; + case glslang::EbvInstanceIndex: return spv::BuiltInInstanceIndex; + + case glslang::EbvFragCoord: return spv::BuiltInFragCoord; + case glslang::EbvPointCoord: return spv::BuiltInPointCoord; + case glslang::EbvFace: return spv::BuiltInFrontFacing; + case glslang::EbvFragDepth: return spv::BuiltInFragDepth; + + case glslang::EbvNumWorkGroups: return spv::BuiltInNumWorkgroups; + case glslang::EbvWorkGroupSize: return spv::BuiltInWorkgroupSize; + case glslang::EbvWorkGroupId: return spv::BuiltInWorkgroupId; + case glslang::EbvLocalInvocationId: return spv::BuiltInLocalInvocationId; + case glslang::EbvLocalInvocationIndex: return spv::BuiltInLocalInvocationIndex; + case glslang::EbvGlobalInvocationId: return spv::BuiltInGlobalInvocationId; + +#ifndef GLSLANG_WEB + // These *Distance capabilities logically belong here, but if the member is declared and + // then never used, consumers of SPIR-V prefer the capability not be declared. + // They are now generated when used, rather than here when declared. + // Potentially, the specification should be more clear what the minimum + // use needed is to trigger the capability. + // + case glslang::EbvClipDistance: + if (!memberDeclaration) + builder.addCapability(spv::CapabilityClipDistance); + return spv::BuiltInClipDistance; + + case glslang::EbvCullDistance: + if (!memberDeclaration) + builder.addCapability(spv::CapabilityCullDistance); + return spv::BuiltInCullDistance; + + case glslang::EbvViewportIndex: + builder.addCapability(spv::CapabilityMultiViewport); + if (glslangIntermediate->getStage() == EShLangVertex || + glslangIntermediate->getStage() == EShLangTessControl || + glslangIntermediate->getStage() == EShLangTessEvaluation) { + + builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + } + return spv::BuiltInViewportIndex; + + case glslang::EbvSampleId: + builder.addCapability(spv::CapabilitySampleRateShading); + return spv::BuiltInSampleId; + + case glslang::EbvSamplePosition: + builder.addCapability(spv::CapabilitySampleRateShading); + return spv::BuiltInSamplePosition; + + case glslang::EbvSampleMask: + return spv::BuiltInSampleMask; + + case glslang::EbvLayer: + if (glslangIntermediate->getStage() == EShLangMeshNV) { + return spv::BuiltInLayer; + } + builder.addCapability(spv::CapabilityGeometry); + if (glslangIntermediate->getStage() == EShLangVertex || + glslangIntermediate->getStage() == EShLangTessControl || + glslangIntermediate->getStage() == EShLangTessEvaluation) { + + builder.addIncorporatedExtension(spv::E_SPV_EXT_shader_viewport_index_layer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityShaderViewportIndexLayerEXT); + } + return spv::BuiltInLayer; + + case glslang::EbvBaseVertex: + builder.addIncorporatedExtension(spv::E_SPV_KHR_shader_draw_parameters, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDrawParameters); + 
return spv::BuiltInBaseVertex; + + case glslang::EbvBaseInstance: + builder.addIncorporatedExtension(spv::E_SPV_KHR_shader_draw_parameters, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDrawParameters); + return spv::BuiltInBaseInstance; + + case glslang::EbvDrawId: + builder.addIncorporatedExtension(spv::E_SPV_KHR_shader_draw_parameters, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDrawParameters); + return spv::BuiltInDrawIndex; + + case glslang::EbvPrimitiveId: + if (glslangIntermediate->getStage() == EShLangFragment) + builder.addCapability(spv::CapabilityGeometry); + return spv::BuiltInPrimitiveId; + + case glslang::EbvFragStencilRef: + builder.addExtension(spv::E_SPV_EXT_shader_stencil_export); + builder.addCapability(spv::CapabilityStencilExportEXT); + return spv::BuiltInFragStencilRefEXT; + + case glslang::EbvInvocationId: return spv::BuiltInInvocationId; + case glslang::EbvTessLevelInner: return spv::BuiltInTessLevelInner; + case glslang::EbvTessLevelOuter: return spv::BuiltInTessLevelOuter; + case glslang::EbvTessCoord: return spv::BuiltInTessCoord; + case glslang::EbvPatchVertices: return spv::BuiltInPatchVertices; + case glslang::EbvHelperInvocation: return spv::BuiltInHelperInvocation; + + case glslang::EbvSubGroupSize: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupSize; + + case glslang::EbvSubGroupInvocation: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupLocalInvocationId; + + case glslang::EbvSubGroupEqMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupEqMask; + + case glslang::EbvSubGroupGeMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupGeMask; + + case glslang::EbvSubGroupGtMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupGtMask; + + case glslang::EbvSubGroupLeMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupLeMask; + + case glslang::EbvSubGroupLtMask: + builder.addExtension(spv::E_SPV_KHR_shader_ballot); + builder.addCapability(spv::CapabilitySubgroupBallotKHR); + return spv::BuiltInSubgroupLtMask; + + case glslang::EbvNumSubgroups: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInNumSubgroups; + + case glslang::EbvSubgroupID: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInSubgroupId; + + case glslang::EbvSubgroupSize2: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInSubgroupSize; + + case glslang::EbvSubgroupInvocation2: + builder.addCapability(spv::CapabilityGroupNonUniform); + return spv::BuiltInSubgroupLocalInvocationId; + + case glslang::EbvSubgroupEqMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupEqMask; + + case glslang::EbvSubgroupGeMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupGeMask; + + case glslang::EbvSubgroupGtMask2: + 
builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupGtMask; + + case glslang::EbvSubgroupLeMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupLeMask; + + case glslang::EbvSubgroupLtMask2: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + return spv::BuiltInSubgroupLtMask; + + case glslang::EbvBaryCoordNoPersp: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordNoPerspAMD; + + case glslang::EbvBaryCoordNoPerspCentroid: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordNoPerspCentroidAMD; + + case glslang::EbvBaryCoordNoPerspSample: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordNoPerspSampleAMD; + + case glslang::EbvBaryCoordSmooth: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordSmoothAMD; + + case glslang::EbvBaryCoordSmoothCentroid: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordSmoothCentroidAMD; + + case glslang::EbvBaryCoordSmoothSample: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordSmoothSampleAMD; + + case glslang::EbvBaryCoordPullModel: + builder.addExtension(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + return spv::BuiltInBaryCoordPullModelAMD; + + case glslang::EbvDeviceIndex: + builder.addIncorporatedExtension(spv::E_SPV_KHR_device_group, spv::Spv_1_3); + builder.addCapability(spv::CapabilityDeviceGroup); + return spv::BuiltInDeviceIndex; + + case glslang::EbvViewIndex: + builder.addIncorporatedExtension(spv::E_SPV_KHR_multiview, spv::Spv_1_3); + builder.addCapability(spv::CapabilityMultiView); + return spv::BuiltInViewIndex; + + case glslang::EbvFragSizeEXT: + builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density); + builder.addCapability(spv::CapabilityFragmentDensityEXT); + return spv::BuiltInFragSizeEXT; + + case glslang::EbvFragInvocationCountEXT: + builder.addExtension(spv::E_SPV_EXT_fragment_invocation_density); + builder.addCapability(spv::CapabilityFragmentDensityEXT); + return spv::BuiltInFragInvocationCountEXT; + + case glslang::EbvViewportMaskNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NV_viewport_array2); + builder.addCapability(spv::CapabilityShaderViewportMaskNV); + } + return spv::BuiltInViewportMaskNV; + case glslang::EbvSecondaryPositionNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + } + return spv::BuiltInSecondaryPositionNV; + case glslang::EbvSecondaryViewportMaskNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + } + return spv::BuiltInSecondaryViewportMaskNV; + case glslang::EbvPositionPerViewNV: + if (!memberDeclaration) { + builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes); + builder.addCapability(spv::CapabilityPerViewAttributesNV); + } + return spv::BuiltInPositionPerViewNV; + case glslang::EbvViewportMaskPerViewNV: + if (!memberDeclaration) { + 
builder.addExtension(spv::E_SPV_NVX_multiview_per_view_attributes); + builder.addCapability(spv::CapabilityPerViewAttributesNV); + } + return spv::BuiltInViewportMaskPerViewNV; + case glslang::EbvFragFullyCoveredNV: + builder.addExtension(spv::E_SPV_EXT_fragment_fully_covered); + builder.addCapability(spv::CapabilityFragmentFullyCoveredEXT); + return spv::BuiltInFullyCoveredEXT; + case glslang::EbvFragmentSizeNV: + builder.addExtension(spv::E_SPV_NV_shading_rate); + builder.addCapability(spv::CapabilityShadingRateNV); + return spv::BuiltInFragmentSizeNV; + case glslang::EbvInvocationsPerPixelNV: + builder.addExtension(spv::E_SPV_NV_shading_rate); + builder.addCapability(spv::CapabilityShadingRateNV); + return spv::BuiltInInvocationsPerPixelNV; + + // ray tracing + case glslang::EbvLaunchId: + return spv::BuiltInLaunchIdKHR; + case glslang::EbvLaunchSize: + return spv::BuiltInLaunchSizeKHR; + case glslang::EbvWorldRayOrigin: + return spv::BuiltInWorldRayOriginKHR; + case glslang::EbvWorldRayDirection: + return spv::BuiltInWorldRayDirectionKHR; + case glslang::EbvObjectRayOrigin: + return spv::BuiltInObjectRayOriginKHR; + case glslang::EbvObjectRayDirection: + return spv::BuiltInObjectRayDirectionKHR; + case glslang::EbvRayTmin: + return spv::BuiltInRayTminKHR; + case glslang::EbvRayTmax: + return spv::BuiltInRayTmaxKHR; + case glslang::EbvInstanceCustomIndex: + return spv::BuiltInInstanceCustomIndexKHR; + case glslang::EbvHitT: + return spv::BuiltInHitTKHR; + case glslang::EbvHitKind: + return spv::BuiltInHitKindKHR; + case glslang::EbvObjectToWorld: + case glslang::EbvObjectToWorld3x4: + return spv::BuiltInObjectToWorldKHR; + case glslang::EbvWorldToObject: + case glslang::EbvWorldToObject3x4: + return spv::BuiltInWorldToObjectKHR; + case glslang::EbvIncomingRayFlags: + return spv::BuiltInIncomingRayFlagsKHR; + case glslang::EbvGeometryIndex: + return spv::BuiltInRayGeometryIndexKHR; + + // barycentrics + case glslang::EbvBaryCoordNV: + builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric); + builder.addCapability(spv::CapabilityFragmentBarycentricNV); + return spv::BuiltInBaryCoordNV; + case glslang::EbvBaryCoordNoPerspNV: + builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric); + builder.addCapability(spv::CapabilityFragmentBarycentricNV); + return spv::BuiltInBaryCoordNoPerspNV; + + // mesh shaders + case glslang::EbvTaskCountNV: + return spv::BuiltInTaskCountNV; + case glslang::EbvPrimitiveCountNV: + return spv::BuiltInPrimitiveCountNV; + case glslang::EbvPrimitiveIndicesNV: + return spv::BuiltInPrimitiveIndicesNV; + case glslang::EbvClipDistancePerViewNV: + return spv::BuiltInClipDistancePerViewNV; + case glslang::EbvCullDistancePerViewNV: + return spv::BuiltInCullDistancePerViewNV; + case glslang::EbvLayerPerViewNV: + return spv::BuiltInLayerPerViewNV; + case glslang::EbvMeshViewCountNV: + return spv::BuiltInMeshViewCountNV; + case glslang::EbvMeshViewIndicesNV: + return spv::BuiltInMeshViewIndicesNV; + + // sm builtins + case glslang::EbvWarpsPerSM: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInWarpsPerSMNV; + case glslang::EbvSMCount: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInSMCountNV; + case glslang::EbvWarpID: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInWarpIDNV; + case 
glslang::EbvSMID: + builder.addExtension(spv::E_SPV_NV_shader_sm_builtins); + builder.addCapability(spv::CapabilityShaderSMBuiltinsNV); + return spv::BuiltInSMIDNV; +#endif + + default: + return spv::BuiltInMax; + } +} + +// Translate glslang image layout format to SPIR-V image format. +spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TType& type) +{ + assert(type.getBasicType() == glslang::EbtSampler); + +#ifdef GLSLANG_WEB + return spv::ImageFormatUnknown; +#endif + + // Check for capabilities + switch (type.getQualifier().getFormat()) { + case glslang::ElfRg32f: + case glslang::ElfRg16f: + case glslang::ElfR11fG11fB10f: + case glslang::ElfR16f: + case glslang::ElfRgba16: + case glslang::ElfRgb10A2: + case glslang::ElfRg16: + case glslang::ElfRg8: + case glslang::ElfR16: + case glslang::ElfR8: + case glslang::ElfRgba16Snorm: + case glslang::ElfRg16Snorm: + case glslang::ElfRg8Snorm: + case glslang::ElfR16Snorm: + case glslang::ElfR8Snorm: + + case glslang::ElfRg32i: + case glslang::ElfRg16i: + case glslang::ElfRg8i: + case glslang::ElfR16i: + case glslang::ElfR8i: + + case glslang::ElfRgb10a2ui: + case glslang::ElfRg32ui: + case glslang::ElfRg16ui: + case glslang::ElfRg8ui: + case glslang::ElfR16ui: + case glslang::ElfR8ui: + builder.addCapability(spv::CapabilityStorageImageExtendedFormats); + break; + + default: + break; + } + + // do the translation + switch (type.getQualifier().getFormat()) { + case glslang::ElfNone: return spv::ImageFormatUnknown; + case glslang::ElfRgba32f: return spv::ImageFormatRgba32f; + case glslang::ElfRgba16f: return spv::ImageFormatRgba16f; + case glslang::ElfR32f: return spv::ImageFormatR32f; + case glslang::ElfRgba8: return spv::ImageFormatRgba8; + case glslang::ElfRgba8Snorm: return spv::ImageFormatRgba8Snorm; + case glslang::ElfRg32f: return spv::ImageFormatRg32f; + case glslang::ElfRg16f: return spv::ImageFormatRg16f; + case glslang::ElfR11fG11fB10f: return spv::ImageFormatR11fG11fB10f; + case glslang::ElfR16f: return spv::ImageFormatR16f; + case glslang::ElfRgba16: return spv::ImageFormatRgba16; + case glslang::ElfRgb10A2: return spv::ImageFormatRgb10A2; + case glslang::ElfRg16: return spv::ImageFormatRg16; + case glslang::ElfRg8: return spv::ImageFormatRg8; + case glslang::ElfR16: return spv::ImageFormatR16; + case glslang::ElfR8: return spv::ImageFormatR8; + case glslang::ElfRgba16Snorm: return spv::ImageFormatRgba16Snorm; + case glslang::ElfRg16Snorm: return spv::ImageFormatRg16Snorm; + case glslang::ElfRg8Snorm: return spv::ImageFormatRg8Snorm; + case glslang::ElfR16Snorm: return spv::ImageFormatR16Snorm; + case glslang::ElfR8Snorm: return spv::ImageFormatR8Snorm; + case glslang::ElfRgba32i: return spv::ImageFormatRgba32i; + case glslang::ElfRgba16i: return spv::ImageFormatRgba16i; + case glslang::ElfRgba8i: return spv::ImageFormatRgba8i; + case glslang::ElfR32i: return spv::ImageFormatR32i; + case glslang::ElfRg32i: return spv::ImageFormatRg32i; + case glslang::ElfRg16i: return spv::ImageFormatRg16i; + case glslang::ElfRg8i: return spv::ImageFormatRg8i; + case glslang::ElfR16i: return spv::ImageFormatR16i; + case glslang::ElfR8i: return spv::ImageFormatR8i; + case glslang::ElfRgba32ui: return spv::ImageFormatRgba32ui; + case glslang::ElfRgba16ui: return spv::ImageFormatRgba16ui; + case glslang::ElfRgba8ui: return spv::ImageFormatRgba8ui; + case glslang::ElfR32ui: return spv::ImageFormatR32ui; + case glslang::ElfRg32ui: return spv::ImageFormatRg32ui; + case glslang::ElfRg16ui: return spv::ImageFormatRg16ui; + case 
glslang::ElfRgb10a2ui: return spv::ImageFormatRgb10a2ui; + case glslang::ElfRg8ui: return spv::ImageFormatRg8ui; + case glslang::ElfR16ui: return spv::ImageFormatR16ui; + case glslang::ElfR8ui: return spv::ImageFormatR8ui; + default: return spv::ImageFormatMax; + } +} + +spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSelectionControl( + const glslang::TIntermSelection& selectionNode) const +{ + if (selectionNode.getFlatten()) + return spv::SelectionControlFlattenMask; + if (selectionNode.getDontFlatten()) + return spv::SelectionControlDontFlattenMask; + return spv::SelectionControlMaskNone; +} + +spv::SelectionControlMask TGlslangToSpvTraverser::TranslateSwitchControl(const glslang::TIntermSwitch& switchNode) + const +{ + if (switchNode.getFlatten()) + return spv::SelectionControlFlattenMask; + if (switchNode.getDontFlatten()) + return spv::SelectionControlDontFlattenMask; + return spv::SelectionControlMaskNone; +} + +// return a non-0 dependency if the dependency argument must be set +spv::LoopControlMask TGlslangToSpvTraverser::TranslateLoopControl(const glslang::TIntermLoop& loopNode, + std::vector& operands) const +{ + spv::LoopControlMask control = spv::LoopControlMaskNone; + + if (loopNode.getDontUnroll()) + control = control | spv::LoopControlDontUnrollMask; + if (loopNode.getUnroll()) + control = control | spv::LoopControlUnrollMask; + if (unsigned(loopNode.getLoopDependency()) == glslang::TIntermLoop::dependencyInfinite) + control = control | spv::LoopControlDependencyInfiniteMask; + else if (loopNode.getLoopDependency() > 0) { + control = control | spv::LoopControlDependencyLengthMask; + operands.push_back((unsigned int)loopNode.getLoopDependency()); + } + if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) { + if (loopNode.getMinIterations() > 0) { + control = control | spv::LoopControlMinIterationsMask; + operands.push_back(loopNode.getMinIterations()); + } + if (loopNode.getMaxIterations() < glslang::TIntermLoop::iterationsInfinite) { + control = control | spv::LoopControlMaxIterationsMask; + operands.push_back(loopNode.getMaxIterations()); + } + if (loopNode.getIterationMultiple() > 1) { + control = control | spv::LoopControlIterationMultipleMask; + operands.push_back(loopNode.getIterationMultiple()); + } + if (loopNode.getPeelCount() > 0) { + control = control | spv::LoopControlPeelCountMask; + operands.push_back(loopNode.getPeelCount()); + } + if (loopNode.getPartialCount() > 0) { + control = control | spv::LoopControlPartialCountMask; + operands.push_back(loopNode.getPartialCount()); + } + } + + return control; +} + +// Translate glslang type to SPIR-V storage class. 
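// A few concrete cases of the mapping below, as a sketch (GLSL-facing view;
// which class a `buffer` block gets depends on whether the storage-buffer
// storage class is in use):
//     uniform UBO { ... }             -> StorageClassUniform
//     buffer SSBO { ... }             -> StorageClassStorageBuffer, or
//                                        StorageClassUniform (with the
//                                        BufferBlock decoration) otherwise
//     layout(push_constant) uniform   -> StorageClassPushConstant
//     module-scope non-opaque global  -> StorageClassPrivate
//     function local / parameter      -> StorageClassFunction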
+spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::TType& type) +{ + if (type.getBasicType() == glslang::EbtRayQuery) + return spv::StorageClassFunction; + if (type.getQualifier().isPipeInput()) + return spv::StorageClassInput; + if (type.getQualifier().isPipeOutput()) + return spv::StorageClassOutput; + + if (glslangIntermediate->getSource() != glslang::EShSourceHlsl || + type.getQualifier().storage == glslang::EvqUniform) { + if (type.isAtomic()) + return spv::StorageClassAtomicCounter; + if (type.containsOpaque()) + return spv::StorageClassUniformConstant; + } + + if (type.getQualifier().isUniformOrBuffer() && + type.getQualifier().isShaderRecord()) { + return spv::StorageClassShaderRecordBufferKHR; + } + + if (glslangIntermediate->usingStorageBuffer() && type.getQualifier().storage == glslang::EvqBuffer) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_storage_buffer_storage_class, spv::Spv_1_3); + return spv::StorageClassStorageBuffer; + } + + if (type.getQualifier().isUniformOrBuffer()) { + if (type.getQualifier().isPushConstant()) + return spv::StorageClassPushConstant; + if (type.getBasicType() == glslang::EbtBlock) + return spv::StorageClassUniform; + return spv::StorageClassUniformConstant; + } + + switch (type.getQualifier().storage) { + case glslang::EvqGlobal: return spv::StorageClassPrivate; + case glslang::EvqConstReadOnly: return spv::StorageClassFunction; + case glslang::EvqTemporary: return spv::StorageClassFunction; + case glslang::EvqShared: return spv::StorageClassWorkgroup; +#ifndef GLSLANG_WEB + case glslang::EvqPayload: return spv::StorageClassRayPayloadKHR; + case glslang::EvqPayloadIn: return spv::StorageClassIncomingRayPayloadKHR; + case glslang::EvqHitAttr: return spv::StorageClassHitAttributeKHR; + case glslang::EvqCallableData: return spv::StorageClassCallableDataKHR; + case glslang::EvqCallableDataIn: return spv::StorageClassIncomingCallableDataKHR; +#endif + default: + assert(0); + break; + } + + return spv::StorageClassFunction; +} + +// Add capabilities pertaining to how an array is indexed. 
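// Illustrative example (a sketch; `uTextures` and `idx` are hypothetical
// names): indexing a descriptor array with a nonuniformEXT expression, e.g.
//     vec4 c = texture(uTextures[nonuniformEXT(idx)], uv);
// is what drives the *ArrayNonUniformIndexingEXT capabilities added below,
// while a plain, dynamically uniform index only needs the corresponding
// *ArrayDynamicIndexingEXT capabilities.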
+void TGlslangToSpvTraverser::addIndirectionIndexCapabilities(const glslang::TType& baseType, + const glslang::TType& indexType) +{ +#ifndef GLSLANG_WEB + if (indexType.getQualifier().isNonUniform()) { + // deal with an asserted non-uniform index + // SPV_EXT_descriptor_indexing already added in TranslateNonUniformDecoration + if (baseType.getBasicType() == glslang::EbtSampler) { + if (baseType.getQualifier().hasAttachment()) + builder.addCapability(spv::CapabilityInputAttachmentArrayNonUniformIndexingEXT); + else if (baseType.isImage() && baseType.getSampler().isBuffer()) + builder.addCapability(spv::CapabilityStorageTexelBufferArrayNonUniformIndexingEXT); + else if (baseType.isTexture() && baseType.getSampler().isBuffer()) + builder.addCapability(spv::CapabilityUniformTexelBufferArrayNonUniformIndexingEXT); + else if (baseType.isImage()) + builder.addCapability(spv::CapabilityStorageImageArrayNonUniformIndexingEXT); + else if (baseType.isTexture()) + builder.addCapability(spv::CapabilitySampledImageArrayNonUniformIndexingEXT); + } else if (baseType.getBasicType() == glslang::EbtBlock) { + if (baseType.getQualifier().storage == glslang::EvqBuffer) + builder.addCapability(spv::CapabilityStorageBufferArrayNonUniformIndexingEXT); + else if (baseType.getQualifier().storage == glslang::EvqUniform) + builder.addCapability(spv::CapabilityUniformBufferArrayNonUniformIndexingEXT); + } + } else { + // assume a dynamically uniform index + if (baseType.getBasicType() == glslang::EbtSampler) { + if (baseType.getQualifier().hasAttachment()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityInputAttachmentArrayDynamicIndexingEXT); + } else if (baseType.isImage() && baseType.getSampler().isBuffer()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityStorageTexelBufferArrayDynamicIndexingEXT); + } else if (baseType.isTexture() && baseType.getSampler().isBuffer()) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityUniformTexelBufferArrayDynamicIndexingEXT); + } + } + } +#endif +} + +// Return whether or not the given type is something that should be tied to a +// descriptor set. +bool IsDescriptorResource(const glslang::TType& type) +{ + // uniform and buffer blocks are included, unless it is a push_constant + if (type.getBasicType() == glslang::EbtBlock) + return type.getQualifier().isUniformOrBuffer() && + ! type.getQualifier().isShaderRecord() && + ! type.getQualifier().isPushConstant(); + + // non block... + // basically samplerXXX/subpass/sampler/texture are all included + // if they are the global-scope-class, not the function parameter + // (or local, if they ever exist) class. + if (type.getBasicType() == glslang::EbtSampler || + type.getBasicType() == glslang::EbtAccStruct) + return type.getQualifier().isUniformOrBuffer(); + + // None of the above. 
+ return false; +} + +void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& parent) +{ + if (child.layoutMatrix == glslang::ElmNone) + child.layoutMatrix = parent.layoutMatrix; + + if (parent.invariant) + child.invariant = true; + if (parent.flat) + child.flat = true; + if (parent.centroid) + child.centroid = true; +#ifndef GLSLANG_WEB + if (parent.nopersp) + child.nopersp = true; + if (parent.explicitInterp) + child.explicitInterp = true; + if (parent.perPrimitiveNV) + child.perPrimitiveNV = true; + if (parent.perViewNV) + child.perViewNV = true; + if (parent.perTaskNV) + child.perTaskNV = true; + if (parent.patch) + child.patch = true; + if (parent.sample) + child.sample = true; + if (parent.coherent) + child.coherent = true; + if (parent.devicecoherent) + child.devicecoherent = true; + if (parent.queuefamilycoherent) + child.queuefamilycoherent = true; + if (parent.workgroupcoherent) + child.workgroupcoherent = true; + if (parent.subgroupcoherent) + child.subgroupcoherent = true; + if (parent.shadercallcoherent) + child.shadercallcoherent = true; + if (parent.nonprivate) + child.nonprivate = true; + if (parent.volatil) + child.volatil = true; + if (parent.restrict) + child.restrict = true; + if (parent.readonly) + child.readonly = true; + if (parent.writeonly) + child.writeonly = true; +#endif +} + +bool HasNonLayoutQualifiers(const glslang::TType& type, const glslang::TQualifier& qualifier) +{ + // This should list qualifiers that simultaneous satisfy: + // - struct members might inherit from a struct declaration + // (note that non-block structs don't explicitly inherit, + // only implicitly, meaning no decoration involved) + // - affect decorations on the struct members + // (note smooth does not, and expecting something like volatile + // to effect the whole object) + // - are not part of the offset/st430/etc or row/column-major layout + return qualifier.invariant || (qualifier.hasLocation() && type.getBasicType() == glslang::EbtBlock); +} + +// +// Implement the TGlslangToSpvTraverser class. +// + +TGlslangToSpvTraverser::TGlslangToSpvTraverser(unsigned int spvVersion, + const glslang::TIntermediate* glslangIntermediate, + spv::SpvBuildLogger* buildLogger, glslang::SpvOptions& options) : + TIntermTraverser(true, false, true), + options(options), + shaderEntry(nullptr), currentFunction(nullptr), + sequenceDepth(0), logger(buildLogger), + builder(spvVersion, (glslang::GetKhronosToolId() << 16) | glslang::GetSpirvGeneratorVersion(), logger), + inEntryPoint(false), entryPointTerminated(false), linkageOnly(false), + glslangIntermediate(glslangIntermediate), + nanMinMaxClamp(glslangIntermediate->getNanMinMaxClamp()), + nonSemanticDebugPrintf(0) +{ + spv::ExecutionModel executionModel = TranslateExecutionModel(glslangIntermediate->getStage()); + + builder.clearAccessChain(); + builder.setSource(TranslateSourceLanguage(glslangIntermediate->getSource(), glslangIntermediate->getProfile()), + glslangIntermediate->getVersion()); + + if (options.generateDebugInfo) { + builder.setEmitOpLines(); + builder.setSourceFile(glslangIntermediate->getSourceFile()); + + // Set the source shader's text. If for SPV version 1.0, include + // a preamble in comments stating the OpModuleProcessed instructions. + // Otherwise, emit those as actual instructions. 
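        // For a SPIR-V 1.0 target the preamble built below looks roughly like
        // (the process strings are illustrative; the real ones come from
        // getProcesses()):
        //     // OpModuleProcessed client vulkan100
        //     // OpModuleProcessed entry-point main
        //     #line 1
        //     <original shader text>
        // whereas for SPIR-V 1.1+ the same strings are emitted as actual
        // OpModuleProcessed instructions instead.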
+ std::string text; + const std::vector& processes = glslangIntermediate->getProcesses(); + for (int p = 0; p < (int)processes.size(); ++p) { + if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1) { + text.append("// OpModuleProcessed "); + text.append(processes[p]); + text.append("\n"); + } else + builder.addModuleProcessed(processes[p]); + } + if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_1 && (int)processes.size() > 0) + text.append("#line 1\n"); + text.append(glslangIntermediate->getSourceText()); + builder.setSourceText(text); + // Pass name and text for all included files + const std::map& include_txt = glslangIntermediate->getIncludeText(); + for (auto iItr = include_txt.begin(); iItr != include_txt.end(); ++iItr) + builder.addInclude(iItr->first, iItr->second); + } + stdBuiltins = builder.import("GLSL.std.450"); + + spv::AddressingModel addressingModel = spv::AddressingModelLogical; + spv::MemoryModel memoryModel = spv::MemoryModelGLSL450; + + if (glslangIntermediate->usingPhysicalStorageBuffer()) { + addressingModel = spv::AddressingModelPhysicalStorageBuffer64EXT; + builder.addIncorporatedExtension(spv::E_SPV_EXT_physical_storage_buffer, spv::Spv_1_5); + builder.addCapability(spv::CapabilityPhysicalStorageBufferAddressesEXT); + } + if (glslangIntermediate->usingVulkanMemoryModel()) { + memoryModel = spv::MemoryModelVulkanKHR; + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + builder.addIncorporatedExtension(spv::E_SPV_KHR_vulkan_memory_model, spv::Spv_1_5); + } + builder.setMemoryModel(addressingModel, memoryModel); + + if (glslangIntermediate->usingVariablePointers()) { + builder.addCapability(spv::CapabilityVariablePointers); + } + + shaderEntry = builder.makeEntryPoint(glslangIntermediate->getEntryPointName().c_str()); + entryPoint = builder.addEntryPoint(executionModel, shaderEntry, glslangIntermediate->getEntryPointName().c_str()); + + // Add the source extensions + const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions(); + for (auto it = sourceExtensions.begin(); it != sourceExtensions.end(); ++it) + builder.addSourceExtension(it->c_str()); + + // Add the top-level modes for this shader. 
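    // As a concrete sketch: for a typical Vulkan fragment shader the stage
    // switch below results in module-level instructions along the lines of
    //     OpEntryPoint Fragment %main "main" ...
    //     OpExecutionMode %main OriginUpperLeft
    // with further modes (EarlyFragmentTests, DepthReplacing, ...) added only
    // when the shader actually declares them.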
+ + if (glslangIntermediate->getXfbMode()) { + builder.addCapability(spv::CapabilityTransformFeedback); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeXfb); + } + + if (glslangIntermediate->getLayoutPrimitiveCulling()) { + builder.addCapability(spv::CapabilityRayTraversalPrimitiveCullingProvisionalKHR); + } + + unsigned int mode; + switch (glslangIntermediate->getStage()) { + case EShLangVertex: + builder.addCapability(spv::CapabilityShader); + break; + + case EShLangFragment: + builder.addCapability(spv::CapabilityShader); + if (glslangIntermediate->getPixelCenterInteger()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModePixelCenterInteger); + + if (glslangIntermediate->getOriginUpperLeft()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginUpperLeft); + else + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOriginLowerLeft); + + if (glslangIntermediate->getEarlyFragmentTests()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModeEarlyFragmentTests); + + if (glslangIntermediate->getPostDepthCoverage()) { + builder.addCapability(spv::CapabilitySampleMaskPostDepthCoverage); + builder.addExecutionMode(shaderEntry, spv::ExecutionModePostDepthCoverage); + builder.addExtension(spv::E_SPV_KHR_post_depth_coverage); + } + + if (glslangIntermediate->getDepth() != glslang::EldUnchanged && glslangIntermediate->isDepthReplacing()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModeDepthReplacing); + +#ifndef GLSLANG_WEB + + switch(glslangIntermediate->getDepth()) { + case glslang::EldGreater: mode = spv::ExecutionModeDepthGreater; break; + case glslang::EldLess: mode = spv::ExecutionModeDepthLess; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + switch (glslangIntermediate->getInterlockOrdering()) { + case glslang::EioPixelInterlockOrdered: mode = spv::ExecutionModePixelInterlockOrderedEXT; + break; + case glslang::EioPixelInterlockUnordered: mode = spv::ExecutionModePixelInterlockUnorderedEXT; + break; + case glslang::EioSampleInterlockOrdered: mode = spv::ExecutionModeSampleInterlockOrderedEXT; + break; + case glslang::EioSampleInterlockUnordered: mode = spv::ExecutionModeSampleInterlockUnorderedEXT; + break; + case glslang::EioShadingRateInterlockOrdered: mode = spv::ExecutionModeShadingRateInterlockOrderedEXT; + break; + case glslang::EioShadingRateInterlockUnordered: mode = spv::ExecutionModeShadingRateInterlockUnorderedEXT; + break; + default: mode = spv::ExecutionModeMax; + break; + } + if (mode != spv::ExecutionModeMax) { + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + if (mode == spv::ExecutionModeShadingRateInterlockOrderedEXT || + mode == spv::ExecutionModeShadingRateInterlockUnorderedEXT) { + builder.addCapability(spv::CapabilityFragmentShaderShadingRateInterlockEXT); + } else if (mode == spv::ExecutionModePixelInterlockOrderedEXT || + mode == spv::ExecutionModePixelInterlockUnorderedEXT) { + builder.addCapability(spv::CapabilityFragmentShaderPixelInterlockEXT); + } else { + builder.addCapability(spv::CapabilityFragmentShaderSampleInterlockEXT); + } + builder.addExtension(spv::E_SPV_EXT_fragment_shader_interlock); + } +#endif + break; + + case EShLangCompute: + builder.addCapability(spv::CapabilityShader); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0), + glslangIntermediate->getLocalSize(1), + glslangIntermediate->getLocalSize(2)); + if 
(glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupQuads) { + builder.addCapability(spv::CapabilityComputeDerivativeGroupQuadsNV); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupQuadsNV); + builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives); + } else if (glslangIntermediate->getLayoutDerivativeModeNone() == glslang::LayoutDerivativeGroupLinear) { + builder.addCapability(spv::CapabilityComputeDerivativeGroupLinearNV); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeDerivativeGroupLinearNV); + builder.addExtension(spv::E_SPV_NV_compute_shader_derivatives); + } + break; +#ifndef GLSLANG_WEB + case EShLangTessEvaluation: + case EShLangTessControl: + builder.addCapability(spv::CapabilityTessellation); + + glslang::TLayoutGeometry primitive; + + if (glslangIntermediate->getStage() == EShLangTessControl) { + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, + glslangIntermediate->getVertices()); + primitive = glslangIntermediate->getOutputPrimitive(); + } else { + primitive = glslangIntermediate->getInputPrimitive(); + } + + switch (primitive) { + case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break; + case glslang::ElgQuads: mode = spv::ExecutionModeQuads; break; + case glslang::ElgIsolines: mode = spv::ExecutionModeIsolines; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + switch (glslangIntermediate->getVertexSpacing()) { + case glslang::EvsEqual: mode = spv::ExecutionModeSpacingEqual; break; + case glslang::EvsFractionalEven: mode = spv::ExecutionModeSpacingFractionalEven; break; + case glslang::EvsFractionalOdd: mode = spv::ExecutionModeSpacingFractionalOdd; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + switch (glslangIntermediate->getVertexOrder()) { + case glslang::EvoCw: mode = spv::ExecutionModeVertexOrderCw; break; + case glslang::EvoCcw: mode = spv::ExecutionModeVertexOrderCcw; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + if (glslangIntermediate->getPointMode()) + builder.addExecutionMode(shaderEntry, spv::ExecutionModePointMode); + break; + + case EShLangGeometry: + builder.addCapability(spv::CapabilityGeometry); + switch (glslangIntermediate->getInputPrimitive()) { + case glslang::ElgPoints: mode = spv::ExecutionModeInputPoints; break; + case glslang::ElgLines: mode = spv::ExecutionModeInputLines; break; + case glslang::ElgLinesAdjacency: mode = spv::ExecutionModeInputLinesAdjacency; break; + case glslang::ElgTriangles: mode = spv::ExecutionModeTriangles; break; + case glslang::ElgTrianglesAdjacency: mode = spv::ExecutionModeInputTrianglesAdjacency; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + + builder.addExecutionMode(shaderEntry, spv::ExecutionModeInvocations, glslangIntermediate->getInvocations()); + + switch (glslangIntermediate->getOutputPrimitive()) { + case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break; + case glslang::ElgLineStrip: mode = spv::ExecutionModeOutputLineStrip; break; + case glslang::ElgTriangleStrip: mode = spv::ExecutionModeOutputTriangleStrip; break; 
+ default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, glslangIntermediate->getVertices()); + break; + + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + { + auto& extensions = glslangIntermediate->getRequestedExtensions(); + if (extensions.find("GL_NV_ray_tracing") == extensions.end()) { + builder.addCapability(spv::CapabilityRayTracingProvisionalKHR); + builder.addExtension("SPV_KHR_ray_tracing"); + } + else { + builder.addCapability(spv::CapabilityRayTracingNV); + builder.addExtension("SPV_NV_ray_tracing"); + } + break; + } + case EShLangTaskNV: + case EShLangMeshNV: + builder.addCapability(spv::CapabilityMeshShadingNV); + builder.addExtension(spv::E_SPV_NV_mesh_shader); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeLocalSize, glslangIntermediate->getLocalSize(0), + glslangIntermediate->getLocalSize(1), + glslangIntermediate->getLocalSize(2)); + if (glslangIntermediate->getStage() == EShLangMeshNV) { + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputVertices, + glslangIntermediate->getVertices()); + builder.addExecutionMode(shaderEntry, spv::ExecutionModeOutputPrimitivesNV, + glslangIntermediate->getPrimitives()); + + switch (glslangIntermediate->getOutputPrimitive()) { + case glslang::ElgPoints: mode = spv::ExecutionModeOutputPoints; break; + case glslang::ElgLines: mode = spv::ExecutionModeOutputLinesNV; break; + case glslang::ElgTriangles: mode = spv::ExecutionModeOutputTrianglesNV; break; + default: mode = spv::ExecutionModeMax; break; + } + if (mode != spv::ExecutionModeMax) + builder.addExecutionMode(shaderEntry, (spv::ExecutionMode)mode); + } + break; +#endif + + default: + break; + } +} + +// Finish creating SPV, after the traversal is complete. +void TGlslangToSpvTraverser::finishSpv() +{ + // Finish the entry point function + if (! entryPointTerminated) { + builder.setBuildPoint(shaderEntry->getLastBlock()); + builder.leaveFunction(); + } + + // finish off the entry-point SPV instruction by adding the Input/Output + for (auto it = iOSet.cbegin(); it != iOSet.cend(); ++it) + entryPoint->addIdOperand(*it); + + // Add capabilities, extensions, remove unneeded decorations, etc., + // based on the resulting SPIR-V. + // Note: WebGPU code generation must have the opportunity to aggressively + // prune unreachable merge blocks and continue targets. + builder.postProcess(); +} + +// Write the SPV into 'out'. +void TGlslangToSpvTraverser::dumpSpv(std::vector& out) +{ + builder.dump(out); +} + +// +// Implement the traversal functions. +// +// Return true from interior nodes to have the external traversal +// continue on to children. Return false if children were +// already processed. +// + +// +// Symbols can turn into +// - uniform/input reads +// - output writes +// - complex lvalue base setups: foo.bar[3].... 
, where we see foo and start up an access chain +// - something simple that degenerates into the last bullet +// +void TGlslangToSpvTraverser::visitSymbol(glslang::TIntermSymbol* symbol) +{ + SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder); + if (symbol->getType().isStruct()) + glslangTypeToIdMap[symbol->getType().getStruct()] = symbol->getId(); + + if (symbol->getType().getQualifier().isSpecConstant()) + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + // getSymbolId() will set up all the IO decorations on the first call. + // Formal function parameters were mapped during makeFunctions(). + spv::Id id = getSymbolId(symbol); + + if (builder.isPointer(id)) { + if (!symbol->getType().getQualifier().isParamInput() && + !symbol->getType().getQualifier().isParamOutput()) { + // Include all "static use" and "linkage only" interface variables on the OpEntryPoint instruction + // Consider adding to the OpEntryPoint interface list. + // Only looking at structures if they have at least one member. + if (!symbol->getType().isStruct() || symbol->getType().getStruct()->size() > 0) { + spv::StorageClass sc = builder.getStorageClass(id); + // Before SPIR-V 1.4, we only want to include Input and Output. + // Starting with SPIR-V 1.4, we want all globals. + if ((glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4 && builder.isGlobalStorage(id)) || + (sc == spv::StorageClassInput || sc == spv::StorageClassOutput)) { + iOSet.insert(id); + } + } + } + + // If the SPIR-V type is required to be different than the AST type + // (for ex SubgroupMasks or 3x4 ObjectToWorld/WorldToObject matrices), + // translate now from the SPIR-V type to the AST type, for the consuming + // operation. + // Note this turns it from an l-value to an r-value. + // Currently, all symbols needing this are inputs; avoid the map lookup when non-input. + if (symbol->getType().getQualifier().storage == glslang::EvqVaryingIn) + id = translateForcedType(id); + } + + // Only process non-linkage-only nodes for generating actual static uses + if (! linkageOnly || symbol->getQualifier().isSpecConstant()) { + // Prepare to generate code for the access + + // L-value chains will be computed left to right. We're on the symbol now, + // which is the left-most part of the access chain, so now is "clear" time, + // followed by setting the base. + builder.clearAccessChain(); + + // For now, we consider all user variables as being in memory, so they are pointers, + // except for + // A) R-Value arguments to a function, which are an intermediate object. + // See comments in handleUserFunctionCall(). + // B) Specialization constants (normal constants don't even come in as a variable), + // These are also pure R-values. + // C) R-Values from type translation, see above call to translateForcedType() + glslang::TQualifier qualifier = symbol->getQualifier(); + if (qualifier.isSpecConstant() || rValueParameters.find(symbol->getId()) != rValueParameters.end() || + !builder.isPointerType(builder.getTypeId(id))) + builder.setAccessChainRValue(id); + else + builder.setAccessChainLValue(id); + } + +#ifdef ENABLE_HLSL + // Process linkage-only nodes for any special additional interface work. + if (linkageOnly) { + if (glslangIntermediate->getHlslFunctionality1()) { + // Map implicit counter buffers to their originating buffers, which should have been + // seen by now, given earlier pruning of unused counters, and preservation of order + // of declaration. 
+ if (symbol->getType().getQualifier().isUniformOrBuffer()) { + if (!glslangIntermediate->hasCounterBufferName(symbol->getName())) { + // Save possible originating buffers for counter buffers, keyed by + // making the potential counter-buffer name. + std::string keyName = symbol->getName().c_str(); + keyName = glslangIntermediate->addCounterBufferName(keyName); + counterOriginator[keyName] = symbol; + } else { + // Handle a counter buffer, by finding the saved originating buffer. + std::string keyName = symbol->getName().c_str(); + auto it = counterOriginator.find(keyName); + if (it != counterOriginator.end()) { + id = getSymbolId(it->second); + if (id != spv::NoResult) { + spv::Id counterId = getSymbolId(symbol); + if (counterId != spv::NoResult) { + builder.addExtension("SPV_GOOGLE_hlsl_functionality1"); + builder.addDecorationId(id, spv::DecorationHlslCounterBufferGOOGLE, counterId); + } + } + } + } + } + } + } +#endif +} + +bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::TIntermBinary* node) +{ + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + if (node->getLeft()->getAsSymbolNode() != nullptr && node->getLeft()->getType().isStruct()) { + glslangTypeToIdMap[node->getLeft()->getType().getStruct()] = node->getLeft()->getAsSymbolNode()->getId(); + } + if (node->getRight()->getAsSymbolNode() != nullptr && node->getRight()->getType().isStruct()) { + glslangTypeToIdMap[node->getRight()->getType().getStruct()] = node->getRight()->getAsSymbolNode()->getId(); + } + + SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder); + if (node->getType().getQualifier().isSpecConstant()) + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + // First, handle special cases + switch (node->getOp()) { + case glslang::EOpAssign: + case glslang::EOpAddAssign: + case glslang::EOpSubAssign: + case glslang::EOpMulAssign: + case glslang::EOpVectorTimesMatrixAssign: + case glslang::EOpVectorTimesScalarAssign: + case glslang::EOpMatrixTimesScalarAssign: + case glslang::EOpMatrixTimesMatrixAssign: + case glslang::EOpDivAssign: + case glslang::EOpModAssign: + case glslang::EOpAndAssign: + case glslang::EOpInclusiveOrAssign: + case glslang::EOpExclusiveOrAssign: + case glslang::EOpLeftShiftAssign: + case glslang::EOpRightShiftAssign: + // A bin-op assign "a += b" means the same thing as "a = a + b" + // where a is evaluated before b. For a simple assignment, GLSL + // says to evaluate the left before the right. So, always, left + // node then right node. 
+ { + // get the left l-value, save it away + builder.clearAccessChain(); + node->getLeft()->traverse(this); + spv::Builder::AccessChain lValue = builder.getAccessChain(); + + // evaluate the right + builder.clearAccessChain(); + node->getRight()->traverse(this); + spv::Id rValue = accessChainLoad(node->getRight()->getType()); + + if (node->getOp() != glslang::EOpAssign) { + // the left is also an r-value + builder.setAccessChain(lValue); + spv::Id leftRValue = accessChainLoad(node->getLeft()->getType()); + + // do the operation + OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()), + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + rValue = createBinaryOperation(node->getOp(), decorations, + convertGlslangToSpvType(node->getType()), leftRValue, rValue, + node->getType().getBasicType()); + + // these all need their counterparts in createBinaryOperation() + assert(rValue != spv::NoResult); + } + + // store the result + builder.setAccessChain(lValue); + multiTypeStore(node->getLeft()->getType(), rValue); + + // assignments are expressions having an rValue after they are evaluated... + builder.clearAccessChain(); + builder.setAccessChainRValue(rValue); + } + return false; + case glslang::EOpIndexDirect: + case glslang::EOpIndexDirectStruct: + { + // Structure, array, matrix, or vector indirection with statically known index. + // Get the left part of the access chain. + node->getLeft()->traverse(this); + + // Add the next element in the chain + + const int glslangIndex = node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (! node->getLeft()->getType().isArray() && + node->getLeft()->getType().isVector() && + node->getOp() == glslang::EOpIndexDirect) { + // This is essentially a hard-coded vector swizzle of size 1, + // so short circuit the access-chain stuff with a swizzle. + std::vector swizzle; + swizzle.push_back(glslangIndex); + int dummySize; + builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()), + TranslateCoherent(node->getLeft()->getType()), + glslangIntermediate->getBaseAlignmentScalar( + node->getLeft()->getType(), dummySize)); + } else { + + // Load through a block reference is performed with a dot operator that + // is mapped to EOpIndexDirectStruct. When we get to the actual reference, + // do a load and reset the access chain. + if (node->getLeft()->isReference() && + !node->getLeft()->getType().isArray() && + node->getOp() == glslang::EOpIndexDirectStruct) + { + spv::Id left = accessChainLoad(node->getLeft()->getType()); + builder.clearAccessChain(); + builder.setAccessChainLValue(left); + } + + int spvIndex = glslangIndex; + if (node->getLeft()->getBasicType() == glslang::EbtBlock && + node->getOp() == glslang::EOpIndexDirectStruct) + { + // This may be, e.g., an anonymous block-member selection, which generally need + // index remapping due to hidden members in anonymous blocks. 
+                int glslangId = glslangTypeToIdMap[node->getLeft()->getType().getStruct()];
+                if (memberRemapper.find(glslangId) != memberRemapper.end()) {
+                    std::vector<int>& remapper = memberRemapper[glslangId];
+                    assert(remapper.size() > 0);
+                    spvIndex = remapper[glslangIndex];
+                }
+            }
+
+            // normal case for indexing array or structure or block
+            builder.accessChainPush(builder.makeIntConstant(spvIndex),
+                TranslateCoherent(node->getLeft()->getType()),
+                node->getLeft()->getType().getBufferReferenceAlignment());
+
+            // Add capabilities here for accessing PointSize and clip/cull distance.
+            // We have deferred generation of associated capabilities until now.
+            if (node->getLeft()->getType().isStruct() && ! node->getLeft()->getType().isArray())
+                declareUseOfStructMember(*(node->getLeft()->getType().getStruct()), glslangIndex);
+        }
+    }
+    return false;
+    case glslang::EOpIndexIndirect:
+    {
+        // Array, matrix, or vector indirection with variable index.
+        // Will use native SPIR-V access-chain for an array indirection;
+        // matrices are arrays of vectors, so will also work for a matrix.
+        // Will use the access chain's 'component' for variable index into a vector.
+
+        // This adapter is building access chains left to right.
+        // Set up the access chain to the left.
+        node->getLeft()->traverse(this);
+
+        // save it so that computing the right side doesn't trash it
+        spv::Builder::AccessChain partial = builder.getAccessChain();
+
+        // compute the next index in the chain
+        builder.clearAccessChain();
+        node->getRight()->traverse(this);
+        spv::Id index = accessChainLoad(node->getRight()->getType());
+
+        addIndirectionIndexCapabilities(node->getLeft()->getType(), node->getRight()->getType());
+
+        // restore the saved access chain
+        builder.setAccessChain(partial);
+
+        if (! node->getLeft()->getType().isArray() && node->getLeft()->getType().isVector()) {
+            int dummySize;
+            builder.accessChainPushComponent(index, convertGlslangToSpvType(node->getLeft()->getType()),
+                TranslateCoherent(node->getLeft()->getType()),
+                glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(),
+                    dummySize));
+        } else
+            builder.accessChainPush(index, TranslateCoherent(node->getLeft()->getType()),
+                node->getLeft()->getType().getBufferReferenceAlignment());
+    }
+    return false;
+    case glslang::EOpVectorSwizzle:
+    {
+        node->getLeft()->traverse(this);
+        std::vector<unsigned> swizzle;
+        convertSwizzle(*node->getRight()->getAsAggregate(), swizzle);
+        int dummySize;
+        builder.accessChainPushSwizzle(swizzle, convertGlslangToSpvType(node->getLeft()->getType()),
+            TranslateCoherent(node->getLeft()->getType()),
+            glslangIntermediate->getBaseAlignmentScalar(node->getLeft()->getType(),
+                dummySize));
+    }
+    return false;
+    case glslang::EOpMatrixSwizzle:
+        logger->missingFunctionality("matrix swizzle");
+        return true;
+    case glslang::EOpLogicalOr:
+    case glslang::EOpLogicalAnd:
+    {
+
+        // These may require short circuiting, but can sometimes be done as straight
+        // binary operations. The right operand must be short circuited if it has
+        // side effects, and should probably be if it is complex.
+        if (isTrivial(node->getRight()->getAsTyped()))
+            break; // handle below as a normal binary operation
+        // otherwise, we need to do dynamic short circuiting on the right operand
+        spv::Id result = createShortCircuit(node->getOp(), *node->getLeft()->getAsTyped(),
+            *node->getRight()->getAsTyped());
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+    }
+    return false;
+    default:
+        break;
+    }
+
+    // Assume generic binary op...
+
+    // get left operand
+    builder.clearAccessChain();
+    node->getLeft()->traverse(this);
+    spv::Id left = accessChainLoad(node->getLeft()->getType());
+
+    // get right operand
+    builder.clearAccessChain();
+    node->getRight()->traverse(this);
+    spv::Id right = accessChainLoad(node->getRight()->getType());
+
+    // get result
+    OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()),
+                                  TranslateNoContractionDecoration(node->getType().getQualifier()),
+                                  TranslateNonUniformDecoration(node->getType().getQualifier()) };
+    spv::Id result = createBinaryOperation(node->getOp(), decorations,
+                                           convertGlslangToSpvType(node->getType()), left, right,
+                                           node->getLeft()->getType().getBasicType());
+
+    builder.clearAccessChain();
+    if (! result) {
+        logger->missingFunctionality("unknown glslang binary operation");
+        return true;  // pick up a child as the place-holder result
+    } else {
+        builder.setAccessChainRValue(result);
+        return false;
+    }
+}
+
+// Figure out what, if any, type changes are needed when accessing a specific built-in.
+// Returns <the type SPIR-V requires for the declaration, the type to translate to on use>.
+// Also see comment for 'forceType', regarding tracking SPIR-V-required types.
+std::pair<spv::Id, spv::Id> TGlslangToSpvTraverser::getForcedType(glslang::TBuiltInVariable glslangBuiltIn,
+    const glslang::TType& glslangType)
+{
+    switch(glslangBuiltIn)
+    {
+        case glslang::EbvSubGroupEqMask:
+        case glslang::EbvSubGroupGeMask:
+        case glslang::EbvSubGroupGtMask:
+        case glslang::EbvSubGroupLeMask:
+        case glslang::EbvSubGroupLtMask: {
+            // these require changing a 64-bit scalar -> a vector of 32-bit components
+            if (glslangType.isVector())
+                break;
+            std::pair<spv::Id, spv::Id> ret(builder.makeVectorType(builder.makeUintType(32), 4),
+                                            builder.makeUintType(64));
+            return ret;
+        }
+        // There are no SPIR-V builtins defined for these and map onto original non-transposed
+        // builtins. During visitBinary we insert a transpose
+        case glslang::EbvWorldToObject3x4:
+        case glslang::EbvObjectToWorld3x4: {
+            spv::Id mat43 = builder.makeMatrixType(builder.makeFloatType(32), 4, 3);
+            spv::Id mat34 = builder.makeMatrixType(builder.makeFloatType(32), 3, 4);
+            std::pair<spv::Id, spv::Id> ret(mat43, mat34);
+            return ret;
+        }
+        default:
+            break;
+    }
+
+    std::pair<spv::Id, spv::Id> ret(spv::NoType, spv::NoType);
+    return ret;
+}
+
+// For an object previously identified (see getForcedType() and forceType)
+// as needing type translations, do the translation needed for a load, turning
+// an L-value into an R-value.
+spv::Id TGlslangToSpvTraverser::translateForcedType(spv::Id object)
+{
+    const auto forceIt = forceType.find(object);
+    if (forceIt == forceType.end())
+        return object;
+
+    spv::Id desiredTypeId = forceIt->second;
+    spv::Id objectTypeId = builder.getTypeId(object);
+    assert(builder.isPointerType(objectTypeId));
+    objectTypeId = builder.getContainedTypeId(objectTypeId);
+    if (builder.isVectorType(objectTypeId) &&
+        builder.getScalarTypeWidth(builder.getContainedTypeId(objectTypeId)) == 32) {
+        if (builder.getScalarTypeWidth(desiredTypeId) == 64) {
+            // handle 32-bit v.xy* -> 64-bit
+            builder.clearAccessChain();
+            builder.setAccessChainLValue(object);
+            object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId);
+            std::vector<spv::Id> components;
+            components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 0));
+            components.push_back(builder.createCompositeExtract(object, builder.getContainedTypeId(objectTypeId), 1));
+
+            spv::Id vecType = builder.makeVectorType(builder.getContainedTypeId(objectTypeId), 2);
+            return builder.createUnaryOp(spv::OpBitcast, desiredTypeId,
+                                         builder.createCompositeConstruct(vecType, components));
+        } else {
+            logger->missingFunctionality("forcing 32-bit vector type to non 64-bit scalar");
+        }
+    } else if (builder.isMatrixType(objectTypeId)) {
+        // There are no SPIR-V builtins defined for 3x4 variants of ObjectToWorld/WorldToObject
+        // and we insert a transpose after loading the original non-transposed builtins
+        builder.clearAccessChain();
+        builder.setAccessChainLValue(object);
+        object = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, objectTypeId);
+        return builder.createUnaryOp(spv::OpTranspose, desiredTypeId, object);
+
+    } else {
+        logger->missingFunctionality("forcing non 32-bit vector type");
+    }
+
+    return object;
+}
+
+bool TGlslangToSpvTraverser::visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node)
+{
+    builder.setLine(node->getLoc().line, node->getLoc().getFilename());
+
+    SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+    if (node->getType().getQualifier().isSpecConstant())
+        spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+    spv::Id result = spv::NoResult;
+
+    // try texturing first
+    result = createImageTextureFunctionCall(node);
+    if (result != spv::NoResult) {
+        builder.clearAccessChain();
+        builder.setAccessChainRValue(result);
+
+        return false; // done with this node
+    }
+
+    // Non-texturing.
+
+    if (node->getOp() == glslang::EOpArrayLength) {
+        // Quite special; won't want to evaluate the operand.
+
+        // Currently, the front-end does not allow .length() on an array until it is sized,
+        // except for the last block member of an SSBO.
+        // TODO: If this changes, link-time sized arrays might show up here, and need their
+        // size extracted.
+
+        // Normal .length() would have been constant folded by the front-end.
+        // So, this has to be block.lastMember.length().
+        // SPV wants "block" and member number as the operands, go get them.
+ + spv::Id length; + if (node->getOperand()->getType().isCoopMat()) { + spec_constant_op_mode_setter.turnOnSpecConstantOpMode(); + + spv::Id typeId = convertGlslangToSpvType(node->getOperand()->getType()); + assert(builder.isCooperativeMatrixType(typeId)); + + length = builder.createCooperativeMatrixLength(typeId); + } else { + glslang::TIntermTyped* block = node->getOperand()->getAsBinaryNode()->getLeft(); + block->traverse(this); + unsigned int member = node->getOperand()->getAsBinaryNode()->getRight()->getAsConstantUnion() + ->getConstArray()[0].getUConst(); + length = builder.createArrayLength(builder.accessChainGetLValue(), member); + } + + // GLSL semantics say the result of .length() is an int, while SPIR-V says + // signedness must be 0. So, convert from SPIR-V unsigned back to GLSL's + // AST expectation of a signed result. + if (glslangIntermediate->getSource() == glslang::EShSourceGlsl) { + if (builder.isInSpecConstCodeGenMode()) { + length = builder.createBinOp(spv::OpIAdd, builder.makeIntType(32), length, builder.makeIntConstant(0)); + } else { + length = builder.createUnaryOp(spv::OpBitcast, builder.makeIntType(32), length); + } + } + + builder.clearAccessChain(); + builder.setAccessChainRValue(length); + + return false; + } + + // Start by evaluating the operand + + // Does it need a swizzle inversion? If so, evaluation is inverted; + // operate first on the swizzle base, then apply the swizzle. + spv::Id invertedType = spv::NoType; + auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ? + invertedType : convertGlslangToSpvType(node->getType()); }; + if (node->getOp() == glslang::EOpInterpolateAtCentroid) + invertedType = getInvertedSwizzleType(*node->getOperand()); + + builder.clearAccessChain(); + TIntermNode *operandNode; + if (invertedType != spv::NoType) + operandNode = node->getOperand()->getAsBinaryNode()->getLeft(); + else + operandNode = node->getOperand(); + + operandNode->traverse(this); + + spv::Id operand = spv::NoResult; + + spv::Builder::AccessChain::CoherentFlags lvalueCoherentFlags; + +#ifndef GLSLANG_WEB + if (node->getOp() == glslang::EOpAtomicCounterIncrement || + node->getOp() == glslang::EOpAtomicCounterDecrement || + node->getOp() == glslang::EOpAtomicCounter || + node->getOp() == glslang::EOpInterpolateAtCentroid || + node->getOp() == glslang::EOpRayQueryProceed || + node->getOp() == glslang::EOpRayQueryGetRayTMin || + node->getOp() == glslang::EOpRayQueryGetRayFlags || + node->getOp() == glslang::EOpRayQueryGetWorldRayOrigin || + node->getOp() == glslang::EOpRayQueryGetWorldRayDirection || + node->getOp() == glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque || + node->getOp() == glslang::EOpRayQueryTerminate || + node->getOp() == glslang::EOpRayQueryConfirmIntersection) { + operand = builder.accessChainGetLValue(); // Special case l-value operands + lvalueCoherentFlags = builder.getAccessChain().coherentFlags; + lvalueCoherentFlags |= TranslateCoherent(operandNode->getAsTyped()->getType()); + } else +#endif + { + operand = accessChainLoad(node->getOperand()->getType()); + } + + OpDecorations decorations = { TranslatePrecisionDecoration(node->getOperationPrecision()), + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + + // it could be a conversion + if (! result) + result = createConversion(node->getOp(), decorations, resultType(), operand, + node->getOperand()->getBasicType()); + + // if not, then possibly an operation + if (! 
result) + result = createUnaryOperation(node->getOp(), decorations, resultType(), operand, + node->getOperand()->getBasicType(), lvalueCoherentFlags); + + if (result) { + if (invertedType) { + result = createInvertedSwizzle(decorations.precision, *node->getOperand(), result); + decorations.addNonUniform(builder, result); + } + + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; // done with this node + } + + // it must be a special case, check... + switch (node->getOp()) { + case glslang::EOpPostIncrement: + case glslang::EOpPostDecrement: + case glslang::EOpPreIncrement: + case glslang::EOpPreDecrement: + { + // we need the integer value "1" or the floating point "1.0" to add/subtract + spv::Id one = 0; + if (node->getBasicType() == glslang::EbtFloat) + one = builder.makeFloatConstant(1.0F); +#ifndef GLSLANG_WEB + else if (node->getBasicType() == glslang::EbtDouble) + one = builder.makeDoubleConstant(1.0); + else if (node->getBasicType() == glslang::EbtFloat16) + one = builder.makeFloat16Constant(1.0F); + else if (node->getBasicType() == glslang::EbtInt8 || node->getBasicType() == glslang::EbtUint8) + one = builder.makeInt8Constant(1); + else if (node->getBasicType() == glslang::EbtInt16 || node->getBasicType() == glslang::EbtUint16) + one = builder.makeInt16Constant(1); + else if (node->getBasicType() == glslang::EbtInt64 || node->getBasicType() == glslang::EbtUint64) + one = builder.makeInt64Constant(1); +#endif + else + one = builder.makeIntConstant(1); + glslang::TOperator op; + if (node->getOp() == glslang::EOpPreIncrement || + node->getOp() == glslang::EOpPostIncrement) + op = glslang::EOpAdd; + else + op = glslang::EOpSub; + + spv::Id result = createBinaryOperation(op, decorations, + convertGlslangToSpvType(node->getType()), operand, one, + node->getType().getBasicType()); + assert(result != spv::NoResult); + + // The result of operation is always stored, but conditionally the + // consumed result. The consumed result is always an r-value. 
+        builder.accessChainStore(result);
+        builder.clearAccessChain();
+        if (node->getOp() == glslang::EOpPreIncrement ||
+            node->getOp() == glslang::EOpPreDecrement)
+            builder.setAccessChainRValue(result);
+        else
+            builder.setAccessChainRValue(operand);
+    }
+
+    return false;
+
+#ifndef GLSLANG_WEB
+    case glslang::EOpEmitStreamVertex:
+        builder.createNoResultOp(spv::OpEmitStreamVertex, operand);
+        return false;
+    case glslang::EOpEndStreamPrimitive:
+        builder.createNoResultOp(spv::OpEndStreamPrimitive, operand);
+        return false;
+    case glslang::EOpRayQueryTerminate:
+        builder.createNoResultOp(spv::OpRayQueryTerminateKHR, operand);
+        return false;
+    case glslang::EOpRayQueryConfirmIntersection:
+        builder.createNoResultOp(spv::OpRayQueryConfirmIntersectionKHR, operand);
+        return false;
+#endif
+
+    default:
+        logger->missingFunctionality("unknown glslang unary");
+        return true;  // pick up operand as placeholder result
+    }
+}
+
+// Construct a composite object, recursively copying members if their types don't match
+spv::Id TGlslangToSpvTraverser::createCompositeConstruct(spv::Id resultTypeId, std::vector<spv::Id> constituents)
+{
+    for (int c = 0; c < (int)constituents.size(); ++c) {
+        spv::Id& constituent = constituents[c];
+        spv::Id lType = builder.getContainedTypeId(resultTypeId, c);
+        spv::Id rType = builder.getTypeId(constituent);
+        if (lType != rType) {
+            if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) {
+                constituent = builder.createUnaryOp(spv::OpCopyLogical, lType, constituent);
+            } else if (builder.isStructType(rType)) {
+                std::vector<spv::Id> rTypeConstituents;
+                int numrTypeConstituents = builder.getNumTypeConstituents(rType);
+                for (int i = 0; i < numrTypeConstituents; ++i) {
+                    rTypeConstituents.push_back(builder.createCompositeExtract(constituent,
+                        builder.getContainedTypeId(rType, i), i));
+                }
+                constituents[c] = createCompositeConstruct(lType, rTypeConstituents);
+            } else {
+                assert(builder.isArrayType(rType));
+                std::vector<spv::Id> rTypeConstituents;
+                int numrTypeConstituents = builder.getNumTypeConstituents(rType);
+
+                spv::Id elementRType = builder.getContainedTypeId(rType);
+                for (int i = 0; i < numrTypeConstituents; ++i) {
+                    rTypeConstituents.push_back(builder.createCompositeExtract(constituent, elementRType, i));
+                }
+                constituents[c] = createCompositeConstruct(lType, rTypeConstituents);
+            }
+        }
+    }
+    return builder.createCompositeConstruct(resultTypeId, constituents);
+}
+
+bool TGlslangToSpvTraverser::visitAggregate(glslang::TVisit visit, glslang::TIntermAggregate* node)
+{
+    SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+    if (node->getType().getQualifier().isSpecConstant())
+        spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+
+    spv::Id result = spv::NoResult;
+    spv::Id invertedType = spv::NoType;                     // to use to override the natural type of the node
+    std::vector<spv::Builder::AccessChain> complexLvalues;  // for holding swizzling l-values too complex for
+                                                            // SPIR-V, for an out parameter
+    std::vector<spv::Id> temporaryLvalues;                  // temporaries to pass, as proxies for complexLValues
+
+    auto resultType = [&invertedType, &node, this](){ return invertedType != spv::NoType ?
+ invertedType : + convertGlslangToSpvType(node->getType()); }; + + // try texturing + result = createImageTextureFunctionCall(node); + if (result != spv::NoResult) { + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; + } +#ifndef GLSLANG_WEB + else if (node->getOp() == glslang::EOpImageStore || + node->getOp() == glslang::EOpImageStoreLod || + node->getOp() == glslang::EOpImageAtomicStore) { + // "imageStore" is a special case, which has no result + return false; + } +#endif + + glslang::TOperator binOp = glslang::EOpNull; + bool reduceComparison = true; + bool isMatrix = false; + bool noReturnValue = false; + bool atomic = false; + + spv::Builder::AccessChain::CoherentFlags lvalueCoherentFlags; + + assert(node->getOp()); + + spv::Decoration precision = TranslatePrecisionDecoration(node->getOperationPrecision()); + + switch (node->getOp()) { + case glslang::EOpSequence: + { + if (preVisit) + ++sequenceDepth; + else + --sequenceDepth; + + if (sequenceDepth == 1) { + // If this is the parent node of all the functions, we want to see them + // early, so all call points have actual SPIR-V functions to reference. + // In all cases, still let the traverser visit the children for us. + makeFunctions(node->getAsAggregate()->getSequence()); + + // Also, we want all globals initializers to go into the beginning of the entry point, before + // anything else gets there, so visit out of order, doing them all now. + makeGlobalInitializers(node->getAsAggregate()->getSequence()); + + // Initializers are done, don't want to visit again, but functions and link objects need to be processed, + // so do them manually. + visitFunctions(node->getAsAggregate()->getSequence()); + + return false; + } + + return true; + } + case glslang::EOpLinkerObjects: + { + if (visit == glslang::EvPreVisit) + linkageOnly = true; + else + linkageOnly = false; + + return true; + } + case glslang::EOpComma: + { + // processing from left to right naturally leaves the right-most + // lying around in the access chain + glslang::TIntermSequence& glslangOperands = node->getSequence(); + for (int i = 0; i < (int)glslangOperands.size(); ++i) + glslangOperands[i]->traverse(this); + + return false; + } + case glslang::EOpFunction: + if (visit == glslang::EvPreVisit) { + if (isShaderEntryPoint(node)) { + inEntryPoint = true; + builder.setBuildPoint(shaderEntry->getLastBlock()); + currentFunction = shaderEntry; + } else { + handleFunctionEntry(node); + } + } else { + if (inEntryPoint) + entryPointTerminated = true; + builder.leaveFunction(); + inEntryPoint = false; + } + + return true; + case glslang::EOpParameters: + // Parameters will have been consumed by EOpFunction processing, but not + // the body, so we still visited the function node's children, making this + // child redundant. 
+ return false; + case glslang::EOpFunctionCall: + { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + if (node->isUserDefined()) + result = handleUserFunctionCall(node); + if (result) { + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + } else + logger->missingFunctionality("missing user function; linker needs to catch that"); + + return false; + } + case glslang::EOpConstructMat2x2: + case glslang::EOpConstructMat2x3: + case glslang::EOpConstructMat2x4: + case glslang::EOpConstructMat3x2: + case glslang::EOpConstructMat3x3: + case glslang::EOpConstructMat3x4: + case glslang::EOpConstructMat4x2: + case glslang::EOpConstructMat4x3: + case glslang::EOpConstructMat4x4: + case glslang::EOpConstructDMat2x2: + case glslang::EOpConstructDMat2x3: + case glslang::EOpConstructDMat2x4: + case glslang::EOpConstructDMat3x2: + case glslang::EOpConstructDMat3x3: + case glslang::EOpConstructDMat3x4: + case glslang::EOpConstructDMat4x2: + case glslang::EOpConstructDMat4x3: + case glslang::EOpConstructDMat4x4: + case glslang::EOpConstructIMat2x2: + case glslang::EOpConstructIMat2x3: + case glslang::EOpConstructIMat2x4: + case glslang::EOpConstructIMat3x2: + case glslang::EOpConstructIMat3x3: + case glslang::EOpConstructIMat3x4: + case glslang::EOpConstructIMat4x2: + case glslang::EOpConstructIMat4x3: + case glslang::EOpConstructIMat4x4: + case glslang::EOpConstructUMat2x2: + case glslang::EOpConstructUMat2x3: + case glslang::EOpConstructUMat2x4: + case glslang::EOpConstructUMat3x2: + case glslang::EOpConstructUMat3x3: + case glslang::EOpConstructUMat3x4: + case glslang::EOpConstructUMat4x2: + case glslang::EOpConstructUMat4x3: + case glslang::EOpConstructUMat4x4: + case glslang::EOpConstructBMat2x2: + case glslang::EOpConstructBMat2x3: + case glslang::EOpConstructBMat2x4: + case glslang::EOpConstructBMat3x2: + case glslang::EOpConstructBMat3x3: + case glslang::EOpConstructBMat3x4: + case glslang::EOpConstructBMat4x2: + case glslang::EOpConstructBMat4x3: + case glslang::EOpConstructBMat4x4: + case glslang::EOpConstructF16Mat2x2: + case glslang::EOpConstructF16Mat2x3: + case glslang::EOpConstructF16Mat2x4: + case glslang::EOpConstructF16Mat3x2: + case glslang::EOpConstructF16Mat3x3: + case glslang::EOpConstructF16Mat3x4: + case glslang::EOpConstructF16Mat4x2: + case glslang::EOpConstructF16Mat4x3: + case glslang::EOpConstructF16Mat4x4: + isMatrix = true; + // fall through + case glslang::EOpConstructFloat: + case glslang::EOpConstructVec2: + case glslang::EOpConstructVec3: + case glslang::EOpConstructVec4: + case glslang::EOpConstructDouble: + case glslang::EOpConstructDVec2: + case glslang::EOpConstructDVec3: + case glslang::EOpConstructDVec4: + case glslang::EOpConstructFloat16: + case glslang::EOpConstructF16Vec2: + case glslang::EOpConstructF16Vec3: + case glslang::EOpConstructF16Vec4: + case glslang::EOpConstructBool: + case glslang::EOpConstructBVec2: + case glslang::EOpConstructBVec3: + case glslang::EOpConstructBVec4: + case glslang::EOpConstructInt8: + case glslang::EOpConstructI8Vec2: + case glslang::EOpConstructI8Vec3: + case glslang::EOpConstructI8Vec4: + case glslang::EOpConstructUint8: + case glslang::EOpConstructU8Vec2: + case glslang::EOpConstructU8Vec3: + case glslang::EOpConstructU8Vec4: + case glslang::EOpConstructInt16: + case glslang::EOpConstructI16Vec2: + case glslang::EOpConstructI16Vec3: + case glslang::EOpConstructI16Vec4: + case glslang::EOpConstructUint16: + case glslang::EOpConstructU16Vec2: + case glslang::EOpConstructU16Vec3: + case 
glslang::EOpConstructU16Vec4: + case glslang::EOpConstructInt: + case glslang::EOpConstructIVec2: + case glslang::EOpConstructIVec3: + case glslang::EOpConstructIVec4: + case glslang::EOpConstructUint: + case glslang::EOpConstructUVec2: + case glslang::EOpConstructUVec3: + case glslang::EOpConstructUVec4: + case glslang::EOpConstructInt64: + case glslang::EOpConstructI64Vec2: + case glslang::EOpConstructI64Vec3: + case glslang::EOpConstructI64Vec4: + case glslang::EOpConstructUint64: + case glslang::EOpConstructU64Vec2: + case glslang::EOpConstructU64Vec3: + case glslang::EOpConstructU64Vec4: + case glslang::EOpConstructStruct: + case glslang::EOpConstructTextureSampler: + case glslang::EOpConstructReference: + case glslang::EOpConstructCooperativeMatrix: + { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + std::vector arguments; + translateArguments(*node, arguments, lvalueCoherentFlags); + spv::Id constructed; + if (node->getOp() == glslang::EOpConstructTextureSampler) + constructed = builder.createOp(spv::OpSampledImage, resultType(), arguments); + else if (node->getOp() == glslang::EOpConstructStruct || + node->getOp() == glslang::EOpConstructCooperativeMatrix || + node->getType().isArray()) { + std::vector constituents; + for (int c = 0; c < (int)arguments.size(); ++c) + constituents.push_back(arguments[c]); + constructed = createCompositeConstruct(resultType(), constituents); + } else if (isMatrix) + constructed = builder.createMatrixConstructor(precision, arguments, resultType()); + else + constructed = builder.createConstructor(precision, arguments, resultType()); + + builder.clearAccessChain(); + builder.setAccessChainRValue(constructed); + + return false; + } + + // These six are component-wise compares with component-wise results. + // Forward on to createBinaryOperation(), requesting a vector result. 
+ case glslang::EOpLessThan: + case glslang::EOpGreaterThan: + case glslang::EOpLessThanEqual: + case glslang::EOpGreaterThanEqual: + case glslang::EOpVectorEqual: + case glslang::EOpVectorNotEqual: + { + // Map the operation to a binary + binOp = node->getOp(); + reduceComparison = false; + switch (node->getOp()) { + case glslang::EOpVectorEqual: binOp = glslang::EOpVectorEqual; break; + case glslang::EOpVectorNotEqual: binOp = glslang::EOpVectorNotEqual; break; + default: binOp = node->getOp(); break; + } + + break; + } + case glslang::EOpMul: + // component-wise matrix multiply + binOp = glslang::EOpMul; + break; + case glslang::EOpOuterProduct: + // two vectors multiplied to make a matrix + binOp = glslang::EOpOuterProduct; + break; + case glslang::EOpDot: + { + // for scalar dot product, use multiply + glslang::TIntermSequence& glslangOperands = node->getSequence(); + if (glslangOperands[0]->getAsTyped()->getVectorSize() == 1) + binOp = glslang::EOpMul; + break; + } + case glslang::EOpMod: + // when an aggregate, this is the floating-point mod built-in function, + // which can be emitted by the one in createBinaryOperation() + binOp = glslang::EOpMod; + break; + + case glslang::EOpEmitVertex: + case glslang::EOpEndPrimitive: + case glslang::EOpBarrier: + case glslang::EOpMemoryBarrier: + case glslang::EOpMemoryBarrierAtomicCounter: + case glslang::EOpMemoryBarrierBuffer: + case glslang::EOpMemoryBarrierImage: + case glslang::EOpMemoryBarrierShared: + case glslang::EOpGroupMemoryBarrier: + case glslang::EOpDeviceMemoryBarrier: + case glslang::EOpAllMemoryBarrierWithGroupSync: + case glslang::EOpDeviceMemoryBarrierWithGroupSync: + case glslang::EOpWorkgroupMemoryBarrier: + case glslang::EOpWorkgroupMemoryBarrierWithGroupSync: + case glslang::EOpSubgroupBarrier: + case glslang::EOpSubgroupMemoryBarrier: + case glslang::EOpSubgroupMemoryBarrierBuffer: + case glslang::EOpSubgroupMemoryBarrierImage: + case glslang::EOpSubgroupMemoryBarrierShared: + noReturnValue = true; + // These all have 0 operands and will naturally finish up in the code below for 0 operands + break; + + case glslang::EOpAtomicAdd: + case glslang::EOpAtomicMin: + case glslang::EOpAtomicMax: + case glslang::EOpAtomicAnd: + case glslang::EOpAtomicOr: + case glslang::EOpAtomicXor: + case glslang::EOpAtomicExchange: + case glslang::EOpAtomicCompSwap: + atomic = true; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpAtomicStore: + noReturnValue = true; + // fallthrough + case glslang::EOpAtomicLoad: + atomic = true; + break; + + case glslang::EOpAtomicCounterAdd: + case glslang::EOpAtomicCounterSubtract: + case glslang::EOpAtomicCounterMin: + case glslang::EOpAtomicCounterMax: + case glslang::EOpAtomicCounterAnd: + case glslang::EOpAtomicCounterOr: + case glslang::EOpAtomicCounterXor: + case glslang::EOpAtomicCounterExchange: + case glslang::EOpAtomicCounterCompSwap: + builder.addExtension("SPV_KHR_shader_atomic_counter_ops"); + builder.addCapability(spv::CapabilityAtomicStorageOps); + atomic = true; + break; + + case glslang::EOpAbsDifference: + case glslang::EOpAddSaturate: + case glslang::EOpSubSaturate: + case glslang::EOpAverage: + case glslang::EOpAverageRounded: + case glslang::EOpMul32x16: + builder.addCapability(spv::CapabilityIntegerFunctions2INTEL); + builder.addExtension("SPV_INTEL_shader_integer_functions2"); + binOp = node->getOp(); + break; + + case glslang::EOpIgnoreIntersection: + case glslang::EOpTerminateRay: + case glslang::EOpTrace: + case glslang::EOpExecuteCallable: + case 
glslang::EOpWritePackedPrimitiveIndices4x8NV: + noReturnValue = true; + break; + case glslang::EOpRayQueryInitialize: + case glslang::EOpRayQueryTerminate: + case glslang::EOpRayQueryGenerateIntersection: + case glslang::EOpRayQueryConfirmIntersection: + builder.addExtension("SPV_KHR_ray_query"); + builder.addCapability(spv::CapabilityRayQueryProvisionalKHR); + noReturnValue = true; + break; + case glslang::EOpRayQueryProceed: + case glslang::EOpRayQueryGetIntersectionType: + case glslang::EOpRayQueryGetRayTMin: + case glslang::EOpRayQueryGetRayFlags: + case glslang::EOpRayQueryGetIntersectionT: + case glslang::EOpRayQueryGetIntersectionInstanceCustomIndex: + case glslang::EOpRayQueryGetIntersectionInstanceId: + case glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + case glslang::EOpRayQueryGetIntersectionGeometryIndex: + case glslang::EOpRayQueryGetIntersectionPrimitiveIndex: + case glslang::EOpRayQueryGetIntersectionBarycentrics: + case glslang::EOpRayQueryGetIntersectionFrontFace: + case glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque: + case glslang::EOpRayQueryGetIntersectionObjectRayDirection: + case glslang::EOpRayQueryGetIntersectionObjectRayOrigin: + case glslang::EOpRayQueryGetWorldRayDirection: + case glslang::EOpRayQueryGetWorldRayOrigin: + case glslang::EOpRayQueryGetIntersectionObjectToWorld: + case glslang::EOpRayQueryGetIntersectionWorldToObject: + builder.addExtension("SPV_KHR_ray_query"); + builder.addCapability(spv::CapabilityRayQueryProvisionalKHR); + break; + case glslang::EOpCooperativeMatrixLoad: + case glslang::EOpCooperativeMatrixStore: + noReturnValue = true; + break; + case glslang::EOpBeginInvocationInterlock: + case glslang::EOpEndInvocationInterlock: + builder.addExtension(spv::E_SPV_EXT_fragment_shader_interlock); + noReturnValue = true; + break; +#endif + + case glslang::EOpDebugPrintf: + noReturnValue = true; + break; + + default: + break; + } + + // + // See if it maps to a regular operation. + // + if (binOp != glslang::EOpNull) { + glslang::TIntermTyped* left = node->getSequence()[0]->getAsTyped(); + glslang::TIntermTyped* right = node->getSequence()[1]->getAsTyped(); + assert(left && right); + + builder.clearAccessChain(); + left->traverse(this); + spv::Id leftId = accessChainLoad(left->getType()); + + builder.clearAccessChain(); + right->traverse(this); + spv::Id rightId = accessChainLoad(right->getType()); + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + OpDecorations decorations = { precision, + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + result = createBinaryOperation(binOp, decorations, + resultType(), leftId, rightId, + left->getType().getBasicType(), reduceComparison); + + // code above should only make binOp that exists in createBinaryOperation + assert(result != spv::NoResult); + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + + return false; + } + + // + // Create the list of operands. 
+ // + glslang::TIntermSequence& glslangOperands = node->getSequence(); + std::vector operands; + std::vector memoryAccessOperands; + for (int arg = 0; arg < (int)glslangOperands.size(); ++arg) { + // special case l-value operands; there are just a few + bool lvalue = false; + switch (node->getOp()) { + case glslang::EOpModf: + if (arg == 1) + lvalue = true; + break; + + case glslang::EOpRayQueryInitialize: + case glslang::EOpRayQueryTerminate: + case glslang::EOpRayQueryConfirmIntersection: + case glslang::EOpRayQueryProceed: + case glslang::EOpRayQueryGenerateIntersection: + case glslang::EOpRayQueryGetIntersectionType: + case glslang::EOpRayQueryGetIntersectionT: + case glslang::EOpRayQueryGetIntersectionInstanceCustomIndex: + case glslang::EOpRayQueryGetIntersectionInstanceId: + case glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + case glslang::EOpRayQueryGetIntersectionGeometryIndex: + case glslang::EOpRayQueryGetIntersectionPrimitiveIndex: + case glslang::EOpRayQueryGetIntersectionBarycentrics: + case glslang::EOpRayQueryGetIntersectionFrontFace: + case glslang::EOpRayQueryGetIntersectionObjectRayDirection: + case glslang::EOpRayQueryGetIntersectionObjectRayOrigin: + case glslang::EOpRayQueryGetIntersectionObjectToWorld: + case glslang::EOpRayQueryGetIntersectionWorldToObject: + if (arg == 0) + lvalue = true; + break; + + case glslang::EOpAtomicAdd: + case glslang::EOpAtomicMin: + case glslang::EOpAtomicMax: + case glslang::EOpAtomicAnd: + case glslang::EOpAtomicOr: + case glslang::EOpAtomicXor: + case glslang::EOpAtomicExchange: + case glslang::EOpAtomicCompSwap: + if (arg == 0) + lvalue = true; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpFrexp: + if (arg == 1) + lvalue = true; + break; + case glslang::EOpInterpolateAtSample: + case glslang::EOpInterpolateAtOffset: + case glslang::EOpInterpolateAtVertex: + if (arg == 0) { + lvalue = true; + + // Does it need a swizzle inversion? If so, evaluation is inverted; + // operate first on the swizzle base, then apply the swizzle. 
+ // That is, we transform + // + // interpolate(v.zy) -> interpolate(v).zy + // + if (glslangOperands[0]->getAsOperator() && + glslangOperands[0]->getAsOperator()->getOp() == glslang::EOpVectorSwizzle) + invertedType = convertGlslangToSpvType( + glslangOperands[0]->getAsBinaryNode()->getLeft()->getType()); + } + break; + case glslang::EOpAtomicLoad: + case glslang::EOpAtomicStore: + case glslang::EOpAtomicCounterAdd: + case glslang::EOpAtomicCounterSubtract: + case glslang::EOpAtomicCounterMin: + case glslang::EOpAtomicCounterMax: + case glslang::EOpAtomicCounterAnd: + case glslang::EOpAtomicCounterOr: + case glslang::EOpAtomicCounterXor: + case glslang::EOpAtomicCounterExchange: + case glslang::EOpAtomicCounterCompSwap: + if (arg == 0) + lvalue = true; + break; + case glslang::EOpAddCarry: + case glslang::EOpSubBorrow: + if (arg == 2) + lvalue = true; + break; + case glslang::EOpUMulExtended: + case glslang::EOpIMulExtended: + if (arg >= 2) + lvalue = true; + break; + case glslang::EOpCooperativeMatrixLoad: + if (arg == 0 || arg == 1) + lvalue = true; + break; + case glslang::EOpCooperativeMatrixStore: + if (arg == 1) + lvalue = true; + break; +#endif + default: + break; + } + builder.clearAccessChain(); + if (invertedType != spv::NoType && arg == 0) + glslangOperands[0]->getAsBinaryNode()->getLeft()->traverse(this); + else + glslangOperands[arg]->traverse(this); + +#ifndef GLSLANG_WEB + if (node->getOp() == glslang::EOpCooperativeMatrixLoad || + node->getOp() == glslang::EOpCooperativeMatrixStore) { + + if (arg == 1) { + // fold "element" parameter into the access chain + spv::Builder::AccessChain save = builder.getAccessChain(); + builder.clearAccessChain(); + glslangOperands[2]->traverse(this); + + spv::Id elementId = accessChainLoad(glslangOperands[2]->getAsTyped()->getType()); + + builder.setAccessChain(save); + + // Point to the first element of the array. + builder.accessChainPush(elementId, + TranslateCoherent(glslangOperands[arg]->getAsTyped()->getType()), + glslangOperands[arg]->getAsTyped()->getType().getBufferReferenceAlignment()); + + spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags; + unsigned int alignment = builder.getAccessChain().alignment; + + int memoryAccess = TranslateMemoryAccess(coherentFlags); + if (node->getOp() == glslang::EOpCooperativeMatrixLoad) + memoryAccess &= ~spv::MemoryAccessMakePointerAvailableKHRMask; + if (node->getOp() == glslang::EOpCooperativeMatrixStore) + memoryAccess &= ~spv::MemoryAccessMakePointerVisibleKHRMask; + if (builder.getStorageClass(builder.getAccessChain().base) == + spv::StorageClassPhysicalStorageBufferEXT) { + memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask); + } + + memoryAccessOperands.push_back(spv::IdImmediate(false, memoryAccess)); + + if (memoryAccess & spv::MemoryAccessAlignedMask) { + memoryAccessOperands.push_back(spv::IdImmediate(false, alignment)); + } + + if (memoryAccess & + (spv::MemoryAccessMakePointerAvailableKHRMask | spv::MemoryAccessMakePointerVisibleKHRMask)) { + memoryAccessOperands.push_back(spv::IdImmediate(true, + builder.makeUintConstant(TranslateMemoryScope(coherentFlags)))); + } + } else if (arg == 2) { + continue; + } + } +#endif + + // for l-values, pass the address, for r-values, pass the value + if (lvalue) { + if (invertedType == spv::NoType && !builder.isSpvLvalue()) { + // SPIR-V cannot represent an l-value containing a swizzle that doesn't + // reduce to a simple access chain. 
So, we need a temporary vector to + // receive the result, and must later swizzle that into the original + // l-value. + complexLvalues.push_back(builder.getAccessChain()); + temporaryLvalues.push_back(builder.createVariable( + spv::NoPrecision, spv::StorageClassFunction, + builder.accessChainGetInferredType(), "swizzleTemp")); + operands.push_back(temporaryLvalues.back()); + } else { + operands.push_back(builder.accessChainGetLValue()); + } + lvalueCoherentFlags = builder.getAccessChain().coherentFlags; + lvalueCoherentFlags |= TranslateCoherent(glslangOperands[arg]->getAsTyped()->getType()); + } else { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + glslang::TOperator glslangOp = node->getOp(); + if (arg == 1 && + (glslangOp == glslang::EOpRayQueryGetIntersectionType || + glslangOp == glslang::EOpRayQueryGetIntersectionT || + glslangOp == glslang::EOpRayQueryGetIntersectionInstanceCustomIndex || + glslangOp == glslang::EOpRayQueryGetIntersectionInstanceId || + glslangOp == glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset || + glslangOp == glslang::EOpRayQueryGetIntersectionGeometryIndex || + glslangOp == glslang::EOpRayQueryGetIntersectionPrimitiveIndex || + glslangOp == glslang::EOpRayQueryGetIntersectionBarycentrics || + glslangOp == glslang::EOpRayQueryGetIntersectionFrontFace || + glslangOp == glslang::EOpRayQueryGetIntersectionObjectRayDirection || + glslangOp == glslang::EOpRayQueryGetIntersectionObjectRayOrigin || + glslangOp == glslang::EOpRayQueryGetIntersectionObjectToWorld || + glslangOp == glslang::EOpRayQueryGetIntersectionWorldToObject + )) { + bool cond = glslangOperands[arg]->getAsConstantUnion()->getConstArray()[0].getBConst(); + operands.push_back(builder.makeIntConstant(cond ? 1 : 0)); + } + else { + operands.push_back(accessChainLoad(glslangOperands[arg]->getAsTyped()->getType())); + } + + } + } + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); +#ifndef GLSLANG_WEB + if (node->getOp() == glslang::EOpCooperativeMatrixLoad) { + std::vector idImmOps; + + idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf + idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride + idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor + idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end()); + // get the pointee type + spv::Id typeId = builder.getContainedTypeId(builder.getTypeId(operands[0])); + assert(builder.isCooperativeMatrixType(typeId)); + // do the op + spv::Id result = builder.createOp(spv::OpCooperativeMatrixLoadNV, typeId, idImmOps); + // store the result to the pointer (out param 'm') + builder.createStore(result, operands[0]); + result = 0; + } else if (node->getOp() == glslang::EOpCooperativeMatrixStore) { + std::vector idImmOps; + + idImmOps.push_back(spv::IdImmediate(true, operands[1])); // buf + idImmOps.push_back(spv::IdImmediate(true, operands[0])); // object + idImmOps.push_back(spv::IdImmediate(true, operands[2])); // stride + idImmOps.push_back(spv::IdImmediate(true, operands[3])); // colMajor + idImmOps.insert(idImmOps.end(), memoryAccessOperands.begin(), memoryAccessOperands.end()); + + builder.createNoResultOp(spv::OpCooperativeMatrixStoreNV, idImmOps); + result = 0; + } else +#endif + if (atomic) { + // Handle all atomics + result = createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType(), + lvalueCoherentFlags); + } else if (node->getOp() == glslang::EOpDebugPrintf) { + if 
(!nonSemanticDebugPrintf) { + nonSemanticDebugPrintf = builder.import("NonSemantic.DebugPrintf"); + } + result = builder.createBuiltinCall(builder.makeVoidType(), nonSemanticDebugPrintf, spv::NonSemanticDebugPrintfDebugPrintf, operands); + builder.addExtension(spv::E_SPV_KHR_non_semantic_info); + } else { + // Pass through to generic operations. + switch (glslangOperands.size()) { + case 0: + result = createNoArgOperation(node->getOp(), precision, resultType()); + break; + case 1: + { + OpDecorations decorations = { precision, + TranslateNoContractionDecoration(node->getType().getQualifier()), + TranslateNonUniformDecoration(node->getType().getQualifier()) }; + result = createUnaryOperation( + node->getOp(), decorations, + resultType(), operands.front(), + glslangOperands[0]->getAsTyped()->getBasicType(), lvalueCoherentFlags); + } + break; + default: + result = createMiscOperation(node->getOp(), precision, resultType(), operands, node->getBasicType()); + break; + } + + if (invertedType != spv::NoResult) + result = createInvertedSwizzle(precision, *glslangOperands[0]->getAsBinaryNode(), result); + + for (unsigned int i = 0; i < temporaryLvalues.size(); ++i) { + builder.setAccessChain(complexLvalues[i]); + builder.accessChainStore(builder.createLoad(temporaryLvalues[i], spv::NoPrecision)); + } + } + + if (noReturnValue) + return false; + + if (! result) { + logger->missingFunctionality("unknown glslang aggregate"); + return true; // pick up a child as a placeholder operand + } else { + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + return false; + } +} + +// This path handles both if-then-else and ?: +// The if-then-else has a node type of void, while +// ?: has either a void or a non-void node type +// +// Leaving the result, when not void: +// GLSL only has r-values as the result of a :?, but +// if we have an l-value, that can be more efficient if it will +// become the base of a complex r-value expression, because the +// next layer copies r-values into memory to use the access-chain mechanism +bool TGlslangToSpvTraverser::visitSelection(glslang::TVisit /* visit */, glslang::TIntermSelection* node) +{ + // see if OpSelect can handle it + const auto isOpSelectable = [&]() { + if (node->getBasicType() == glslang::EbtVoid) + return false; + // OpSelect can do all other types starting with SPV 1.4 + if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_4) { + // pre-1.4, only scalars and vectors can be handled + if ((!node->getType().isScalar() && !node->getType().isVector())) + return false; + } + return true; + }; + + // See if it simple and safe, or required, to execute both sides. + // Crucially, side effects must be either semantically required or avoided, + // and there are performance trade-offs. + // Return true if required or a good idea (and safe) to execute both sides, + // false otherwise. + const auto bothSidesPolicy = [&]() -> bool { + // do we have both sides? + if (node->getTrueBlock() == nullptr || + node->getFalseBlock() == nullptr) + return false; + + // required? (unless we write additional code to look for side effects + // and make performance trade-offs if none are present) + if (!node->getShortCircuit()) + return true; + + // if not required to execute both, decide based on performance/practicality... + + if (!isOpSelectable()) + return false; + + assert(node->getType() == node->getTrueBlock() ->getAsTyped()->getType() && + node->getType() == node->getFalseBlock()->getAsTyped()->getType()); + + // return true if a single operand to ? 
: is okay for OpSelect + const auto operandOkay = [](glslang::TIntermTyped* node) { + return node->getAsSymbolNode() || node->getType().getQualifier().isConstant(); + }; + + return operandOkay(node->getTrueBlock() ->getAsTyped()) && + operandOkay(node->getFalseBlock()->getAsTyped()); + }; + + spv::Id result = spv::NoResult; // upcoming result selecting between trueValue and falseValue + // emit the condition before doing anything with selection + node->getCondition()->traverse(this); + spv::Id condition = accessChainLoad(node->getCondition()->getType()); + + // Find a way of executing both sides and selecting the right result. + const auto executeBothSides = [&]() -> void { + // execute both sides + node->getTrueBlock()->traverse(this); + spv::Id trueValue = accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()); + node->getFalseBlock()->traverse(this); + spv::Id falseValue = accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()); + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + + // done if void + if (node->getBasicType() == glslang::EbtVoid) + return; + + // emit code to select between trueValue and falseValue + + // see if OpSelect can handle it + if (isOpSelectable()) { + // Emit OpSelect for this selection. + + // smear condition to vector, if necessary (AST is always scalar) + // Before 1.4, smear like for mix(), starting with 1.4, keep it scalar + if (glslangIntermediate->getSpv().spv < glslang::EShTargetSpv_1_4 && builder.isVector(trueValue)) { + condition = builder.smearScalar(spv::NoPrecision, condition, + builder.makeVectorType(builder.makeBoolType(), + builder.getNumComponents(trueValue))); + } + + // OpSelect + result = builder.createTriOp(spv::OpSelect, + convertGlslangToSpvType(node->getType()), condition, + trueValue, falseValue); + + builder.clearAccessChain(); + builder.setAccessChainRValue(result); + } else { + // We need control flow to select the result. + // TODO: Once SPIR-V OpSelect allows arbitrary types, eliminate this path. + result = builder.createVariable(TranslatePrecisionDecoration(node->getType()), + spv::StorageClassFunction, convertGlslangToSpvType(node->getType())); + + // Selection control: + const spv::SelectionControlMask control = TranslateSelectionControl(*node); + + // make an "if" based on the value created by the condition + spv::Builder::If ifBuilder(condition, control, builder); + + // emit the "then" statement + builder.createStore(trueValue, result); + ifBuilder.makeBeginElse(); + // emit the "else" statement + builder.createStore(falseValue, result); + + // finish off the control flow + ifBuilder.makeEndIf(); + + builder.clearAccessChain(); + builder.setAccessChainLValue(result); + } + }; + + // Execute the one side needed, as per the condition + const auto executeOneSide = [&]() { + // Always emit control flow. 
+        if (node->getBasicType() != glslang::EbtVoid) {
+            result = builder.createVariable(TranslatePrecisionDecoration(node->getType()), spv::StorageClassFunction,
+                convertGlslangToSpvType(node->getType()));
+        }
+
+        // Selection control:
+        const spv::SelectionControlMask control = TranslateSelectionControl(*node);
+
+        // make an "if" based on the value created by the condition
+        spv::Builder::If ifBuilder(condition, control, builder);
+
+        // emit the "then" statement
+        if (node->getTrueBlock() != nullptr) {
+            node->getTrueBlock()->traverse(this);
+            if (result != spv::NoResult)
+                builder.createStore(accessChainLoad(node->getTrueBlock()->getAsTyped()->getType()), result);
+        }
+
+        if (node->getFalseBlock() != nullptr) {
+            ifBuilder.makeBeginElse();
+            // emit the "else" statement
+            node->getFalseBlock()->traverse(this);
+            if (result != spv::NoResult)
+                builder.createStore(accessChainLoad(node->getFalseBlock()->getAsTyped()->getType()), result);
+        }
+
+        // finish off the control flow
+        ifBuilder.makeEndIf();
+
+        if (result != spv::NoResult) {
+            builder.clearAccessChain();
+            builder.setAccessChainLValue(result);
+        }
+    };
+
+    // Try for OpSelect (or a requirement to execute both sides)
+    if (bothSidesPolicy()) {
+        SpecConstantOpModeGuard spec_constant_op_mode_setter(&builder);
+        if (node->getType().getQualifier().isSpecConstant())
+            spec_constant_op_mode_setter.turnOnSpecConstantOpMode();
+        executeBothSides();
+    } else
+        executeOneSide();
+
+    return false;
+}
+
+bool TGlslangToSpvTraverser::visitSwitch(glslang::TVisit /* visit */, glslang::TIntermSwitch* node)
+{
+    // emit and get the condition before doing anything with switch
+    node->getCondition()->traverse(this);
+    spv::Id selector = accessChainLoad(node->getCondition()->getAsTyped()->getType());
+
+    // Selection control:
+    const spv::SelectionControlMask control = TranslateSwitchControl(*node);
+
+    // browse the children to sort out code segments
+    int defaultSegment = -1;
+    std::vector<TIntermNode*> codeSegments;
+    glslang::TIntermSequence& sequence = node->getBody()->getSequence();
+    std::vector<int> caseValues;
+    std::vector<int> valueIndexToSegment(sequence.size()); // note: probably not all are used, it is an overestimate
+    for (glslang::TIntermSequence::iterator c = sequence.begin(); c != sequence.end(); ++c) {
+        TIntermNode* child = *c;
+        if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpDefault)
+            defaultSegment = (int)codeSegments.size();
+        else if (child->getAsBranchNode() && child->getAsBranchNode()->getFlowOp() == glslang::EOpCase) {
+            valueIndexToSegment[caseValues.size()] = (int)codeSegments.size();
+            caseValues.push_back(child->getAsBranchNode()->getExpression()->getAsConstantUnion()
+                ->getConstArray()[0].getIConst());
+        } else
+            codeSegments.push_back(child);
+    }
+
+    // handle the case where the last code segment is missing, due to no code
+    // statements between the last case and the end of the switch statement
+    if ((caseValues.size() && (int)codeSegments.size() == valueIndexToSegment[caseValues.size() - 1]) ||
+        (int)codeSegments.size() == defaultSegment)
+        codeSegments.push_back(nullptr);
+
+    // make the switch statement
+    std::vector<spv::Block*> segmentBlocks; // returned, as the blocks allocated in the call
+    builder.makeSwitch(selector, control, (int)codeSegments.size(), caseValues, valueIndexToSegment, defaultSegment,
+                       segmentBlocks);
+
+    // emit all the code in the segments
+    breakForLoop.push(false);
+    for (unsigned int s = 0; s < codeSegments.size(); ++s) {
+        builder.nextSwitchSegment(segmentBlocks, s);
+        if
(codeSegments[s]) + codeSegments[s]->traverse(this); + else + builder.addSwitchBreak(); + } + breakForLoop.pop(); + + builder.endSwitch(segmentBlocks); + + return false; +} + +void TGlslangToSpvTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node) +{ + int nextConst = 0; + spv::Id constant = createSpvConstantFromConstUnionArray(node->getType(), node->getConstArray(), nextConst, false); + + builder.clearAccessChain(); + builder.setAccessChainRValue(constant); +} + +bool TGlslangToSpvTraverser::visitLoop(glslang::TVisit /* visit */, glslang::TIntermLoop* node) +{ + auto blocks = builder.makeNewLoop(); + builder.createBranch(&blocks.head); + + // Loop control: + std::vector operands; + const spv::LoopControlMask control = TranslateLoopControl(*node, operands); + + // Spec requires back edges to target header blocks, and every header block + // must dominate its merge block. Make a header block first to ensure these + // conditions are met. By definition, it will contain OpLoopMerge, followed + // by a block-ending branch. But we don't want to put any other body/test + // instructions in it, since the body/test may have arbitrary instructions, + // including merges of its own. + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + builder.setBuildPoint(&blocks.head); + builder.createLoopMerge(&blocks.merge, &blocks.continue_target, control, operands); + if (node->testFirst() && node->getTest()) { + spv::Block& test = builder.makeNewBlock(); + builder.createBranch(&test); + + builder.setBuildPoint(&test); + node->getTest()->traverse(this); + spv::Id condition = accessChainLoad(node->getTest()->getType()); + builder.createConditionalBranch(condition, &blocks.body, &blocks.merge); + + builder.setBuildPoint(&blocks.body); + breakForLoop.push(true); + if (node->getBody()) + node->getBody()->traverse(this); + builder.createBranch(&blocks.continue_target); + breakForLoop.pop(); + + builder.setBuildPoint(&blocks.continue_target); + if (node->getTerminal()) + node->getTerminal()->traverse(this); + builder.createBranch(&blocks.head); + } else { + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + builder.createBranch(&blocks.body); + + breakForLoop.push(true); + builder.setBuildPoint(&blocks.body); + if (node->getBody()) + node->getBody()->traverse(this); + builder.createBranch(&blocks.continue_target); + breakForLoop.pop(); + + builder.setBuildPoint(&blocks.continue_target); + if (node->getTerminal()) + node->getTerminal()->traverse(this); + if (node->getTest()) { + node->getTest()->traverse(this); + spv::Id condition = + accessChainLoad(node->getTest()->getType()); + builder.createConditionalBranch(condition, &blocks.head, &blocks.merge); + } else { + // TODO: unless there was a break/return/discard instruction + // somewhere in the body, this is an infinite loop, so we should + // issue a warning. 
+ builder.createBranch(&blocks.head); + } + } + builder.setBuildPoint(&blocks.merge); + builder.closeLoop(); + return false; +} + +bool TGlslangToSpvTraverser::visitBranch(glslang::TVisit /* visit */, glslang::TIntermBranch* node) +{ + if (node->getExpression()) + node->getExpression()->traverse(this); + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + + switch (node->getFlowOp()) { + case glslang::EOpKill: + builder.makeDiscard(); + break; + case glslang::EOpBreak: + if (breakForLoop.top()) + builder.createLoopExit(); + else + builder.addSwitchBreak(); + break; + case glslang::EOpContinue: + builder.createLoopContinue(); + break; + case glslang::EOpReturn: + if (node->getExpression() != nullptr) { + const glslang::TType& glslangReturnType = node->getExpression()->getType(); + spv::Id returnId = accessChainLoad(glslangReturnType); + if (builder.getTypeId(returnId) != currentFunction->getReturnType() || + TranslatePrecisionDecoration(glslangReturnType) != currentFunction->getReturnPrecision()) { + builder.clearAccessChain(); + spv::Id copyId = builder.createVariable(currentFunction->getReturnPrecision(), + spv::StorageClassFunction, currentFunction->getReturnType()); + builder.setAccessChainLValue(copyId); + multiTypeStore(glslangReturnType, returnId); + returnId = builder.createLoad(copyId, currentFunction->getReturnPrecision()); + } + builder.makeReturn(false, returnId); + } else + builder.makeReturn(false); + + builder.clearAccessChain(); + break; + +#ifndef GLSLANG_WEB + case glslang::EOpDemote: + builder.createNoResultOp(spv::OpDemoteToHelperInvocationEXT); + builder.addExtension(spv::E_SPV_EXT_demote_to_helper_invocation); + builder.addCapability(spv::CapabilityDemoteToHelperInvocationEXT); + break; +#endif + + default: + assert(0); + break; + } + + return false; +} + +spv::Id TGlslangToSpvTraverser::createSpvVariable(const glslang::TIntermSymbol* node, spv::Id forcedType) +{ + // First, steer off constants, which are not SPIR-V variables, but + // can still have a mapping to a SPIR-V Id. + // This includes specialization constants. + if (node->getQualifier().isConstant()) { + spv::Id result = createSpvConstant(*node); + if (result != spv::NoResult) + return result; + } + + // Now, handle actual variables + spv::StorageClass storageClass = TranslateStorageClass(node->getType()); + spv::Id spvType = forcedType == spv::NoType ? 
convertGlslangToSpvType(node->getType()) + : forcedType; + + const bool contains16BitType = node->getType().contains16BitFloat() || + node->getType().contains16BitInt(); + if (contains16BitType) { + switch (storageClass) { + case spv::StorageClassInput: + case spv::StorageClassOutput: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + builder.addCapability(spv::CapabilityStorageInputOutput16); + break; + case spv::StorageClassUniform: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + if (node->getType().getQualifier().storage == glslang::EvqBuffer) + builder.addCapability(spv::CapabilityStorageUniformBufferBlock16); + else + builder.addCapability(spv::CapabilityStorageUniform16); + break; +#ifndef GLSLANG_WEB + case spv::StorageClassPushConstant: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + builder.addCapability(spv::CapabilityStoragePushConstant16); + break; + case spv::StorageClassStorageBuffer: + case spv::StorageClassPhysicalStorageBufferEXT: + builder.addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + builder.addCapability(spv::CapabilityStorageUniformBufferBlock16); + break; +#endif + default: + if (node->getType().contains16BitFloat()) + builder.addCapability(spv::CapabilityFloat16); + if (node->getType().contains16BitInt()) + builder.addCapability(spv::CapabilityInt16); + break; + } + } + + if (node->getType().contains8BitInt()) { + if (storageClass == spv::StorageClassPushConstant) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + builder.addCapability(spv::CapabilityStoragePushConstant8); + } else if (storageClass == spv::StorageClassUniform) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + builder.addCapability(spv::CapabilityUniformAndStorageBuffer8BitAccess); + } else if (storageClass == spv::StorageClassStorageBuffer) { + builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + builder.addCapability(spv::CapabilityStorageBuffer8BitAccess); + } else { + builder.addCapability(spv::CapabilityInt8); + } + } + + const char* name = node->getName().c_str(); + if (glslang::IsAnonymous(name)) + name = ""; + + spv::Id initializer = spv::NoResult; + + if (node->getType().getQualifier().storage == glslang::EvqUniform && + !node->getConstArray().empty()) { + int nextConst = 0; + initializer = createSpvConstantFromConstUnionArray(node->getType(), + node->getConstArray(), + nextConst, + false /* specConst */); + } + + return builder.createVariable(spv::NoPrecision, storageClass, spvType, name, initializer); +} + +// Return type Id of the sampled type. +spv::Id TGlslangToSpvTraverser::getSampledType(const glslang::TSampler& sampler) +{ + switch (sampler.type) { + case glslang::EbtInt: return builder.makeIntType(32); + case glslang::EbtUint: return builder.makeUintType(32); + case glslang::EbtFloat: return builder.makeFloatType(32); +#ifndef GLSLANG_WEB + case glslang::EbtFloat16: + builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float_fetch); + builder.addCapability(spv::CapabilityFloat16ImageAMD); + return builder.makeFloatType(16); +#endif + default: + assert(0); + return builder.makeFloatType(32); + } +} + +// If node is a swizzle operation, return the type that should be used if +// the swizzle base is first consumed by another operation, before the swizzle +// is applied. 
+spv::Id TGlslangToSpvTraverser::getInvertedSwizzleType(const glslang::TIntermTyped& node)
+{
+    if (node.getAsOperator() &&
+        node.getAsOperator()->getOp() == glslang::EOpVectorSwizzle)
+        return convertGlslangToSpvType(node.getAsBinaryNode()->getLeft()->getType());
+    else
+        return spv::NoType;
+}
+
+// When inverting a swizzle with a parent op, this function
+// will apply the swizzle operation to a completed parent operation.
+spv::Id TGlslangToSpvTraverser::createInvertedSwizzle(spv::Decoration precision, const glslang::TIntermTyped& node,
+    spv::Id parentResult)
+{
+    std::vector<unsigned> swizzle;
+    convertSwizzle(*node.getAsBinaryNode()->getRight()->getAsAggregate(), swizzle);
+    return builder.createRvalueSwizzle(precision, convertGlslangToSpvType(node.getType()), parentResult, swizzle);
+}
+
+// Convert a glslang AST swizzle node to a swizzle vector for building SPIR-V.
+void TGlslangToSpvTraverser::convertSwizzle(const glslang::TIntermAggregate& node, std::vector<unsigned>& swizzle)
+{
+    const glslang::TIntermSequence& swizzleSequence = node.getSequence();
+    for (int i = 0; i < (int)swizzleSequence.size(); ++i)
+        swizzle.push_back(swizzleSequence[i]->getAsConstantUnion()->getConstArray()[0].getIConst());
+}
+
+// Convert from a glslang type to an SPV type, by calling into a
+// recursive version of this function. This establishes the inherited
+// layout state rooted from the top-level type.
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type, bool forwardReferenceOnly)
+{
+    return convertGlslangToSpvType(type, getExplicitLayout(type), type.getQualifier(), false, forwardReferenceOnly);
+}
+
+// Do full recursive conversion of an arbitrary glslang type to a SPIR-V Id.
+// explicitLayout can be kept the same throughout the hierarchical recursive walk.
+// Mutually recursive with convertGlslangStructToSpvType().
+spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& type,
+    glslang::TLayoutPacking explicitLayout, const glslang::TQualifier& qualifier,
+    bool lastBufferBlockMember, bool forwardReferenceOnly)
+{
+    spv::Id spvType = spv::NoResult;
+
+    switch (type.getBasicType()) {
+    case glslang::EbtVoid:
+        spvType = builder.makeVoidType();
+        assert (! type.isArray());
+        break;
+    case glslang::EbtBool:
+        // "transparent" bool doesn't exist in SPIR-V. The GLSL convention is
+        // a 32-bit int where non-0 means true.
+ if (explicitLayout != glslang::ElpNone) + spvType = builder.makeUintType(32); + else + spvType = builder.makeBoolType(); + break; + case glslang::EbtInt: + spvType = builder.makeIntType(32); + break; + case glslang::EbtUint: + spvType = builder.makeUintType(32); + break; + case glslang::EbtFloat: + spvType = builder.makeFloatType(32); + break; +#ifndef GLSLANG_WEB + case glslang::EbtDouble: + spvType = builder.makeFloatType(64); + break; + case glslang::EbtFloat16: + spvType = builder.makeFloatType(16); + break; + case glslang::EbtInt8: + spvType = builder.makeIntType(8); + break; + case glslang::EbtUint8: + spvType = builder.makeUintType(8); + break; + case glslang::EbtInt16: + spvType = builder.makeIntType(16); + break; + case glslang::EbtUint16: + spvType = builder.makeUintType(16); + break; + case glslang::EbtInt64: + spvType = builder.makeIntType(64); + break; + case glslang::EbtUint64: + spvType = builder.makeUintType(64); + break; + case glslang::EbtAtomicUint: + builder.addCapability(spv::CapabilityAtomicStorage); + spvType = builder.makeUintType(32); + break; + case glslang::EbtAccStruct: + spvType = builder.makeAccelerationStructureType(); + break; + case glslang::EbtRayQuery: + spvType = builder.makeRayQueryType(); + break; + case glslang::EbtReference: + { + // Make the forward pointer, then recurse to convert the structure type, then + // patch up the forward pointer with a real pointer type. + if (forwardPointers.find(type.getReferentType()) == forwardPointers.end()) { + spv::Id forwardId = builder.makeForwardPointer(spv::StorageClassPhysicalStorageBufferEXT); + forwardPointers[type.getReferentType()] = forwardId; + } + spvType = forwardPointers[type.getReferentType()]; + if (!forwardReferenceOnly) { + spv::Id referentType = convertGlslangToSpvType(*type.getReferentType()); + builder.makePointerFromForwardPointer(spv::StorageClassPhysicalStorageBufferEXT, + forwardPointers[type.getReferentType()], + referentType); + } + } + break; +#endif + case glslang::EbtSampler: + { + const glslang::TSampler& sampler = type.getSampler(); + if (sampler.isPureSampler()) { + spvType = builder.makeSamplerType(); + } else { + // an image is present, make its type + spvType = builder.makeImageType(getSampledType(sampler), TranslateDimensionality(sampler), + sampler.isShadow(), sampler.isArrayed(), sampler.isMultiSample(), + sampler.isImageClass() ? 2 : 1, TranslateImageFormat(type)); + if (sampler.isCombined()) { + // already has both image and sampler, make the combined type + spvType = builder.makeSampledImageType(spvType); + } + } + } + break; + case glslang::EbtStruct: + case glslang::EbtBlock: + { + // If we've seen this struct type, return it + const glslang::TTypeList* glslangMembers = type.getStruct(); + + // Try to share structs for different layouts, but not yet for other + // kinds of qualification (primarily not yet including interpolant qualification). + if (! HasNonLayoutQualifiers(type, qualifier)) + spvType = structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers]; + if (spvType != spv::NoResult) + break; + + // else, we haven't seen it... 
+ if (type.getBasicType() == glslang::EbtBlock) + memberRemapper[glslangTypeToIdMap[glslangMembers]].resize(glslangMembers->size()); + spvType = convertGlslangStructToSpvType(type, glslangMembers, explicitLayout, qualifier); + } + break; + case glslang::EbtString: + // no type used for OpString + return 0; + default: + assert(0); + break; + } + + if (type.isMatrix()) + spvType = builder.makeMatrixType(spvType, type.getMatrixCols(), type.getMatrixRows()); + else { + // If this variable has a vector element count greater than 1, create a SPIR-V vector + if (type.getVectorSize() > 1) + spvType = builder.makeVectorType(spvType, type.getVectorSize()); + } + + if (type.isCoopMat()) { + builder.addCapability(spv::CapabilityCooperativeMatrixNV); + builder.addExtension(spv::E_SPV_NV_cooperative_matrix); + if (type.getBasicType() == glslang::EbtFloat16) + builder.addCapability(spv::CapabilityFloat16); + if (type.getBasicType() == glslang::EbtUint8 || + type.getBasicType() == glslang::EbtInt8) { + builder.addCapability(spv::CapabilityInt8); + } + + spv::Id scope = makeArraySizeId(*type.getTypeParameters(), 1); + spv::Id rows = makeArraySizeId(*type.getTypeParameters(), 2); + spv::Id cols = makeArraySizeId(*type.getTypeParameters(), 3); + + spvType = builder.makeCooperativeMatrixType(spvType, scope, rows, cols); + } + + if (type.isArray()) { + int stride = 0; // keep this 0 unless doing an explicit layout; 0 will mean no decoration, no stride + + // Do all but the outer dimension + if (type.getArraySizes()->getNumDims() > 1) { + // We need to decorate array strides for types needing explicit layout, except blocks. + if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock) { + // Use a dummy glslang type for querying internal strides of + // arrays of arrays, but using just a one-dimensional array. + glslang::TType simpleArrayType(type, 0); // deference type of the array + while (simpleArrayType.getArraySizes()->getNumDims() > 1) + simpleArrayType.getArraySizes()->dereference(); + + // Will compute the higher-order strides here, rather than making a whole + // pile of types and doing repetitive recursion on their contents. + stride = getArrayStride(simpleArrayType, explicitLayout, qualifier.layoutMatrix); + } + + // make the arrays + for (int dim = type.getArraySizes()->getNumDims() - 1; dim > 0; --dim) { + spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), dim), stride); + if (stride > 0) + builder.addDecoration(spvType, spv::DecorationArrayStride, stride); + stride *= type.getArraySizes()->getDimSize(dim); + } + } else { + // single-dimensional array, and don't yet have stride + + // We need to decorate array strides for types needing explicit layout, except blocks. + if (explicitLayout != glslang::ElpNone && type.getBasicType() != glslang::EbtBlock) + stride = getArrayStride(type, explicitLayout, qualifier.layoutMatrix); + } + + // Do the outer dimension, which might not be known for a runtime-sized array. 
+ // (Unsized arrays that survive through linking will be runtime-sized arrays) + if (type.isSizedArray()) + spvType = builder.makeArrayType(spvType, makeArraySizeId(*type.getArraySizes(), 0), stride); + else { +#ifndef GLSLANG_WEB + if (!lastBufferBlockMember) { + builder.addIncorporatedExtension("SPV_EXT_descriptor_indexing", spv::Spv_1_5); + builder.addCapability(spv::CapabilityRuntimeDescriptorArrayEXT); + } +#endif + spvType = builder.makeRuntimeArray(spvType); + } + if (stride > 0) + builder.addDecoration(spvType, spv::DecorationArrayStride, stride); + } + + return spvType; +} + +// TODO: this functionality should exist at a higher level, in creating the AST +// +// Identify interface members that don't have their required extension turned on. +// +bool TGlslangToSpvTraverser::filterMember(const glslang::TType& member) +{ +#ifndef GLSLANG_WEB + auto& extensions = glslangIntermediate->getRequestedExtensions(); + + if (member.getFieldName() == "gl_SecondaryViewportMaskNV" && + extensions.find("GL_NV_stereo_view_rendering") == extensions.end()) + return true; + if (member.getFieldName() == "gl_SecondaryPositionNV" && + extensions.find("GL_NV_stereo_view_rendering") == extensions.end()) + return true; + + if (glslangIntermediate->getStage() != EShLangMeshNV) { + if (member.getFieldName() == "gl_ViewportMask" && + extensions.find("GL_NV_viewport_array2") == extensions.end()) + return true; + if (member.getFieldName() == "gl_PositionPerViewNV" && + extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end()) + return true; + if (member.getFieldName() == "gl_ViewportMaskPerViewNV" && + extensions.find("GL_NVX_multiview_per_view_attributes") == extensions.end()) + return true; + } +#endif + + return false; +}; + +// Do full recursive conversion of a glslang structure (or block) type to a SPIR-V Id. +// explicitLayout can be kept the same throughout the hierarchical recursive walk. +// Mutually recursive with convertGlslangToSpvType(). +spv::Id TGlslangToSpvTraverser::convertGlslangStructToSpvType(const glslang::TType& type, + const glslang::TTypeList* glslangMembers, + glslang::TLayoutPacking explicitLayout, + const glslang::TQualifier& qualifier) +{ + // Create a vector of struct types for SPIR-V to consume + std::vector spvMembers; + int memberDelta = 0; // how much the member's index changes from glslang to SPIR-V, normally 0, + // except sometimes for blocks + std::vector > deferredForwardPointers; + for (int i = 0; i < (int)glslangMembers->size(); i++) { + glslang::TType& glslangMember = *(*glslangMembers)[i].type; + if (glslangMember.hiddenMember()) { + ++memberDelta; + if (type.getBasicType() == glslang::EbtBlock) + memberRemapper[glslangTypeToIdMap[glslangMembers]][i] = -1; + } else { + if (type.getBasicType() == glslang::EbtBlock) { + if (filterMember(glslangMember)) { + memberDelta++; + memberRemapper[glslangTypeToIdMap[glslangMembers]][i] = -1; + continue; + } + memberRemapper[glslangTypeToIdMap[glslangMembers]][i] = i - memberDelta; + } + // modify just this child's view of the qualifier + glslang::TQualifier memberQualifier = glslangMember.getQualifier(); + InheritQualifiers(memberQualifier, qualifier); + + // manually inherit location + if (! 
memberQualifier.hasLocation() && qualifier.hasLocation()) + memberQualifier.layoutLocation = qualifier.layoutLocation; + + // recurse + bool lastBufferBlockMember = qualifier.storage == glslang::EvqBuffer && + i == (int)glslangMembers->size() - 1; + + // Make forward pointers for any pointer members, and create a list of members to + // convert to spirv types after creating the struct. + if (glslangMember.isReference()) { + if (forwardPointers.find(glslangMember.getReferentType()) == forwardPointers.end()) { + deferredForwardPointers.push_back(std::make_pair(&glslangMember, memberQualifier)); + } + spvMembers.push_back( + convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember, + true)); + } else { + spvMembers.push_back( + convertGlslangToSpvType(glslangMember, explicitLayout, memberQualifier, lastBufferBlockMember, + false)); + } + } + } + + // Make the SPIR-V type + spv::Id spvType = builder.makeStructType(spvMembers, type.getTypeName().c_str()); + if (! HasNonLayoutQualifiers(type, qualifier)) + structMap[explicitLayout][qualifier.layoutMatrix][glslangMembers] = spvType; + + // Decorate it + decorateStructType(type, glslangMembers, explicitLayout, qualifier, spvType); + + for (int i = 0; i < (int)deferredForwardPointers.size(); ++i) { + auto it = deferredForwardPointers[i]; + convertGlslangToSpvType(*it.first, explicitLayout, it.second, false); + } + + return spvType; +} + +void TGlslangToSpvTraverser::decorateStructType(const glslang::TType& type, + const glslang::TTypeList* glslangMembers, + glslang::TLayoutPacking explicitLayout, + const glslang::TQualifier& qualifier, + spv::Id spvType) +{ + // Name and decorate the non-hidden members + int offset = -1; + int locationOffset = 0; // for use within the members of this struct + for (int i = 0; i < (int)glslangMembers->size(); i++) { + glslang::TType& glslangMember = *(*glslangMembers)[i].type; + int member = i; + if (type.getBasicType() == glslang::EbtBlock) { + member = memberRemapper[glslangTypeToIdMap[glslangMembers]][i]; + if (filterMember(glslangMember)) + continue; + } + + // modify just this child's view of the qualifier + glslang::TQualifier memberQualifier = glslangMember.getQualifier(); + InheritQualifiers(memberQualifier, qualifier); + + // using -1 above to indicate a hidden member + if (member < 0) + continue; + + builder.addMemberName(spvType, member, glslangMember.getFieldName().c_str()); + builder.addMemberDecoration(spvType, member, + TranslateLayoutDecoration(glslangMember, memberQualifier.layoutMatrix)); + builder.addMemberDecoration(spvType, member, TranslatePrecisionDecoration(glslangMember)); + // Add interpolation and auxiliary storage decorations only to + // top-level members of Input and Output storage classes + if (type.getQualifier().storage == glslang::EvqVaryingIn || + type.getQualifier().storage == glslang::EvqVaryingOut) { + if (type.getBasicType() == glslang::EbtBlock || + glslangIntermediate->getSource() == glslang::EShSourceHlsl) { + builder.addMemberDecoration(spvType, member, TranslateInterpolationDecoration(memberQualifier)); + builder.addMemberDecoration(spvType, member, TranslateAuxiliaryStorageDecoration(memberQualifier)); +#ifndef GLSLANG_WEB + addMeshNVDecoration(spvType, member, memberQualifier); +#endif + } + } + builder.addMemberDecoration(spvType, member, TranslateInvariantDecoration(memberQualifier)); + +#ifndef GLSLANG_WEB + if (type.getBasicType() == glslang::EbtBlock && + qualifier.storage == glslang::EvqBuffer) { + // Add memory decorations only 
to top-level members of shader storage block + std::vector memory; + TranslateMemoryDecoration(memberQualifier, memory, glslangIntermediate->usingVulkanMemoryModel()); + for (unsigned int i = 0; i < memory.size(); ++i) + builder.addMemberDecoration(spvType, member, memory[i]); + } + +#endif + + // Location assignment was already completed correctly by the front end, + // just track whether a member needs to be decorated. + // Ignore member locations if the container is an array, as that's + // ill-specified and decisions have been made to not allow this. + if (! type.isArray() && memberQualifier.hasLocation()) + builder.addMemberDecoration(spvType, member, spv::DecorationLocation, memberQualifier.layoutLocation); + + if (qualifier.hasLocation()) // track for upcoming inheritance + locationOffset += glslangIntermediate->computeTypeLocationSize( + glslangMember, glslangIntermediate->getStage()); + + // component, XFB, others + if (glslangMember.getQualifier().hasComponent()) + builder.addMemberDecoration(spvType, member, spv::DecorationComponent, + glslangMember.getQualifier().layoutComponent); + if (glslangMember.getQualifier().hasXfbOffset()) + builder.addMemberDecoration(spvType, member, spv::DecorationOffset, + glslangMember.getQualifier().layoutXfbOffset); + else if (explicitLayout != glslang::ElpNone) { + // figure out what to do with offset, which is accumulating + int nextOffset; + updateMemberOffset(type, glslangMember, offset, nextOffset, explicitLayout, memberQualifier.layoutMatrix); + if (offset >= 0) + builder.addMemberDecoration(spvType, member, spv::DecorationOffset, offset); + offset = nextOffset; + } + + if (glslangMember.isMatrix() && explicitLayout != glslang::ElpNone) + builder.addMemberDecoration(spvType, member, spv::DecorationMatrixStride, + getMatrixStride(glslangMember, explicitLayout, memberQualifier.layoutMatrix)); + + // built-in variable decorations + spv::BuiltIn builtIn = TranslateBuiltInDecoration(glslangMember.getQualifier().builtIn, true); + if (builtIn != spv::BuiltInMax) + builder.addMemberDecoration(spvType, member, spv::DecorationBuiltIn, (int)builtIn); + +#ifndef GLSLANG_WEB + // nonuniform + builder.addMemberDecoration(spvType, member, TranslateNonUniformDecoration(glslangMember.getQualifier())); + + if (glslangIntermediate->getHlslFunctionality1() && memberQualifier.semanticName != nullptr) { + builder.addExtension("SPV_GOOGLE_hlsl_functionality1"); + builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE, + memberQualifier.semanticName); + } + + if (builtIn == spv::BuiltInLayer) { + // SPV_NV_viewport_array2 extension + if (glslangMember.getQualifier().layoutViewportRelative){ + builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationViewportRelativeNV); + builder.addCapability(spv::CapabilityShaderViewportMaskNV); + builder.addExtension(spv::E_SPV_NV_viewport_array2); + } + if (glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset != -2048){ + builder.addMemberDecoration(spvType, member, + (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV, + glslangMember.getQualifier().layoutSecondaryViewportRelativeOffset); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + } + } + if (glslangMember.getQualifier().layoutPassthrough) { + builder.addMemberDecoration(spvType, member, (spv::Decoration)spv::DecorationPassthroughNV); + builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV); + 
builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough); + } +#endif + } + + // Decorate the structure + builder.addDecoration(spvType, TranslateLayoutDecoration(type, qualifier.layoutMatrix)); + builder.addDecoration(spvType, TranslateBlockDecoration(type, glslangIntermediate->usingStorageBuffer())); +} + +// Turn the expression forming the array size into an id. +// This is not quite trivial, because of specialization constants. +// Sometimes, a raw constant is turned into an Id, and sometimes +// a specialization constant expression is. +spv::Id TGlslangToSpvTraverser::makeArraySizeId(const glslang::TArraySizes& arraySizes, int dim) +{ + // First, see if this is sized with a node, meaning a specialization constant: + glslang::TIntermTyped* specNode = arraySizes.getDimNode(dim); + if (specNode != nullptr) { + builder.clearAccessChain(); + specNode->traverse(this); + return accessChainLoad(specNode->getAsTyped()->getType()); + } + + // Otherwise, need a compile-time (front end) size, get it: + int size = arraySizes.getDimSize(dim); + assert(size > 0); + return builder.makeUintConstant(size); +} + +// Wrap the builder's accessChainLoad to: +// - localize handling of RelaxedPrecision +// - use the SPIR-V inferred type instead of another conversion of the glslang type +// (avoids unnecessary work and possible type punning for structures) +// - do conversion of concrete to abstract type +spv::Id TGlslangToSpvTraverser::accessChainLoad(const glslang::TType& type) +{ + spv::Id nominalTypeId = builder.accessChainGetInferredType(); + + spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags; + coherentFlags |= TranslateCoherent(type); + + unsigned int alignment = builder.getAccessChain().alignment; + alignment |= type.getBufferReferenceAlignment(); + + spv::Id loadedId = builder.accessChainLoad(TranslatePrecisionDecoration(type), + TranslateNonUniformDecoration(type.getQualifier()), + nominalTypeId, + spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & ~spv::MemoryAccessMakePointerAvailableKHRMask), + TranslateMemoryScope(coherentFlags), + alignment); + + // Need to convert to abstract types when necessary + if (type.getBasicType() == glslang::EbtBool) { + if (builder.isScalarType(nominalTypeId)) { + // Conversion for bool + spv::Id boolType = builder.makeBoolType(); + if (nominalTypeId != boolType) + loadedId = builder.createBinOp(spv::OpINotEqual, boolType, loadedId, builder.makeUintConstant(0)); + } else if (builder.isVectorType(nominalTypeId)) { + // Conversion for bvec + int vecSize = builder.getNumTypeComponents(nominalTypeId); + spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize); + if (nominalTypeId != bvecType) + loadedId = builder.createBinOp(spv::OpINotEqual, bvecType, loadedId, + makeSmearedConstant(builder.makeUintConstant(0), vecSize)); + } + } + + return loadedId; +} + +// Wrap the builder's accessChainStore to: +// - do conversion of concrete to abstract type +// +// Implicitly uses the existing builder.accessChain as the storage target. 
+void TGlslangToSpvTraverser::accessChainStore(const glslang::TType& type, spv::Id rvalue) +{ + // Need to convert to abstract types when necessary + if (type.getBasicType() == glslang::EbtBool) { + spv::Id nominalTypeId = builder.accessChainGetInferredType(); + + if (builder.isScalarType(nominalTypeId)) { + // Conversion for bool + spv::Id boolType = builder.makeBoolType(); + if (nominalTypeId != boolType) { + // keep these outside arguments, for determinant order-of-evaluation + spv::Id one = builder.makeUintConstant(1); + spv::Id zero = builder.makeUintConstant(0); + rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero); + } else if (builder.getTypeId(rvalue) != boolType) + rvalue = builder.createBinOp(spv::OpINotEqual, boolType, rvalue, builder.makeUintConstant(0)); + } else if (builder.isVectorType(nominalTypeId)) { + // Conversion for bvec + int vecSize = builder.getNumTypeComponents(nominalTypeId); + spv::Id bvecType = builder.makeVectorType(builder.makeBoolType(), vecSize); + if (nominalTypeId != bvecType) { + // keep these outside arguments, for determinant order-of-evaluation + spv::Id one = makeSmearedConstant(builder.makeUintConstant(1), vecSize); + spv::Id zero = makeSmearedConstant(builder.makeUintConstant(0), vecSize); + rvalue = builder.createTriOp(spv::OpSelect, nominalTypeId, rvalue, one, zero); + } else if (builder.getTypeId(rvalue) != bvecType) + rvalue = builder.createBinOp(spv::OpINotEqual, bvecType, rvalue, + makeSmearedConstant(builder.makeUintConstant(0), vecSize)); + } + } + + spv::Builder::AccessChain::CoherentFlags coherentFlags = builder.getAccessChain().coherentFlags; + coherentFlags |= TranslateCoherent(type); + + unsigned int alignment = builder.getAccessChain().alignment; + alignment |= type.getBufferReferenceAlignment(); + + builder.accessChainStore(rvalue, + spv::MemoryAccessMask(TranslateMemoryAccess(coherentFlags) & + ~spv::MemoryAccessMakePointerVisibleKHRMask), + TranslateMemoryScope(coherentFlags), alignment); +} + +// For storing when types match at the glslang level, but not might match at the +// SPIR-V level. +// +// This especially happens when a single glslang type expands to multiple +// SPIR-V types, like a struct that is used in a member-undecorated way as well +// as in a member-decorated way. +// +// NOTE: This function can handle any store request; if it's not special it +// simplifies to a simple OpStore. +// +// Implicitly uses the existing builder.accessChain as the storage target. +void TGlslangToSpvTraverser::multiTypeStore(const glslang::TType& type, spv::Id rValue) +{ + // we only do the complex path here if it's an aggregate + if (! type.isStruct() && ! type.isArray()) { + accessChainStore(type, rValue); + return; + } + + // and, it has to be a case of type aliasing + spv::Id rType = builder.getTypeId(rValue); + spv::Id lValue = builder.accessChainGetLValue(); + spv::Id lType = builder.getContainedTypeId(builder.getTypeId(lValue)); + if (lType == rType) { + accessChainStore(type, rValue); + return; + } + + // Recursively (as needed) copy an aggregate type to a different aggregate type, + // where the two types were the same type in GLSL. This requires member + // by member copy, recursively. + + // SPIR-V 1.4 added an instruction to do help do this. + if (glslangIntermediate->getSpv().spv >= glslang::EShTargetSpv_1_4) { + // However, bool in uniform space is changed to int, so + // OpCopyLogical does not work for that. 
+ // TODO: It would be more robust to do a full recursive verification of the types satisfying SPIR-V rules. + bool rBool = builder.containsType(builder.getTypeId(rValue), spv::OpTypeBool, 0); + bool lBool = builder.containsType(lType, spv::OpTypeBool, 0); + if (lBool == rBool) { + spv::Id logicalCopy = builder.createUnaryOp(spv::OpCopyLogical, lType, rValue); + accessChainStore(type, logicalCopy); + return; + } + } + + // If an array, copy element by element. + if (type.isArray()) { + glslang::TType glslangElementType(type, 0); + spv::Id elementRType = builder.getContainedTypeId(rType); + for (int index = 0; index < type.getOuterArraySize(); ++index) { + // get the source member + spv::Id elementRValue = builder.createCompositeExtract(rValue, elementRType, index); + + // set up the target storage + builder.clearAccessChain(); + builder.setAccessChainLValue(lValue); + builder.accessChainPush(builder.makeIntConstant(index), TranslateCoherent(type), + type.getBufferReferenceAlignment()); + + // store the member + multiTypeStore(glslangElementType, elementRValue); + } + } else { + assert(type.isStruct()); + + // loop over structure members + const glslang::TTypeList& members = *type.getStruct(); + for (int m = 0; m < (int)members.size(); ++m) { + const glslang::TType& glslangMemberType = *members[m].type; + + // get the source member + spv::Id memberRType = builder.getContainedTypeId(rType, m); + spv::Id memberRValue = builder.createCompositeExtract(rValue, memberRType, m); + + // set up the target storage + builder.clearAccessChain(); + builder.setAccessChainLValue(lValue); + builder.accessChainPush(builder.makeIntConstant(m), TranslateCoherent(type), + type.getBufferReferenceAlignment()); + + // store the member + multiTypeStore(glslangMemberType, memberRValue); + } + } +} + +// Decide whether or not this type should be +// decorated with offsets and strides, and if so +// whether std140 or std430 rules should be applied. 
+glslang::TLayoutPacking TGlslangToSpvTraverser::getExplicitLayout(const glslang::TType& type) const +{ + // has to be a block + if (type.getBasicType() != glslang::EbtBlock) + return glslang::ElpNone; + + // has to be a uniform or buffer block or task in/out blocks + if (type.getQualifier().storage != glslang::EvqUniform && + type.getQualifier().storage != glslang::EvqBuffer && + !type.getQualifier().isTaskMemory()) + return glslang::ElpNone; + + // return the layout to use + switch (type.getQualifier().layoutPacking) { + case glslang::ElpStd140: + case glslang::ElpStd430: + case glslang::ElpScalar: + return type.getQualifier().layoutPacking; + default: + return glslang::ElpNone; + } +} + +// Given an array type, returns the integer stride required for that array +int TGlslangToSpvTraverser::getArrayStride(const glslang::TType& arrayType, glslang::TLayoutPacking explicitLayout, + glslang::TLayoutMatrix matrixLayout) +{ + int size; + int stride; + glslangIntermediate->getMemberAlignment(arrayType, size, stride, explicitLayout, + matrixLayout == glslang::ElmRowMajor); + + return stride; +} + +// Given a matrix type, or array (of array) of matrixes type, returns the integer stride required for that matrix +// when used as a member of an interface block +int TGlslangToSpvTraverser::getMatrixStride(const glslang::TType& matrixType, glslang::TLayoutPacking explicitLayout, + glslang::TLayoutMatrix matrixLayout) +{ + glslang::TType elementType; + elementType.shallowCopy(matrixType); + elementType.clearArraySizes(); + + int size; + int stride; + glslangIntermediate->getMemberAlignment(elementType, size, stride, explicitLayout, + matrixLayout == glslang::ElmRowMajor); + + return stride; +} + +// Given a member type of a struct, realign the current offset for it, and compute +// the next (not yet aligned) offset for the next member, which will get aligned +// on the next call. +// 'currentOffset' should be passed in already initialized, ready to modify, and reflecting +// the migration of data from nextOffset -> currentOffset. It should be -1 on the first call. +// -1 means a non-forced member offset (no decoration needed). +void TGlslangToSpvTraverser::updateMemberOffset(const glslang::TType& structType, const glslang::TType& memberType, + int& currentOffset, int& nextOffset, glslang::TLayoutPacking explicitLayout, glslang::TLayoutMatrix matrixLayout) +{ + // this will get a positive value when deemed necessary + nextOffset = -1; + + // override anything in currentOffset with user-set offset + if (memberType.getQualifier().hasOffset()) + currentOffset = memberType.getQualifier().layoutOffset; + + // It could be that current linker usage in glslang updated all the layoutOffset, + // in which case the following code does not matter. But, that's not quite right + // once cross-compilation unit GLSL validation is done, as the original user + // settings are needed in layoutOffset, and then the following will come into play. + + if (explicitLayout == glslang::ElpNone) { + if (! memberType.getQualifier().hasOffset()) + currentOffset = -1; + + return; + } + + // Getting this far means we need explicit offsets + if (currentOffset < 0) + currentOffset = 0; + + // Now, currentOffset is valid (either 0, or from a previous nextOffset), + // but possibly not yet correctly aligned. 
+ + int memberSize; + int dummyStride; + int memberAlignment = glslangIntermediate->getMemberAlignment(memberType, memberSize, dummyStride, explicitLayout, + matrixLayout == glslang::ElmRowMajor); + + // Adjust alignment for HLSL rules + // TODO: make this consistent in early phases of code: + // adjusting this late means inconsistencies with earlier code, which for reflection is an issue + // Until reflection is brought in sync with these adjustments, don't apply to $Global, + // which is the most likely to rely on reflection, and least likely to rely implicit layouts + if (glslangIntermediate->usingHlslOffsets() && + ! memberType.isArray() && memberType.isVector() && structType.getTypeName().compare("$Global") != 0) { + int dummySize; + int componentAlignment = glslangIntermediate->getBaseAlignmentScalar(memberType, dummySize); + if (componentAlignment <= 4) + memberAlignment = componentAlignment; + } + + // Bump up to member alignment + glslang::RoundToPow2(currentOffset, memberAlignment); + + // Bump up to vec4 if there is a bad straddle + if (explicitLayout != glslang::ElpScalar && glslangIntermediate->improperStraddle(memberType, memberSize, + currentOffset)) + glslang::RoundToPow2(currentOffset, 16); + + nextOffset = currentOffset + memberSize; +} + +void TGlslangToSpvTraverser::declareUseOfStructMember(const glslang::TTypeList& members, int glslangMember) +{ + const glslang::TBuiltInVariable glslangBuiltIn = members[glslangMember].type->getQualifier().builtIn; + switch (glslangBuiltIn) + { + case glslang::EbvPointSize: +#ifndef GLSLANG_WEB + case glslang::EbvClipDistance: + case glslang::EbvCullDistance: + case glslang::EbvViewportMaskNV: + case glslang::EbvSecondaryPositionNV: + case glslang::EbvSecondaryViewportMaskNV: + case glslang::EbvPositionPerViewNV: + case glslang::EbvViewportMaskPerViewNV: + case glslang::EbvTaskCountNV: + case glslang::EbvPrimitiveCountNV: + case glslang::EbvPrimitiveIndicesNV: + case glslang::EbvClipDistancePerViewNV: + case glslang::EbvCullDistancePerViewNV: + case glslang::EbvLayerPerViewNV: + case glslang::EbvMeshViewCountNV: + case glslang::EbvMeshViewIndicesNV: +#endif + // Generate the associated capability. Delegate to TranslateBuiltInDecoration. + // Alternately, we could just call this for any glslang built-in, since the + // capability already guards against duplicates. + TranslateBuiltInDecoration(glslangBuiltIn, false); + break; + default: + // Capabilities were already generated when the struct was declared. + break; + } +} + +bool TGlslangToSpvTraverser::isShaderEntryPoint(const glslang::TIntermAggregate* node) +{ + return node->getName().compare(glslangIntermediate->getEntryPointMangledName().c_str()) == 0; +} + +// Does parameter need a place to keep writes, separate from the original? +// Assumes called after originalParam(), which filters out block/buffer/opaque-based +// qualifiers such that we should have only in/out/inout/constreadonly here. +bool TGlslangToSpvTraverser::writableParam(glslang::TStorageQualifier qualifier) const +{ + assert(qualifier == glslang::EvqIn || + qualifier == glslang::EvqOut || + qualifier == glslang::EvqInOut || + qualifier == glslang::EvqUniform || + qualifier == glslang::EvqConstReadOnly); + return qualifier != glslang::EvqConstReadOnly && + qualifier != glslang::EvqUniform; +} + +// Is parameter pass-by-original? 
+bool TGlslangToSpvTraverser::originalParam(glslang::TStorageQualifier qualifier, const glslang::TType& paramType,
+                                           bool implicitThisParam)
+{
+    if (implicitThisParam)                                                                     // implicit this
+        return true;
+    if (glslangIntermediate->getSource() == glslang::EShSourceHlsl)
+        return paramType.getBasicType() == glslang::EbtBlock;
+    return paramType.containsOpaque() ||                                                       // sampler, etc.
+           (paramType.getBasicType() == glslang::EbtBlock && qualifier == glslang::EvqBuffer); // SSBO
+}
+
+// Make all the functions, skeletally, without actually visiting their bodies.
+void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+    const auto getParamDecorations = [&](std::vector<spv::Decoration>& decorations, const glslang::TType& type,
+        bool useVulkanMemoryModel) {
+        spv::Decoration paramPrecision = TranslatePrecisionDecoration(type);
+        if (paramPrecision != spv::NoPrecision)
+            decorations.push_back(paramPrecision);
+        TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
+        if (type.isReference()) {
+            // Original and non-writable params pass the pointer directly and
+            // use restrict/aliased, others are stored to a pointer in Function
+            // memory and use RestrictPointer/AliasedPointer.
+            if (originalParam(type.getQualifier().storage, type, false) ||
+                !writableParam(type.getQualifier().storage)) {
+                decorations.push_back(type.getQualifier().isRestrict() ? spv::DecorationRestrict :
+                                      spv::DecorationAliased);
+            } else {
+                decorations.push_back(type.getQualifier().isRestrict() ? spv::DecorationRestrictPointerEXT :
+                                      spv::DecorationAliasedPointerEXT);
+            }
+        }
+    };
+
+    for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+        glslang::TIntermAggregate* glslFunction = glslFunctions[f]->getAsAggregate();
+        if (! glslFunction || glslFunction->getOp() != glslang::EOpFunction || isShaderEntryPoint(glslFunction))
+            continue;
+
+        // We're on a user function. Set up the basic interface for the function now,
+        // so that it's available to call. Translating the body will happen later.
+        //
+        // Typically (except for a "const in" parameter), an address will be passed to the
+        // function. What it is an address of varies:
+        //
+        // - "in" parameters not marked as "const" can be written to without modifying the calling
+        //   argument so that write needs to be to a copy, hence the address of a copy works.
+        //
+        // - "const in" parameters can just be the r-value, as no writes need occur.
+        //
+        // - "out" and "inout" arguments can't be done as pointers to the calling argument, because
+        //   GLSL has copy-in/copy-out semantics. They can be handled though with a pointer to a copy.
+
+        std::vector<spv::Id> paramTypes;
+        std::vector<std::vector<spv::Decoration>> paramDecorations; // list of decorations per parameter
+        glslang::TIntermSequence& parameters = glslFunction->getSequence()[0]->getAsAggregate()->getSequence();
+
+#ifdef ENABLE_HLSL
+        bool implicitThis = (int)parameters.size() > 0 && parameters[0]->getAsSymbolNode()->getName() ==
+                            glslangIntermediate->implicitThisName;
+#else
+        bool implicitThis = false;
+#endif
+
+        paramDecorations.resize(parameters.size());
+        for (int p = 0; p < (int)parameters.size(); ++p) {
+            const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+            spv::Id typeId = convertGlslangToSpvType(paramType);
+            if (originalParam(paramType.getQualifier().storage, paramType, implicitThis && p == 0))
+                typeId = builder.makePointer(TranslateStorageClass(paramType), typeId);
+            else if (writableParam(paramType.getQualifier().storage))
+                typeId = builder.makePointer(spv::StorageClassFunction, typeId);
+            else
+                rValueParameters.insert(parameters[p]->getAsSymbolNode()->getId());
+            getParamDecorations(paramDecorations[p], paramType, glslangIntermediate->usingVulkanMemoryModel());
+            paramTypes.push_back(typeId);
+        }
+
+        spv::Block* functionBlock;
+        spv::Function *function = builder.makeFunctionEntry(TranslatePrecisionDecoration(glslFunction->getType()),
+                                                            convertGlslangToSpvType(glslFunction->getType()),
+                                                            glslFunction->getName().c_str(), paramTypes,
+                                                            paramDecorations, &functionBlock);
+        if (implicitThis)
+            function->setImplicitThis();
+
+        // Track function to emit/call later
+        functionMap[glslFunction->getName().c_str()] = function;
+
+        // Set the parameter id's
+        for (int p = 0; p < (int)parameters.size(); ++p) {
+            symbolValues[parameters[p]->getAsSymbolNode()->getId()] = function->getParamId(p);
+            // give a name too
+            builder.addName(function->getParamId(p), parameters[p]->getAsSymbolNode()->getName().c_str());
+
+            const glslang::TType& paramType = parameters[p]->getAsTyped()->getType();
+            if (paramType.contains8BitInt())
+                builder.addCapability(spv::CapabilityInt8);
+            if (paramType.contains16BitInt())
+                builder.addCapability(spv::CapabilityInt16);
+            if (paramType.contains16BitFloat())
+                builder.addCapability(spv::CapabilityFloat16);
+        }
+    }
+}
+
+// Process all the initializers, while skipping the functions and link objects
+void TGlslangToSpvTraverser::makeGlobalInitializers(const glslang::TIntermSequence& initializers)
+{
+    builder.setBuildPoint(shaderEntry->getLastBlock());
+    for (int i = 0; i < (int)initializers.size(); ++i) {
+        glslang::TIntermAggregate* initializer = initializers[i]->getAsAggregate();
+        if (initializer && initializer->getOp() != glslang::EOpFunction && initializer->getOp() !=
+            glslang::EOpLinkerObjects) {
+
+            // We're on a top-level node that's not a function. Treat as an initializer, whose
+            // code goes into the beginning of the entry point.
+            initializer->traverse(this);
+        }
+    }
+}
+
+// Process all the functions, while skipping initializers.
+void TGlslangToSpvTraverser::visitFunctions(const glslang::TIntermSequence& glslFunctions)
+{
+    for (int f = 0; f < (int)glslFunctions.size(); ++f) {
+        glslang::TIntermAggregate* node = glslFunctions[f]->getAsAggregate();
+        if (node && (node->getOp() == glslang::EOpFunction || node->getOp() == glslang::EOpLinkerObjects))
+            node->traverse(this);
+    }
+}
+
+void TGlslangToSpvTraverser::handleFunctionEntry(const glslang::TIntermAggregate* node)
+{
+    // SPIR-V functions should already be in the functionMap from the prepass
+    // that called makeFunctions().
+ currentFunction = functionMap[node->getName().c_str()]; + spv::Block* functionBlock = currentFunction->getEntryBlock(); + builder.setBuildPoint(functionBlock); +} + +void TGlslangToSpvTraverser::translateArguments(const glslang::TIntermAggregate& node, std::vector& arguments, + spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags) +{ + const glslang::TIntermSequence& glslangArguments = node.getSequence(); + + glslang::TSampler sampler = {}; + bool cubeCompare = false; +#ifndef GLSLANG_WEB + bool f16ShadowCompare = false; +#endif + if (node.isTexture() || node.isImage()) { + sampler = glslangArguments[0]->getAsTyped()->getType().getSampler(); + cubeCompare = sampler.dim == glslang::EsdCube && sampler.arrayed && sampler.shadow; +#ifndef GLSLANG_WEB + f16ShadowCompare = sampler.shadow && + glslangArguments[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16; +#endif + } + + for (int i = 0; i < (int)glslangArguments.size(); ++i) { + builder.clearAccessChain(); + glslangArguments[i]->traverse(this); + +#ifndef GLSLANG_WEB + // Special case l-value operands + bool lvalue = false; + switch (node.getOp()) { + case glslang::EOpImageAtomicAdd: + case glslang::EOpImageAtomicMin: + case glslang::EOpImageAtomicMax: + case glslang::EOpImageAtomicAnd: + case glslang::EOpImageAtomicOr: + case glslang::EOpImageAtomicXor: + case glslang::EOpImageAtomicExchange: + case glslang::EOpImageAtomicCompSwap: + case glslang::EOpImageAtomicLoad: + case glslang::EOpImageAtomicStore: + if (i == 0) + lvalue = true; + break; + case glslang::EOpSparseImageLoad: + if ((sampler.ms && i == 3) || (! sampler.ms && i == 2)) + lvalue = true; + break; + case glslang::EOpSparseTexture: + if (((cubeCompare || f16ShadowCompare) && i == 3) || (! (cubeCompare || f16ShadowCompare) && i == 2)) + lvalue = true; + break; + case glslang::EOpSparseTextureClamp: + if (((cubeCompare || f16ShadowCompare) && i == 4) || (! (cubeCompare || f16ShadowCompare) && i == 3)) + lvalue = true; + break; + case glslang::EOpSparseTextureLod: + case glslang::EOpSparseTextureOffset: + if ((f16ShadowCompare && i == 4) || (! f16ShadowCompare && i == 3)) + lvalue = true; + break; + case glslang::EOpSparseTextureFetch: + if ((sampler.dim != glslang::EsdRect && i == 3) || (sampler.dim == glslang::EsdRect && i == 2)) + lvalue = true; + break; + case glslang::EOpSparseTextureFetchOffset: + if ((sampler.dim != glslang::EsdRect && i == 4) || (sampler.dim == glslang::EsdRect && i == 3)) + lvalue = true; + break; + case glslang::EOpSparseTextureLodOffset: + case glslang::EOpSparseTextureGrad: + case glslang::EOpSparseTextureOffsetClamp: + if ((f16ShadowCompare && i == 5) || (! f16ShadowCompare && i == 4)) + lvalue = true; + break; + case glslang::EOpSparseTextureGradOffset: + case glslang::EOpSparseTextureGradClamp: + if ((f16ShadowCompare && i == 6) || (! f16ShadowCompare && i == 5)) + lvalue = true; + break; + case glslang::EOpSparseTextureGradOffsetClamp: + if ((f16ShadowCompare && i == 7) || (! f16ShadowCompare && i == 6)) + lvalue = true; + break; + case glslang::EOpSparseTextureGather: + if ((sampler.shadow && i == 3) || (! sampler.shadow && i == 2)) + lvalue = true; + break; + case glslang::EOpSparseTextureGatherOffset: + case glslang::EOpSparseTextureGatherOffsets: + if ((sampler.shadow && i == 4) || (! 
sampler.shadow && i == 3)) + lvalue = true; + break; + case glslang::EOpSparseTextureGatherLod: + if (i == 3) + lvalue = true; + break; + case glslang::EOpSparseTextureGatherLodOffset: + case glslang::EOpSparseTextureGatherLodOffsets: + if (i == 4) + lvalue = true; + break; + case glslang::EOpSparseImageLoadLod: + if (i == 3) + lvalue = true; + break; + case glslang::EOpImageSampleFootprintNV: + if (i == 4) + lvalue = true; + break; + case glslang::EOpImageSampleFootprintClampNV: + case glslang::EOpImageSampleFootprintLodNV: + if (i == 5) + lvalue = true; + break; + case glslang::EOpImageSampleFootprintGradNV: + if (i == 6) + lvalue = true; + break; + case glslang::EOpImageSampleFootprintGradClampNV: + if (i == 7) + lvalue = true; + break; + default: + break; + } + + if (lvalue) { + arguments.push_back(builder.accessChainGetLValue()); + lvalueCoherentFlags = builder.getAccessChain().coherentFlags; + lvalueCoherentFlags |= TranslateCoherent(glslangArguments[i]->getAsTyped()->getType()); + } else +#endif + arguments.push_back(accessChainLoad(glslangArguments[i]->getAsTyped()->getType())); + } +} + +void TGlslangToSpvTraverser::translateArguments(glslang::TIntermUnary& node, std::vector& arguments) +{ + builder.clearAccessChain(); + node.getOperand()->traverse(this); + arguments.push_back(accessChainLoad(node.getOperand()->getType())); +} + +spv::Id TGlslangToSpvTraverser::createImageTextureFunctionCall(glslang::TIntermOperator* node) +{ + if (! node->isImage() && ! node->isTexture()) + return spv::NoResult; + + builder.setLine(node->getLoc().line, node->getLoc().getFilename()); + + // Process a GLSL texturing op (will be SPV image) + + const glslang::TType &imageType = node->getAsAggregate() + ? node->getAsAggregate()->getSequence()[0]->getAsTyped()->getType() + : node->getAsUnaryNode()->getOperand()->getAsTyped()->getType(); + const glslang::TSampler sampler = imageType.getSampler(); +#ifdef GLSLANG_WEB + const bool f16ShadowCompare = false; +#else + bool f16ShadowCompare = (sampler.shadow && node->getAsAggregate()) + ? 
node->getAsAggregate()->getSequence()[1]->getAsTyped()->getType().getBasicType() == glslang::EbtFloat16 + : false; +#endif + + const auto signExtensionMask = [&]() { + if (builder.getSpvVersion() >= spv::Spv_1_4) { + if (sampler.type == glslang::EbtUint) + return spv::ImageOperandsZeroExtendMask; + else if (sampler.type == glslang::EbtInt) + return spv::ImageOperandsSignExtendMask; + } + return spv::ImageOperandsMaskNone; + }; + + spv::Builder::AccessChain::CoherentFlags lvalueCoherentFlags; + + std::vector arguments; + if (node->getAsAggregate()) + translateArguments(*node->getAsAggregate(), arguments, lvalueCoherentFlags); + else + translateArguments(*node->getAsUnaryNode(), arguments); + spv::Decoration precision = TranslatePrecisionDecoration(node->getType()); + + spv::Builder::TextureParameters params = { }; + params.sampler = arguments[0]; + + glslang::TCrackedTextureOp cracked; + node->crackTexture(sampler, cracked); + + const bool isUnsignedResult = node->getType().getBasicType() == glslang::EbtUint; + + // Check for queries + if (cracked.query) { + // OpImageQueryLod works on a sampled image, for other queries the image has to be extracted first + if (node->getOp() != glslang::EOpTextureQueryLod && builder.isSampledImage(params.sampler)) + params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); + + switch (node->getOp()) { + case glslang::EOpImageQuerySize: + case glslang::EOpTextureQuerySize: + if (arguments.size() > 1) { + params.lod = arguments[1]; + return builder.createTextureQueryCall(spv::OpImageQuerySizeLod, params, isUnsignedResult); + } else + return builder.createTextureQueryCall(spv::OpImageQuerySize, params, isUnsignedResult); +#ifndef GLSLANG_WEB + case glslang::EOpImageQuerySamples: + case glslang::EOpTextureQuerySamples: + return builder.createTextureQueryCall(spv::OpImageQuerySamples, params, isUnsignedResult); + case glslang::EOpTextureQueryLod: + params.coords = arguments[1]; + return builder.createTextureQueryCall(spv::OpImageQueryLod, params, isUnsignedResult); + case glslang::EOpTextureQueryLevels: + return builder.createTextureQueryCall(spv::OpImageQueryLevels, params, isUnsignedResult); + case glslang::EOpSparseTexelsResident: + return builder.createUnaryOp(spv::OpImageSparseTexelsResident, builder.makeBoolType(), arguments[0]); +#endif + default: + assert(0); + break; + } + } + + int components = node->getType().getVectorSize(); + + if (node->getOp() == glslang::EOpTextureFetch) { + // These must produce 4 components, per SPIR-V spec. We'll add a conversion constructor if needed. + // This will only happen through the HLSL path for operator[], so we do not have to handle e.g. + // the EOpTexture/Proj/Lod/etc family. It would be harmless to do so, but would need more logic + // here around e.g. which ones return scalars or other types. + components = 4; + } + + glslang::TType returnType(node->getType().getBasicType(), glslang::EvqTemporary, components); + + auto resultType = [&returnType,this]{ return convertGlslangToSpvType(returnType); }; + + // Check for image functions other than queries + if (node->isImage()) { + std::vector operands; + auto opIt = arguments.begin(); + spv::IdImmediate image = { true, *(opIt++) }; + operands.push_back(image); + + // Handle subpass operations + // TODO: GLSL should change to have the "MS" only on the type rather than the + // built-in function. 
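+    // Illustrative note: a GLSL subpassLoad(...) call carries no coordinate, so
+    // the block below synthesizes the ivec2(0, 0) offset that OpImageRead needs
+    // for a subpass-input image; for a multisampled subpass input the extra
+    // sample argument is appended through the Sample image operand.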
+ if (cracked.subpass) { + // add on the (0,0) coordinate + spv::Id zero = builder.makeIntConstant(0); + std::vector comps; + comps.push_back(zero); + comps.push_back(zero); + spv::IdImmediate coord = { true, + builder.makeCompositeConstant(builder.makeVectorType(builder.makeIntType(32), 2), comps) }; + operands.push_back(coord); + spv::IdImmediate imageOperands = { false, spv::ImageOperandsMaskNone }; + imageOperands.word = imageOperands.word | signExtensionMask(); + if (sampler.isMultiSample()) { + imageOperands.word = imageOperands.word | spv::ImageOperandsSampleMask; + } + if (imageOperands.word != spv::ImageOperandsMaskNone) { + operands.push_back(imageOperands); + if (sampler.isMultiSample()) { + spv::IdImmediate imageOperand = { true, *(opIt++) }; + operands.push_back(imageOperand); + } + } + spv::Id result = builder.createOp(spv::OpImageRead, resultType(), operands); + builder.setPrecision(result, precision); + return result; + } + + spv::IdImmediate coord = { true, *(opIt++) }; + operands.push_back(coord); + if (node->getOp() == glslang::EOpImageLoad || node->getOp() == glslang::EOpImageLoadLod) { + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + if (sampler.isMultiSample()) { + mask = mask | spv::ImageOperandsSampleMask; + } + if (cracked.lod) { + builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod); + builder.addCapability(spv::CapabilityImageReadWriteLodAMD); + mask = mask | spv::ImageOperandsLodMask; + } + mask = mask | TranslateImageOperands(TranslateCoherent(imageType)); + mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask); + mask = mask | signExtensionMask(); + if (mask != spv::ImageOperandsMaskNone) { + spv::IdImmediate imageOperands = { false, (unsigned int)mask }; + operands.push_back(imageOperands); + } + if (mask & spv::ImageOperandsSampleMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsLodMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) { + spv::IdImmediate imageOperand = { true, + builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) }; + operands.push_back(imageOperand); + } + + if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown) + builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat); + + std::vector result(1, builder.createOp(spv::OpImageRead, resultType(), operands)); + builder.setPrecision(result[0], precision); + + // If needed, add a conversion constructor to the proper size. 
+ if (components != node->getType().getVectorSize()) + result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType())); + + return result[0]; + } else if (node->getOp() == glslang::EOpImageStore || node->getOp() == glslang::EOpImageStoreLod) { + + // Push the texel value before the operands + if (sampler.isMultiSample() || cracked.lod) { + spv::IdImmediate texel = { true, *(opIt + 1) }; + operands.push_back(texel); + } else { + spv::IdImmediate texel = { true, *opIt }; + operands.push_back(texel); + } + + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + if (sampler.isMultiSample()) { + mask = mask | spv::ImageOperandsSampleMask; + } + if (cracked.lod) { + builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod); + builder.addCapability(spv::CapabilityImageReadWriteLodAMD); + mask = mask | spv::ImageOperandsLodMask; + } + mask = mask | TranslateImageOperands(TranslateCoherent(imageType)); + mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelVisibleKHRMask); + mask = mask | signExtensionMask(); + if (mask != spv::ImageOperandsMaskNone) { + spv::IdImmediate imageOperands = { false, (unsigned int)mask }; + operands.push_back(imageOperands); + } + if (mask & spv::ImageOperandsSampleMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsLodMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsMakeTexelAvailableKHRMask) { + spv::IdImmediate imageOperand = { true, + builder.makeUintConstant(TranslateMemoryScope(TranslateCoherent(imageType))) }; + operands.push_back(imageOperand); + } + + builder.createNoResultOp(spv::OpImageWrite, operands); + if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown) + builder.addCapability(spv::CapabilityStorageImageWriteWithoutFormat); + return spv::NoResult; + } else if (node->getOp() == glslang::EOpSparseImageLoad || + node->getOp() == glslang::EOpSparseImageLoadLod) { + builder.addCapability(spv::CapabilitySparseResidency); + if (builder.getImageTypeFormat(builder.getImageType(operands.front().word)) == spv::ImageFormatUnknown) + builder.addCapability(spv::CapabilityStorageImageReadWithoutFormat); + + spv::ImageOperandsMask mask = spv::ImageOperandsMaskNone; + if (sampler.isMultiSample()) { + mask = mask | spv::ImageOperandsSampleMask; + } + if (cracked.lod) { + builder.addExtension(spv::E_SPV_AMD_shader_image_load_store_lod); + builder.addCapability(spv::CapabilityImageReadWriteLodAMD); + + mask = mask | spv::ImageOperandsLodMask; + } + mask = mask | TranslateImageOperands(TranslateCoherent(imageType)); + mask = (spv::ImageOperandsMask)(mask & ~spv::ImageOperandsMakeTexelAvailableKHRMask); + mask = mask | signExtensionMask(); + if (mask != spv::ImageOperandsMaskNone) { + spv::IdImmediate imageOperands = { false, (unsigned int)mask }; + operands.push_back(imageOperands); + } + if (mask & spv::ImageOperandsSampleMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsLodMask) { + spv::IdImmediate imageOperand = { true, *opIt++ }; + operands.push_back(imageOperand); + } + if (mask & spv::ImageOperandsMakeTexelVisibleKHRMask) { + spv::IdImmediate imageOperand = { true, builder.makeUintConstant(TranslateMemoryScope( + TranslateCoherent(imageType))) }; + operands.push_back(imageOperand); + } + + // Create the return type 
that was a special structure + spv::Id texelOut = *opIt; + spv::Id typeId0 = resultType(); + spv::Id typeId1 = builder.getDerefTypeId(texelOut); + spv::Id resultTypeId = builder.makeStructResultType(typeId0, typeId1); + + spv::Id resultId = builder.createOp(spv::OpImageSparseRead, resultTypeId, operands); + + // Decode the return type + builder.createStore(builder.createCompositeExtract(resultId, typeId1, 1), texelOut); + return builder.createCompositeExtract(resultId, typeId0, 0); + } else { + // Process image atomic operations + + // GLSL "IMAGE_PARAMS" will involve in constructing an image texel pointer and this pointer, + // as the first source operand, is required by SPIR-V atomic operations. + // For non-MS, the sample value should be 0 + spv::IdImmediate sample = { true, sampler.isMultiSample() ? *(opIt++) : builder.makeUintConstant(0) }; + operands.push_back(sample); + + spv::Id resultTypeId; + // imageAtomicStore has a void return type so base the pointer type on + // the type of the value operand. + if (node->getOp() == glslang::EOpImageAtomicStore) { + resultTypeId = builder.makePointer(spv::StorageClassImage, builder.getTypeId(*opIt)); + } else { + resultTypeId = builder.makePointer(spv::StorageClassImage, resultType()); + } + spv::Id pointer = builder.createOp(spv::OpImageTexelPointer, resultTypeId, operands); + if (imageType.getQualifier().nonUniform) { + builder.addDecoration(pointer, spv::DecorationNonUniformEXT); + } + + std::vector operands; + operands.push_back(pointer); + for (; opIt != arguments.end(); ++opIt) + operands.push_back(*opIt); + + return createAtomicOperation(node->getOp(), precision, resultType(), operands, node->getBasicType(), + lvalueCoherentFlags); + } + } + +#ifndef GLSLANG_WEB + // Check for fragment mask functions other than queries + if (cracked.fragMask) { + assert(sampler.ms); + + auto opIt = arguments.begin(); + std::vector operands; + + // Extract the image if necessary + if (builder.isSampledImage(params.sampler)) + params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); + + operands.push_back(params.sampler); + ++opIt; + + if (sampler.isSubpass()) { + // add on the (0,0) coordinate + spv::Id zero = builder.makeIntConstant(0); + std::vector comps; + comps.push_back(zero); + comps.push_back(zero); + operands.push_back(builder.makeCompositeConstant( + builder.makeVectorType(builder.makeIntType(32), 2), comps)); + } + + for (; opIt != arguments.end(); ++opIt) + operands.push_back(*opIt); + + spv::Op fragMaskOp = spv::OpNop; + if (node->getOp() == glslang::EOpFragmentMaskFetch) + fragMaskOp = spv::OpFragmentMaskFetchAMD; + else if (node->getOp() == glslang::EOpFragmentFetch) + fragMaskOp = spv::OpFragmentFetchAMD; + + builder.addExtension(spv::E_SPV_AMD_shader_fragment_mask); + builder.addCapability(spv::CapabilityFragmentMaskAMD); + return builder.createOp(fragMaskOp, resultType(), operands); + } +#endif + + // Check for texture functions other than queries + bool sparse = node->isSparseTexture(); + bool imageFootprint = node->isImageFootprint(); + bool cubeCompare = sampler.dim == glslang::EsdCube && sampler.isArrayed() && sampler.isShadow(); + + // check for bias argument + bool bias = false; + if (! cracked.lod && ! cracked.grad && ! cracked.fetch && ! 
cubeCompare) { + int nonBiasArgCount = 2; + if (cracked.gather) + ++nonBiasArgCount; // comp argument should be present when bias argument is present + + if (f16ShadowCompare) + ++nonBiasArgCount; + if (cracked.offset) + ++nonBiasArgCount; + else if (cracked.offsets) + ++nonBiasArgCount; + if (cracked.grad) + nonBiasArgCount += 2; + if (cracked.lodClamp) + ++nonBiasArgCount; + if (sparse) + ++nonBiasArgCount; + if (imageFootprint) + //Following three extra arguments + // int granularity, bool coarse, out gl_TextureFootprint2DNV footprint + nonBiasArgCount += 3; + if ((int)arguments.size() > nonBiasArgCount) + bias = true; + } + + // See if the sampler param should really be just the SPV image part + if (cracked.fetch) { + // a fetch needs to have the image extracted first + if (builder.isSampledImage(params.sampler)) + params.sampler = builder.createUnaryOp(spv::OpImage, builder.getImageType(params.sampler), params.sampler); + } + +#ifndef GLSLANG_WEB + if (cracked.gather) { + const auto& sourceExtensions = glslangIntermediate->getRequestedExtensions(); + if (bias || cracked.lod || + sourceExtensions.find(glslang::E_GL_AMD_texture_gather_bias_lod) != sourceExtensions.end()) { + builder.addExtension(spv::E_SPV_AMD_texture_gather_bias_lod); + builder.addCapability(spv::CapabilityImageGatherBiasLodAMD); + } + } +#endif + + // set the rest of the arguments + + params.coords = arguments[1]; + int extraArgs = 0; + bool noImplicitLod = false; + + // sort out where Dref is coming from + if (cubeCompare || f16ShadowCompare) { + params.Dref = arguments[2]; + ++extraArgs; + } else if (sampler.shadow && cracked.gather) { + params.Dref = arguments[2]; + ++extraArgs; + } else if (sampler.shadow) { + std::vector indexes; + int dRefComp; + if (cracked.proj) + dRefComp = 2; // "The resulting 3rd component of P in the shadow forms is used as Dref" + else + dRefComp = builder.getNumComponents(params.coords) - 1; + indexes.push_back(dRefComp); + params.Dref = builder.createCompositeExtract(params.coords, + builder.getScalarTypeId(builder.getTypeId(params.coords)), indexes); + } + + // lod + if (cracked.lod) { + params.lod = arguments[2 + extraArgs]; + ++extraArgs; + } else if (glslangIntermediate->getStage() != EShLangFragment && + !(glslangIntermediate->getStage() == EShLangCompute && + glslangIntermediate->hasLayoutDerivativeModeNone())) { + // we need to invent the default lod for an explicit lod instruction for a non-fragment stage + noImplicitLod = true; + } + + // multisample + if (sampler.isMultiSample()) { + params.sample = arguments[2 + extraArgs]; // For MS, "sample" should be specified + ++extraArgs; + } + + // gradient + if (cracked.grad) { + params.gradX = arguments[2 + extraArgs]; + params.gradY = arguments[3 + extraArgs]; + extraArgs += 2; + } + + // offset and offsets + if (cracked.offset) { + params.offset = arguments[2 + extraArgs]; + ++extraArgs; + } else if (cracked.offsets) { + params.offsets = arguments[2 + extraArgs]; + ++extraArgs; + } + +#ifndef GLSLANG_WEB + // lod clamp + if (cracked.lodClamp) { + params.lodClamp = arguments[2 + extraArgs]; + ++extraArgs; + } + // sparse + if (sparse) { + params.texelOut = arguments[2 + extraArgs]; + ++extraArgs; + } + // gather component + if (cracked.gather && ! 
sampler.shadow) { + // default component is 0, if missing, otherwise an argument + if (2 + extraArgs < (int)arguments.size()) { + params.component = arguments[2 + extraArgs]; + ++extraArgs; + } else + params.component = builder.makeIntConstant(0); + } + spv::Id resultStruct = spv::NoResult; + if (imageFootprint) { + //Following three extra arguments + // int granularity, bool coarse, out gl_TextureFootprint2DNV footprint + params.granularity = arguments[2 + extraArgs]; + params.coarse = arguments[3 + extraArgs]; + resultStruct = arguments[4 + extraArgs]; + extraArgs += 3; + } +#endif + // bias + if (bias) { + params.bias = arguments[2 + extraArgs]; + ++extraArgs; + } + +#ifndef GLSLANG_WEB + if (imageFootprint) { + builder.addExtension(spv::E_SPV_NV_shader_image_footprint); + builder.addCapability(spv::CapabilityImageFootprintNV); + + + //resultStructType(OpenGL type) contains 5 elements: + //struct gl_TextureFootprint2DNV { + // uvec2 anchor; + // uvec2 offset; + // uvec2 mask; + // uint lod; + // uint granularity; + //}; + //or + //struct gl_TextureFootprint3DNV { + // uvec3 anchor; + // uvec3 offset; + // uvec2 mask; + // uint lod; + // uint granularity; + //}; + spv::Id resultStructType = builder.getContainedTypeId(builder.getTypeId(resultStruct)); + assert(builder.isStructType(resultStructType)); + + //resType (SPIR-V type) contains 6 elements: + //Member 0 must be a Boolean type scalar(LOD), + //Member 1 must be a vector of integer type, whose Signedness operand is 0(anchor), + //Member 2 must be a vector of integer type, whose Signedness operand is 0(offset), + //Member 3 must be a vector of integer type, whose Signedness operand is 0(mask), + //Member 4 must be a scalar of integer type, whose Signedness operand is 0(lod), + //Member 5 must be a scalar of integer type, whose Signedness operand is 0(granularity). + std::vector members; + members.push_back(resultType()); + for (int i = 0; i < 5; i++) { + members.push_back(builder.getContainedTypeId(resultStructType, i)); + } + spv::Id resType = builder.makeStructType(members, "ResType"); + + //call ImageFootprintNV + spv::Id res = builder.createTextureCall(precision, resType, sparse, cracked.fetch, cracked.proj, + cracked.gather, noImplicitLod, params, signExtensionMask()); + + //copy resType (SPIR-V type) to resultStructType(OpenGL type) + for (int i = 0; i < 5; i++) { + builder.clearAccessChain(); + builder.setAccessChainLValue(resultStruct); + + //Accessing to a struct we created, no coherent flag is set + spv::Builder::AccessChain::CoherentFlags flags; + flags.clear(); + + builder.accessChainPush(builder.makeIntConstant(i), flags, 0); + builder.accessChainStore(builder.createCompositeExtract(res, builder.getContainedTypeId(resType, i+1), + i+1)); + } + return builder.createCompositeExtract(res, resultType(), 0); + } +#endif + + // projective component (might not to move) + // GLSL: "The texture coordinates consumed from P, not including the last component of P, + // are divided by the last component of P." + // SPIR-V: "... (u [, v] [, w], q)... It may be a vector larger than needed, but all + // unused components will appear after all used components." 
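+    // Worked example (illustrative): for textureProj(sampler2D, vec4 P), GLSL
+    // divides P.xy by the last component P.w, so q arrives in component 3.
+    // The SPIR-V projective coordinate for a 2D image is (u, v, q), i.e. q is
+    // expected in component 2, so the block below copies it from component 3
+    // (projSourceComp) to component 2 (projTargetComp).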
+ if (cracked.proj) { + int projSourceComp = builder.getNumComponents(params.coords) - 1; + int projTargetComp; + switch (sampler.dim) { + case glslang::Esd1D: projTargetComp = 1; break; + case glslang::Esd2D: projTargetComp = 2; break; + case glslang::EsdRect: projTargetComp = 2; break; + default: projTargetComp = projSourceComp; break; + } + // copy the projective coordinate if we have to + if (projTargetComp != projSourceComp) { + spv::Id projComp = builder.createCompositeExtract(params.coords, + builder.getScalarTypeId(builder.getTypeId(params.coords)), projSourceComp); + params.coords = builder.createCompositeInsert(projComp, params.coords, + builder.getTypeId(params.coords), projTargetComp); + } + } + +#ifndef GLSLANG_WEB + // nonprivate + if (imageType.getQualifier().nonprivate) { + params.nonprivate = true; + } + + // volatile + if (imageType.getQualifier().volatil) { + params.volatil = true; + } +#endif + + std::vector result( 1, + builder.createTextureCall(precision, resultType(), sparse, cracked.fetch, cracked.proj, cracked.gather, + noImplicitLod, params, signExtensionMask()) + ); + + if (components != node->getType().getVectorSize()) + result[0] = builder.createConstructor(precision, result, convertGlslangToSpvType(node->getType())); + + return result[0]; +} + +spv::Id TGlslangToSpvTraverser::handleUserFunctionCall(const glslang::TIntermAggregate* node) +{ + // Grab the function's pointer from the previously created function + spv::Function* function = functionMap[node->getName().c_str()]; + if (! function) + return 0; + + const glslang::TIntermSequence& glslangArgs = node->getSequence(); + const glslang::TQualifierList& qualifiers = node->getQualifierList(); + + // See comments in makeFunctions() for details about the semantics for parameter passing. + // + // These imply we need a four step process: + // 1. Evaluate the arguments + // 2. Allocate and make copies of in, out, and inout arguments + // 3. Make the call + // 4. Copy back the results + + // 1. Evaluate the arguments and their types + std::vector lValues; + std::vector rValues; + std::vector argTypes; + for (int a = 0; a < (int)glslangArgs.size(); ++a) { + argTypes.push_back(&glslangArgs[a]->getAsTyped()->getType()); + // build l-value + builder.clearAccessChain(); + glslangArgs[a]->traverse(this); + // keep outputs and pass-by-originals as l-values, evaluate others as r-values + if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0) || + writableParam(qualifiers[a])) { + // save l-value + lValues.push_back(builder.getAccessChain()); + } else { + // process r-value + rValues.push_back(accessChainLoad(*argTypes.back())); + } + } + + // 2. Allocate space for anything needing a copy, and if it's "in" or "inout" + // copy the original into that space. 
+ // + // Also, build up the list of actual arguments to pass in for the call + int lValueCount = 0; + int rValueCount = 0; + std::vector spvArgs; + for (int a = 0; a < (int)glslangArgs.size(); ++a) { + spv::Id arg; + if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0)) { + builder.setAccessChain(lValues[lValueCount]); + arg = builder.accessChainGetLValue(); + ++lValueCount; + } else if (writableParam(qualifiers[a])) { + // need space to hold the copy + arg = builder.createVariable(function->getParamPrecision(a), spv::StorageClassFunction, + builder.getContainedTypeId(function->getParamType(a)), "param"); + if (qualifiers[a] == glslang::EvqIn || qualifiers[a] == glslang::EvqInOut) { + // need to copy the input into output space + builder.setAccessChain(lValues[lValueCount]); + spv::Id copy = accessChainLoad(*argTypes[a]); + builder.clearAccessChain(); + builder.setAccessChainLValue(arg); + multiTypeStore(*argTypes[a], copy); + } + ++lValueCount; + } else { + // process r-value, which involves a copy for a type mismatch + if (function->getParamType(a) != convertGlslangToSpvType(*argTypes[a]) || + TranslatePrecisionDecoration(*argTypes[a]) != function->getParamPrecision(a)) + { + spv::Id argCopy = builder.createVariable(function->getParamPrecision(a), spv::StorageClassFunction, function->getParamType(a), "arg"); + builder.clearAccessChain(); + builder.setAccessChainLValue(argCopy); + multiTypeStore(*argTypes[a], rValues[rValueCount]); + arg = builder.createLoad(argCopy, function->getParamPrecision(a)); + } else + arg = rValues[rValueCount]; + ++rValueCount; + } + spvArgs.push_back(arg); + } + + // 3. Make the call. + spv::Id result = builder.createFunctionCall(function, spvArgs); + builder.setPrecision(result, TranslatePrecisionDecoration(node->getType())); + + // 4. Copy back out an "out" arguments. + lValueCount = 0; + for (int a = 0; a < (int)glslangArgs.size(); ++a) { + if (originalParam(qualifiers[a], *argTypes[a], function->hasImplicitThis() && a == 0)) + ++lValueCount; + else if (writableParam(qualifiers[a])) { + if (qualifiers[a] == glslang::EvqOut || qualifiers[a] == glslang::EvqInOut) { + spv::Id copy = builder.createLoad(spvArgs[a], spv::NoPrecision); + builder.setAccessChain(lValues[lValueCount]); + multiTypeStore(*argTypes[a], copy); + } + ++lValueCount; + } + } + + return result; +} + +// Translate AST operation to SPV operation, already having SPV-based operands/types. +spv::Id TGlslangToSpvTraverser::createBinaryOperation(glslang::TOperator op, OpDecorations& decorations, + spv::Id typeId, spv::Id left, spv::Id right, + glslang::TBasicType typeProxy, bool reduceComparison) +{ + bool isUnsigned = isTypeUnsignedInt(typeProxy); + bool isFloat = isTypeFloat(typeProxy); + bool isBool = typeProxy == glslang::EbtBool; + + spv::Op binOp = spv::OpNop; + bool needMatchingVectors = true; // for non-matrix ops, would a scalar need to smear to match a vector? 
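+    // Illustrative note: needMatchingVectors covers mixed scalar/vector operands,
+    // e.g. "vec3 v + float s" smears s to a vec3 via promoteScalar() below so
+    // OpFAdd sees two vec3 operands; ops that accept mixed shapes natively
+    // (OpVectorTimesScalar, the logical ops, OpOuterProduct) clear the flag.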
+ bool comparison = false; + + switch (op) { + case glslang::EOpAdd: + case glslang::EOpAddAssign: + if (isFloat) + binOp = spv::OpFAdd; + else + binOp = spv::OpIAdd; + break; + case glslang::EOpSub: + case glslang::EOpSubAssign: + if (isFloat) + binOp = spv::OpFSub; + else + binOp = spv::OpISub; + break; + case glslang::EOpMul: + case glslang::EOpMulAssign: + if (isFloat) + binOp = spv::OpFMul; + else + binOp = spv::OpIMul; + break; + case glslang::EOpVectorTimesScalar: + case glslang::EOpVectorTimesScalarAssign: + if (isFloat && (builder.isVector(left) || builder.isVector(right))) { + if (builder.isVector(right)) + std::swap(left, right); + assert(builder.isScalar(right)); + needMatchingVectors = false; + binOp = spv::OpVectorTimesScalar; + } else if (isFloat) + binOp = spv::OpFMul; + else + binOp = spv::OpIMul; + break; + case glslang::EOpVectorTimesMatrix: + case glslang::EOpVectorTimesMatrixAssign: + binOp = spv::OpVectorTimesMatrix; + break; + case glslang::EOpMatrixTimesVector: + binOp = spv::OpMatrixTimesVector; + break; + case glslang::EOpMatrixTimesScalar: + case glslang::EOpMatrixTimesScalarAssign: + binOp = spv::OpMatrixTimesScalar; + break; + case glslang::EOpMatrixTimesMatrix: + case glslang::EOpMatrixTimesMatrixAssign: + binOp = spv::OpMatrixTimesMatrix; + break; + case glslang::EOpOuterProduct: + binOp = spv::OpOuterProduct; + needMatchingVectors = false; + break; + + case glslang::EOpDiv: + case glslang::EOpDivAssign: + if (isFloat) + binOp = spv::OpFDiv; + else if (isUnsigned) + binOp = spv::OpUDiv; + else + binOp = spv::OpSDiv; + break; + case glslang::EOpMod: + case glslang::EOpModAssign: + if (isFloat) + binOp = spv::OpFMod; + else if (isUnsigned) + binOp = spv::OpUMod; + else + binOp = spv::OpSMod; + break; + case glslang::EOpRightShift: + case glslang::EOpRightShiftAssign: + if (isUnsigned) + binOp = spv::OpShiftRightLogical; + else + binOp = spv::OpShiftRightArithmetic; + break; + case glslang::EOpLeftShift: + case glslang::EOpLeftShiftAssign: + binOp = spv::OpShiftLeftLogical; + break; + case glslang::EOpAnd: + case glslang::EOpAndAssign: + binOp = spv::OpBitwiseAnd; + break; + case glslang::EOpLogicalAnd: + needMatchingVectors = false; + binOp = spv::OpLogicalAnd; + break; + case glslang::EOpInclusiveOr: + case glslang::EOpInclusiveOrAssign: + binOp = spv::OpBitwiseOr; + break; + case glslang::EOpLogicalOr: + needMatchingVectors = false; + binOp = spv::OpLogicalOr; + break; + case glslang::EOpExclusiveOr: + case glslang::EOpExclusiveOrAssign: + binOp = spv::OpBitwiseXor; + break; + case glslang::EOpLogicalXor: + needMatchingVectors = false; + binOp = spv::OpLogicalNotEqual; + break; + + case glslang::EOpAbsDifference: + binOp = isUnsigned ? spv::OpAbsUSubINTEL : spv::OpAbsISubINTEL; + break; + + case glslang::EOpAddSaturate: + binOp = isUnsigned ? spv::OpUAddSatINTEL : spv::OpIAddSatINTEL; + break; + + case glslang::EOpSubSaturate: + binOp = isUnsigned ? spv::OpUSubSatINTEL : spv::OpISubSatINTEL; + break; + + case glslang::EOpAverage: + binOp = isUnsigned ? spv::OpUAverageINTEL : spv::OpIAverageINTEL; + break; + + case glslang::EOpAverageRounded: + binOp = isUnsigned ? spv::OpUAverageRoundedINTEL : spv::OpIAverageRoundedINTEL; + break; + + case glslang::EOpMul32x16: + binOp = isUnsigned ? 
spv::OpUMul32x16INTEL : spv::OpIMul32x16INTEL; + break; + + case glslang::EOpLessThan: + case glslang::EOpGreaterThan: + case glslang::EOpLessThanEqual: + case glslang::EOpGreaterThanEqual: + case glslang::EOpEqual: + case glslang::EOpNotEqual: + case glslang::EOpVectorEqual: + case glslang::EOpVectorNotEqual: + comparison = true; + break; + default: + break; + } + + // handle mapped binary operations (should be non-comparison) + if (binOp != spv::OpNop) { + assert(comparison == false); + if (builder.isMatrix(left) || builder.isMatrix(right) || + builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right)) + return createBinaryMatrixOperation(binOp, decorations, typeId, left, right); + + // No matrix involved; make both operands be the same number of components, if needed + if (needMatchingVectors) + builder.promoteScalar(decorations.precision, left, right); + + spv::Id result = builder.createBinOp(binOp, typeId, left, right); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + return builder.setPrecision(result, decorations.precision); + } + + if (! comparison) + return 0; + + // Handle comparison instructions + + if (reduceComparison && (op == glslang::EOpEqual || op == glslang::EOpNotEqual) + && (builder.isVector(left) || builder.isMatrix(left) || builder.isAggregate(left))) { + spv::Id result = builder.createCompositeCompare(decorations.precision, left, right, op == glslang::EOpEqual); + decorations.addNonUniform(builder, result); + return result; + } + + switch (op) { + case glslang::EOpLessThan: + if (isFloat) + binOp = spv::OpFOrdLessThan; + else if (isUnsigned) + binOp = spv::OpULessThan; + else + binOp = spv::OpSLessThan; + break; + case glslang::EOpGreaterThan: + if (isFloat) + binOp = spv::OpFOrdGreaterThan; + else if (isUnsigned) + binOp = spv::OpUGreaterThan; + else + binOp = spv::OpSGreaterThan; + break; + case glslang::EOpLessThanEqual: + if (isFloat) + binOp = spv::OpFOrdLessThanEqual; + else if (isUnsigned) + binOp = spv::OpULessThanEqual; + else + binOp = spv::OpSLessThanEqual; + break; + case glslang::EOpGreaterThanEqual: + if (isFloat) + binOp = spv::OpFOrdGreaterThanEqual; + else if (isUnsigned) + binOp = spv::OpUGreaterThanEqual; + else + binOp = spv::OpSGreaterThanEqual; + break; + case glslang::EOpEqual: + case glslang::EOpVectorEqual: + if (isFloat) + binOp = spv::OpFOrdEqual; + else if (isBool) + binOp = spv::OpLogicalEqual; + else + binOp = spv::OpIEqual; + break; + case glslang::EOpNotEqual: + case glslang::EOpVectorNotEqual: + if (isFloat) + binOp = spv::OpFUnordNotEqual; + else if (isBool) + binOp = spv::OpLogicalNotEqual; + else + binOp = spv::OpINotEqual; + break; + default: + break; + } + + if (binOp != spv::OpNop) { + spv::Id result = builder.createBinOp(binOp, typeId, left, right); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + return builder.setPrecision(result, decorations.precision); + } + + return 0; +} + +// +// Translate AST matrix operation to SPV operation, already having SPV-based operands/types. 
+// These can be any of: +// +// matrix * scalar +// scalar * matrix +// matrix * matrix linear algebraic +// matrix * vector +// vector * matrix +// matrix * matrix componentwise +// matrix op matrix op in {+, -, /} +// matrix op scalar op in {+, -, /} +// scalar op matrix op in {+, -, /} +// +spv::Id TGlslangToSpvTraverser::createBinaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId, + spv::Id left, spv::Id right) +{ + bool firstClass = true; + + // First, handle first-class matrix operations (* and matrix/scalar) + switch (op) { + case spv::OpFDiv: + if (builder.isMatrix(left) && builder.isScalar(right)) { + // turn matrix / scalar into a multiply... + spv::Id resultType = builder.getTypeId(right); + right = builder.createBinOp(spv::OpFDiv, resultType, builder.makeFpConstant(resultType, 1.0), right); + op = spv::OpMatrixTimesScalar; + } else + firstClass = false; + break; + case spv::OpMatrixTimesScalar: + if (builder.isMatrix(right) || builder.isCooperativeMatrix(right)) + std::swap(left, right); + assert(builder.isScalar(right)); + break; + case spv::OpVectorTimesMatrix: + assert(builder.isVector(left)); + assert(builder.isMatrix(right)); + break; + case spv::OpMatrixTimesVector: + assert(builder.isMatrix(left)); + assert(builder.isVector(right)); + break; + case spv::OpMatrixTimesMatrix: + assert(builder.isMatrix(left)); + assert(builder.isMatrix(right)); + break; + default: + firstClass = false; + break; + } + + if (builder.isCooperativeMatrix(left) || builder.isCooperativeMatrix(right)) + firstClass = true; + + if (firstClass) { + spv::Id result = builder.createBinOp(op, typeId, left, right); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + return builder.setPrecision(result, decorations.precision); + } + + // Handle component-wise +, -, *, %, and / for all combinations of type. + // The result type of all of them is the same type as the (a) matrix operand. + // The algorithm is to: + // - break the matrix(es) into vectors + // - smear any scalar to a vector + // - do vector operations + // - make a matrix out the vector results + switch (op) { + case spv::OpFAdd: + case spv::OpFSub: + case spv::OpFDiv: + case spv::OpFMod: + case spv::OpFMul: + { + // one time set up... + bool leftMat = builder.isMatrix(left); + bool rightMat = builder.isMatrix(right); + unsigned int numCols = leftMat ? builder.getNumColumns(left) : builder.getNumColumns(right); + int numRows = leftMat ? builder.getNumRows(left) : builder.getNumRows(right); + spv::Id scalarType = builder.getScalarTypeId(typeId); + spv::Id vecType = builder.makeVectorType(scalarType, numRows); + std::vector results; + spv::Id smearVec = spv::NoResult; + if (builder.isScalar(left)) + smearVec = builder.smearScalar(decorations.precision, left, vecType); + else if (builder.isScalar(right)) + smearVec = builder.smearScalar(decorations.precision, right, vecType); + + // do each vector op + for (unsigned int c = 0; c < numCols; ++c) { + std::vector indexes; + indexes.push_back(c); + spv::Id leftVec = leftMat ? builder.createCompositeExtract( left, vecType, indexes) : smearVec; + spv::Id rightVec = rightMat ? 
builder.createCompositeExtract(right, vecType, indexes) : smearVec; + spv::Id result = builder.createBinOp(op, vecType, leftVec, rightVec); + decorations.addNoContraction(builder, result); + decorations.addNonUniform(builder, result); + results.push_back(builder.setPrecision(result, decorations.precision)); + } + + // put the pieces together + spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision); + decorations.addNonUniform(builder, result); + return result; + } + default: + assert(0); + return spv::NoResult; + } +} + +spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, OpDecorations& decorations, spv::Id typeId, + spv::Id operand, glslang::TBasicType typeProxy, const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags) +{ + spv::Op unaryOp = spv::OpNop; + int extBuiltins = -1; + int libCall = -1; + bool isUnsigned = isTypeUnsignedInt(typeProxy); + bool isFloat = isTypeFloat(typeProxy); + + switch (op) { + case glslang::EOpNegative: + if (isFloat) { + unaryOp = spv::OpFNegate; + if (builder.isMatrixType(typeId)) + return createUnaryMatrixOperation(unaryOp, decorations, typeId, operand, typeProxy); + } else + unaryOp = spv::OpSNegate; + break; + + case glslang::EOpLogicalNot: + case glslang::EOpVectorLogicalNot: + unaryOp = spv::OpLogicalNot; + break; + case glslang::EOpBitwiseNot: + unaryOp = spv::OpNot; + break; + + case glslang::EOpDeterminant: + libCall = spv::GLSLstd450Determinant; + break; + case glslang::EOpMatrixInverse: + libCall = spv::GLSLstd450MatrixInverse; + break; + case glslang::EOpTranspose: + unaryOp = spv::OpTranspose; + break; + + case glslang::EOpRadians: + libCall = spv::GLSLstd450Radians; + break; + case glslang::EOpDegrees: + libCall = spv::GLSLstd450Degrees; + break; + case glslang::EOpSin: + libCall = spv::GLSLstd450Sin; + break; + case glslang::EOpCos: + libCall = spv::GLSLstd450Cos; + break; + case glslang::EOpTan: + libCall = spv::GLSLstd450Tan; + break; + case glslang::EOpAcos: + libCall = spv::GLSLstd450Acos; + break; + case glslang::EOpAsin: + libCall = spv::GLSLstd450Asin; + break; + case glslang::EOpAtan: + libCall = spv::GLSLstd450Atan; + break; + + case glslang::EOpAcosh: + libCall = spv::GLSLstd450Acosh; + break; + case glslang::EOpAsinh: + libCall = spv::GLSLstd450Asinh; + break; + case glslang::EOpAtanh: + libCall = spv::GLSLstd450Atanh; + break; + case glslang::EOpTanh: + libCall = spv::GLSLstd450Tanh; + break; + case glslang::EOpCosh: + libCall = spv::GLSLstd450Cosh; + break; + case glslang::EOpSinh: + libCall = spv::GLSLstd450Sinh; + break; + + case glslang::EOpLength: + libCall = spv::GLSLstd450Length; + break; + case glslang::EOpNormalize: + libCall = spv::GLSLstd450Normalize; + break; + + case glslang::EOpExp: + libCall = spv::GLSLstd450Exp; + break; + case glslang::EOpLog: + libCall = spv::GLSLstd450Log; + break; + case glslang::EOpExp2: + libCall = spv::GLSLstd450Exp2; + break; + case glslang::EOpLog2: + libCall = spv::GLSLstd450Log2; + break; + case glslang::EOpSqrt: + libCall = spv::GLSLstd450Sqrt; + break; + case glslang::EOpInverseSqrt: + libCall = spv::GLSLstd450InverseSqrt; + break; + + case glslang::EOpFloor: + libCall = spv::GLSLstd450Floor; + break; + case glslang::EOpTrunc: + libCall = spv::GLSLstd450Trunc; + break; + case glslang::EOpRound: + libCall = spv::GLSLstd450Round; + break; + case glslang::EOpRoundEven: + libCall = spv::GLSLstd450RoundEven; + break; + case glslang::EOpCeil: + libCall = spv::GLSLstd450Ceil; + break; + case 
glslang::EOpFract: + libCall = spv::GLSLstd450Fract; + break; + + case glslang::EOpIsNan: + unaryOp = spv::OpIsNan; + break; + case glslang::EOpIsInf: + unaryOp = spv::OpIsInf; + break; + case glslang::EOpIsFinite: + unaryOp = spv::OpIsFinite; + break; + + case glslang::EOpFloatBitsToInt: + case glslang::EOpFloatBitsToUint: + case glslang::EOpIntBitsToFloat: + case glslang::EOpUintBitsToFloat: + case glslang::EOpDoubleBitsToInt64: + case glslang::EOpDoubleBitsToUint64: + case glslang::EOpInt64BitsToDouble: + case glslang::EOpUint64BitsToDouble: + case glslang::EOpFloat16BitsToInt16: + case glslang::EOpFloat16BitsToUint16: + case glslang::EOpInt16BitsToFloat16: + case glslang::EOpUint16BitsToFloat16: + unaryOp = spv::OpBitcast; + break; + + case glslang::EOpPackSnorm2x16: + libCall = spv::GLSLstd450PackSnorm2x16; + break; + case glslang::EOpUnpackSnorm2x16: + libCall = spv::GLSLstd450UnpackSnorm2x16; + break; + case glslang::EOpPackUnorm2x16: + libCall = spv::GLSLstd450PackUnorm2x16; + break; + case glslang::EOpUnpackUnorm2x16: + libCall = spv::GLSLstd450UnpackUnorm2x16; + break; + case glslang::EOpPackHalf2x16: + libCall = spv::GLSLstd450PackHalf2x16; + break; + case glslang::EOpUnpackHalf2x16: + libCall = spv::GLSLstd450UnpackHalf2x16; + break; +#ifndef GLSLANG_WEB + case glslang::EOpPackSnorm4x8: + libCall = spv::GLSLstd450PackSnorm4x8; + break; + case glslang::EOpUnpackSnorm4x8: + libCall = spv::GLSLstd450UnpackSnorm4x8; + break; + case glslang::EOpPackUnorm4x8: + libCall = spv::GLSLstd450PackUnorm4x8; + break; + case glslang::EOpUnpackUnorm4x8: + libCall = spv::GLSLstd450UnpackUnorm4x8; + break; + case glslang::EOpPackDouble2x32: + libCall = spv::GLSLstd450PackDouble2x32; + break; + case glslang::EOpUnpackDouble2x32: + libCall = spv::GLSLstd450UnpackDouble2x32; + break; +#endif + + case glslang::EOpPackInt2x32: + case glslang::EOpUnpackInt2x32: + case glslang::EOpPackUint2x32: + case glslang::EOpUnpackUint2x32: + case glslang::EOpPack16: + case glslang::EOpPack32: + case glslang::EOpPack64: + case glslang::EOpUnpack32: + case glslang::EOpUnpack16: + case glslang::EOpUnpack8: + case glslang::EOpPackInt2x16: + case glslang::EOpUnpackInt2x16: + case glslang::EOpPackUint2x16: + case glslang::EOpUnpackUint2x16: + case glslang::EOpPackInt4x16: + case glslang::EOpUnpackInt4x16: + case glslang::EOpPackUint4x16: + case glslang::EOpUnpackUint4x16: + case glslang::EOpPackFloat2x16: + case glslang::EOpUnpackFloat2x16: + unaryOp = spv::OpBitcast; + break; + + case glslang::EOpDPdx: + unaryOp = spv::OpDPdx; + break; + case glslang::EOpDPdy: + unaryOp = spv::OpDPdy; + break; + case glslang::EOpFwidth: + unaryOp = spv::OpFwidth; + break; + + case glslang::EOpAny: + unaryOp = spv::OpAny; + break; + case glslang::EOpAll: + unaryOp = spv::OpAll; + break; + + case glslang::EOpAbs: + if (isFloat) + libCall = spv::GLSLstd450FAbs; + else + libCall = spv::GLSLstd450SAbs; + break; + case glslang::EOpSign: + if (isFloat) + libCall = spv::GLSLstd450FSign; + else + libCall = spv::GLSLstd450SSign; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpDPdxFine: + unaryOp = spv::OpDPdxFine; + break; + case glslang::EOpDPdyFine: + unaryOp = spv::OpDPdyFine; + break; + case glslang::EOpFwidthFine: + unaryOp = spv::OpFwidthFine; + break; + case glslang::EOpDPdxCoarse: + unaryOp = spv::OpDPdxCoarse; + break; + case glslang::EOpDPdyCoarse: + unaryOp = spv::OpDPdyCoarse; + break; + case glslang::EOpFwidthCoarse: + unaryOp = spv::OpFwidthCoarse; + break; + case glslang::EOpRayQueryProceed: + unaryOp = 
spv::OpRayQueryProceedKHR; + break; + case glslang::EOpRayQueryGetRayTMin: + unaryOp = spv::OpRayQueryGetRayTMinKHR; + break; + case glslang::EOpRayQueryGetRayFlags: + unaryOp = spv::OpRayQueryGetRayFlagsKHR; + break; + case glslang::EOpRayQueryGetWorldRayOrigin: + unaryOp = spv::OpRayQueryGetWorldRayOriginKHR; + break; + case glslang::EOpRayQueryGetWorldRayDirection: + unaryOp = spv::OpRayQueryGetWorldRayDirectionKHR; + break; + case glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque: + unaryOp = spv::OpRayQueryGetIntersectionCandidateAABBOpaqueKHR; + break; + case glslang::EOpInterpolateAtCentroid: + if (typeProxy == glslang::EbtFloat16) + builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float); + libCall = spv::GLSLstd450InterpolateAtCentroid; + break; + case glslang::EOpAtomicCounterIncrement: + case glslang::EOpAtomicCounterDecrement: + case glslang::EOpAtomicCounter: + { + // Handle all of the atomics in one place, in createAtomicOperation() + std::vector operands; + operands.push_back(operand); + return createAtomicOperation(op, decorations.precision, typeId, operands, typeProxy, lvalueCoherentFlags); + } + + case glslang::EOpBitFieldReverse: + unaryOp = spv::OpBitReverse; + break; + case glslang::EOpBitCount: + unaryOp = spv::OpBitCount; + break; + case glslang::EOpFindLSB: + libCall = spv::GLSLstd450FindILsb; + break; + case glslang::EOpFindMSB: + if (isUnsigned) + libCall = spv::GLSLstd450FindUMsb; + else + libCall = spv::GLSLstd450FindSMsb; + break; + + case glslang::EOpCountLeadingZeros: + builder.addCapability(spv::CapabilityIntegerFunctions2INTEL); + builder.addExtension("SPV_INTEL_shader_integer_functions2"); + unaryOp = spv::OpUCountLeadingZerosINTEL; + break; + + case glslang::EOpCountTrailingZeros: + builder.addCapability(spv::CapabilityIntegerFunctions2INTEL); + builder.addExtension("SPV_INTEL_shader_integer_functions2"); + unaryOp = spv::OpUCountTrailingZerosINTEL; + break; + + case glslang::EOpBallot: + case glslang::EOpReadFirstInvocation: + case glslang::EOpAnyInvocation: + case glslang::EOpAllInvocations: + case glslang::EOpAllInvocationsEqual: + case glslang::EOpMinInvocations: + case glslang::EOpMaxInvocations: + case glslang::EOpAddInvocations: + case glslang::EOpMinInvocationsNonUniform: + case glslang::EOpMaxInvocationsNonUniform: + case glslang::EOpAddInvocationsNonUniform: + case glslang::EOpMinInvocationsInclusiveScan: + case glslang::EOpMaxInvocationsInclusiveScan: + case glslang::EOpAddInvocationsInclusiveScan: + case glslang::EOpMinInvocationsInclusiveScanNonUniform: + case glslang::EOpMaxInvocationsInclusiveScanNonUniform: + case glslang::EOpAddInvocationsInclusiveScanNonUniform: + case glslang::EOpMinInvocationsExclusiveScan: + case glslang::EOpMaxInvocationsExclusiveScan: + case glslang::EOpAddInvocationsExclusiveScan: + case glslang::EOpMinInvocationsExclusiveScanNonUniform: + case glslang::EOpMaxInvocationsExclusiveScanNonUniform: + case glslang::EOpAddInvocationsExclusiveScanNonUniform: + { + std::vector operands; + operands.push_back(operand); + return createInvocationsOperation(op, typeId, operands, typeProxy); + } + case glslang::EOpSubgroupAll: + case glslang::EOpSubgroupAny: + case glslang::EOpSubgroupAllEqual: + case glslang::EOpSubgroupBroadcastFirst: + case glslang::EOpSubgroupBallot: + case glslang::EOpSubgroupInverseBallot: + case glslang::EOpSubgroupBallotBitCount: + case glslang::EOpSubgroupBallotInclusiveBitCount: + case glslang::EOpSubgroupBallotExclusiveBitCount: + case glslang::EOpSubgroupBallotFindLSB: + case 
glslang::EOpSubgroupBallotFindMSB: + case glslang::EOpSubgroupAdd: + case glslang::EOpSubgroupMul: + case glslang::EOpSubgroupMin: + case glslang::EOpSubgroupMax: + case glslang::EOpSubgroupAnd: + case glslang::EOpSubgroupOr: + case glslang::EOpSubgroupXor: + case glslang::EOpSubgroupInclusiveAdd: + case glslang::EOpSubgroupInclusiveMul: + case glslang::EOpSubgroupInclusiveMin: + case glslang::EOpSubgroupInclusiveMax: + case glslang::EOpSubgroupInclusiveAnd: + case glslang::EOpSubgroupInclusiveOr: + case glslang::EOpSubgroupInclusiveXor: + case glslang::EOpSubgroupExclusiveAdd: + case glslang::EOpSubgroupExclusiveMul: + case glslang::EOpSubgroupExclusiveMin: + case glslang::EOpSubgroupExclusiveMax: + case glslang::EOpSubgroupExclusiveAnd: + case glslang::EOpSubgroupExclusiveOr: + case glslang::EOpSubgroupExclusiveXor: + case glslang::EOpSubgroupQuadSwapHorizontal: + case glslang::EOpSubgroupQuadSwapVertical: + case glslang::EOpSubgroupQuadSwapDiagonal: { + std::vector operands; + operands.push_back(operand); + return createSubgroupOperation(op, typeId, operands, typeProxy); + } + case glslang::EOpMbcnt: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot); + libCall = spv::MbcntAMD; + break; + + case glslang::EOpCubeFaceIndex: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader); + libCall = spv::CubeFaceIndexAMD; + break; + + case glslang::EOpCubeFaceCoord: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_gcn_shader); + libCall = spv::CubeFaceCoordAMD; + break; + case glslang::EOpSubgroupPartition: + unaryOp = spv::OpGroupNonUniformPartitionNV; + break; + case glslang::EOpConstructReference: + unaryOp = spv::OpBitcast; + break; +#endif + + case glslang::EOpCopyObject: + unaryOp = spv::OpCopyObject; + break; + + default: + return 0; + } + + spv::Id id; + if (libCall >= 0) { + std::vector args; + args.push_back(operand); + id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, args); + } else { + id = builder.createUnaryOp(unaryOp, typeId, operand); + } + + decorations.addNoContraction(builder, id); + decorations.addNonUniform(builder, id); + return builder.setPrecision(id, decorations.precision); +} + +// Create a unary operation on a matrix +spv::Id TGlslangToSpvTraverser::createUnaryMatrixOperation(spv::Op op, OpDecorations& decorations, spv::Id typeId, + spv::Id operand, glslang::TBasicType /* typeProxy */) +{ + // Handle unary operations vector by vector. + // The result type is the same type as the original type. 
+ // The algorithm is to: + // - break the matrix into vectors + // - apply the operation to each vector + // - make a matrix out the vector results + + // get the types sorted out + int numCols = builder.getNumColumns(operand); + int numRows = builder.getNumRows(operand); + spv::Id srcVecType = builder.makeVectorType(builder.getScalarTypeId(builder.getTypeId(operand)), numRows); + spv::Id destVecType = builder.makeVectorType(builder.getScalarTypeId(typeId), numRows); + std::vector results; + + // do each vector op + for (int c = 0; c < numCols; ++c) { + std::vector indexes; + indexes.push_back(c); + spv::Id srcVec = builder.createCompositeExtract(operand, srcVecType, indexes); + spv::Id destVec = builder.createUnaryOp(op, destVecType, srcVec); + decorations.addNoContraction(builder, destVec); + decorations.addNonUniform(builder, destVec); + results.push_back(builder.setPrecision(destVec, decorations.precision)); + } + + // put the pieces together + spv::Id result = builder.setPrecision(builder.createCompositeConstruct(typeId, results), decorations.precision); + decorations.addNonUniform(builder, result); + return result; +} + +// For converting integers where both the bitwidth and the signedness could +// change, but only do the width change here. The caller is still responsible +// for the signedness conversion. +spv::Id TGlslangToSpvTraverser::createIntWidthConversion(glslang::TOperator op, spv::Id operand, int vectorSize) +{ + // Get the result type width, based on the type to convert to. + int width = 32; + switch(op) { + case glslang::EOpConvInt16ToUint8: + case glslang::EOpConvIntToUint8: + case glslang::EOpConvInt64ToUint8: + case glslang::EOpConvUint16ToInt8: + case glslang::EOpConvUintToInt8: + case glslang::EOpConvUint64ToInt8: + width = 8; + break; + case glslang::EOpConvInt8ToUint16: + case glslang::EOpConvIntToUint16: + case glslang::EOpConvInt64ToUint16: + case glslang::EOpConvUint8ToInt16: + case glslang::EOpConvUintToInt16: + case glslang::EOpConvUint64ToInt16: + width = 16; + break; + case glslang::EOpConvInt8ToUint: + case glslang::EOpConvInt16ToUint: + case glslang::EOpConvInt64ToUint: + case glslang::EOpConvUint8ToInt: + case glslang::EOpConvUint16ToInt: + case glslang::EOpConvUint64ToInt: + width = 32; + break; + case glslang::EOpConvInt8ToUint64: + case glslang::EOpConvInt16ToUint64: + case glslang::EOpConvIntToUint64: + case glslang::EOpConvUint8ToInt64: + case glslang::EOpConvUint16ToInt64: + case glslang::EOpConvUintToInt64: + width = 64; + break; + + default: + assert(false && "Default missing"); + break; + } + + // Get the conversion operation and result type, + // based on the target width, but the source type. 
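+    // Worked example (illustrative): for EOpConvIntToUint8 the target width is 8
+    // but the source is signed, so this helper emits OpSConvert to a signed
+    // 8-bit type (the width change only); the caller then finishes the
+    // signedness flip, e.g. with OpBitcast (or the OpIAdd-with-zero form when
+    // generating a spec-constant op).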
+ spv::Id type = spv::NoType; + spv::Op convOp = spv::OpNop; + switch(op) { + case glslang::EOpConvInt8ToUint16: + case glslang::EOpConvInt8ToUint: + case glslang::EOpConvInt8ToUint64: + case glslang::EOpConvInt16ToUint8: + case glslang::EOpConvInt16ToUint: + case glslang::EOpConvInt16ToUint64: + case glslang::EOpConvIntToUint8: + case glslang::EOpConvIntToUint16: + case glslang::EOpConvIntToUint64: + case glslang::EOpConvInt64ToUint8: + case glslang::EOpConvInt64ToUint16: + case glslang::EOpConvInt64ToUint: + convOp = spv::OpSConvert; + type = builder.makeIntType(width); + break; + default: + convOp = spv::OpUConvert; + type = builder.makeUintType(width); + break; + } + + if (vectorSize > 0) + type = builder.makeVectorType(type, vectorSize); + + return builder.createUnaryOp(convOp, type, operand); +} + +spv::Id TGlslangToSpvTraverser::createConversion(glslang::TOperator op, OpDecorations& decorations, spv::Id destType, + spv::Id operand, glslang::TBasicType typeProxy) +{ + spv::Op convOp = spv::OpNop; + spv::Id zero = 0; + spv::Id one = 0; + + int vectorSize = builder.isVectorType(destType) ? builder.getNumTypeComponents(destType) : 0; + + switch (op) { + case glslang::EOpConvIntToBool: + case glslang::EOpConvUintToBool: + zero = builder.makeUintConstant(0); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpINotEqual, destType, operand, zero); + case glslang::EOpConvFloatToBool: + zero = builder.makeFloatConstant(0.0F); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpFUnordNotEqual, destType, operand, zero); + case glslang::EOpConvBoolToFloat: + convOp = spv::OpSelect; + zero = builder.makeFloatConstant(0.0F); + one = builder.makeFloatConstant(1.0F); + break; + + case glslang::EOpConvBoolToInt: + case glslang::EOpConvBoolToInt64: +#ifndef GLSLANG_WEB + if (op == glslang::EOpConvBoolToInt64) { + zero = builder.makeInt64Constant(0); + one = builder.makeInt64Constant(1); + } else +#endif + { + zero = builder.makeIntConstant(0); + one = builder.makeIntConstant(1); + } + + convOp = spv::OpSelect; + break; + + case glslang::EOpConvBoolToUint: + case glslang::EOpConvBoolToUint64: +#ifndef GLSLANG_WEB + if (op == glslang::EOpConvBoolToUint64) { + zero = builder.makeUint64Constant(0); + one = builder.makeUint64Constant(1); + } else +#endif + { + zero = builder.makeUintConstant(0); + one = builder.makeUintConstant(1); + } + + convOp = spv::OpSelect; + break; + + case glslang::EOpConvInt8ToFloat16: + case glslang::EOpConvInt8ToFloat: + case glslang::EOpConvInt8ToDouble: + case glslang::EOpConvInt16ToFloat16: + case glslang::EOpConvInt16ToFloat: + case glslang::EOpConvInt16ToDouble: + case glslang::EOpConvIntToFloat16: + case glslang::EOpConvIntToFloat: + case glslang::EOpConvIntToDouble: + case glslang::EOpConvInt64ToFloat: + case glslang::EOpConvInt64ToDouble: + case glslang::EOpConvInt64ToFloat16: + convOp = spv::OpConvertSToF; + break; + + case glslang::EOpConvUint8ToFloat16: + case glslang::EOpConvUint8ToFloat: + case glslang::EOpConvUint8ToDouble: + case glslang::EOpConvUint16ToFloat16: + case glslang::EOpConvUint16ToFloat: + case glslang::EOpConvUint16ToDouble: + case glslang::EOpConvUintToFloat16: + case glslang::EOpConvUintToFloat: + case glslang::EOpConvUintToDouble: + case glslang::EOpConvUint64ToFloat: + case glslang::EOpConvUint64ToDouble: + case glslang::EOpConvUint64ToFloat16: + convOp = spv::OpConvertUToF; + break; + + case glslang::EOpConvFloat16ToInt8: + case glslang::EOpConvFloatToInt8: + case 
glslang::EOpConvDoubleToInt8: + case glslang::EOpConvFloat16ToInt16: + case glslang::EOpConvFloatToInt16: + case glslang::EOpConvDoubleToInt16: + case glslang::EOpConvFloat16ToInt: + case glslang::EOpConvFloatToInt: + case glslang::EOpConvDoubleToInt: + case glslang::EOpConvFloat16ToInt64: + case glslang::EOpConvFloatToInt64: + case glslang::EOpConvDoubleToInt64: + convOp = spv::OpConvertFToS; + break; + + case glslang::EOpConvUint8ToInt8: + case glslang::EOpConvInt8ToUint8: + case glslang::EOpConvUint16ToInt16: + case glslang::EOpConvInt16ToUint16: + case glslang::EOpConvUintToInt: + case glslang::EOpConvIntToUint: + case glslang::EOpConvUint64ToInt64: + case glslang::EOpConvInt64ToUint64: + if (builder.isInSpecConstCodeGenMode()) { + // Build zero scalar or vector for OpIAdd. +#ifndef GLSLANG_WEB + if(op == glslang::EOpConvUint8ToInt8 || op == glslang::EOpConvInt8ToUint8) { + zero = builder.makeUint8Constant(0); + } else if (op == glslang::EOpConvUint16ToInt16 || op == glslang::EOpConvInt16ToUint16) { + zero = builder.makeUint16Constant(0); + } else if (op == glslang::EOpConvUint64ToInt64 || op == glslang::EOpConvInt64ToUint64) { + zero = builder.makeUint64Constant(0); + } else +#endif + { + zero = builder.makeUintConstant(0); + } + zero = makeSmearedConstant(zero, vectorSize); + // Use OpIAdd, instead of OpBitcast to do the conversion when + // generating for OpSpecConstantOp instruction. + return builder.createBinOp(spv::OpIAdd, destType, operand, zero); + } + // For normal run-time conversion instruction, use OpBitcast. + convOp = spv::OpBitcast; + break; + + case glslang::EOpConvFloat16ToUint8: + case glslang::EOpConvFloatToUint8: + case glslang::EOpConvDoubleToUint8: + case glslang::EOpConvFloat16ToUint16: + case glslang::EOpConvFloatToUint16: + case glslang::EOpConvDoubleToUint16: + case glslang::EOpConvFloat16ToUint: + case glslang::EOpConvFloatToUint: + case glslang::EOpConvDoubleToUint: + case glslang::EOpConvFloatToUint64: + case glslang::EOpConvDoubleToUint64: + case glslang::EOpConvFloat16ToUint64: + convOp = spv::OpConvertFToU; + break; + +#ifndef GLSLANG_WEB + case glslang::EOpConvInt8ToBool: + case glslang::EOpConvUint8ToBool: + zero = builder.makeUint8Constant(0); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpINotEqual, destType, operand, zero); + case glslang::EOpConvInt16ToBool: + case glslang::EOpConvUint16ToBool: + zero = builder.makeUint16Constant(0); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpINotEqual, destType, operand, zero); + case glslang::EOpConvInt64ToBool: + case glslang::EOpConvUint64ToBool: + zero = builder.makeUint64Constant(0); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpINotEqual, destType, operand, zero); + case glslang::EOpConvDoubleToBool: + zero = builder.makeDoubleConstant(0.0); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpFUnordNotEqual, destType, operand, zero); + case glslang::EOpConvFloat16ToBool: + zero = builder.makeFloat16Constant(0.0F); + zero = makeSmearedConstant(zero, vectorSize); + return builder.createBinOp(spv::OpFUnordNotEqual, destType, operand, zero); + case glslang::EOpConvBoolToDouble: + convOp = spv::OpSelect; + zero = builder.makeDoubleConstant(0.0); + one = builder.makeDoubleConstant(1.0); + break; + case glslang::EOpConvBoolToFloat16: + convOp = spv::OpSelect; + zero = builder.makeFloat16Constant(0.0F); + one = builder.makeFloat16Constant(1.0F); + break; + case 
glslang::EOpConvBoolToInt8: + zero = builder.makeInt8Constant(0); + one = builder.makeInt8Constant(1); + convOp = spv::OpSelect; + break; + case glslang::EOpConvBoolToUint8: + zero = builder.makeUint8Constant(0); + one = builder.makeUint8Constant(1); + convOp = spv::OpSelect; + break; + case glslang::EOpConvBoolToInt16: + zero = builder.makeInt16Constant(0); + one = builder.makeInt16Constant(1); + convOp = spv::OpSelect; + break; + case glslang::EOpConvBoolToUint16: + zero = builder.makeUint16Constant(0); + one = builder.makeUint16Constant(1); + convOp = spv::OpSelect; + break; + case glslang::EOpConvDoubleToFloat: + case glslang::EOpConvFloatToDouble: + case glslang::EOpConvDoubleToFloat16: + case glslang::EOpConvFloat16ToDouble: + case glslang::EOpConvFloatToFloat16: + case glslang::EOpConvFloat16ToFloat: + convOp = spv::OpFConvert; + if (builder.isMatrixType(destType)) + return createUnaryMatrixOperation(convOp, decorations, destType, operand, typeProxy); + break; + + case glslang::EOpConvInt8ToInt16: + case glslang::EOpConvInt8ToInt: + case glslang::EOpConvInt8ToInt64: + case glslang::EOpConvInt16ToInt8: + case glslang::EOpConvInt16ToInt: + case glslang::EOpConvInt16ToInt64: + case glslang::EOpConvIntToInt8: + case glslang::EOpConvIntToInt16: + case glslang::EOpConvIntToInt64: + case glslang::EOpConvInt64ToInt8: + case glslang::EOpConvInt64ToInt16: + case glslang::EOpConvInt64ToInt: + convOp = spv::OpSConvert; + break; + + case glslang::EOpConvUint8ToUint16: + case glslang::EOpConvUint8ToUint: + case glslang::EOpConvUint8ToUint64: + case glslang::EOpConvUint16ToUint8: + case glslang::EOpConvUint16ToUint: + case glslang::EOpConvUint16ToUint64: + case glslang::EOpConvUintToUint8: + case glslang::EOpConvUintToUint16: + case glslang::EOpConvUintToUint64: + case glslang::EOpConvUint64ToUint8: + case glslang::EOpConvUint64ToUint16: + case glslang::EOpConvUint64ToUint: + convOp = spv::OpUConvert; + break; + + case glslang::EOpConvInt8ToUint16: + case glslang::EOpConvInt8ToUint: + case glslang::EOpConvInt8ToUint64: + case glslang::EOpConvInt16ToUint8: + case glslang::EOpConvInt16ToUint: + case glslang::EOpConvInt16ToUint64: + case glslang::EOpConvIntToUint8: + case glslang::EOpConvIntToUint16: + case glslang::EOpConvIntToUint64: + case glslang::EOpConvInt64ToUint8: + case glslang::EOpConvInt64ToUint16: + case glslang::EOpConvInt64ToUint: + case glslang::EOpConvUint8ToInt16: + case glslang::EOpConvUint8ToInt: + case glslang::EOpConvUint8ToInt64: + case glslang::EOpConvUint16ToInt8: + case glslang::EOpConvUint16ToInt: + case glslang::EOpConvUint16ToInt64: + case glslang::EOpConvUintToInt8: + case glslang::EOpConvUintToInt16: + case glslang::EOpConvUintToInt64: + case glslang::EOpConvUint64ToInt8: + case glslang::EOpConvUint64ToInt16: + case glslang::EOpConvUint64ToInt: + // OpSConvert/OpUConvert + OpBitCast + operand = createIntWidthConversion(op, operand, vectorSize); + + if (builder.isInSpecConstCodeGenMode()) { + // Build zero scalar or vector for OpIAdd. 
+            switch(op) {
+            case glslang::EOpConvInt16ToUint8:
+            case glslang::EOpConvIntToUint8:
+            case glslang::EOpConvInt64ToUint8:
+            case glslang::EOpConvUint16ToInt8:
+            case glslang::EOpConvUintToInt8:
+            case glslang::EOpConvUint64ToInt8:
+                zero = builder.makeUint8Constant(0);
+                break;
+            case glslang::EOpConvInt8ToUint16:
+            case glslang::EOpConvIntToUint16:
+            case glslang::EOpConvInt64ToUint16:
+            case glslang::EOpConvUint8ToInt16:
+            case glslang::EOpConvUintToInt16:
+            case glslang::EOpConvUint64ToInt16:
+                zero = builder.makeUint16Constant(0);
+                break;
+            case glslang::EOpConvInt8ToUint:
+            case glslang::EOpConvInt16ToUint:
+            case glslang::EOpConvInt64ToUint:
+            case glslang::EOpConvUint8ToInt:
+            case glslang::EOpConvUint16ToInt:
+            case glslang::EOpConvUint64ToInt:
+                zero = builder.makeUintConstant(0);
+                break;
+            case glslang::EOpConvInt8ToUint64:
+            case glslang::EOpConvInt16ToUint64:
+            case glslang::EOpConvIntToUint64:
+            case glslang::EOpConvUint8ToInt64:
+            case glslang::EOpConvUint16ToInt64:
+            case glslang::EOpConvUintToInt64:
+                zero = builder.makeUint64Constant(0);
+                break;
+            default:
+                assert(false && "Default missing");
+                break;
+            }
+            zero = makeSmearedConstant(zero, vectorSize);
+            // Use OpIAdd, instead of OpBitcast to do the conversion when
+            // generating for OpSpecConstantOp instruction.
+            return builder.createBinOp(spv::OpIAdd, destType, operand, zero);
+        }
+        // For normal run-time conversion instruction, use OpBitcast.
+        convOp = spv::OpBitcast;
+        break;
+    case glslang::EOpConvUint64ToPtr:
+        convOp = spv::OpConvertUToPtr;
+        break;
+    case glslang::EOpConvPtrToUint64:
+        convOp = spv::OpConvertPtrToU;
+        break;
+    case glslang::EOpConvPtrToUvec2:
+    case glslang::EOpConvUvec2ToPtr:
+        if (builder.isVector(operand))
+            builder.promoteIncorporatedExtension(spv::E_SPV_EXT_physical_storage_buffer,
+                                                 spv::E_SPV_KHR_physical_storage_buffer, spv::Spv_1_5);
+        convOp = spv::OpBitcast;
+        break;
+#endif
+
+    default:
+        break;
+    }
+
+    spv::Id result = 0;
+    if (convOp == spv::OpNop)
+        return result;
+
+    if (convOp == spv::OpSelect) {
+        zero = makeSmearedConstant(zero, vectorSize);
+        one = makeSmearedConstant(one, vectorSize);
+        result = builder.createTriOp(convOp, destType, operand, one, zero);
+    } else
+        result = builder.createUnaryOp(convOp, destType, operand);
+
+    result = builder.setPrecision(result, decorations.precision);
+    decorations.addNonUniform(builder, result);
+    return result;
+}
+
+spv::Id TGlslangToSpvTraverser::makeSmearedConstant(spv::Id constant, int vectorSize)
+{
+    if (vectorSize == 0)
+        return constant;
+
+    spv::Id vectorTypeId = builder.makeVectorType(builder.getTypeId(constant), vectorSize);
+    std::vector<spv::Id> components;
+    for (int c = 0; c < vectorSize; ++c)
+        components.push_back(constant);
+    return builder.makeCompositeConstant(vectorTypeId, components);
+}
+
+// For glslang ops that map to SPV atomic opCodes
+spv::Id TGlslangToSpvTraverser::createAtomicOperation(glslang::TOperator op, spv::Decoration /*precision*/,
+    spv::Id typeId, std::vector<spv::Id>& operands, glslang::TBasicType typeProxy,
+    const spv::Builder::AccessChain::CoherentFlags &lvalueCoherentFlags)
+{
+    spv::Op opCode = spv::OpNop;
+
+    switch (op) {
+    case glslang::EOpAtomicAdd:
+    case glslang::EOpImageAtomicAdd:
+    case glslang::EOpAtomicCounterAdd:
+        opCode = spv::OpAtomicIAdd;
+        if (typeProxy == glslang::EbtFloat || typeProxy == glslang::EbtDouble) {
+            opCode = spv::OpAtomicFAddEXT;
+            builder.addExtension(spv::E_SPV_EXT_shader_atomic_float_add);
+            if (typeProxy == glslang::EbtFloat)
+
builder.addCapability(spv::CapabilityAtomicFloat32AddEXT); + else + builder.addCapability(spv::CapabilityAtomicFloat64AddEXT); + } + break; + case glslang::EOpAtomicCounterSubtract: + opCode = spv::OpAtomicISub; + break; + case glslang::EOpAtomicMin: + case glslang::EOpImageAtomicMin: + case glslang::EOpAtomicCounterMin: + opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ? + spv::OpAtomicUMin : spv::OpAtomicSMin; + break; + case glslang::EOpAtomicMax: + case glslang::EOpImageAtomicMax: + case glslang::EOpAtomicCounterMax: + opCode = (typeProxy == glslang::EbtUint || typeProxy == glslang::EbtUint64) ? + spv::OpAtomicUMax : spv::OpAtomicSMax; + break; + case glslang::EOpAtomicAnd: + case glslang::EOpImageAtomicAnd: + case glslang::EOpAtomicCounterAnd: + opCode = spv::OpAtomicAnd; + break; + case glslang::EOpAtomicOr: + case glslang::EOpImageAtomicOr: + case glslang::EOpAtomicCounterOr: + opCode = spv::OpAtomicOr; + break; + case glslang::EOpAtomicXor: + case glslang::EOpImageAtomicXor: + case glslang::EOpAtomicCounterXor: + opCode = spv::OpAtomicXor; + break; + case glslang::EOpAtomicExchange: + case glslang::EOpImageAtomicExchange: + case glslang::EOpAtomicCounterExchange: + opCode = spv::OpAtomicExchange; + break; + case glslang::EOpAtomicCompSwap: + case glslang::EOpImageAtomicCompSwap: + case glslang::EOpAtomicCounterCompSwap: + opCode = spv::OpAtomicCompareExchange; + break; + case glslang::EOpAtomicCounterIncrement: + opCode = spv::OpAtomicIIncrement; + break; + case glslang::EOpAtomicCounterDecrement: + opCode = spv::OpAtomicIDecrement; + break; + case glslang::EOpAtomicCounter: + case glslang::EOpImageAtomicLoad: + case glslang::EOpAtomicLoad: + opCode = spv::OpAtomicLoad; + break; + case glslang::EOpAtomicStore: + case glslang::EOpImageAtomicStore: + opCode = spv::OpAtomicStore; + break; + default: + assert(0); + break; + } + + if (typeProxy == glslang::EbtInt64 || typeProxy == glslang::EbtUint64) + builder.addCapability(spv::CapabilityInt64Atomics); + + // Sort out the operands + // - mapping from glslang -> SPV + // - there are extra SPV operands that are optional in glslang + // - compare-exchange swaps the value and comparator + // - compare-exchange has an extra memory semantics + // - EOpAtomicCounterDecrement needs a post decrement + spv::Id pointerId = 0, compareId = 0, valueId = 0; + // scope defaults to Device in the old model, QueueFamilyKHR in the new model + spv::Id scopeId; + if (glslangIntermediate->usingVulkanMemoryModel()) { + scopeId = builder.makeUintConstant(spv::ScopeQueueFamilyKHR); + } else { + scopeId = builder.makeUintConstant(spv::ScopeDevice); + } + // semantics default to relaxed + spv::Id semanticsId = builder.makeUintConstant(lvalueCoherentFlags.isVolatile() && + glslangIntermediate->usingVulkanMemoryModel() ? 
+                                                   spv::MemorySemanticsVolatileMask :
+                                                   spv::MemorySemanticsMaskNone);
+    spv::Id semanticsId2 = semanticsId;
+
+    pointerId = operands[0];
+    if (opCode == spv::OpAtomicIIncrement || opCode == spv::OpAtomicIDecrement) {
+        // no additional operands
+    } else if (opCode == spv::OpAtomicCompareExchange) {
+        compareId = operands[1];
+        valueId = operands[2];
+        if (operands.size() > 3) {
+            scopeId = operands[3];
+            semanticsId = builder.makeUintConstant(
+                builder.getConstantScalar(operands[4]) | builder.getConstantScalar(operands[5]));
+            semanticsId2 = builder.makeUintConstant(
+                builder.getConstantScalar(operands[6]) | builder.getConstantScalar(operands[7]));
+        }
+    } else if (opCode == spv::OpAtomicLoad) {
+        if (operands.size() > 1) {
+            scopeId = operands[1];
+            semanticsId = builder.makeUintConstant(
+                builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]));
+        }
+    } else {
+        // atomic store or RMW
+        valueId = operands[1];
+        if (operands.size() > 2) {
+            scopeId = operands[2];
+            semanticsId = builder.makeUintConstant
+                (builder.getConstantScalar(operands[3]) | builder.getConstantScalar(operands[4]));
+        }
+    }
+
+    // Check for capabilities
+    unsigned semanticsImmediate = builder.getConstantScalar(semanticsId) | builder.getConstantScalar(semanticsId2);
+    if (semanticsImmediate & (spv::MemorySemanticsMakeAvailableKHRMask |
+                              spv::MemorySemanticsMakeVisibleKHRMask |
+                              spv::MemorySemanticsOutputMemoryKHRMask |
+                              spv::MemorySemanticsVolatileMask)) {
+        builder.addCapability(spv::CapabilityVulkanMemoryModelKHR);
+    }
+
+    if (glslangIntermediate->usingVulkanMemoryModel() && builder.getConstantScalar(scopeId) == spv::ScopeDevice) {
+        builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR);
+    }
+
+    std::vector<spv::Id> spvAtomicOperands; // hold the spv operands
+    spvAtomicOperands.push_back(pointerId);
+    spvAtomicOperands.push_back(scopeId);
+    spvAtomicOperands.push_back(semanticsId);
+    if (opCode == spv::OpAtomicCompareExchange) {
+        spvAtomicOperands.push_back(semanticsId2);
+        spvAtomicOperands.push_back(valueId);
+        spvAtomicOperands.push_back(compareId);
+    } else if (opCode != spv::OpAtomicLoad && opCode != spv::OpAtomicIIncrement && opCode != spv::OpAtomicIDecrement) {
+        spvAtomicOperands.push_back(valueId);
+    }
+
+    if (opCode == spv::OpAtomicStore) {
+        builder.createNoResultOp(opCode, spvAtomicOperands);
+        return 0;
+    } else {
+        spv::Id resultId = builder.createOp(opCode, typeId, spvAtomicOperands);
+
+        // GLSL and HLSL atomic-counter decrement return post-decrement value,
+        // while SPIR-V returns pre-decrement value. Translate between these semantics.
+        if (op == glslang::EOpAtomicCounterDecrement)
+            resultId = builder.createBinOp(spv::OpISub, typeId, resultId, builder.makeIntConstant(1));
+
+        return resultId;
+    }
+}
+
+// Create group invocation operations.
+spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op, spv::Id typeId,
+    std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    bool isUnsigned = isTypeUnsignedInt(typeProxy);
+    bool isFloat = isTypeFloat(typeProxy);
+
+    spv::Op opCode = spv::OpNop;
+    std::vector<spv::IdImmediate> spvGroupOperands;
+    spv::GroupOperation groupOperation = spv::GroupOperationMax;
+
+    if (op == glslang::EOpBallot || op == glslang::EOpReadFirstInvocation ||
+        op == glslang::EOpReadInvocation) {
+        builder.addExtension(spv::E_SPV_KHR_shader_ballot);
+        builder.addCapability(spv::CapabilitySubgroupBallotKHR);
+    } else if (op == glslang::EOpAnyInvocation ||
+        op == glslang::EOpAllInvocations ||
+        op == glslang::EOpAllInvocationsEqual) {
+        builder.addExtension(spv::E_SPV_KHR_subgroup_vote);
+        builder.addCapability(spv::CapabilitySubgroupVoteKHR);
+    } else {
+        builder.addCapability(spv::CapabilityGroups);
+        if (op == glslang::EOpMinInvocationsNonUniform ||
+            op == glslang::EOpMaxInvocationsNonUniform ||
+            op == glslang::EOpAddInvocationsNonUniform ||
+            op == glslang::EOpMinInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpMaxInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpAddInvocationsInclusiveScanNonUniform ||
+            op == glslang::EOpMinInvocationsExclusiveScanNonUniform ||
+            op == glslang::EOpMaxInvocationsExclusiveScanNonUniform ||
+            op == glslang::EOpAddInvocationsExclusiveScanNonUniform)
+            builder.addExtension(spv::E_SPV_AMD_shader_ballot);
+
+        switch (op) {
+        case glslang::EOpMinInvocations:
+        case glslang::EOpMaxInvocations:
+        case glslang::EOpAddInvocations:
+        case glslang::EOpMinInvocationsNonUniform:
+        case glslang::EOpMaxInvocationsNonUniform:
+        case glslang::EOpAddInvocationsNonUniform:
+            groupOperation = spv::GroupOperationReduce;
+            break;
+        case glslang::EOpMinInvocationsInclusiveScan:
+        case glslang::EOpMaxInvocationsInclusiveScan:
+        case glslang::EOpAddInvocationsInclusiveScan:
+        case glslang::EOpMinInvocationsInclusiveScanNonUniform:
+        case glslang::EOpMaxInvocationsInclusiveScanNonUniform:
+        case glslang::EOpAddInvocationsInclusiveScanNonUniform:
+            groupOperation = spv::GroupOperationInclusiveScan;
+            break;
+        case glslang::EOpMinInvocationsExclusiveScan:
+        case glslang::EOpMaxInvocationsExclusiveScan:
+        case glslang::EOpAddInvocationsExclusiveScan:
+        case glslang::EOpMinInvocationsExclusiveScanNonUniform:
+        case glslang::EOpMaxInvocationsExclusiveScanNonUniform:
+        case glslang::EOpAddInvocationsExclusiveScanNonUniform:
+            groupOperation = spv::GroupOperationExclusiveScan;
+            break;
+        default:
+            break;
+        }
+        spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+        spvGroupOperands.push_back(scope);
+        if (groupOperation != spv::GroupOperationMax) {
+            spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+            spvGroupOperands.push_back(groupOp);
+        }
+    }
+
+    for (auto opIt = operands.begin(); opIt != operands.end(); ++opIt) {
+        spv::IdImmediate op = { true, *opIt };
+        spvGroupOperands.push_back(op);
+    }
+
+    switch (op) {
+    case glslang::EOpAnyInvocation:
+        opCode = spv::OpSubgroupAnyKHR;
+        break;
+    case glslang::EOpAllInvocations:
+        opCode = spv::OpSubgroupAllKHR;
+        break;
+    case glslang::EOpAllInvocationsEqual:
+        opCode = spv::OpSubgroupAllEqualKHR;
+        break;
+    case glslang::EOpReadInvocation:
+        opCode = spv::OpSubgroupReadInvocationKHR;
+        if (builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+        break;
+    case glslang::EOpReadFirstInvocation:
+        opCode =
spv::OpSubgroupFirstInvocationKHR; + break; + case glslang::EOpBallot: + { + // NOTE: According to the spec, the result type of "OpSubgroupBallotKHR" must be a 4 component vector of 32 + // bit integer types. The GLSL built-in function "ballotARB()" assumes the maximum number of invocations in + // a subgroup is 64. Thus, we have to convert uvec4.xy to uint64_t as follow: + // + // result = Bitcast(SubgroupBallotKHR(Predicate).xy) + // + spv::Id uintType = builder.makeUintType(32); + spv::Id uvec4Type = builder.makeVectorType(uintType, 4); + spv::Id result = builder.createOp(spv::OpSubgroupBallotKHR, uvec4Type, spvGroupOperands); + + std::vector components; + components.push_back(builder.createCompositeExtract(result, uintType, 0)); + components.push_back(builder.createCompositeExtract(result, uintType, 1)); + + spv::Id uvec2Type = builder.makeVectorType(uintType, 2); + return builder.createUnaryOp(spv::OpBitcast, typeId, + builder.createCompositeConstruct(uvec2Type, components)); + } + + case glslang::EOpMinInvocations: + case glslang::EOpMaxInvocations: + case glslang::EOpAddInvocations: + case glslang::EOpMinInvocationsInclusiveScan: + case glslang::EOpMaxInvocationsInclusiveScan: + case glslang::EOpAddInvocationsInclusiveScan: + case glslang::EOpMinInvocationsExclusiveScan: + case glslang::EOpMaxInvocationsExclusiveScan: + case glslang::EOpAddInvocationsExclusiveScan: + if (op == glslang::EOpMinInvocations || + op == glslang::EOpMinInvocationsInclusiveScan || + op == glslang::EOpMinInvocationsExclusiveScan) { + if (isFloat) + opCode = spv::OpGroupFMin; + else { + if (isUnsigned) + opCode = spv::OpGroupUMin; + else + opCode = spv::OpGroupSMin; + } + } else if (op == glslang::EOpMaxInvocations || + op == glslang::EOpMaxInvocationsInclusiveScan || + op == glslang::EOpMaxInvocationsExclusiveScan) { + if (isFloat) + opCode = spv::OpGroupFMax; + else { + if (isUnsigned) + opCode = spv::OpGroupUMax; + else + opCode = spv::OpGroupSMax; + } + } else { + if (isFloat) + opCode = spv::OpGroupFAdd; + else + opCode = spv::OpGroupIAdd; + } + + if (builder.isVectorType(typeId)) + return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands); + + break; + case glslang::EOpMinInvocationsNonUniform: + case glslang::EOpMaxInvocationsNonUniform: + case glslang::EOpAddInvocationsNonUniform: + case glslang::EOpMinInvocationsInclusiveScanNonUniform: + case glslang::EOpMaxInvocationsInclusiveScanNonUniform: + case glslang::EOpAddInvocationsInclusiveScanNonUniform: + case glslang::EOpMinInvocationsExclusiveScanNonUniform: + case glslang::EOpMaxInvocationsExclusiveScanNonUniform: + case glslang::EOpAddInvocationsExclusiveScanNonUniform: + if (op == glslang::EOpMinInvocationsNonUniform || + op == glslang::EOpMinInvocationsInclusiveScanNonUniform || + op == glslang::EOpMinInvocationsExclusiveScanNonUniform) { + if (isFloat) + opCode = spv::OpGroupFMinNonUniformAMD; + else { + if (isUnsigned) + opCode = spv::OpGroupUMinNonUniformAMD; + else + opCode = spv::OpGroupSMinNonUniformAMD; + } + } + else if (op == glslang::EOpMaxInvocationsNonUniform || + op == glslang::EOpMaxInvocationsInclusiveScanNonUniform || + op == glslang::EOpMaxInvocationsExclusiveScanNonUniform) { + if (isFloat) + opCode = spv::OpGroupFMaxNonUniformAMD; + else { + if (isUnsigned) + opCode = spv::OpGroupUMaxNonUniformAMD; + else + opCode = spv::OpGroupSMaxNonUniformAMD; + } + } + else { + if (isFloat) + opCode = spv::OpGroupFAddNonUniformAMD; + else + opCode = spv::OpGroupIAddNonUniformAMD; + } + + if 
(builder.isVectorType(typeId))
+            return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands);
+
+        break;
+    default:
+        logger->missingFunctionality("invocation operation");
+        return spv::NoResult;
+    }
+
+    assert(opCode != spv::OpNop);
+    return builder.createOp(opCode, typeId, spvGroupOperands);
+}
+
+// Create group invocation operations on a vector
+spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation,
+    spv::Id typeId, std::vector<spv::Id>& operands)
+{
+    assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin ||
+           op == spv::OpGroupFMax || op == spv::OpGroupUMax || op == spv::OpGroupSMax ||
+           op == spv::OpGroupFAdd || op == spv::OpGroupIAdd || op == spv::OpGroupBroadcast ||
+           op == spv::OpSubgroupReadInvocationKHR ||
+           op == spv::OpGroupFMinNonUniformAMD || op == spv::OpGroupUMinNonUniformAMD ||
+           op == spv::OpGroupSMinNonUniformAMD ||
+           op == spv::OpGroupFMaxNonUniformAMD || op == spv::OpGroupUMaxNonUniformAMD ||
+           op == spv::OpGroupSMaxNonUniformAMD ||
+           op == spv::OpGroupFAddNonUniformAMD || op == spv::OpGroupIAddNonUniformAMD);
+
+    // Handle group invocation operations scalar by scalar.
+    // The result type is the same type as the original type.
+    // The algorithm is to:
+    //   - break the vector into scalars
+    //   - apply the operation to each scalar
+    //   - make a vector out the scalar results
+
+    // get the types sorted out
+    int numComponents = builder.getNumComponents(operands[0]);
+    spv::Id scalarType = builder.getScalarTypeId(builder.getTypeId(operands[0]));
+    std::vector<spv::Id> results;
+
+    // do each scalar op
+    for (int comp = 0; comp < numComponents; ++comp) {
+        std::vector<unsigned> indexes;
+        indexes.push_back(comp);
+        spv::IdImmediate scalar = { true, builder.createCompositeExtract(operands[0], scalarType, indexes) };
+        std::vector<spv::IdImmediate> spvGroupOperands;
+        if (op == spv::OpSubgroupReadInvocationKHR) {
+            spvGroupOperands.push_back(scalar);
+            spv::IdImmediate operand = { true, operands[1] };
+            spvGroupOperands.push_back(operand);
+        } else if (op == spv::OpGroupBroadcast) {
+            spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+            spvGroupOperands.push_back(scope);
+            spvGroupOperands.push_back(scalar);
+            spv::IdImmediate operand = { true, operands[1] };
+            spvGroupOperands.push_back(operand);
+        } else {
+            spv::IdImmediate scope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+            spvGroupOperands.push_back(scope);
+            spv::IdImmediate groupOp = { false, (unsigned)groupOperation };
+            spvGroupOperands.push_back(groupOp);
+            spvGroupOperands.push_back(scalar);
+        }
+
+        results.push_back(builder.createOp(op, scalarType, spvGroupOperands));
+    }
+
+    // put the pieces together
+    return builder.createCompositeConstruct(typeId, results);
+}
+
+// Create subgroup invocation operations.
+spv::Id TGlslangToSpvTraverser::createSubgroupOperation(glslang::TOperator op, spv::Id typeId,
+    std::vector<spv::Id>& operands, glslang::TBasicType typeProxy)
+{
+    // Add the required capabilities.
+ switch (op) { + case glslang::EOpSubgroupElect: + builder.addCapability(spv::CapabilityGroupNonUniform); + break; + case glslang::EOpSubgroupAll: + case glslang::EOpSubgroupAny: + case glslang::EOpSubgroupAllEqual: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformVote); + break; + case glslang::EOpSubgroupBroadcast: + case glslang::EOpSubgroupBroadcastFirst: + case glslang::EOpSubgroupBallot: + case glslang::EOpSubgroupInverseBallot: + case glslang::EOpSubgroupBallotBitExtract: + case glslang::EOpSubgroupBallotBitCount: + case glslang::EOpSubgroupBallotInclusiveBitCount: + case glslang::EOpSubgroupBallotExclusiveBitCount: + case glslang::EOpSubgroupBallotFindLSB: + case glslang::EOpSubgroupBallotFindMSB: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformBallot); + break; + case glslang::EOpSubgroupShuffle: + case glslang::EOpSubgroupShuffleXor: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformShuffle); + break; + case glslang::EOpSubgroupShuffleUp: + case glslang::EOpSubgroupShuffleDown: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformShuffleRelative); + break; + case glslang::EOpSubgroupAdd: + case glslang::EOpSubgroupMul: + case glslang::EOpSubgroupMin: + case glslang::EOpSubgroupMax: + case glslang::EOpSubgroupAnd: + case glslang::EOpSubgroupOr: + case glslang::EOpSubgroupXor: + case glslang::EOpSubgroupInclusiveAdd: + case glslang::EOpSubgroupInclusiveMul: + case glslang::EOpSubgroupInclusiveMin: + case glslang::EOpSubgroupInclusiveMax: + case glslang::EOpSubgroupInclusiveAnd: + case glslang::EOpSubgroupInclusiveOr: + case glslang::EOpSubgroupInclusiveXor: + case glslang::EOpSubgroupExclusiveAdd: + case glslang::EOpSubgroupExclusiveMul: + case glslang::EOpSubgroupExclusiveMin: + case glslang::EOpSubgroupExclusiveMax: + case glslang::EOpSubgroupExclusiveAnd: + case glslang::EOpSubgroupExclusiveOr: + case glslang::EOpSubgroupExclusiveXor: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformArithmetic); + break; + case glslang::EOpSubgroupClusteredAdd: + case glslang::EOpSubgroupClusteredMul: + case glslang::EOpSubgroupClusteredMin: + case glslang::EOpSubgroupClusteredMax: + case glslang::EOpSubgroupClusteredAnd: + case glslang::EOpSubgroupClusteredOr: + case glslang::EOpSubgroupClusteredXor: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformClustered); + break; + case glslang::EOpSubgroupQuadBroadcast: + case glslang::EOpSubgroupQuadSwapHorizontal: + case glslang::EOpSubgroupQuadSwapVertical: + case glslang::EOpSubgroupQuadSwapDiagonal: + builder.addCapability(spv::CapabilityGroupNonUniform); + builder.addCapability(spv::CapabilityGroupNonUniformQuad); + break; + case glslang::EOpSubgroupPartitionedAdd: + case glslang::EOpSubgroupPartitionedMul: + case glslang::EOpSubgroupPartitionedMin: + case glslang::EOpSubgroupPartitionedMax: + case glslang::EOpSubgroupPartitionedAnd: + case glslang::EOpSubgroupPartitionedOr: + case glslang::EOpSubgroupPartitionedXor: + case glslang::EOpSubgroupPartitionedInclusiveAdd: + case glslang::EOpSubgroupPartitionedInclusiveMul: + case glslang::EOpSubgroupPartitionedInclusiveMin: + case glslang::EOpSubgroupPartitionedInclusiveMax: + case glslang::EOpSubgroupPartitionedInclusiveAnd: + case 
glslang::EOpSubgroupPartitionedInclusiveOr: + case glslang::EOpSubgroupPartitionedInclusiveXor: + case glslang::EOpSubgroupPartitionedExclusiveAdd: + case glslang::EOpSubgroupPartitionedExclusiveMul: + case glslang::EOpSubgroupPartitionedExclusiveMin: + case glslang::EOpSubgroupPartitionedExclusiveMax: + case glslang::EOpSubgroupPartitionedExclusiveAnd: + case glslang::EOpSubgroupPartitionedExclusiveOr: + case glslang::EOpSubgroupPartitionedExclusiveXor: + builder.addExtension(spv::E_SPV_NV_shader_subgroup_partitioned); + builder.addCapability(spv::CapabilityGroupNonUniformPartitionedNV); + break; + default: assert(0 && "Unhandled subgroup operation!"); + } + + + const bool isUnsigned = isTypeUnsignedInt(typeProxy); + const bool isFloat = isTypeFloat(typeProxy); + const bool isBool = typeProxy == glslang::EbtBool; + + spv::Op opCode = spv::OpNop; + + // Figure out which opcode to use. + switch (op) { + case glslang::EOpSubgroupElect: opCode = spv::OpGroupNonUniformElect; break; + case glslang::EOpSubgroupAll: opCode = spv::OpGroupNonUniformAll; break; + case glslang::EOpSubgroupAny: opCode = spv::OpGroupNonUniformAny; break; + case glslang::EOpSubgroupAllEqual: opCode = spv::OpGroupNonUniformAllEqual; break; + case glslang::EOpSubgroupBroadcast: opCode = spv::OpGroupNonUniformBroadcast; break; + case glslang::EOpSubgroupBroadcastFirst: opCode = spv::OpGroupNonUniformBroadcastFirst; break; + case glslang::EOpSubgroupBallot: opCode = spv::OpGroupNonUniformBallot; break; + case glslang::EOpSubgroupInverseBallot: opCode = spv::OpGroupNonUniformInverseBallot; break; + case glslang::EOpSubgroupBallotBitExtract: opCode = spv::OpGroupNonUniformBallotBitExtract; break; + case glslang::EOpSubgroupBallotBitCount: + case glslang::EOpSubgroupBallotInclusiveBitCount: + case glslang::EOpSubgroupBallotExclusiveBitCount: opCode = spv::OpGroupNonUniformBallotBitCount; break; + case glslang::EOpSubgroupBallotFindLSB: opCode = spv::OpGroupNonUniformBallotFindLSB; break; + case glslang::EOpSubgroupBallotFindMSB: opCode = spv::OpGroupNonUniformBallotFindMSB; break; + case glslang::EOpSubgroupShuffle: opCode = spv::OpGroupNonUniformShuffle; break; + case glslang::EOpSubgroupShuffleXor: opCode = spv::OpGroupNonUniformShuffleXor; break; + case glslang::EOpSubgroupShuffleUp: opCode = spv::OpGroupNonUniformShuffleUp; break; + case glslang::EOpSubgroupShuffleDown: opCode = spv::OpGroupNonUniformShuffleDown; break; + case glslang::EOpSubgroupAdd: + case glslang::EOpSubgroupInclusiveAdd: + case glslang::EOpSubgroupExclusiveAdd: + case glslang::EOpSubgroupClusteredAdd: + case glslang::EOpSubgroupPartitionedAdd: + case glslang::EOpSubgroupPartitionedInclusiveAdd: + case glslang::EOpSubgroupPartitionedExclusiveAdd: + if (isFloat) { + opCode = spv::OpGroupNonUniformFAdd; + } else { + opCode = spv::OpGroupNonUniformIAdd; + } + break; + case glslang::EOpSubgroupMul: + case glslang::EOpSubgroupInclusiveMul: + case glslang::EOpSubgroupExclusiveMul: + case glslang::EOpSubgroupClusteredMul: + case glslang::EOpSubgroupPartitionedMul: + case glslang::EOpSubgroupPartitionedInclusiveMul: + case glslang::EOpSubgroupPartitionedExclusiveMul: + if (isFloat) { + opCode = spv::OpGroupNonUniformFMul; + } else { + opCode = spv::OpGroupNonUniformIMul; + } + break; + case glslang::EOpSubgroupMin: + case glslang::EOpSubgroupInclusiveMin: + case glslang::EOpSubgroupExclusiveMin: + case glslang::EOpSubgroupClusteredMin: + case glslang::EOpSubgroupPartitionedMin: + case glslang::EOpSubgroupPartitionedInclusiveMin: + case 
glslang::EOpSubgroupPartitionedExclusiveMin: + if (isFloat) { + opCode = spv::OpGroupNonUniformFMin; + } else if (isUnsigned) { + opCode = spv::OpGroupNonUniformUMin; + } else { + opCode = spv::OpGroupNonUniformSMin; + } + break; + case glslang::EOpSubgroupMax: + case glslang::EOpSubgroupInclusiveMax: + case glslang::EOpSubgroupExclusiveMax: + case glslang::EOpSubgroupClusteredMax: + case glslang::EOpSubgroupPartitionedMax: + case glslang::EOpSubgroupPartitionedInclusiveMax: + case glslang::EOpSubgroupPartitionedExclusiveMax: + if (isFloat) { + opCode = spv::OpGroupNonUniformFMax; + } else if (isUnsigned) { + opCode = spv::OpGroupNonUniformUMax; + } else { + opCode = spv::OpGroupNonUniformSMax; + } + break; + case glslang::EOpSubgroupAnd: + case glslang::EOpSubgroupInclusiveAnd: + case glslang::EOpSubgroupExclusiveAnd: + case glslang::EOpSubgroupClusteredAnd: + case glslang::EOpSubgroupPartitionedAnd: + case glslang::EOpSubgroupPartitionedInclusiveAnd: + case glslang::EOpSubgroupPartitionedExclusiveAnd: + if (isBool) { + opCode = spv::OpGroupNonUniformLogicalAnd; + } else { + opCode = spv::OpGroupNonUniformBitwiseAnd; + } + break; + case glslang::EOpSubgroupOr: + case glslang::EOpSubgroupInclusiveOr: + case glslang::EOpSubgroupExclusiveOr: + case glslang::EOpSubgroupClusteredOr: + case glslang::EOpSubgroupPartitionedOr: + case glslang::EOpSubgroupPartitionedInclusiveOr: + case glslang::EOpSubgroupPartitionedExclusiveOr: + if (isBool) { + opCode = spv::OpGroupNonUniformLogicalOr; + } else { + opCode = spv::OpGroupNonUniformBitwiseOr; + } + break; + case glslang::EOpSubgroupXor: + case glslang::EOpSubgroupInclusiveXor: + case glslang::EOpSubgroupExclusiveXor: + case glslang::EOpSubgroupClusteredXor: + case glslang::EOpSubgroupPartitionedXor: + case glslang::EOpSubgroupPartitionedInclusiveXor: + case glslang::EOpSubgroupPartitionedExclusiveXor: + if (isBool) { + opCode = spv::OpGroupNonUniformLogicalXor; + } else { + opCode = spv::OpGroupNonUniformBitwiseXor; + } + break; + case glslang::EOpSubgroupQuadBroadcast: opCode = spv::OpGroupNonUniformQuadBroadcast; break; + case glslang::EOpSubgroupQuadSwapHorizontal: + case glslang::EOpSubgroupQuadSwapVertical: + case glslang::EOpSubgroupQuadSwapDiagonal: opCode = spv::OpGroupNonUniformQuadSwap; break; + default: assert(0 && "Unhandled subgroup operation!"); + } + + // get the right Group Operation + spv::GroupOperation groupOperation = spv::GroupOperationMax; + switch (op) { + default: + break; + case glslang::EOpSubgroupBallotBitCount: + case glslang::EOpSubgroupAdd: + case glslang::EOpSubgroupMul: + case glslang::EOpSubgroupMin: + case glslang::EOpSubgroupMax: + case glslang::EOpSubgroupAnd: + case glslang::EOpSubgroupOr: + case glslang::EOpSubgroupXor: + groupOperation = spv::GroupOperationReduce; + break; + case glslang::EOpSubgroupBallotInclusiveBitCount: + case glslang::EOpSubgroupInclusiveAdd: + case glslang::EOpSubgroupInclusiveMul: + case glslang::EOpSubgroupInclusiveMin: + case glslang::EOpSubgroupInclusiveMax: + case glslang::EOpSubgroupInclusiveAnd: + case glslang::EOpSubgroupInclusiveOr: + case glslang::EOpSubgroupInclusiveXor: + groupOperation = spv::GroupOperationInclusiveScan; + break; + case glslang::EOpSubgroupBallotExclusiveBitCount: + case glslang::EOpSubgroupExclusiveAdd: + case glslang::EOpSubgroupExclusiveMul: + case glslang::EOpSubgroupExclusiveMin: + case glslang::EOpSubgroupExclusiveMax: + case glslang::EOpSubgroupExclusiveAnd: + case glslang::EOpSubgroupExclusiveOr: + case glslang::EOpSubgroupExclusiveXor: + 
groupOperation = spv::GroupOperationExclusiveScan;
+        break;
+    case glslang::EOpSubgroupClusteredAdd:
+    case glslang::EOpSubgroupClusteredMul:
+    case glslang::EOpSubgroupClusteredMin:
+    case glslang::EOpSubgroupClusteredMax:
+    case glslang::EOpSubgroupClusteredAnd:
+    case glslang::EOpSubgroupClusteredOr:
+    case glslang::EOpSubgroupClusteredXor:
+        groupOperation = spv::GroupOperationClusteredReduce;
+        break;
+    case glslang::EOpSubgroupPartitionedAdd:
+    case glslang::EOpSubgroupPartitionedMul:
+    case glslang::EOpSubgroupPartitionedMin:
+    case glslang::EOpSubgroupPartitionedMax:
+    case glslang::EOpSubgroupPartitionedAnd:
+    case glslang::EOpSubgroupPartitionedOr:
+    case glslang::EOpSubgroupPartitionedXor:
+        groupOperation = spv::GroupOperationPartitionedReduceNV;
+        break;
+    case glslang::EOpSubgroupPartitionedInclusiveAdd:
+    case glslang::EOpSubgroupPartitionedInclusiveMul:
+    case glslang::EOpSubgroupPartitionedInclusiveMin:
+    case glslang::EOpSubgroupPartitionedInclusiveMax:
+    case glslang::EOpSubgroupPartitionedInclusiveAnd:
+    case glslang::EOpSubgroupPartitionedInclusiveOr:
+    case glslang::EOpSubgroupPartitionedInclusiveXor:
+        groupOperation = spv::GroupOperationPartitionedInclusiveScanNV;
+        break;
+    case glslang::EOpSubgroupPartitionedExclusiveAdd:
+    case glslang::EOpSubgroupPartitionedExclusiveMul:
+    case glslang::EOpSubgroupPartitionedExclusiveMin:
+    case glslang::EOpSubgroupPartitionedExclusiveMax:
+    case glslang::EOpSubgroupPartitionedExclusiveAnd:
+    case glslang::EOpSubgroupPartitionedExclusiveOr:
+    case glslang::EOpSubgroupPartitionedExclusiveXor:
+        groupOperation = spv::GroupOperationPartitionedExclusiveScanNV;
+        break;
+    }
+
+    // build the instruction
+    std::vector<spv::IdImmediate> spvGroupOperands;
+
+    // Every operation begins with the Execution Scope operand.
+    spv::IdImmediate executionScope = { true, builder.makeUintConstant(spv::ScopeSubgroup) };
+    spvGroupOperands.push_back(executionScope);
+
+    // Next, for all operations that use a Group Operation, push that as an operand.
+    if (groupOperation != spv::GroupOperationMax) {
+        spv::IdImmediate groupOperand = { false, (unsigned)groupOperation };
+        spvGroupOperands.push_back(groupOperand);
+    }
+
+    // Push back the operands next.
+    for (auto opIt = operands.cbegin(); opIt != operands.cend(); ++opIt) {
+        spv::IdImmediate operand = { true, *opIt };
+        spvGroupOperands.push_back(operand);
+    }
+
+    // Some opcodes have additional operands.
+ spv::Id directionId = spv::NoResult; + switch (op) { + default: break; + case glslang::EOpSubgroupQuadSwapHorizontal: directionId = builder.makeUintConstant(0); break; + case glslang::EOpSubgroupQuadSwapVertical: directionId = builder.makeUintConstant(1); break; + case glslang::EOpSubgroupQuadSwapDiagonal: directionId = builder.makeUintConstant(2); break; + } + if (directionId != spv::NoResult) { + spv::IdImmediate direction = { true, directionId }; + spvGroupOperands.push_back(direction); + } + + return builder.createOp(opCode, typeId, spvGroupOperands); +} + +spv::Id TGlslangToSpvTraverser::createMiscOperation(glslang::TOperator op, spv::Decoration precision, + spv::Id typeId, std::vector& operands, glslang::TBasicType typeProxy) +{ + bool isUnsigned = isTypeUnsignedInt(typeProxy); + bool isFloat = isTypeFloat(typeProxy); + + spv::Op opCode = spv::OpNop; + int extBuiltins = -1; + int libCall = -1; + size_t consumedOperands = operands.size(); + spv::Id typeId0 = 0; + if (consumedOperands > 0) + typeId0 = builder.getTypeId(operands[0]); + spv::Id typeId1 = 0; + if (consumedOperands > 1) + typeId1 = builder.getTypeId(operands[1]); + spv::Id frexpIntType = 0; + + switch (op) { + case glslang::EOpMin: + if (isFloat) + libCall = nanMinMaxClamp ? spv::GLSLstd450NMin : spv::GLSLstd450FMin; + else if (isUnsigned) + libCall = spv::GLSLstd450UMin; + else + libCall = spv::GLSLstd450SMin; + builder.promoteScalar(precision, operands.front(), operands.back()); + break; + case glslang::EOpModf: + libCall = spv::GLSLstd450Modf; + break; + case glslang::EOpMax: + if (isFloat) + libCall = nanMinMaxClamp ? spv::GLSLstd450NMax : spv::GLSLstd450FMax; + else if (isUnsigned) + libCall = spv::GLSLstd450UMax; + else + libCall = spv::GLSLstd450SMax; + builder.promoteScalar(precision, operands.front(), operands.back()); + break; + case glslang::EOpPow: + libCall = spv::GLSLstd450Pow; + break; + case glslang::EOpDot: + opCode = spv::OpDot; + break; + case glslang::EOpAtan: + libCall = spv::GLSLstd450Atan2; + break; + + case glslang::EOpClamp: + if (isFloat) + libCall = nanMinMaxClamp ? spv::GLSLstd450NClamp : spv::GLSLstd450FClamp; + else if (isUnsigned) + libCall = spv::GLSLstd450UClamp; + else + libCall = spv::GLSLstd450SClamp; + builder.promoteScalar(precision, operands.front(), operands[1]); + builder.promoteScalar(precision, operands.front(), operands[2]); + break; + case glslang::EOpMix: + if (! builder.isBoolType(builder.getScalarTypeId(builder.getTypeId(operands.back())))) { + assert(isFloat); + libCall = spv::GLSLstd450FMix; + } else { + opCode = spv::OpSelect; + std::swap(operands.front(), operands.back()); + } + builder.promoteScalar(precision, operands.front(), operands.back()); + break; + case glslang::EOpStep: + libCall = spv::GLSLstd450Step; + builder.promoteScalar(precision, operands.front(), operands.back()); + break; + case glslang::EOpSmoothStep: + libCall = spv::GLSLstd450SmoothStep; + builder.promoteScalar(precision, operands[0], operands[2]); + builder.promoteScalar(precision, operands[1], operands[2]); + break; + + case glslang::EOpDistance: + libCall = spv::GLSLstd450Distance; + break; + case glslang::EOpCross: + libCall = spv::GLSLstd450Cross; + break; + case glslang::EOpFaceForward: + libCall = spv::GLSLstd450FaceForward; + break; + case glslang::EOpReflect: + libCall = spv::GLSLstd450Reflect; + break; + case glslang::EOpRefract: + libCall = spv::GLSLstd450Refract; + break; + case glslang::EOpBarrier: + { + // This is for the extended controlBarrier function, with four operands. 
+ // The unextended barrier() goes through createNoArgOperation. + assert(operands.size() == 4); + unsigned int executionScope = builder.getConstantScalar(operands[0]); + unsigned int memoryScope = builder.getConstantScalar(operands[1]); + unsigned int semantics = builder.getConstantScalar(operands[2]) | builder.getConstantScalar(operands[3]); + builder.createControlBarrier((spv::Scope)executionScope, (spv::Scope)memoryScope, + (spv::MemorySemanticsMask)semantics); + if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | + spv::MemorySemanticsMakeVisibleKHRMask | + spv::MemorySemanticsOutputMemoryKHRMask | + spv::MemorySemanticsVolatileMask)) { + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } + if (glslangIntermediate->usingVulkanMemoryModel() && (executionScope == spv::ScopeDevice || + memoryScope == spv::ScopeDevice)) { + builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR); + } + return 0; + } + break; + case glslang::EOpMemoryBarrier: + { + // This is for the extended memoryBarrier function, with three operands. + // The unextended memoryBarrier() goes through createNoArgOperation. + assert(operands.size() == 3); + unsigned int memoryScope = builder.getConstantScalar(operands[0]); + unsigned int semantics = builder.getConstantScalar(operands[1]) | builder.getConstantScalar(operands[2]); + builder.createMemoryBarrier((spv::Scope)memoryScope, (spv::MemorySemanticsMask)semantics); + if (semantics & (spv::MemorySemanticsMakeAvailableKHRMask | + spv::MemorySemanticsMakeVisibleKHRMask | + spv::MemorySemanticsOutputMemoryKHRMask | + spv::MemorySemanticsVolatileMask)) { + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } + if (glslangIntermediate->usingVulkanMemoryModel() && memoryScope == spv::ScopeDevice) { + builder.addCapability(spv::CapabilityVulkanMemoryModelDeviceScopeKHR); + } + return 0; + } + break; + +#ifndef GLSLANG_WEB + case glslang::EOpInterpolateAtSample: + if (typeProxy == glslang::EbtFloat16) + builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float); + libCall = spv::GLSLstd450InterpolateAtSample; + break; + case glslang::EOpInterpolateAtOffset: + if (typeProxy == glslang::EbtFloat16) + builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float); + libCall = spv::GLSLstd450InterpolateAtOffset; + break; + case glslang::EOpAddCarry: + opCode = spv::OpIAddCarry; + typeId = builder.makeStructResultType(typeId0, typeId0); + consumedOperands = 2; + break; + case glslang::EOpSubBorrow: + opCode = spv::OpISubBorrow; + typeId = builder.makeStructResultType(typeId0, typeId0); + consumedOperands = 2; + break; + case glslang::EOpUMulExtended: + opCode = spv::OpUMulExtended; + typeId = builder.makeStructResultType(typeId0, typeId0); + consumedOperands = 2; + break; + case glslang::EOpIMulExtended: + opCode = spv::OpSMulExtended; + typeId = builder.makeStructResultType(typeId0, typeId0); + consumedOperands = 2; + break; + case glslang::EOpBitfieldExtract: + if (isUnsigned) + opCode = spv::OpBitFieldUExtract; + else + opCode = spv::OpBitFieldSExtract; + break; + case glslang::EOpBitfieldInsert: + opCode = spv::OpBitFieldInsert; + break; + + case glslang::EOpFma: + libCall = spv::GLSLstd450Fma; + break; + case glslang::EOpFrexp: + { + libCall = spv::GLSLstd450FrexpStruct; + assert(builder.isPointerType(typeId1)); + typeId1 = builder.getContainedTypeId(typeId1); + int width = builder.getScalarTypeWidth(typeId1); + if (width == 16) + // Using 16-bit exp operand, enable extension SPV_AMD_gpu_shader_int16 + 
builder.addExtension(spv::E_SPV_AMD_gpu_shader_int16); + if (builder.getNumComponents(operands[0]) == 1) + frexpIntType = builder.makeIntegerType(width, true); + else + frexpIntType = builder.makeVectorType(builder.makeIntegerType(width, true), + builder.getNumComponents(operands[0])); + typeId = builder.makeStructResultType(typeId0, frexpIntType); + consumedOperands = 1; + } + break; + case glslang::EOpLdexp: + libCall = spv::GLSLstd450Ldexp; + break; + + case glslang::EOpReadInvocation: + return createInvocationsOperation(op, typeId, operands, typeProxy); + + case glslang::EOpSubgroupBroadcast: + case glslang::EOpSubgroupBallotBitExtract: + case glslang::EOpSubgroupShuffle: + case glslang::EOpSubgroupShuffleXor: + case glslang::EOpSubgroupShuffleUp: + case glslang::EOpSubgroupShuffleDown: + case glslang::EOpSubgroupClusteredAdd: + case glslang::EOpSubgroupClusteredMul: + case glslang::EOpSubgroupClusteredMin: + case glslang::EOpSubgroupClusteredMax: + case glslang::EOpSubgroupClusteredAnd: + case glslang::EOpSubgroupClusteredOr: + case glslang::EOpSubgroupClusteredXor: + case glslang::EOpSubgroupQuadBroadcast: + case glslang::EOpSubgroupPartitionedAdd: + case glslang::EOpSubgroupPartitionedMul: + case glslang::EOpSubgroupPartitionedMin: + case glslang::EOpSubgroupPartitionedMax: + case glslang::EOpSubgroupPartitionedAnd: + case glslang::EOpSubgroupPartitionedOr: + case glslang::EOpSubgroupPartitionedXor: + case glslang::EOpSubgroupPartitionedInclusiveAdd: + case glslang::EOpSubgroupPartitionedInclusiveMul: + case glslang::EOpSubgroupPartitionedInclusiveMin: + case glslang::EOpSubgroupPartitionedInclusiveMax: + case glslang::EOpSubgroupPartitionedInclusiveAnd: + case glslang::EOpSubgroupPartitionedInclusiveOr: + case glslang::EOpSubgroupPartitionedInclusiveXor: + case glslang::EOpSubgroupPartitionedExclusiveAdd: + case glslang::EOpSubgroupPartitionedExclusiveMul: + case glslang::EOpSubgroupPartitionedExclusiveMin: + case glslang::EOpSubgroupPartitionedExclusiveMax: + case glslang::EOpSubgroupPartitionedExclusiveAnd: + case glslang::EOpSubgroupPartitionedExclusiveOr: + case glslang::EOpSubgroupPartitionedExclusiveXor: + return createSubgroupOperation(op, typeId, operands, typeProxy); + + case glslang::EOpSwizzleInvocations: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot); + libCall = spv::SwizzleInvocationsAMD; + break; + case glslang::EOpSwizzleInvocationsMasked: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot); + libCall = spv::SwizzleInvocationsMaskedAMD; + break; + case glslang::EOpWriteInvocation: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_ballot); + libCall = spv::WriteInvocationAMD; + break; + + case glslang::EOpMin3: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax); + if (isFloat) + libCall = spv::FMin3AMD; + else { + if (isUnsigned) + libCall = spv::UMin3AMD; + else + libCall = spv::SMin3AMD; + } + break; + case glslang::EOpMax3: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax); + if (isFloat) + libCall = spv::FMax3AMD; + else { + if (isUnsigned) + libCall = spv::UMax3AMD; + else + libCall = spv::SMax3AMD; + } + break; + case glslang::EOpMid3: + extBuiltins = getExtBuiltins(spv::E_SPV_AMD_shader_trinary_minmax); + if (isFloat) + libCall = spv::FMid3AMD; + else { + if (isUnsigned) + libCall = spv::UMid3AMD; + else + libCall = spv::SMid3AMD; + } + break; + + case glslang::EOpInterpolateAtVertex: + if (typeProxy == glslang::EbtFloat16) + builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float); + extBuiltins 
= getExtBuiltins(spv::E_SPV_AMD_shader_explicit_vertex_parameter); + libCall = spv::InterpolateAtVertexAMD; + break; + + case glslang::EOpReportIntersection: + typeId = builder.makeBoolType(); + opCode = spv::OpReportIntersectionKHR; + break; + case glslang::EOpTrace: + builder.createNoResultOp(spv::OpTraceRayKHR, operands); + return 0; + case glslang::EOpExecuteCallable: + builder.createNoResultOp(spv::OpExecuteCallableKHR, operands); + return 0; + + case glslang::EOpRayQueryInitialize: + builder.createNoResultOp(spv::OpRayQueryInitializeKHR, operands); + return 0; + case glslang::EOpRayQueryTerminate: + builder.createNoResultOp(spv::OpRayQueryTerminateKHR, operands); + return 0; + case glslang::EOpRayQueryGenerateIntersection: + builder.createNoResultOp(spv::OpRayQueryGenerateIntersectionKHR, operands); + return 0; + case glslang::EOpRayQueryConfirmIntersection: + builder.createNoResultOp(spv::OpRayQueryConfirmIntersectionKHR, operands); + return 0; + case glslang::EOpRayQueryProceed: + typeId = builder.makeBoolType(); + opCode = spv::OpRayQueryProceedKHR; + break; + case glslang::EOpRayQueryGetIntersectionType: + typeId = builder.makeUintType(32); + opCode = spv::OpRayQueryGetIntersectionTypeKHR; + break; + case glslang::EOpRayQueryGetRayTMin: + typeId = builder.makeFloatType(32); + opCode = spv::OpRayQueryGetRayTMinKHR; + break; + case glslang::EOpRayQueryGetRayFlags: + typeId = builder.makeIntType(32); + opCode = spv::OpRayQueryGetRayFlagsKHR; + break; + case glslang::EOpRayQueryGetIntersectionT: + typeId = builder.makeFloatType(32); + opCode = spv::OpRayQueryGetIntersectionTKHR; + break; + case glslang::EOpRayQueryGetIntersectionInstanceCustomIndex: + typeId = builder.makeIntType(32); + opCode = spv::OpRayQueryGetIntersectionInstanceCustomIndexKHR; + break; + case glslang::EOpRayQueryGetIntersectionInstanceId: + typeId = builder.makeIntType(32); + opCode = spv::OpRayQueryGetIntersectionInstanceIdKHR; + break; + case glslang::EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + typeId = builder.makeIntType(32); + opCode = spv::OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR; + break; + case glslang::EOpRayQueryGetIntersectionGeometryIndex: + typeId = builder.makeIntType(32); + opCode = spv::OpRayQueryGetIntersectionGeometryIndexKHR; + break; + case glslang::EOpRayQueryGetIntersectionPrimitiveIndex: + typeId = builder.makeIntType(32); + opCode = spv::OpRayQueryGetIntersectionPrimitiveIndexKHR; + break; + case glslang::EOpRayQueryGetIntersectionBarycentrics: + typeId = builder.makeVectorType(builder.makeFloatType(32), 2); + opCode = spv::OpRayQueryGetIntersectionBarycentricsKHR; + break; + case glslang::EOpRayQueryGetIntersectionFrontFace: + typeId = builder.makeBoolType(); + opCode = spv::OpRayQueryGetIntersectionFrontFaceKHR; + break; + case glslang::EOpRayQueryGetIntersectionCandidateAABBOpaque: + typeId = builder.makeBoolType(); + opCode = spv::OpRayQueryGetIntersectionCandidateAABBOpaqueKHR; + break; + case glslang::EOpRayQueryGetIntersectionObjectRayDirection: + typeId = builder.makeVectorType(builder.makeFloatType(32), 3); + opCode = spv::OpRayQueryGetIntersectionObjectRayDirectionKHR; + break; + case glslang::EOpRayQueryGetIntersectionObjectRayOrigin: + typeId = builder.makeVectorType(builder.makeFloatType(32), 3); + opCode = spv::OpRayQueryGetIntersectionObjectRayOriginKHR; + break; + case glslang::EOpRayQueryGetWorldRayDirection: + typeId = builder.makeVectorType(builder.makeFloatType(32), 3); + opCode = spv::OpRayQueryGetWorldRayDirectionKHR; 
+ break; + case glslang::EOpRayQueryGetWorldRayOrigin: + typeId = builder.makeVectorType(builder.makeFloatType(32), 3); + opCode = spv::OpRayQueryGetWorldRayOriginKHR; + break; + case glslang::EOpRayQueryGetIntersectionObjectToWorld: + typeId = builder.makeMatrixType(builder.makeFloatType(32), 4, 3); + opCode = spv::OpRayQueryGetIntersectionObjectToWorldKHR; + break; + case glslang::EOpRayQueryGetIntersectionWorldToObject: + typeId = builder.makeMatrixType(builder.makeFloatType(32), 4, 3); + opCode = spv::OpRayQueryGetIntersectionWorldToObjectKHR; + break; + case glslang::EOpWritePackedPrimitiveIndices4x8NV: + builder.createNoResultOp(spv::OpWritePackedPrimitiveIndices4x8NV, operands); + return 0; + case glslang::EOpCooperativeMatrixMulAdd: + opCode = spv::OpCooperativeMatrixMulAddNV; + break; +#endif // GLSLANG_WEB + default: + return 0; + } + + spv::Id id = 0; + if (libCall >= 0) { + // Use an extended instruction from the standard library. + // Construct the call arguments, without modifying the original operands vector. + // We might need the remaining arguments, e.g. in the EOpFrexp case. + std::vector callArguments(operands.begin(), operands.begin() + consumedOperands); + id = builder.createBuiltinCall(typeId, extBuiltins >= 0 ? extBuiltins : stdBuiltins, libCall, callArguments); + } else if (opCode == spv::OpDot && !isFloat) { + // int dot(int, int) + // NOTE: never called for scalar/vector1, this is turned into simple mul before this can be reached + const int componentCount = builder.getNumComponents(operands[0]); + spv::Id mulOp = builder.createBinOp(spv::OpIMul, builder.getTypeId(operands[0]), operands[0], operands[1]); + builder.setPrecision(mulOp, precision); + id = builder.createCompositeExtract(mulOp, typeId, 0); + for (int i = 1; i < componentCount; ++i) { + builder.setPrecision(id, precision); + id = builder.createBinOp(spv::OpIAdd, typeId, id, builder.createCompositeExtract(mulOp, typeId, i)); + } + } else { + switch (consumedOperands) { + case 0: + // should all be handled by visitAggregate and createNoArgOperation + assert(0); + return 0; + case 1: + // should all be handled by createUnaryOperation + assert(0); + return 0; + case 2: + id = builder.createBinOp(opCode, typeId, operands[0], operands[1]); + break; + default: + // anything 3 or over doesn't have l-value operands, so all should be consumed + assert(consumedOperands == operands.size()); + id = builder.createOp(opCode, typeId, operands); + break; + } + } + +#ifndef GLSLANG_WEB + // Decode the return types that were structures + switch (op) { + case glslang::EOpAddCarry: + case glslang::EOpSubBorrow: + builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]); + id = builder.createCompositeExtract(id, typeId0, 0); + break; + case glslang::EOpUMulExtended: + case glslang::EOpIMulExtended: + builder.createStore(builder.createCompositeExtract(id, typeId0, 0), operands[3]); + builder.createStore(builder.createCompositeExtract(id, typeId0, 1), operands[2]); + break; + case glslang::EOpFrexp: + { + assert(operands.size() == 2); + if (builder.isFloatType(builder.getScalarTypeId(typeId1))) { + // "exp" is floating-point type (from HLSL intrinsic) + spv::Id member1 = builder.createCompositeExtract(id, frexpIntType, 1); + member1 = builder.createUnaryOp(spv::OpConvertSToF, typeId1, member1); + builder.createStore(member1, operands[1]); + } else + // "exp" is integer type (from GLSL built-in function) + builder.createStore(builder.createCompositeExtract(id, frexpIntType, 1), operands[1]); + id = 
builder.createCompositeExtract(id, typeId0, 0); + } + break; + default: + break; + } +#endif + + return builder.setPrecision(id, precision); +} + +// Intrinsics with no arguments (or no return value, and no precision). +spv::Id TGlslangToSpvTraverser::createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId) +{ + // GLSL memory barriers use queuefamily scope in new model, device scope in old model + spv::Scope memoryBarrierScope = glslangIntermediate->usingVulkanMemoryModel() ? + spv::ScopeQueueFamilyKHR : spv::ScopeDevice; + + switch (op) { + case glslang::EOpBarrier: + if (glslangIntermediate->getStage() == EShLangTessControl) { + if (glslangIntermediate->usingVulkanMemoryModel()) { + builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup, + spv::MemorySemanticsOutputMemoryKHRMask | + spv::MemorySemanticsAcquireReleaseMask); + builder.addCapability(spv::CapabilityVulkanMemoryModelKHR); + } else { + builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeInvocation, spv::MemorySemanticsMaskNone); + } + } else { + builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup, + spv::MemorySemanticsWorkgroupMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + } + return 0; + case glslang::EOpMemoryBarrier: + builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAllMemory | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpMemoryBarrierBuffer: + builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsUniformMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpMemoryBarrierShared: + builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsWorkgroupMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpGroupMemoryBarrier: + builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsAllMemory | + spv::MemorySemanticsAcquireReleaseMask); + return 0; +#ifndef GLSLANG_WEB + case glslang::EOpMemoryBarrierAtomicCounter: + builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsAtomicCounterMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpMemoryBarrierImage: + builder.createMemoryBarrier(memoryBarrierScope, spv::MemorySemanticsImageMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpAllMemoryBarrierWithGroupSync: + builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice, + spv::MemorySemanticsAllMemory | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpDeviceMemoryBarrier: + builder.createMemoryBarrier(spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask | + spv::MemorySemanticsImageMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpDeviceMemoryBarrierWithGroupSync: + builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeDevice, spv::MemorySemanticsUniformMemoryMask | + spv::MemorySemanticsImageMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpWorkgroupMemoryBarrier: + builder.createMemoryBarrier(spv::ScopeWorkgroup, spv::MemorySemanticsWorkgroupMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpWorkgroupMemoryBarrierWithGroupSync: + builder.createControlBarrier(spv::ScopeWorkgroup, spv::ScopeWorkgroup, + spv::MemorySemanticsWorkgroupMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return 0; + case glslang::EOpSubgroupBarrier: + 
builder.createControlBarrier(spv::ScopeSubgroup, spv::ScopeSubgroup, spv::MemorySemanticsAllMemory | + spv::MemorySemanticsAcquireReleaseMask); + return spv::NoResult; + case glslang::EOpSubgroupMemoryBarrier: + builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsAllMemory | + spv::MemorySemanticsAcquireReleaseMask); + return spv::NoResult; + case glslang::EOpSubgroupMemoryBarrierBuffer: + builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsUniformMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return spv::NoResult; + case glslang::EOpSubgroupMemoryBarrierImage: + builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsImageMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return spv::NoResult; + case glslang::EOpSubgroupMemoryBarrierShared: + builder.createMemoryBarrier(spv::ScopeSubgroup, spv::MemorySemanticsWorkgroupMemoryMask | + spv::MemorySemanticsAcquireReleaseMask); + return spv::NoResult; + + case glslang::EOpEmitVertex: + builder.createNoResultOp(spv::OpEmitVertex); + return 0; + case glslang::EOpEndPrimitive: + builder.createNoResultOp(spv::OpEndPrimitive); + return 0; + + case glslang::EOpSubgroupElect: { + std::vector operands; + return createSubgroupOperation(op, typeId, operands, glslang::EbtVoid); + } + case glslang::EOpTime: + { + std::vector args; // Dummy arguments + spv::Id id = builder.createBuiltinCall(typeId, getExtBuiltins(spv::E_SPV_AMD_gcn_shader), spv::TimeAMD, args); + return builder.setPrecision(id, precision); + } + case glslang::EOpIgnoreIntersection: + builder.createNoResultOp(spv::OpIgnoreIntersectionKHR); + return 0; + case glslang::EOpTerminateRay: + builder.createNoResultOp(spv::OpTerminateRayKHR); + return 0; + case glslang::EOpRayQueryInitialize: + builder.createNoResultOp(spv::OpRayQueryInitializeKHR); + return 0; + case glslang::EOpRayQueryTerminate: + builder.createNoResultOp(spv::OpRayQueryTerminateKHR); + return 0; + case glslang::EOpRayQueryGenerateIntersection: + builder.createNoResultOp(spv::OpRayQueryGenerateIntersectionKHR); + return 0; + case glslang::EOpRayQueryConfirmIntersection: + builder.createNoResultOp(spv::OpRayQueryConfirmIntersectionKHR); + return 0; + case glslang::EOpBeginInvocationInterlock: + builder.createNoResultOp(spv::OpBeginInvocationInterlockEXT); + return 0; + case glslang::EOpEndInvocationInterlock: + builder.createNoResultOp(spv::OpEndInvocationInterlockEXT); + return 0; + + case glslang::EOpIsHelperInvocation: + { + std::vector args; // Dummy arguments + builder.addExtension(spv::E_SPV_EXT_demote_to_helper_invocation); + builder.addCapability(spv::CapabilityDemoteToHelperInvocationEXT); + return builder.createOp(spv::OpIsHelperInvocationEXT, typeId, args); + } + + case glslang::EOpReadClockSubgroupKHR: { + std::vector args; + args.push_back(builder.makeUintConstant(spv::ScopeSubgroup)); + builder.addExtension(spv::E_SPV_KHR_shader_clock); + builder.addCapability(spv::CapabilityShaderClockKHR); + return builder.createOp(spv::OpReadClockKHR, typeId, args); + } + + case glslang::EOpReadClockDeviceKHR: { + std::vector args; + args.push_back(builder.makeUintConstant(spv::ScopeDevice)); + builder.addExtension(spv::E_SPV_KHR_shader_clock); + builder.addCapability(spv::CapabilityShaderClockKHR); + return builder.createOp(spv::OpReadClockKHR, typeId, args); + } +#endif + default: + break; + } + + logger->missingFunctionality("unknown operation with no arguments"); + + return 0; +} + +spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol) 
+{ + auto iter = symbolValues.find(symbol->getId()); + spv::Id id; + if (symbolValues.end() != iter) { + id = iter->second; + return id; + } + + // it was not found, create it + spv::BuiltIn builtIn = TranslateBuiltInDecoration(symbol->getQualifier().builtIn, false); + auto forcedType = getForcedType(symbol->getQualifier().builtIn, symbol->getType()); + id = createSpvVariable(symbol, forcedType.first); + symbolValues[symbol->getId()] = id; + if (forcedType.second != spv::NoType) + forceType[id] = forcedType.second; + + if (symbol->getBasicType() != glslang::EbtBlock) { + builder.addDecoration(id, TranslatePrecisionDecoration(symbol->getType())); + builder.addDecoration(id, TranslateInterpolationDecoration(symbol->getType().getQualifier())); + builder.addDecoration(id, TranslateAuxiliaryStorageDecoration(symbol->getType().getQualifier())); +#ifndef GLSLANG_WEB + addMeshNVDecoration(id, /*member*/ -1, symbol->getType().getQualifier()); + if (symbol->getQualifier().hasComponent()) + builder.addDecoration(id, spv::DecorationComponent, symbol->getQualifier().layoutComponent); + if (symbol->getQualifier().hasIndex()) + builder.addDecoration(id, spv::DecorationIndex, symbol->getQualifier().layoutIndex); +#endif + if (symbol->getType().getQualifier().hasSpecConstantId()) + builder.addDecoration(id, spv::DecorationSpecId, symbol->getType().getQualifier().layoutSpecConstantId); + // atomic counters use this: + if (symbol->getQualifier().hasOffset()) + builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutOffset); + } + + if (symbol->getQualifier().hasLocation()) + builder.addDecoration(id, spv::DecorationLocation, symbol->getQualifier().layoutLocation); + builder.addDecoration(id, TranslateInvariantDecoration(symbol->getType().getQualifier())); + if (symbol->getQualifier().hasStream() && glslangIntermediate->isMultiStream()) { + builder.addCapability(spv::CapabilityGeometryStreams); + builder.addDecoration(id, spv::DecorationStream, symbol->getQualifier().layoutStream); + } + if (symbol->getQualifier().hasSet()) + builder.addDecoration(id, spv::DecorationDescriptorSet, symbol->getQualifier().layoutSet); + else if (IsDescriptorResource(symbol->getType())) { + // default to 0 + builder.addDecoration(id, spv::DecorationDescriptorSet, 0); + } + if (symbol->getQualifier().hasBinding()) + builder.addDecoration(id, spv::DecorationBinding, symbol->getQualifier().layoutBinding); + else if (IsDescriptorResource(symbol->getType())) { + // default to 0 + builder.addDecoration(id, spv::DecorationBinding, 0); + } + if (symbol->getQualifier().hasAttachment()) + builder.addDecoration(id, spv::DecorationInputAttachmentIndex, symbol->getQualifier().layoutAttachment); + if (glslangIntermediate->getXfbMode()) { + builder.addCapability(spv::CapabilityTransformFeedback); + if (symbol->getQualifier().hasXfbBuffer()) { + builder.addDecoration(id, spv::DecorationXfbBuffer, symbol->getQualifier().layoutXfbBuffer); + unsigned stride = glslangIntermediate->getXfbStride(symbol->getQualifier().layoutXfbBuffer); + if (stride != glslang::TQualifier::layoutXfbStrideEnd) + builder.addDecoration(id, spv::DecorationXfbStride, stride); + } + if (symbol->getQualifier().hasXfbOffset()) + builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutXfbOffset); + } + + // add built-in variable decoration + if (builtIn != spv::BuiltInMax) { + builder.addDecoration(id, spv::DecorationBuiltIn, (int)builtIn); + } + +#ifndef GLSLANG_WEB + if (symbol->getType().isImage()) { + std::vector memory; + 
TranslateMemoryDecoration(symbol->getType().getQualifier(), memory, + glslangIntermediate->usingVulkanMemoryModel()); + for (unsigned int i = 0; i < memory.size(); ++i) + builder.addDecoration(id, memory[i]); + } + + // nonuniform + builder.addDecoration(id, TranslateNonUniformDecoration(symbol->getType().getQualifier())); + + if (builtIn == spv::BuiltInSampleMask) { + spv::Decoration decoration; + // GL_NV_sample_mask_override_coverage extension + if (glslangIntermediate->getLayoutOverrideCoverage()) + decoration = (spv::Decoration)spv::DecorationOverrideCoverageNV; + else + decoration = (spv::Decoration)spv::DecorationMax; + builder.addDecoration(id, decoration); + if (decoration != spv::DecorationMax) { + builder.addCapability(spv::CapabilitySampleMaskOverrideCoverageNV); + builder.addExtension(spv::E_SPV_NV_sample_mask_override_coverage); + } + } + else if (builtIn == spv::BuiltInLayer) { + // SPV_NV_viewport_array2 extension + if (symbol->getQualifier().layoutViewportRelative) { + builder.addDecoration(id, (spv::Decoration)spv::DecorationViewportRelativeNV); + builder.addCapability(spv::CapabilityShaderViewportMaskNV); + builder.addExtension(spv::E_SPV_NV_viewport_array2); + } + if (symbol->getQualifier().layoutSecondaryViewportRelativeOffset != -2048) { + builder.addDecoration(id, (spv::Decoration)spv::DecorationSecondaryViewportRelativeNV, + symbol->getQualifier().layoutSecondaryViewportRelativeOffset); + builder.addCapability(spv::CapabilityShaderStereoViewNV); + builder.addExtension(spv::E_SPV_NV_stereo_view_rendering); + } + } + + if (symbol->getQualifier().layoutPassthrough) { + builder.addDecoration(id, spv::DecorationPassthroughNV); + builder.addCapability(spv::CapabilityGeometryShaderPassthroughNV); + builder.addExtension(spv::E_SPV_NV_geometry_shader_passthrough); + } + if (symbol->getQualifier().pervertexNV) { + builder.addDecoration(id, spv::DecorationPerVertexNV); + builder.addCapability(spv::CapabilityFragmentBarycentricNV); + builder.addExtension(spv::E_SPV_NV_fragment_shader_barycentric); + } + + if (glslangIntermediate->getHlslFunctionality1() && symbol->getType().getQualifier().semanticName != nullptr) { + builder.addExtension("SPV_GOOGLE_hlsl_functionality1"); + builder.addDecoration(id, (spv::Decoration)spv::DecorationHlslSemanticGOOGLE, + symbol->getType().getQualifier().semanticName); + } + + if (symbol->isReference()) { + builder.addDecoration(id, symbol->getType().getQualifier().restrict ? + spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT); + } +#endif + + return id; +} + +#ifndef GLSLANG_WEB +// add per-primitive, per-view. per-task decorations to a struct member (member >= 0) or an object +void TGlslangToSpvTraverser::addMeshNVDecoration(spv::Id id, int member, const glslang::TQualifier& qualifier) +{ + if (member >= 0) { + if (qualifier.perPrimitiveNV) { + // Need to add capability/extension for fragment shader. + // Mesh shader already adds this by default. + if (glslangIntermediate->getStage() == EShLangFragment) { + builder.addCapability(spv::CapabilityMeshShadingNV); + builder.addExtension(spv::E_SPV_NV_mesh_shader); + } + builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerPrimitiveNV); + } + if (qualifier.perViewNV) + builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerViewNV); + if (qualifier.perTaskNV) + builder.addMemberDecoration(id, (unsigned)member, spv::DecorationPerTaskNV); + } else { + if (qualifier.perPrimitiveNV) { + // Need to add capability/extension for fragment shader. 
+ // Mesh shader already adds this by default. + if (glslangIntermediate->getStage() == EShLangFragment) { + builder.addCapability(spv::CapabilityMeshShadingNV); + builder.addExtension(spv::E_SPV_NV_mesh_shader); + } + builder.addDecoration(id, spv::DecorationPerPrimitiveNV); + } + if (qualifier.perViewNV) + builder.addDecoration(id, spv::DecorationPerViewNV); + if (qualifier.perTaskNV) + builder.addDecoration(id, spv::DecorationPerTaskNV); + } +} +#endif + +// Make a full tree of instructions to build a SPIR-V specialization constant, +// or regular constant if possible. +// +// TBD: this is not yet done, nor verified to be the best design, it does do the leaf symbols though +// +// Recursively walk the nodes. The nodes form a tree whose leaves are +// regular constants, which themselves are trees that createSpvConstant() +// recursively walks. So, this function walks the "top" of the tree: +// - emit specialization constant-building instructions for specConstant +// - when running into a non-spec-constant, switch to createSpvConstant() +spv::Id TGlslangToSpvTraverser::createSpvConstant(const glslang::TIntermTyped& node) +{ + assert(node.getQualifier().isConstant()); + + // Handle front-end constants first (non-specialization constants). + if (! node.getQualifier().specConstant) { + // hand off to the non-spec-constant path + assert(node.getAsConstantUnion() != nullptr || node.getAsSymbolNode() != nullptr); + int nextConst = 0; + return createSpvConstantFromConstUnionArray(node.getType(), node.getAsConstantUnion() ? + node.getAsConstantUnion()->getConstArray() : node.getAsSymbolNode()->getConstArray(), + nextConst, false); + } + + // We now know we have a specialization constant to build + + // Extra capabilities may be needed. + if (node.getType().contains8BitInt()) + builder.addCapability(spv::CapabilityInt8); + if (node.getType().contains16BitFloat()) + builder.addCapability(spv::CapabilityFloat16); + if (node.getType().contains16BitInt()) + builder.addCapability(spv::CapabilityInt16); + if (node.getType().contains64BitInt()) + builder.addCapability(spv::CapabilityInt64); + if (node.getType().containsDouble()) + builder.addCapability(spv::CapabilityFloat64); + + // gl_WorkGroupSize is a special case until the front-end handles hierarchical specialization constants, + // even then, its specialization ids are handled by special case syntax in GLSL: layout(local_size_x = ... + if (node.getType().getQualifier().builtIn == glslang::EbvWorkGroupSize) { + std::vector<spv::Id> dimConstId; + for (int dim = 0; dim < 3; ++dim) { + bool specConst = (glslangIntermediate->getLocalSizeSpecId(dim) != glslang::TQualifier::layoutNotSet); + dimConstId.push_back(builder.makeUintConstant(glslangIntermediate->getLocalSize(dim), specConst)); + if (specConst) { + builder.addDecoration(dimConstId.back(), spv::DecorationSpecId, + glslangIntermediate->getLocalSizeSpecId(dim)); + } + } + return builder.makeCompositeConstant(builder.makeVectorType(builder.makeUintType(32), 3), dimConstId, true); + } + + // An AST node labelled as specialization constant should be a symbol node. + // Its initializer should either be a sub tree with constant nodes, or a constant union array. + if (auto* sn = node.getAsSymbolNode()) { + spv::Id result; + if (auto* sub_tree = sn->getConstSubtree()) { + // Traverse the constant constructor sub tree like generating normal run-time instructions.
+ // During the AST traversal, if the node is marked as 'specConstant', SpecConstantOpModeGuard + // will set the builder into spec constant op instruction generating mode. + sub_tree->traverse(this); + result = accessChainLoad(sub_tree->getType()); + } else if (auto* const_union_array = &sn->getConstArray()) { + int nextConst = 0; + result = createSpvConstantFromConstUnionArray(sn->getType(), *const_union_array, nextConst, true); + } else { + logger->missingFunctionality("Invalid initializer for spec constant."); + return spv::NoResult; + } + builder.addName(result, sn->getName().c_str()); + return result; + } + + // Neither a front-end constant node, nor a specialization constant node with constant union array or + // constant sub tree as initializer. + logger->missingFunctionality("Neither a front-end constant nor a spec constant."); + return spv::NoResult; +} + +// Use 'consts' as the flattened glslang source of scalar constants to recursively +// build the aggregate SPIR-V constant. +// +// If there are not enough elements present in 'consts', 0 will be substituted; +// an empty 'consts' can be used to create a fully zeroed SPIR-V constant. +// +spv::Id TGlslangToSpvTraverser::createSpvConstantFromConstUnionArray(const glslang::TType& glslangType, + const glslang::TConstUnionArray& consts, int& nextConst, bool specConstant) +{ + // vector of constants for SPIR-V + std::vector<spv::Id> spvConsts; + + // Type is used for struct and array constants + spv::Id typeId = convertGlslangToSpvType(glslangType); + + if (glslangType.isArray()) { + glslang::TType elementType(glslangType, 0); + for (int i = 0; i < glslangType.getOuterArraySize(); ++i) + spvConsts.push_back(createSpvConstantFromConstUnionArray(elementType, consts, nextConst, false)); + } else if (glslangType.isMatrix()) { + glslang::TType vectorType(glslangType, 0); + for (int col = 0; col < glslangType.getMatrixCols(); ++col) + spvConsts.push_back(createSpvConstantFromConstUnionArray(vectorType, consts, nextConst, false)); + } else if (glslangType.isCoopMat()) { + glslang::TType componentType(glslangType.getBasicType()); + spvConsts.push_back(createSpvConstantFromConstUnionArray(componentType, consts, nextConst, false)); + } else if (glslangType.isStruct()) { + glslang::TVector<glslang::TTypeLoc>::const_iterator iter; + for (iter = glslangType.getStruct()->begin(); iter != glslangType.getStruct()->end(); ++iter) + spvConsts.push_back(createSpvConstantFromConstUnionArray(*iter->type, consts, nextConst, false)); + } else if (glslangType.getVectorSize() > 1) { + for (unsigned int i = 0; i < (unsigned int)glslangType.getVectorSize(); ++i) { + bool zero = nextConst >= consts.size(); + switch (glslangType.getBasicType()) { + case glslang::EbtInt: + spvConsts.push_back(builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst())); + break; + case glslang::EbtUint: + spvConsts.push_back(builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst())); + break; + case glslang::EbtFloat: + spvConsts.push_back(builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst())); + break; + case glslang::EbtBool: + spvConsts.push_back(builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst())); + break; +#ifndef GLSLANG_WEB + case glslang::EbtInt8: + spvConsts.push_back(builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const())); + break; + case glslang::EbtUint8: + spvConsts.push_back(builder.makeUint8Constant(zero ?
0 : consts[nextConst].getU8Const())); + break; + case glslang::EbtInt16: + spvConsts.push_back(builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const())); + break; + case glslang::EbtUint16: + spvConsts.push_back(builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const())); + break; + case glslang::EbtInt64: + spvConsts.push_back(builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const())); + break; + case glslang::EbtUint64: + spvConsts.push_back(builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const())); + break; + case glslang::EbtDouble: + spvConsts.push_back(builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst())); + break; + case glslang::EbtFloat16: + spvConsts.push_back(builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst())); + break; +#endif + default: + assert(0); + break; + } + ++nextConst; + } + } else { + // we have a non-aggregate (scalar) constant + bool zero = nextConst >= consts.size(); + spv::Id scalar = 0; + switch (glslangType.getBasicType()) { + case glslang::EbtInt: + scalar = builder.makeIntConstant(zero ? 0 : consts[nextConst].getIConst(), specConstant); + break; + case glslang::EbtUint: + scalar = builder.makeUintConstant(zero ? 0 : consts[nextConst].getUConst(), specConstant); + break; + case glslang::EbtFloat: + scalar = builder.makeFloatConstant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant); + break; + case glslang::EbtBool: + scalar = builder.makeBoolConstant(zero ? false : consts[nextConst].getBConst(), specConstant); + break; +#ifndef GLSLANG_WEB + case glslang::EbtInt8: + scalar = builder.makeInt8Constant(zero ? 0 : consts[nextConst].getI8Const(), specConstant); + break; + case glslang::EbtUint8: + scalar = builder.makeUint8Constant(zero ? 0 : consts[nextConst].getU8Const(), specConstant); + break; + case glslang::EbtInt16: + scalar = builder.makeInt16Constant(zero ? 0 : consts[nextConst].getI16Const(), specConstant); + break; + case glslang::EbtUint16: + scalar = builder.makeUint16Constant(zero ? 0 : consts[nextConst].getU16Const(), specConstant); + break; + case glslang::EbtInt64: + scalar = builder.makeInt64Constant(zero ? 0 : consts[nextConst].getI64Const(), specConstant); + break; + case glslang::EbtUint64: + scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant); + break; + case glslang::EbtDouble: + scalar = builder.makeDoubleConstant(zero ? 0.0 : consts[nextConst].getDConst(), specConstant); + break; + case glslang::EbtFloat16: + scalar = builder.makeFloat16Constant(zero ? 0.0F : (float)consts[nextConst].getDConst(), specConstant); + break; + case glslang::EbtReference: + scalar = builder.makeUint64Constant(zero ? 0 : consts[nextConst].getU64Const(), specConstant); + scalar = builder.createUnaryOp(spv::OpBitcast, typeId, scalar); + break; +#endif + case glslang::EbtString: + scalar = builder.getStringId(consts[nextConst].getSConst()->c_str()); + break; + default: + assert(0); + break; + } + ++nextConst; + return scalar; + } + + return builder.makeCompositeConstant(typeId, spvConsts); +} + +// Return true if the node is a constant or symbol whose reading has no +// non-trivial observable cost or effect. 
+bool TGlslangToSpvTraverser::isTrivialLeaf(const glslang::TIntermTyped* node) +{ + // don't know what this is + if (node == nullptr) + return false; + + // a constant is safe + if (node->getAsConstantUnion() != nullptr) + return true; + + // not a symbol means non-trivial + if (node->getAsSymbolNode() == nullptr) + return false; + + // a symbol, depends on what's being read + switch (node->getType().getQualifier().storage) { + case glslang::EvqTemporary: + case glslang::EvqGlobal: + case glslang::EvqIn: + case glslang::EvqInOut: + case glslang::EvqConst: + case glslang::EvqConstReadOnly: + case glslang::EvqUniform: + return true; + default: + return false; + } +} + +// A node is trivial if it is a single operation with no side effects. +// HLSL (and/or vectors) are always trivial, as it does not short circuit. +// Otherwise, error on the side of saying non-trivial. +// Return true if trivial. +bool TGlslangToSpvTraverser::isTrivial(const glslang::TIntermTyped* node) +{ + if (node == nullptr) + return false; + + // count non scalars as trivial, as well as anything coming from HLSL + if (! node->getType().isScalarOrVec1() || glslangIntermediate->getSource() == glslang::EShSourceHlsl) + return true; + + // symbols and constants are trivial + if (isTrivialLeaf(node)) + return true; + + // otherwise, it needs to be a simple operation or one or two leaf nodes + + // not a simple operation + const glslang::TIntermBinary* binaryNode = node->getAsBinaryNode(); + const glslang::TIntermUnary* unaryNode = node->getAsUnaryNode(); + if (binaryNode == nullptr && unaryNode == nullptr) + return false; + + // not on leaf nodes + if (binaryNode && (! isTrivialLeaf(binaryNode->getLeft()) || ! isTrivialLeaf(binaryNode->getRight()))) + return false; + + if (unaryNode && ! isTrivialLeaf(unaryNode->getOperand())) { + return false; + } + + switch (node->getAsOperator()->getOp()) { + case glslang::EOpLogicalNot: + case glslang::EOpConvIntToBool: + case glslang::EOpConvUintToBool: + case glslang::EOpConvFloatToBool: + case glslang::EOpConvDoubleToBool: + case glslang::EOpEqual: + case glslang::EOpNotEqual: + case glslang::EOpLessThan: + case glslang::EOpGreaterThan: + case glslang::EOpLessThanEqual: + case glslang::EOpGreaterThanEqual: + case glslang::EOpIndexDirect: + case glslang::EOpIndexDirectStruct: + case glslang::EOpLogicalXor: + case glslang::EOpAny: + case glslang::EOpAll: + return true; + default: + return false; + } +} + +// Emit short-circuiting code, where 'right' is never evaluated unless +// the left side is true (for &&) or false (for ||). +spv::Id TGlslangToSpvTraverser::createShortCircuit(glslang::TOperator op, glslang::TIntermTyped& left, + glslang::TIntermTyped& right) +{ + spv::Id boolTypeId = builder.makeBoolType(); + + // emit left operand + builder.clearAccessChain(); + left.traverse(this); + spv::Id leftId = accessChainLoad(left.getType()); + + // Operands to accumulate OpPhi operands + std::vector phiOperands; + // accumulate left operand's phi information + phiOperands.push_back(leftId); + phiOperands.push_back(builder.getBuildPoint()->getId()); + + // Make the two kinds of operation symmetric with a "!" + // || => emit "if (! 
left) result = right" + // && => emit "if ( left) result = right" + // + // TODO: this runtime "not" for || could be avoided by adding functionality + // to 'builder' to have an "else" without an "then" + if (op == glslang::EOpLogicalOr) + leftId = builder.createUnaryOp(spv::OpLogicalNot, boolTypeId, leftId); + + // make an "if" based on the left value + spv::Builder::If ifBuilder(leftId, spv::SelectionControlMaskNone, builder); + + // emit right operand as the "then" part of the "if" + builder.clearAccessChain(); + right.traverse(this); + spv::Id rightId = accessChainLoad(right.getType()); + + // accumulate left operand's phi information + phiOperands.push_back(rightId); + phiOperands.push_back(builder.getBuildPoint()->getId()); + + // finish the "if" + ifBuilder.makeEndIf(); + + // phi together the two results + return builder.createOp(spv::OpPhi, boolTypeId, phiOperands); +} + +#ifndef GLSLANG_WEB +// Return type Id of the imported set of extended instructions corresponds to the name. +// Import this set if it has not been imported yet. +spv::Id TGlslangToSpvTraverser::getExtBuiltins(const char* name) +{ + if (extBuiltinMap.find(name) != extBuiltinMap.end()) + return extBuiltinMap[name]; + else { + builder.addExtension(name); + spv::Id extBuiltins = builder.import(name); + extBuiltinMap[name] = extBuiltins; + return extBuiltins; + } +} +#endif + +}; // end anonymous namespace + +namespace glslang { + +void GetSpirvVersion(std::string& version) +{ + const int bufSize = 100; + char buf[bufSize]; + snprintf(buf, bufSize, "0x%08x, Revision %d", spv::Version, spv::Revision); + version = buf; +} + +// For low-order part of the generator's magic number. Bump up +// when there is a change in the style (e.g., if SSA form changes, +// or a different instruction sequence to do something gets used). 
+int GetSpirvGeneratorVersion() +{ + // return 1; // start + // return 2; // EOpAtomicCounterDecrement gets a post decrement, to map between GLSL -> SPIR-V + // return 3; // change/correct barrier-instruction operands, to match memory model group decisions + // return 4; // some deeper access chains: for dynamic vector component, and local Boolean component + // return 5; // make OpArrayLength result type be an int with signedness of 0 + // return 6; // revert version 5 change, which makes a different (new) kind of incorrect code, + // versions 4 and 6 each generate OpArrayLength as it has long been done + // return 7; // GLSL volatile keyword maps to both SPIR-V decorations Volatile and Coherent + // return 8; // switch to new dead block eliminator; use OpUnreachable + // return 9; // don't include opaque function parameters in OpEntryPoint global's operand list + return 10; // Generate OpFUnordNotEqual for != comparisons +} + +// Write SPIR-V out to a binary file +void OutputSpvBin(const std::vector& spirv, const char* baseName) +{ + std::ofstream out; + out.open(baseName, std::ios::binary | std::ios::out); + if (out.fail()) + printf("ERROR: Failed to open file: %s\n", baseName); + for (int i = 0; i < (int)spirv.size(); ++i) { + unsigned int word = spirv[i]; + out.write((const char*)&word, 4); + } + out.close(); +} + +// Write SPIR-V out to a text file with 32-bit hexadecimal words +void OutputSpvHex(const std::vector& spirv, const char* baseName, const char* varName) +{ +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + std::ofstream out; + out.open(baseName, std::ios::binary | std::ios::out); + if (out.fail()) + printf("ERROR: Failed to open file: %s\n", baseName); + out << "\t// " << + GetSpirvGeneratorVersion() << + GLSLANG_VERSION_MAJOR << "." << GLSLANG_VERSION_MINOR << "." << GLSLANG_VERSION_PATCH << + GLSLANG_VERSION_FLAVOR << std::endl; + if (varName != nullptr) { + out << "\t #pragma once" << std::endl; + out << "const uint32_t " << varName << "[] = {" << std::endl; + } + const int WORDS_PER_LINE = 8; + for (int i = 0; i < (int)spirv.size(); i += WORDS_PER_LINE) { + out << "\t"; + for (int j = 0; j < WORDS_PER_LINE && i + j < (int)spirv.size(); ++j) { + const unsigned int word = spirv[i + j]; + out << "0x" << std::hex << std::setw(8) << std::setfill('0') << word; + if (i + j + 1 < (int)spirv.size()) { + out << ","; + } + } + out << std::endl; + } + if (varName != nullptr) { + out << "};"; + out << std::endl; + } + out.close(); +#endif +} + +// +// Set up the glslang traversal +// +void GlslangToSpv(const TIntermediate& intermediate, std::vector& spirv, SpvOptions* options) +{ + spv::SpvBuildLogger logger; + GlslangToSpv(intermediate, spirv, &logger, options); +} + +void GlslangToSpv(const TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger* logger, SpvOptions* options) +{ + TIntermNode* root = intermediate.getTreeRoot(); + + if (root == 0) + return; + + SpvOptions defaultOptions; + if (options == nullptr) + options = &defaultOptions; + + GetThreadPoolAllocator().push(); + + TGlslangToSpvTraverser it(intermediate.getSpv().spv, &intermediate, logger, *options); + root->traverse(&it); + it.finishSpv(); + it.dumpSpv(spirv); + +#if ENABLE_OPT + // If from HLSL, run spirv-opt to "legalize" the SPIR-V for Vulkan + // eg. forward and remove memory writes of opaque types. 
+ bool prelegalization = intermediate.getSource() == EShSourceHlsl; + if ((prelegalization || options->optimizeSize) && !options->disableOptimizer) { + SpirvToolsTransform(intermediate, spirv, logger, options); + prelegalization = false; + } + else if (options->stripDebugInfo) { + // Strip debug info even if optimization is disabled. + SpirvToolsStripDebugInfo(intermediate, spirv, logger); + } + + if (options->validate) + SpirvToolsValidate(intermediate, spirv, logger, prelegalization); + + if (options->disassemble) + SpirvToolsDisassemble(std::cout, spirv); + +#endif + + GetThreadPoolAllocator().pop(); +} + +}; // end namespace glslang diff --git a/src/third_party/glslang/SPIRV/GlslangToSpv.h b/src/third_party/glslang/SPIRV/GlslangToSpv.h new file mode 100644 index 0000000..86e1c23 --- /dev/null +++ b/src/third_party/glslang/SPIRV/GlslangToSpv.h @@ -0,0 +1,61 @@ +// +// Copyright (C) 2014 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#if defined(_MSC_VER) && _MSC_VER >= 1900 + #pragma warning(disable : 4464) // relative include path contains '..' 
+#endif + +#include "SpvTools.h" +#include "../glslang/Include/intermediate.h" + +#include <string> +#include <vector> + +#include "Logger.h" + +namespace glslang { + +void GetSpirvVersion(std::string&); +int GetSpirvGeneratorVersion(); +void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, + SpvOptions* options = nullptr); +void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv, + spv::SpvBuildLogger* logger, SpvOptions* options = nullptr); +void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName); +void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName); + +} diff --git a/src/third_party/glslang/SPIRV/InReadableOrder.cpp b/src/third_party/glslang/SPIRV/InReadableOrder.cpp new file mode 100644 index 0000000..9d9410b --- /dev/null +++ b/src/third_party/glslang/SPIRV/InReadableOrder.cpp @@ -0,0 +1,131 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// The SPIR-V spec requires code blocks to appear in an order satisfying the +// dominator-tree direction (ie, dominator before the dominated). This is, +// actually, easy to achieve: any pre-order CFG traversal algorithm will do it. +// Because such algorithms visit a block only after traversing some path to it +// from the root, they necessarily visit the block's idom first. +// +// But not every graph-traversal algorithm outputs blocks in an order that +// appears logical to human readers. The problem is that unrelated branches may +// be interspersed with each other, and merge blocks may come before some of the +// branches being merged. +// +// A good, human-readable order of blocks may be achieved by performing +// depth-first search but delaying merge nodes until after all their branches +// have been visited. This is implemented below by the inReadableOrder() +// function.
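A minimal, self-contained sketch of the delayed-merge traversal described above may make the idea easier to see. The Node type, its fields, and readableOrder() below are hypothetical simplifications invented only for illustration; they are not part of the glslang sources, which operate on spv::Block in the file that follows, and loop continue targets and unreachable merge blocks are ignored here.

    #include <functional>
    #include <unordered_set>
    #include <vector>

    struct Node {                              // hypothetical, simplified CFG node
        std::vector<Node*> successors;         // outgoing edges
        Node* merge = nullptr;                 // this block's merge target, if any
    };

    // Pre-order DFS that holds each merge block back until every branch
    // leading into it has been emitted, yielding the "readable" order.
    void readableOrder(Node* n, std::unordered_set<Node*>& done,
                       std::unordered_set<Node*>& delayed,
                       const std::function<void(Node*)>& emit)
    {
        if (n == nullptr || done.count(n) || delayed.count(n))
            return;
        emit(n);
        done.insert(n);
        if (n->merge)
            delayed.insert(n->merge);          // hold the merge block back for now
        for (Node* s : n->successors)
            readableOrder(s, done, delayed, emit);
        if (n->merge) {
            delayed.erase(n->merge);           // all branches finished; emit it
            readableOrder(n->merge, done, delayed, emit);
        }
    }

The implementation that follows applies the same idea to spv::Block, and additionally delays loop continue targets and still visits merge/continue blocks that are not reachable through control flow.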
+ +#include "spvIR.h" + +#include <cassert> +#include <unordered_set> + +using spv::Block; +using spv::Id; + +namespace { +// Traverses CFG in a readable order, invoking a pre-set callback on each block. +// Use by calling visit() on the root block. +class ReadableOrderTraverser { +public: + ReadableOrderTraverser(std::function<void(Block*, spv::ReachReason, Block*)> callback) + : callback_(callback) {} + // Visits the block if it hasn't been visited already and isn't currently + // being delayed. Invokes callback(block, why, header), then descends into its + // successors. Delays merge-block and continue-block processing until all + // the branches have been completed. If |block| is an unreachable merge block or + // an unreachable continue target, then |header| is the corresponding header block. + void visit(Block* block, spv::ReachReason why, Block* header) + { + assert(block); + if (why == spv::ReachViaControlFlow) { + reachableViaControlFlow_.insert(block); + } + if (visited_.count(block) || delayed_.count(block)) + return; + callback_(block, why, header); + visited_.insert(block); + Block* mergeBlock = nullptr; + Block* continueBlock = nullptr; + auto mergeInst = block->getMergeInstruction(); + if (mergeInst) { + Id mergeId = mergeInst->getIdOperand(0); + mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock(); + delayed_.insert(mergeBlock); + if (mergeInst->getOpCode() == spv::OpLoopMerge) { + Id continueId = mergeInst->getIdOperand(1); + continueBlock = + block->getParent().getParent().getInstruction(continueId)->getBlock(); + delayed_.insert(continueBlock); + } + } + if (why == spv::ReachViaControlFlow) { + const auto& successors = block->getSuccessors(); + for (auto it = successors.cbegin(); it != successors.cend(); ++it) + visit(*it, why, nullptr); + } + if (continueBlock) { + const spv::ReachReason continueWhy = + (reachableViaControlFlow_.count(continueBlock) > 0) + ? spv::ReachViaControlFlow + : spv::ReachDeadContinue; + delayed_.erase(continueBlock); + visit(continueBlock, continueWhy, block); + } + if (mergeBlock) { + const spv::ReachReason mergeWhy = + (reachableViaControlFlow_.count(mergeBlock) > 0) + ? spv::ReachViaControlFlow + : spv::ReachDeadMerge; + delayed_.erase(mergeBlock); + visit(mergeBlock, mergeWhy, block); + } + } + +private: + std::function<void(Block*, spv::ReachReason, Block*)> callback_; + // Whether a block has already been visited or is being delayed. + std::unordered_set<Block*> visited_, delayed_; + + // The set of blocks that actually are reached via control flow. + std::unordered_set<Block*> reachableViaControlFlow_; +}; +} + +void spv::inReadableOrder(Block* root, std::function<void(Block*, spv::ReachReason, Block*)> callback) +{ + ReadableOrderTraverser(callback).visit(root, spv::ReachViaControlFlow, nullptr); +} diff --git a/src/third_party/glslang/SPIRV/Logger.cpp b/src/third_party/glslang/SPIRV/Logger.cpp new file mode 100644 index 0000000..cdc8469 --- /dev/null +++ b/src/third_party/glslang/SPIRV/Logger.cpp @@ -0,0 +1,72 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef GLSLANG_WEB + +#include "Logger.h" + +#include +#include +#include + +namespace spv { + +void SpvBuildLogger::tbdFunctionality(const std::string& f) +{ + if (std::find(std::begin(tbdFeatures), std::end(tbdFeatures), f) == std::end(tbdFeatures)) + tbdFeatures.push_back(f); +} + +void SpvBuildLogger::missingFunctionality(const std::string& f) +{ + if (std::find(std::begin(missingFeatures), std::end(missingFeatures), f) == std::end(missingFeatures)) + missingFeatures.push_back(f); +} + +std::string SpvBuildLogger::getAllMessages() const { + std::ostringstream messages; + for (auto it = tbdFeatures.cbegin(); it != tbdFeatures.cend(); ++it) + messages << "TBD functionality: " << *it << "\n"; + for (auto it = missingFeatures.cbegin(); it != missingFeatures.cend(); ++it) + messages << "Missing functionality: " << *it << "\n"; + for (auto it = warnings.cbegin(); it != warnings.cend(); ++it) + messages << "warning: " << *it << "\n"; + for (auto it = errors.cbegin(); it != errors.cend(); ++it) + messages << "error: " << *it << "\n"; + return messages.str(); +} + +} // end spv namespace + +#endif diff --git a/src/third_party/glslang/SPIRV/Logger.h b/src/third_party/glslang/SPIRV/Logger.h new file mode 100644 index 0000000..411367c --- /dev/null +++ b/src/third_party/glslang/SPIRV/Logger.h @@ -0,0 +1,83 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef GLSLANG_SPIRV_LOGGER_H +#define GLSLANG_SPIRV_LOGGER_H + +#include <string> +#include <vector> + +namespace spv { + +// A class for holding all SPIR-V build status messages, including +// missing/TBD functionalities, warnings, and errors. +class SpvBuildLogger { +public: + SpvBuildLogger() {} + +#ifdef GLSLANG_WEB + void tbdFunctionality(const std::string& f) { } + void missingFunctionality(const std::string& f) { } + void warning(const std::string& w) { } + void error(const std::string& e) { errors.push_back(e); } + std::string getAllMessages() { return ""; } +#else + + // Registers a TBD functionality. + void tbdFunctionality(const std::string& f); + // Registers a missing functionality. + void missingFunctionality(const std::string& f); + + // Logs a warning. + void warning(const std::string& w) { warnings.push_back(w); } + // Logs an error. + void error(const std::string& e) { errors.push_back(e); } + + // Returns all messages accumulated in the order of: + // TBD functionalities, missing functionalities, warnings, errors. + std::string getAllMessages() const; +#endif + +private: + SpvBuildLogger(const SpvBuildLogger&); + + std::vector<std::string> tbdFeatures; + std::vector<std::string> missingFeatures; + std::vector<std::string> warnings; + std::vector<std::string> errors; +}; + +} // end spv namespace + +#endif // GLSLANG_SPIRV_LOGGER_H diff --git a/src/third_party/glslang/SPIRV/NonSemanticDebugPrintf.h b/src/third_party/glslang/SPIRV/NonSemanticDebugPrintf.h new file mode 100644 index 0000000..83796d7 --- /dev/null +++ b/src/third_party/glslang/SPIRV/NonSemanticDebugPrintf.h @@ -0,0 +1,50 @@ +// Copyright (c) 2020 The Khronos Group Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and/or associated documentation files (the +// "Materials"), to deal in the Materials without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Materials, and to +// permit persons to whom the Materials are furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Materials. +// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS +// KHRONOS STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS +// SPECIFICATIONS AND HEADER INFORMATION ARE LOCATED AT +// https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+// + +#ifndef SPIRV_UNIFIED1_NonSemanticDebugPrintf_H_ +#define SPIRV_UNIFIED1_NonSemanticDebugPrintf_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +enum { + NonSemanticDebugPrintfRevision = 1, + NonSemanticDebugPrintfRevision_BitWidthPadding = 0x7fffffff +}; + +enum NonSemanticDebugPrintfInstructions { + NonSemanticDebugPrintfDebugPrintf = 1, + NonSemanticDebugPrintfInstructionsMax = 0x7fffffff +}; + + +#ifdef __cplusplus +} +#endif + +#endif // SPIRV_UNIFIED1_NonSemanticDebugPrintf_H_ diff --git a/src/third_party/glslang/SPIRV/SPVRemapper.cpp b/src/third_party/glslang/SPIRV/SPVRemapper.cpp new file mode 100644 index 0000000..1adc6a3 --- /dev/null +++ b/src/third_party/glslang/SPIRV/SPVRemapper.cpp @@ -0,0 +1,1490 @@ +// +// Copyright (C) 2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "SPVRemapper.h" +#include "doc.h" + +#if !defined (use_cpp11) +// ... not supported before C++11 +#else // defined (use_cpp11) + +#include +#include +#include "../glslang/Include/Common.h" + +namespace spv { + + // By default, just abort on error. Can be overridden via RegisterErrorHandler + spirvbin_t::errorfn_t spirvbin_t::errorHandler = [](const std::string&) { exit(5); }; + // By default, eat log messages. 
Can be overridden via RegisterLogHandler + spirvbin_t::logfn_t spirvbin_t::logHandler = [](const std::string&) { }; + + // This can be overridden to provide other message behavior if needed + void spirvbin_t::msg(int minVerbosity, int indent, const std::string& txt) const + { + if (verbose >= minVerbosity) + logHandler(std::string(indent, ' ') + txt); + } + + // hash opcode, with special handling for OpExtInst + std::uint32_t spirvbin_t::asOpCodeHash(unsigned word) + { + const spv::Op opCode = asOpCode(word); + + std::uint32_t offset = 0; + + switch (opCode) { + case spv::OpExtInst: + offset += asId(word + 4); break; + default: + break; + } + + return opCode * 19 + offset; // 19 = small prime + } + + spirvbin_t::range_t spirvbin_t::literalRange(spv::Op opCode) const + { + static const int maxCount = 1<<30; + + switch (opCode) { + case spv::OpTypeFloat: // fall through... + case spv::OpTypePointer: return range_t(2, 3); + case spv::OpTypeInt: return range_t(2, 4); + // TODO: case spv::OpTypeImage: + // TODO: case spv::OpTypeSampledImage: + case spv::OpTypeSampler: return range_t(3, 8); + case spv::OpTypeVector: // fall through + case spv::OpTypeMatrix: // ... + case spv::OpTypePipe: return range_t(3, 4); + case spv::OpConstant: return range_t(3, maxCount); + default: return range_t(0, 0); + } + } + + spirvbin_t::range_t spirvbin_t::typeRange(spv::Op opCode) const + { + static const int maxCount = 1<<30; + + if (isConstOp(opCode)) + return range_t(1, 2); + + switch (opCode) { + case spv::OpTypeVector: // fall through + case spv::OpTypeMatrix: // ... + case spv::OpTypeSampler: // ... + case spv::OpTypeArray: // ... + case spv::OpTypeRuntimeArray: // ... + case spv::OpTypePipe: return range_t(2, 3); + case spv::OpTypeStruct: // fall through + case spv::OpTypeFunction: return range_t(2, maxCount); + case spv::OpTypePointer: return range_t(3, 4); + default: return range_t(0, 0); + } + } + + spirvbin_t::range_t spirvbin_t::constRange(spv::Op opCode) const + { + static const int maxCount = 1<<30; + + switch (opCode) { + case spv::OpTypeArray: // fall through... + case spv::OpTypeRuntimeArray: return range_t(3, 4); + case spv::OpConstantComposite: return range_t(3, maxCount); + default: return range_t(0, 0); + } + } + + // Return the size of a type in 32-bit words. This currently only + // handles ints and floats, and is only invoked by queries which must be + // integer types. If ever needed, it can be generalized. + unsigned spirvbin_t::typeSizeInWords(spv::Id id) const + { + const unsigned typeStart = idPos(id); + const spv::Op opCode = asOpCode(typeStart); + + if (errorLatch) + return 0; + + switch (opCode) { + case spv::OpTypeInt: // fall through... + case spv::OpTypeFloat: return (spv[typeStart+2]+31)/32; + default: + return 0; + } + } + + // Looks up the type of a given const or variable ID, and + // returns its size in 32-bit words. + unsigned spirvbin_t::idTypeSizeInWords(spv::Id id) const + { + const auto tid_it = idTypeSizeMap.find(id); + if (tid_it == idTypeSizeMap.end()) { + error("type size for ID not found"); + return 0; + } + + return tid_it->second; + } + + // Is this an opcode we should remove when using --strip? 
+ bool spirvbin_t::isStripOp(spv::Op opCode) const + { + switch (opCode) { + case spv::OpSource: + case spv::OpSourceExtension: + case spv::OpName: + case spv::OpMemberName: + case spv::OpLine: return true; + default: return false; + } + } + + // Return true if this opcode is flow control + bool spirvbin_t::isFlowCtrl(spv::Op opCode) const + { + switch (opCode) { + case spv::OpBranchConditional: + case spv::OpBranch: + case spv::OpSwitch: + case spv::OpLoopMerge: + case spv::OpSelectionMerge: + case spv::OpLabel: + case spv::OpFunction: + case spv::OpFunctionEnd: return true; + default: return false; + } + } + + // Return true if this opcode defines a type + bool spirvbin_t::isTypeOp(spv::Op opCode) const + { + switch (opCode) { + case spv::OpTypeVoid: + case spv::OpTypeBool: + case spv::OpTypeInt: + case spv::OpTypeFloat: + case spv::OpTypeVector: + case spv::OpTypeMatrix: + case spv::OpTypeImage: + case spv::OpTypeSampler: + case spv::OpTypeArray: + case spv::OpTypeRuntimeArray: + case spv::OpTypeStruct: + case spv::OpTypeOpaque: + case spv::OpTypePointer: + case spv::OpTypeFunction: + case spv::OpTypeEvent: + case spv::OpTypeDeviceEvent: + case spv::OpTypeReserveId: + case spv::OpTypeQueue: + case spv::OpTypeSampledImage: + case spv::OpTypePipe: return true; + default: return false; + } + } + + // Return true if this opcode defines a constant + bool spirvbin_t::isConstOp(spv::Op opCode) const + { + switch (opCode) { + case spv::OpConstantSampler: + error("unimplemented constant type"); + return true; + + case spv::OpConstantNull: + case spv::OpConstantTrue: + case spv::OpConstantFalse: + case spv::OpConstantComposite: + case spv::OpConstant: + return true; + + default: + return false; + } + } + + const auto inst_fn_nop = [](spv::Op, unsigned) { return false; }; + const auto op_fn_nop = [](spv::Id&) { }; + + // g++ doesn't like these defined in the class proper in an anonymous namespace. + // Dunno why. Also MSVC doesn't like the constexpr keyword. Also dunno why. + // Defining them externally seems to please both compilers, so, here they are. + const spv::Id spirvbin_t::unmapped = spv::Id(-10000); + const spv::Id spirvbin_t::unused = spv::Id(-10001); + const int spirvbin_t::header_size = 5; + + spv::Id spirvbin_t::nextUnusedId(spv::Id id) + { + while (isNewIdMapped(id)) // search for an unused ID + ++id; + + return id; + } + + spv::Id spirvbin_t::localId(spv::Id id, spv::Id newId) + { + //assert(id != spv::NoResult && newId != spv::NoResult); + + if (id > bound()) { + error(std::string("ID out of range: ") + std::to_string(id)); + return spirvbin_t::unused; + } + + if (id >= idMapL.size()) + idMapL.resize(id+1, unused); + + if (newId != unmapped && newId != unused) { + if (isOldIdUnused(id)) { + error(std::string("ID unused in module: ") + std::to_string(id)); + return spirvbin_t::unused; + } + + if (!isOldIdUnmapped(id)) { + error(std::string("ID already mapped: ") + std::to_string(id) + " -> " + + std::to_string(localId(id))); + + return spirvbin_t::unused; + } + + if (isNewIdMapped(newId)) { + error(std::string("ID already used in module: ") + std::to_string(newId)); + return spirvbin_t::unused; + } + + msg(4, 4, std::string("map: ") + std::to_string(id) + " -> " + std::to_string(newId)); + setMapped(newId); + largestNewId = std::max(largestNewId, newId); + } + + return idMapL[id] = newId; + } + + // Parse a literal string from the SPIR binary and return it as an std::string + // Due to C++11 RValue references, this doesn't copy the result string. 
+ std::string spirvbin_t::literalString(unsigned word) const + { + std::string literal; + + literal.reserve(16); + + const char* bytes = reinterpret_cast(spv.data() + word); + + while (bytes && *bytes) + literal += *bytes++; + + return literal; + } + + void spirvbin_t::applyMap() + { + msg(3, 2, std::string("Applying map: ")); + + // Map local IDs through the ID map + process(inst_fn_nop, // ignore instructions + [this](spv::Id& id) { + id = localId(id); + + if (errorLatch) + return; + + assert(id != unused && id != unmapped); + } + ); + } + + // Find free IDs for anything we haven't mapped + void spirvbin_t::mapRemainder() + { + msg(3, 2, std::string("Remapping remainder: ")); + + spv::Id unusedId = 1; // can't use 0: that's NoResult + spirword_t maxBound = 0; + + for (spv::Id id = 0; id < idMapL.size(); ++id) { + if (isOldIdUnused(id)) + continue; + + // Find a new mapping for any used but unmapped IDs + if (isOldIdUnmapped(id)) { + localId(id, unusedId = nextUnusedId(unusedId)); + if (errorLatch) + return; + } + + if (isOldIdUnmapped(id)) { + error(std::string("old ID not mapped: ") + std::to_string(id)); + return; + } + + // Track max bound + maxBound = std::max(maxBound, localId(id) + 1); + + if (errorLatch) + return; + } + + bound(maxBound); // reset header ID bound to as big as it now needs to be + } + + // Mark debug instructions for stripping + void spirvbin_t::stripDebug() + { + // Strip instructions in the stripOp set: debug info. + process( + [&](spv::Op opCode, unsigned start) { + // remember opcodes we want to strip later + if (isStripOp(opCode)) + stripInst(start); + return true; + }, + op_fn_nop); + } + + // Mark instructions that refer to now-removed IDs for stripping + void spirvbin_t::stripDeadRefs() + { + process( + [&](spv::Op opCode, unsigned start) { + // strip opcodes pointing to removed data + switch (opCode) { + case spv::OpName: + case spv::OpMemberName: + case spv::OpDecorate: + case spv::OpMemberDecorate: + if (idPosR.find(asId(start+1)) == idPosR.end()) + stripInst(start); + break; + default: + break; // leave it alone + } + + return true; + }, + op_fn_nop); + + strip(); + } + + // Update local maps of ID, type, etc positions + void spirvbin_t::buildLocalMaps() + { + msg(2, 2, std::string("build local maps: ")); + + mapped.clear(); + idMapL.clear(); +// preserve nameMap, so we don't clear that. 
+ fnPos.clear(); + fnCalls.clear(); + typeConstPos.clear(); + idPosR.clear(); + entryPoint = spv::NoResult; + largestNewId = 0; + + idMapL.resize(bound(), unused); + + int fnStart = 0; + spv::Id fnRes = spv::NoResult; + + // build local Id and name maps + process( + [&](spv::Op opCode, unsigned start) { + unsigned word = start+1; + spv::Id typeId = spv::NoResult; + + if (spv::InstructionDesc[opCode].hasType()) + typeId = asId(word++); + + // If there's a result ID, remember the size of its type + if (spv::InstructionDesc[opCode].hasResult()) { + const spv::Id resultId = asId(word++); + idPosR[resultId] = start; + + if (typeId != spv::NoResult) { + const unsigned idTypeSize = typeSizeInWords(typeId); + + if (errorLatch) + return false; + + if (idTypeSize != 0) + idTypeSizeMap[resultId] = idTypeSize; + } + } + + if (opCode == spv::Op::OpName) { + const spv::Id target = asId(start+1); + const std::string name = literalString(start+2); + nameMap[name] = target; + + } else if (opCode == spv::Op::OpFunctionCall) { + ++fnCalls[asId(start + 3)]; + } else if (opCode == spv::Op::OpEntryPoint) { + entryPoint = asId(start + 2); + } else if (opCode == spv::Op::OpFunction) { + if (fnStart != 0) { + error("nested function found"); + return false; + } + + fnStart = start; + fnRes = asId(start + 2); + } else if (opCode == spv::Op::OpFunctionEnd) { + assert(fnRes != spv::NoResult); + if (fnStart == 0) { + error("function end without function start"); + return false; + } + + fnPos[fnRes] = range_t(fnStart, start + asWordCount(start)); + fnStart = 0; + } else if (isConstOp(opCode)) { + if (errorLatch) + return false; + + assert(asId(start + 2) != spv::NoResult); + typeConstPos.insert(start); + } else if (isTypeOp(opCode)) { + assert(asId(start + 1) != spv::NoResult); + typeConstPos.insert(start); + } + + return false; + }, + + [this](spv::Id& id) { localId(id, unmapped); } + ); + } + + // Validate the SPIR header + void spirvbin_t::validate() const + { + msg(2, 2, std::string("validating: ")); + + if (spv.size() < header_size) { + error("file too short: "); + return; + } + + if (magic() != spv::MagicNumber) { + error("bad magic number"); + return; + } + + // field 1 = version + // field 2 = generator magic + // field 3 = result bound + + if (schemaNum() != 0) { + error("bad schema, must be 0"); + return; + } + } + + int spirvbin_t::processInstruction(unsigned word, instfn_t instFn, idfn_t idFn) + { + const auto instructionStart = word; + const unsigned wordCount = asWordCount(instructionStart); + const int nextInst = word++ + wordCount; + spv::Op opCode = asOpCode(instructionStart); + + if (nextInst > int(spv.size())) { + error("spir instruction terminated too early"); + return -1; + } + + // Base for computing number of operands; will be updated as more is learned + unsigned numOperands = wordCount - 1; + + if (instFn(opCode, instructionStart)) + return nextInst; + + // Read type and result ID from instruction desc table + if (spv::InstructionDesc[opCode].hasType()) { + idFn(asId(word++)); + --numOperands; + } + + if (spv::InstructionDesc[opCode].hasResult()) { + idFn(asId(word++)); + --numOperands; + } + + // Extended instructions: currently, assume everything is an ID. 
+ // TODO: add whatever data we need for exceptions to that + if (opCode == spv::OpExtInst) { + word += 2; // instruction set, and instruction from set + numOperands -= 2; + + for (unsigned op=0; op < numOperands; ++op) + idFn(asId(word++)); // ID + + return nextInst; + } + + // Circular buffer so we can look back at previous unmapped values during the mapping pass. + static const unsigned idBufferSize = 4; + spv::Id idBuffer[idBufferSize]; + unsigned idBufferPos = 0; + + // Store IDs from instruction in our map + for (int op = 0; numOperands > 0; ++op, --numOperands) { + // SpecConstantOp is special: it includes the operands of another opcode which is + // given as a literal in the 3rd word. We will switch over to pretending that the + // opcode being processed is the literal opcode value of the SpecConstantOp. See the + // SPIRV spec for details. This way we will handle IDs and literals as appropriate for + // the embedded op. + if (opCode == spv::OpSpecConstantOp) { + if (op == 0) { + opCode = asOpCode(word++); // this is the opcode embedded in the SpecConstantOp. + --numOperands; + } + } + + switch (spv::InstructionDesc[opCode].operands.getClass(op)) { + case spv::OperandId: + case spv::OperandScope: + case spv::OperandMemorySemantics: + idBuffer[idBufferPos] = asId(word); + idBufferPos = (idBufferPos + 1) % idBufferSize; + idFn(asId(word++)); + break; + + case spv::OperandVariableIds: + for (unsigned i = 0; i < numOperands; ++i) + idFn(asId(word++)); + return nextInst; + + case spv::OperandVariableLiterals: + // for clarity + // if (opCode == spv::OpDecorate && asDecoration(word - 1) == spv::DecorationBuiltIn) { + // ++word; + // --numOperands; + // } + // word += numOperands; + return nextInst; + + case spv::OperandVariableLiteralId: { + if (opCode == OpSwitch) { + // word-2 is the position of the selector ID. OpSwitch Literals match its type. + // In case the IDs are currently being remapped, we get the word[-2] ID from + // the circular idBuffer. + const unsigned literalSizePos = (idBufferPos+idBufferSize-2) % idBufferSize; + const unsigned literalSize = idTypeSizeInWords(idBuffer[literalSizePos]); + const unsigned numLiteralIdPairs = (nextInst-word) / (1+literalSize); + + if (errorLatch) + return -1; + + for (unsigned arg=0; arg instPos; + instPos.reserve(unsigned(spv.size()) / 16); // initial estimate; can grow if needed. + + // Build local table of instruction start positions + process( + [&](spv::Op, unsigned start) { instPos.push_back(start); return true; }, + op_fn_nop); + + if (errorLatch) + return; + + // Window size for context-sensitive canonicalization values + // Empirical best size from a single data set. TODO: Would be a good tunable. + // We essentially perform a little convolution around each instruction, + // to capture the flavor of nearby code, to hopefully match to similar + // code in other modules. + static const unsigned windowSize = 2; + + for (unsigned entry = 0; entry < unsigned(instPos.size()); ++entry) { + const unsigned start = instPos[entry]; + const spv::Op opCode = asOpCode(start); + + if (opCode == spv::OpFunction) + fnId = asId(start + 2); + + if (opCode == spv::OpFunctionEnd) + fnId = spv::NoResult; + + if (fnId != spv::NoResult) { // if inside a function + if (spv::InstructionDesc[opCode].hasResult()) { + const unsigned word = start + (spv::InstructionDesc[opCode].hasType() ? 
2 : 1); + const spv::Id resId = asId(word); + std::uint32_t hashval = fnId * 17; // small prime + + for (unsigned i = entry-1; i >= entry-windowSize; --i) { + if (asOpCode(instPos[i]) == spv::OpFunction) + break; + hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime + } + + for (unsigned i = entry; i <= entry + windowSize; ++i) { + if (asOpCode(instPos[i]) == spv::OpFunctionEnd) + break; + hashval = hashval * 30103 + asOpCodeHash(instPos[i]); // 30103 = semiarbitrary prime + } + + if (isOldIdUnmapped(resId)) { + localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID)); + if (errorLatch) + return; + } + + } + } + } + + spv::Op thisOpCode(spv::OpNop); + std::unordered_map opCounter; + int idCounter(0); + fnId = spv::NoResult; + + process( + [&](spv::Op opCode, unsigned start) { + switch (opCode) { + case spv::OpFunction: + // Reset counters at each function + idCounter = 0; + opCounter.clear(); + fnId = asId(start + 2); + break; + + case spv::OpImageSampleImplicitLod: + case spv::OpImageSampleExplicitLod: + case spv::OpImageSampleDrefImplicitLod: + case spv::OpImageSampleDrefExplicitLod: + case spv::OpImageSampleProjImplicitLod: + case spv::OpImageSampleProjExplicitLod: + case spv::OpImageSampleProjDrefImplicitLod: + case spv::OpImageSampleProjDrefExplicitLod: + case spv::OpDot: + case spv::OpCompositeExtract: + case spv::OpCompositeInsert: + case spv::OpVectorShuffle: + case spv::OpLabel: + case spv::OpVariable: + + case spv::OpAccessChain: + case spv::OpLoad: + case spv::OpStore: + case spv::OpCompositeConstruct: + case spv::OpFunctionCall: + ++opCounter[opCode]; + idCounter = 0; + thisOpCode = opCode; + break; + default: + thisOpCode = spv::OpNop; + } + + return false; + }, + + [&](spv::Id& id) { + if (thisOpCode != spv::OpNop) { + ++idCounter; + const std::uint32_t hashval = opCounter[thisOpCode] * thisOpCode * 50047 + idCounter + fnId * 117; + + if (isOldIdUnmapped(id)) + localId(id, nextUnusedId(hashval % softTypeIdLimit + firstMappedID)); + } + }); + } + + // EXPERIMENTAL: forward IO and uniform load/stores into operands + // This produces invalid Schema-0 SPIRV + void spirvbin_t::forwardLoadStores() + { + idset_t fnLocalVars; // set of function local vars + idmap_t idMap; // Map of load result IDs to what they load + + // EXPERIMENTAL: Forward input and access chain loads into consumptions + process( + [&](spv::Op opCode, unsigned start) { + // Add inputs and uniforms to the map + if ((opCode == spv::OpVariable && asWordCount(start) == 4) && + (spv[start+3] == spv::StorageClassUniform || + spv[start+3] == spv::StorageClassUniformConstant || + spv[start+3] == spv::StorageClassInput)) + fnLocalVars.insert(asId(start+2)); + + if (opCode == spv::OpAccessChain && fnLocalVars.count(asId(start+3)) > 0) + fnLocalVars.insert(asId(start+2)); + + if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) { + idMap[asId(start+2)] = asId(start+3); + stripInst(start); + } + + return false; + }, + + [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; } + ); + + if (errorLatch) + return; + + // EXPERIMENTAL: Implicit output stores + fnLocalVars.clear(); + idMap.clear(); + + process( + [&](spv::Op opCode, unsigned start) { + // Add inputs and uniforms to the map + if ((opCode == spv::OpVariable && asWordCount(start) == 4) && + (spv[start+3] == spv::StorageClassOutput)) + fnLocalVars.insert(asId(start+2)); + + if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) { + idMap[asId(start+2)] = asId(start+1); + 
stripInst(start); + } + + return false; + }, + op_fn_nop); + + if (errorLatch) + return; + + process( + inst_fn_nop, + [&](spv::Id& id) { if (idMap.find(id) != idMap.end()) id = idMap[id]; } + ); + + if (errorLatch) + return; + + strip(); // strip out data we decided to eliminate + } + + // optimize loads and stores + void spirvbin_t::optLoadStore() + { + idset_t fnLocalVars; // candidates for removal (only locals) + idmap_t idMap; // Map of load result IDs to what they load + blockmap_t blockMap; // Map of IDs to blocks they first appear in + int blockNum = 0; // block count, to avoid crossing flow control + + // Find all the function local pointers stored at most once, and not via access chains + process( + [&](spv::Op opCode, unsigned start) { + const int wordCount = asWordCount(start); + + // Count blocks, so we can avoid crossing flow control + if (isFlowCtrl(opCode)) + ++blockNum; + + // Add local variables to the map + if ((opCode == spv::OpVariable && spv[start+3] == spv::StorageClassFunction && asWordCount(start) == 4)) { + fnLocalVars.insert(asId(start+2)); + return true; + } + + // Ignore process vars referenced via access chain + if ((opCode == spv::OpAccessChain || opCode == spv::OpInBoundsAccessChain) && fnLocalVars.count(asId(start+3)) > 0) { + fnLocalVars.erase(asId(start+3)); + idMap.erase(asId(start+3)); + return true; + } + + if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) { + const spv::Id varId = asId(start+3); + + // Avoid loads before stores + if (idMap.find(varId) == idMap.end()) { + fnLocalVars.erase(varId); + idMap.erase(varId); + } + + // don't do for volatile references + if (wordCount > 4 && (spv[start+4] & spv::MemoryAccessVolatileMask)) { + fnLocalVars.erase(varId); + idMap.erase(varId); + } + + // Handle flow control + if (blockMap.find(varId) == blockMap.end()) { + blockMap[varId] = blockNum; // track block we found it in. + } else if (blockMap[varId] != blockNum) { + fnLocalVars.erase(varId); // Ignore if crosses flow control + idMap.erase(varId); + } + + return true; + } + + if (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) { + const spv::Id varId = asId(start+1); + + if (idMap.find(varId) == idMap.end()) { + idMap[varId] = asId(start+2); + } else { + // Remove if it has more than one store to the same pointer + fnLocalVars.erase(varId); + idMap.erase(varId); + } + + // don't do for volatile references + if (wordCount > 3 && (spv[start+3] & spv::MemoryAccessVolatileMask)) { + fnLocalVars.erase(asId(start+3)); + idMap.erase(asId(start+3)); + } + + // Handle flow control + if (blockMap.find(varId) == blockMap.end()) { + blockMap[varId] = blockNum; // track block we found it in. + } else if (blockMap[varId] != blockNum) { + fnLocalVars.erase(varId); // Ignore if crosses flow control + idMap.erase(varId); + } + + return true; + } + + return false; + }, + + // If local var id used anywhere else, don't eliminate + [&](spv::Id& id) { + if (fnLocalVars.count(id) > 0) { + fnLocalVars.erase(id); + idMap.erase(id); + } + } + ); + + if (errorLatch) + return; + + process( + [&](spv::Op opCode, unsigned start) { + if (opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) + idMap[asId(start+2)] = idMap[asId(start+3)]; + return false; + }, + op_fn_nop); + + if (errorLatch) + return; + + // Chase replacements to their origins, in case there is a chain such as: + // 2 = store 1 + // 3 = load 2 + // 4 = store 3 + // 5 = load 4 + // We want to replace uses of 5 with 1. 
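// (After this flattening pass, idMap sends every key straight to its final
// origin (5 -> 1 in the example above), so the replacement walk further down
// needs only one lookup per ID, much like path compression in a union-find.)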
+ for (const auto& idPair : idMap) { + spv::Id id = idPair.first; + while (idMap.find(id) != idMap.end()) // Chase to end of chain + id = idMap[id]; + + idMap[idPair.first] = id; // replace with final result + } + + // Remove the load/store/variables for the ones we've discovered + process( + [&](spv::Op opCode, unsigned start) { + if ((opCode == spv::OpLoad && fnLocalVars.count(asId(start+3)) > 0) || + (opCode == spv::OpStore && fnLocalVars.count(asId(start+1)) > 0) || + (opCode == spv::OpVariable && fnLocalVars.count(asId(start+2)) > 0)) { + + stripInst(start); + return true; + } + + return false; + }, + + [&](spv::Id& id) { + if (idMap.find(id) != idMap.end()) id = idMap[id]; + } + ); + + if (errorLatch) + return; + + strip(); // strip out data we decided to eliminate + } + + // remove bodies of uncalled functions + void spirvbin_t::dceFuncs() + { + msg(3, 2, std::string("Removing Dead Functions: ")); + + // TODO: There are more efficient ways to do this. + bool changed = true; + + while (changed) { + changed = false; + + for (auto fn = fnPos.begin(); fn != fnPos.end(); ) { + if (fn->first == entryPoint) { // don't DCE away the entry point! + ++fn; + continue; + } + + const auto call_it = fnCalls.find(fn->first); + + if (call_it == fnCalls.end() || call_it->second == 0) { + changed = true; + stripRange.push_back(fn->second); + + // decrease counts of called functions + process( + [&](spv::Op opCode, unsigned start) { + if (opCode == spv::Op::OpFunctionCall) { + const auto call_it = fnCalls.find(asId(start + 3)); + if (call_it != fnCalls.end()) { + if (--call_it->second <= 0) + fnCalls.erase(call_it); + } + } + + return true; + }, + op_fn_nop, + fn->second.first, + fn->second.second); + + if (errorLatch) + return; + + fn = fnPos.erase(fn); + } else ++fn; + } + } + } + + // remove unused function variables + decorations + void spirvbin_t::dceVars() + { + msg(3, 2, std::string("DCE Vars: ")); + + std::unordered_map varUseCount; + + // Count function variable use + process( + [&](spv::Op opCode, unsigned start) { + if (opCode == spv::OpVariable) { + ++varUseCount[asId(start+2)]; + return true; + } else if (opCode == spv::OpEntryPoint) { + const int wordCount = asWordCount(start); + for (int i = 4; i < wordCount; i++) { + ++varUseCount[asId(start+i)]; + } + return true; + } else + return false; + }, + + [&](spv::Id& id) { if (varUseCount[id]) ++varUseCount[id]; } + ); + + if (errorLatch) + return; + + // Remove single-use function variables + associated decorations and names + process( + [&](spv::Op opCode, unsigned start) { + spv::Id id = spv::NoResult; + if (opCode == spv::OpVariable) + id = asId(start+2); + if (opCode == spv::OpDecorate || opCode == spv::OpName) + id = asId(start+1); + + if (id != spv::NoResult && varUseCount[id] == 1) + stripInst(start); + + return true; + }, + op_fn_nop); + } + + // remove unused types + void spirvbin_t::dceTypes() + { + std::vector isType(bound(), false); + + // for speed, make O(1) way to get to type query (map is log(n)) + for (const auto typeStart : typeConstPos) + isType[asTypeConstId(typeStart)] = true; + + std::unordered_map typeUseCount; + + // This is not the most efficient algorithm, but this is an offline tool, and + // it's easy to write this way. Can be improved opportunistically if needed. 
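// (The loop below runs to a fixed point: a type or constant whose total use
// count is exactly 1 is referenced only by its own defining instruction, so it
// is dead and can be stripped; stripping it may in turn orphan types it
// referred to, which is why the pass repeats until nothing changes.)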
+ bool changed = true; + while (changed) { + changed = false; + strip(); + typeUseCount.clear(); + + // Count total type usage + process(inst_fn_nop, + [&](spv::Id& id) { if (isType[id]) ++typeUseCount[id]; } + ); + + if (errorLatch) + return; + + // Remove single reference types + for (const auto typeStart : typeConstPos) { + const spv::Id typeId = asTypeConstId(typeStart); + if (typeUseCount[typeId] == 1) { + changed = true; + --typeUseCount[typeId]; + stripInst(typeStart); + } + } + + if (errorLatch) + return; + } + } + +#ifdef NOTDEF + bool spirvbin_t::matchType(const spirvbin_t::globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const + { + // Find the local type id "lt" and global type id "gt" + const auto lt_it = typeConstPosR.find(lt); + if (lt_it == typeConstPosR.end()) + return false; + + const auto typeStart = lt_it->second; + + // Search for entry in global table + const auto gtype = globalTypes.find(gt); + if (gtype == globalTypes.end()) + return false; + + const auto& gdata = gtype->second; + + // local wordcount and opcode + const int wordCount = asWordCount(typeStart); + const spv::Op opCode = asOpCode(typeStart); + + // no type match if opcodes don't match, or operand count doesn't match + if (opCode != opOpCode(gdata[0]) || wordCount != opWordCount(gdata[0])) + return false; + + const unsigned numOperands = wordCount - 2; // all types have a result + + const auto cmpIdRange = [&](range_t range) { + for (int x=range.first; xsecond; + } + + // Hash types to canonical values. This can return ID collisions (it's a bit + // inevitable): it's up to the caller to handle that gracefully. + std::uint32_t spirvbin_t::hashType(unsigned typeStart) const + { + const unsigned wordCount = asWordCount(typeStart); + const spv::Op opCode = asOpCode(typeStart); + + switch (opCode) { + case spv::OpTypeVoid: return 0; + case spv::OpTypeBool: return 1; + case spv::OpTypeInt: return 3 + (spv[typeStart+3]); + case spv::OpTypeFloat: return 5; + case spv::OpTypeVector: + return 6 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1); + case spv::OpTypeMatrix: + return 30 + hashType(idPos(spv[typeStart+2])) * (spv[typeStart+3] - 1); + case spv::OpTypeImage: + return 120 + hashType(idPos(spv[typeStart+2])) + + spv[typeStart+3] + // dimensionality + spv[typeStart+4] * 8 * 16 + // depth + spv[typeStart+5] * 4 * 16 + // arrayed + spv[typeStart+6] * 2 * 16 + // multisampled + spv[typeStart+7] * 1 * 16; // format + case spv::OpTypeSampler: + return 500; + case spv::OpTypeSampledImage: + return 502; + case spv::OpTypeArray: + return 501 + hashType(idPos(spv[typeStart+2])) * spv[typeStart+3]; + case spv::OpTypeRuntimeArray: + return 5000 + hashType(idPos(spv[typeStart+2])); + case spv::OpTypeStruct: + { + std::uint32_t hash = 10000; + for (unsigned w=2; w < wordCount; ++w) + hash += w * hashType(idPos(spv[typeStart+w])); + return hash; + } + + case spv::OpTypeOpaque: return 6000 + spv[typeStart+2]; + case spv::OpTypePointer: return 100000 + hashType(idPos(spv[typeStart+3])); + case spv::OpTypeFunction: + { + std::uint32_t hash = 200000; + for (unsigned w=2; w < wordCount; ++w) + hash += w * hashType(idPos(spv[typeStart+w])); + return hash; + } + + case spv::OpTypeEvent: return 300000; + case spv::OpTypeDeviceEvent: return 300001; + case spv::OpTypeReserveId: return 300002; + case spv::OpTypeQueue: return 300003; + case spv::OpTypePipe: return 300004; + case spv::OpConstantTrue: return 300007; + case spv::OpConstantFalse: return 300008; + case spv::OpConstantComposite: + { + std::uint32_t hash = 
300011 + hashType(idPos(spv[typeStart+1])); + for (unsigned w=3; w < wordCount; ++w) + hash += w * hashType(idPos(spv[typeStart+w])); + return hash; + } + case spv::OpConstant: + { + std::uint32_t hash = 400011 + hashType(idPos(spv[typeStart+1])); + for (unsigned w=3; w < wordCount; ++w) + hash += w * spv[typeStart+w]; + return hash; + } + case spv::OpConstantNull: + { + std::uint32_t hash = 500009 + hashType(idPos(spv[typeStart+1])); + return hash; + } + case spv::OpConstantSampler: + { + std::uint32_t hash = 600011 + hashType(idPos(spv[typeStart+1])); + for (unsigned w=3; w < wordCount; ++w) + hash += w * spv[typeStart+w]; + return hash; + } + + default: + error("unknown type opcode"); + return 0; + } + } + + void spirvbin_t::mapTypeConst() + { + globaltypes_t globalTypeMap; + + msg(3, 2, std::string("Remapping Consts & Types: ")); + + static const std::uint32_t softTypeIdLimit = 3011; // small prime. TODO: get from options + static const std::uint32_t firstMappedID = 8; // offset into ID space + + for (auto& typeStart : typeConstPos) { + const spv::Id resId = asTypeConstId(typeStart); + const std::uint32_t hashval = hashType(typeStart); + + if (errorLatch) + return; + + if (isOldIdUnmapped(resId)) { + localId(resId, nextUnusedId(hashval % softTypeIdLimit + firstMappedID)); + if (errorLatch) + return; + } + } + } + + // Strip a single binary by removing ranges given in stripRange + void spirvbin_t::strip() + { + if (stripRange.empty()) // nothing to do + return; + + // Sort strip ranges in order of traversal + std::sort(stripRange.begin(), stripRange.end()); + + // Allocate a new binary big enough to hold old binary + // We'll step this iterator through the strip ranges as we go through the binary + auto strip_it = stripRange.begin(); + + int strippedPos = 0; + for (unsigned word = 0; word < unsigned(spv.size()); ++word) { + while (strip_it != stripRange.end() && word >= strip_it->second) + ++strip_it; + + if (strip_it == stripRange.end() || word < strip_it->first || word >= strip_it->second) + spv[strippedPos++] = spv[word]; + } + + spv.resize(strippedPos); + stripRange.clear(); + + buildLocalMaps(); + } + + // Strip a single binary by removing ranges given in stripRange + void spirvbin_t::remap(std::uint32_t opts) + { + options = opts; + + // Set up opcode tables from SpvDoc + spv::Parameterize(); + + validate(); // validate header + buildLocalMaps(); // build ID maps + + msg(3, 4, std::string("ID bound: ") + std::to_string(bound())); + + if (options & STRIP) stripDebug(); + if (errorLatch) return; + + strip(); // strip out data we decided to eliminate + if (errorLatch) return; + + if (options & OPT_LOADSTORE) optLoadStore(); + if (errorLatch) return; + + if (options & OPT_FWD_LS) forwardLoadStores(); + if (errorLatch) return; + + if (options & DCE_FUNCS) dceFuncs(); + if (errorLatch) return; + + if (options & DCE_VARS) dceVars(); + if (errorLatch) return; + + if (options & DCE_TYPES) dceTypes(); + if (errorLatch) return; + + strip(); // strip out data we decided to eliminate + if (errorLatch) return; + + stripDeadRefs(); // remove references to things we DCEed + if (errorLatch) return; + + // after the last strip, we must clean any debug info referring to now-deleted data + + if (options & MAP_TYPES) mapTypeConst(); + if (errorLatch) return; + + if (options & MAP_NAMES) mapNames(); + if (errorLatch) return; + + if (options & MAP_FUNCS) mapFnBodies(); + if (errorLatch) return; + + if (options & MAP_ALL) { + mapRemainder(); // map any unmapped IDs + if (errorLatch) return; + + 
applyMap(); // Now remap each shader to the new IDs we've come up with + if (errorLatch) return; + } + } + + // remap from a memory image + void spirvbin_t::remap(std::vector& in_spv, std::uint32_t opts) + { + spv.swap(in_spv); + remap(opts); + spv.swap(in_spv); + } + +} // namespace SPV + +#endif // defined (use_cpp11) + diff --git a/src/third_party/glslang/SPIRV/SPVRemapper.h b/src/third_party/glslang/SPIRV/SPVRemapper.h new file mode 100644 index 0000000..d6b9c34 --- /dev/null +++ b/src/third_party/glslang/SPIRV/SPVRemapper.h @@ -0,0 +1,304 @@ +// +// Copyright (C) 2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef SPIRVREMAPPER_H +#define SPIRVREMAPPER_H + +#include +#include +#include +#include + +namespace spv { + +// MSVC defines __cplusplus as an older value, even when it supports almost all of 11. +// We handle that here by making our own symbol. 
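// (The guard below accepts _MSC_VER >= 1700, i.e. Visual Studio 2012, as a
// stand-in for C++11 support. When it passes, the class declared further down
// is typically driven roughly like this minimal sketch, using only names from
// this header:
//
//     std::vector<std::uint32_t> words = /* one SPIR-V module */;
//     spv::spirvbin_t remapper(/*verbose*/ 0);
//     spv::spirvbin_t::registerErrorHandler(
//         [](const std::string& msg) { /* report msg */ });
//     remapper.remap(words, spv::spirvbin_t::DO_EVERYTHING);   // in place
// )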
+#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700) +# define use_cpp11 1 +#endif + +class spirvbin_base_t +{ +public: + enum Options { + NONE = 0, + STRIP = (1<<0), + MAP_TYPES = (1<<1), + MAP_NAMES = (1<<2), + MAP_FUNCS = (1<<3), + DCE_FUNCS = (1<<4), + DCE_VARS = (1<<5), + DCE_TYPES = (1<<6), + OPT_LOADSTORE = (1<<7), + OPT_FWD_LS = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV + MAP_ALL = (MAP_TYPES | MAP_NAMES | MAP_FUNCS), + DCE_ALL = (DCE_FUNCS | DCE_VARS | DCE_TYPES), + OPT_ALL = (OPT_LOADSTORE), + + ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL), + DO_EVERYTHING = (STRIP | ALL_BUT_STRIP) + }; +}; + +} // namespace SPV + +#if !defined (use_cpp11) +#include +#include + +namespace spv { +class spirvbin_t : public spirvbin_base_t +{ +public: + spirvbin_t(int /*verbose = 0*/) { } + + void remap(std::vector& /*spv*/, unsigned int /*opts = 0*/) + { + printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n"); + exit(5); + } +}; + +} // namespace SPV + +#else // defined (use_cpp11) + +#include +#include +#include +#include +#include +#include +#include + +#include "spirv.hpp" +#include "spvIR.h" + +namespace spv { + +// class to hold SPIR-V binary data for remapping, DCE, and debug stripping +class spirvbin_t : public spirvbin_base_t +{ +public: + spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false) + { } + + virtual ~spirvbin_t() { } + + // remap on an existing binary in memory + void remap(std::vector& spv, std::uint32_t opts = DO_EVERYTHING); + + // Type for error/log handler functions + typedef std::function errorfn_t; + typedef std::function logfn_t; + + // Register error/log handling functions (can be lambda fn / functor / etc) + static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; } + static void registerLogHandler(logfn_t handler) { logHandler = handler; } + +protected: + // This can be overridden to provide other message behavior if needed + virtual void msg(int minVerbosity, int indent, const std::string& txt) const; + +private: + // Local to global, or global to local ID map + typedef std::unordered_map idmap_t; + typedef std::unordered_set idset_t; + typedef std::unordered_map blockmap_t; + + void remap(std::uint32_t opts = DO_EVERYTHING); + + // Map of names to IDs + typedef std::unordered_map namemap_t; + + typedef std::uint32_t spirword_t; + + typedef std::pair range_t; + typedef std::function idfn_t; + typedef std::function instfn_t; + + // Special Values for ID map: + static const spv::Id unmapped; // unchanged from default value + static const spv::Id unused; // unused ID + static const int header_size; // SPIR header = 5 words + + class id_iterator_t; + + // For mapping type entries between different shaders + typedef std::vector typeentry_t; + typedef std::map globaltypes_t; + + // A set that preserves position order, and a reverse map + typedef std::set posmap_t; + typedef std::unordered_map posmap_rev_t; + + // Maps and ID to the size of its base type, if known. 
+ typedef std::unordered_map typesize_map_t; + + // handle error + void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); } + + bool isConstOp(spv::Op opCode) const; + bool isTypeOp(spv::Op opCode) const; + bool isStripOp(spv::Op opCode) const; + bool isFlowCtrl(spv::Op opCode) const; + range_t literalRange(spv::Op opCode) const; + range_t typeRange(spv::Op opCode) const; + range_t constRange(spv::Op opCode) const; + unsigned typeSizeInWords(spv::Id id) const; + unsigned idTypeSizeInWords(spv::Id id) const; + + spv::Id& asId(unsigned word) { return spv[word]; } + const spv::Id& asId(unsigned word) const { return spv[word]; } + spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); } + std::uint32_t asOpCodeHash(unsigned word); + spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); } + unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); } + spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); } + unsigned idPos(spv::Id id) const; + + static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; } + static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); } + + // Header access & set methods + spirword_t magic() const { return spv[0]; } // return magic number + spirword_t bound() const { return spv[3]; } // return Id bound from header + spirword_t bound(spirword_t b) { return spv[3] = b; } + spirword_t genmagic() const { return spv[2]; } // generator magic + spirword_t genmagic(spirword_t m) { return spv[2] = m; } + spirword_t schemaNum() const { return spv[4]; } // schema number from header + + // Mapping fns: get + spv::Id localId(spv::Id id) const { return idMapL[id]; } + + // Mapping fns: set + inline spv::Id localId(spv::Id id, spv::Id newId); + void countIds(spv::Id id); + + // Return next unused new local ID. + // NOTE: boost::dynamic_bitset would be more efficient due to find_next(), + // which std::vector doens't have. 
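// (nextUnusedId, declared just below, simply scans the 'mapped' bit set upward
// from 'id' and returns the first new ID nothing has been remapped onto yet;
// its definition is earlier in SPVRemapper.cpp.)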
+ inline spv::Id nextUnusedId(spv::Id id); + + void buildLocalMaps(); + std::string literalString(unsigned word) const; // Return literal as a std::string + int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; } + + bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); } + bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; } + bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; } + bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); } + bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); } + + // bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const; + // spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const; + std::uint32_t hashType(unsigned typeStart) const; + + spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0); + int processInstruction(unsigned word, instfn_t, idfn_t); + + void validate() const; + void mapTypeConst(); + void mapFnBodies(); + void optLoadStore(); + void dceFuncs(); + void dceVars(); + void dceTypes(); + void mapNames(); + void foldIds(); // fold IDs to smallest space + void forwardLoadStores(); // load store forwarding (EXPERIMENTAL) + void offsetIds(); // create relative offset IDs + + void applyMap(); // remap per local name map + void mapRemainder(); // map any IDs we haven't touched yet + void stripDebug(); // strip all debug info + void stripDeadRefs(); // strips debug info for now-dead references after DCE + void strip(); // remove debug symbols + + std::vector spv; // SPIR words + + namemap_t nameMap; // ID names from OpName + + // Since we want to also do binary ops, we can't use std::vector. we could use + // boost::dynamic_bitset, but we're trying to avoid a boost dependency. + typedef std::uint64_t bits_t; + std::vector mapped; // which new IDs have been mapped + static const int mBits = sizeof(bits_t) * 4; + + bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); } + void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); } + void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); } + size_t maxMappedId() const { return mapped.size() * mBits; } + + // Add a strip range for a given instruction starting at 'start' + // Note: avoiding brace initializers to please older versions os MSVC. + void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); } + + // Function start and end. use unordered_map because we'll have + // many fewer functions than IDs. + std::unordered_map fnPos; + + // Which functions are called, anywhere in the module, with a call count + std::unordered_map fnCalls; + + posmap_t typeConstPos; // word positions that define types & consts (ordered) + posmap_rev_t idPosR; // reverse map from IDs to positions + typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known. + + std::vector idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs + + spv::Id entryPoint; // module entry point + spv::Id largestNewId; // biggest new ID we have mapped anything to + + // Sections of the binary to strip, given as [begin,end) + std::vector stripRange; + + // processing options: + std::uint32_t options; + int verbose; // verbosity level + + // Error latch: this is set if the error handler is ever executed. 
It would be better to + // use a try/catch block and throw, but that's not desired for certain environments, so + // this is the alternative. + mutable bool errorLatch; + + static errorfn_t errorHandler; + static logfn_t logHandler; +}; + +} // namespace SPV + +#endif // defined (use_cpp11) +#endif // SPIRVREMAPPER_H diff --git a/src/third_party/glslang/SPIRV/SpvBuilder.cpp b/src/third_party/glslang/SPIRV/SpvBuilder.cpp new file mode 100644 index 0000000..9680331 --- /dev/null +++ b/src/third_party/glslang/SPIRV/SpvBuilder.cpp @@ -0,0 +1,3228 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Helper for making SPIR-V IR. Generally, this is documented in the header +// SpvBuilder.h. +// + +#include +#include + +#include +#include + +#include "SpvBuilder.h" + +#ifndef GLSLANG_WEB +#include "hex_float.h" +#endif + +#ifndef _WIN32 + #include +#endif + +namespace spv { + +Builder::Builder(unsigned int spvVersion, unsigned int magicNumber, SpvBuildLogger* buildLogger) : + spvVersion(spvVersion), + source(SourceLanguageUnknown), + sourceVersion(0), + sourceFileStringId(NoResult), + currentLine(0), + currentFile(nullptr), + emitOpLines(false), + addressModel(AddressingModelLogical), + memoryModel(MemoryModelGLSL450), + builderNumber(magicNumber), + buildPoint(0), + uniqueId(0), + entryPointFunction(0), + generatingOpCodeForSpecConst(false), + logger(buildLogger) +{ + clearAccessChain(); +} + +Builder::~Builder() +{ +} + +Id Builder::import(const char* name) +{ + Instruction* import = new Instruction(getUniqueId(), NoType, OpExtInstImport); + import->addStringOperand(name); + module.mapInstruction(import); + + imports.push_back(std::unique_ptr(import)); + return import->getResultId(); +} + +// Emit instruction for non-filename-based #line directives (ie. 
no filename +// seen yet): emit an OpLine if we've been asked to emit OpLines and the line +// number has changed since the last time, and is a valid line number. +void Builder::setLine(int lineNum) +{ + if (lineNum != 0 && lineNum != currentLine) { + currentLine = lineNum; + if (emitOpLines) + addLine(sourceFileStringId, currentLine, 0); + } +} + +// If no filename, do non-filename-based #line emit. Else do filename-based emit. +// Emit OpLine if we've been asked to emit OpLines and the line number or filename +// has changed since the last time, and line number is valid. +void Builder::setLine(int lineNum, const char* filename) +{ + if (filename == nullptr) { + setLine(lineNum); + return; + } + if ((lineNum != 0 && lineNum != currentLine) || currentFile == nullptr || + strncmp(filename, currentFile, strlen(currentFile) + 1) != 0) { + currentLine = lineNum; + currentFile = filename; + if (emitOpLines) { + spv::Id strId = getStringId(filename); + addLine(strId, currentLine, 0); + } + } +} + +void Builder::addLine(Id fileName, int lineNum, int column) +{ + Instruction* line = new Instruction(OpLine); + line->addIdOperand(fileName); + line->addImmediateOperand(lineNum); + line->addImmediateOperand(column); + buildPoint->addInstruction(std::unique_ptr(line)); +} + +// For creating new groupedTypes (will return old type if the requested one was already made). +Id Builder::makeVoidType() +{ + Instruction* type; + if (groupedTypes[OpTypeVoid].size() == 0) { + type = new Instruction(getUniqueId(), NoType, OpTypeVoid); + groupedTypes[OpTypeVoid].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + } else + type = groupedTypes[OpTypeVoid].back(); + + return type->getResultId(); +} + +Id Builder::makeBoolType() +{ + Instruction* type; + if (groupedTypes[OpTypeBool].size() == 0) { + type = new Instruction(getUniqueId(), NoType, OpTypeBool); + groupedTypes[OpTypeBool].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + } else + type = groupedTypes[OpTypeBool].back(); + + return type->getResultId(); +} + +Id Builder::makeSamplerType() +{ + Instruction* type; + if (groupedTypes[OpTypeSampler].size() == 0) { + type = new Instruction(getUniqueId(), NoType, OpTypeSampler); + groupedTypes[OpTypeSampler].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + } else + type = groupedTypes[OpTypeSampler].back(); + + return type->getResultId(); +} + +Id Builder::makePointer(StorageClass storageClass, Id pointee) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) { + type = groupedTypes[OpTypePointer][t]; + if (type->getImmediateOperand(0) == (unsigned)storageClass && + type->getIdOperand(1) == pointee) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypePointer); + type->addImmediateOperand(storageClass); + type->addIdOperand(pointee); + groupedTypes[OpTypePointer].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeForwardPointer(StorageClass storageClass) +{ + // Caching/uniquifying doesn't work here, because we don't know the + // pointee type and there can be multiple forward pointers of the same + // storage type. Somebody higher up in the stack must keep track. 
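// (A forward pointer lets a pointer type name a pointee that is not declared
// yet, e.g. a buffer-reference struct that points at itself. The emission
// pattern is roughly:
//
//            OpTypeForwardPointer %ptr PhysicalStorageBuffer
//     %S   = OpTypeStruct ... %ptr ...
//     %ptr = OpTypePointer PhysicalStorageBuffer %S
//
// makePointerFromForwardPointer, below, later reuses the forward id as the
// result id of the real OpTypePointer.)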
+ Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeForwardPointer); + type->addImmediateOperand(storageClass); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardPointerType, Id pointee) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) { + type = groupedTypes[OpTypePointer][t]; + if (type->getImmediateOperand(0) == (unsigned)storageClass && + type->getIdOperand(1) == pointee) + return type->getResultId(); + } + + type = new Instruction(forwardPointerType, NoType, OpTypePointer); + type->addImmediateOperand(storageClass); + type->addIdOperand(pointee); + groupedTypes[OpTypePointer].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeIntegerType(int width, bool hasSign) +{ +#ifdef GLSLANG_WEB + assert(width == 32); + width = 32; +#endif + + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) { + type = groupedTypes[OpTypeInt][t]; + if (type->getImmediateOperand(0) == (unsigned)width && + type->getImmediateOperand(1) == (hasSign ? 1u : 0u)) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeInt); + type->addImmediateOperand(width); + type->addImmediateOperand(hasSign ? 1 : 0); + groupedTypes[OpTypeInt].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + // deal with capabilities + switch (width) { + case 8: + case 16: + // these are currently handled by storage-type declarations and post processing + break; + case 64: + addCapability(CapabilityInt64); + break; + default: + break; + } + + return type->getResultId(); +} + +Id Builder::makeFloatType(int width) +{ +#ifdef GLSLANG_WEB + assert(width == 32); + width = 32; +#endif + + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) { + type = groupedTypes[OpTypeFloat][t]; + if (type->getImmediateOperand(0) == (unsigned)width) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeFloat); + type->addImmediateOperand(width); + groupedTypes[OpTypeFloat].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + // deal with capabilities + switch (width) { + case 16: + // currently handled by storage-type declarations and post processing + break; + case 64: + addCapability(CapabilityFloat64); + break; + default: + break; + } + + return type->getResultId(); +} + +// Make a struct without checking for duplication. +// See makeStructResultType() for non-decorated structs +// needed as the result of some instructions, which does +// check for duplicates. +Id Builder::makeStructType(const std::vector& members, const char* name) +{ + // Don't look for previous one, because in the general case, + // structs can be duplicated except for decorations. 
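// (Contrast with makeStructResultType below, which does reuse an existing
// two-member struct: those "ResType" structs never carry member decorations,
// so sharing them is safe.)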
+ + // not found, make it + Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeStruct); + for (int op = 0; op < (int)members.size(); ++op) + type->addIdOperand(members[op]); + groupedTypes[OpTypeStruct].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + addName(type->getResultId(), name); + + return type->getResultId(); +} + +// Make a struct for the simple results of several instructions, +// checking for duplication. +Id Builder::makeStructResultType(Id type0, Id type1) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeStruct].size(); ++t) { + type = groupedTypes[OpTypeStruct][t]; + if (type->getNumOperands() != 2) + continue; + if (type->getIdOperand(0) != type0 || + type->getIdOperand(1) != type1) + continue; + return type->getResultId(); + } + + // not found, make it + std::vector members; + members.push_back(type0); + members.push_back(type1); + + return makeStructType(members, "ResType"); +} + +Id Builder::makeVectorType(Id component, int size) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeVector].size(); ++t) { + type = groupedTypes[OpTypeVector][t]; + if (type->getIdOperand(0) == component && + type->getImmediateOperand(1) == (unsigned)size) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeVector); + type->addIdOperand(component); + type->addImmediateOperand(size); + groupedTypes[OpTypeVector].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeMatrixType(Id component, int cols, int rows) +{ + assert(cols <= maxMatrixSize && rows <= maxMatrixSize); + + Id column = makeVectorType(component, rows); + + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeMatrix].size(); ++t) { + type = groupedTypes[OpTypeMatrix][t]; + if (type->getIdOperand(0) == column && + type->getImmediateOperand(1) == (unsigned)cols) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeMatrix); + type->addIdOperand(column); + type->addImmediateOperand(cols); + groupedTypes[OpTypeMatrix].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeCooperativeMatrixNV].size(); ++t) { + type = groupedTypes[OpTypeCooperativeMatrixNV][t]; + if (type->getIdOperand(0) == component && + type->getIdOperand(1) == scope && + type->getIdOperand(2) == rows && + type->getIdOperand(3) == cols) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeCooperativeMatrixNV); + type->addIdOperand(component); + type->addIdOperand(scope); + type->addIdOperand(rows); + type->addIdOperand(cols); + groupedTypes[OpTypeCooperativeMatrixNV].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + + +// TODO: performance: track arrays per stride +// If a stride is supplied (non-zero) make an array. +// If no stride (0), reuse previous array types. 
+// 'size' is an Id of a constant or specialization constant of the array size +Id Builder::makeArrayType(Id element, Id sizeId, int stride) +{ + Instruction* type; + if (stride == 0) { + // try to find existing type + for (int t = 0; t < (int)groupedTypes[OpTypeArray].size(); ++t) { + type = groupedTypes[OpTypeArray][t]; + if (type->getIdOperand(0) == element && + type->getIdOperand(1) == sizeId) + return type->getResultId(); + } + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeArray); + type->addIdOperand(element); + type->addIdOperand(sizeId); + groupedTypes[OpTypeArray].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeRuntimeArray(Id element) +{ + Instruction* type = new Instruction(getUniqueId(), NoType, OpTypeRuntimeArray); + type->addIdOperand(element); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeFunctionType(Id returnType, const std::vector& paramTypes) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeFunction].size(); ++t) { + type = groupedTypes[OpTypeFunction][t]; + if (type->getIdOperand(0) != returnType || (int)paramTypes.size() != type->getNumOperands() - 1) + continue; + bool mismatch = false; + for (int p = 0; p < (int)paramTypes.size(); ++p) { + if (paramTypes[p] != type->getIdOperand(p + 1)) { + mismatch = true; + break; + } + } + if (! mismatch) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeFunction); + type->addIdOperand(returnType); + for (int p = 0; p < (int)paramTypes.size(); ++p) + type->addIdOperand(paramTypes[p]); + groupedTypes[OpTypeFunction].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, bool ms, unsigned sampled, + ImageFormat format) +{ + assert(sampled == 1 || sampled == 2); + + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeImage].size(); ++t) { + type = groupedTypes[OpTypeImage][t]; + if (type->getIdOperand(0) == sampledType && + type->getImmediateOperand(1) == (unsigned int)dim && + type->getImmediateOperand(2) == ( depth ? 1u : 0u) && + type->getImmediateOperand(3) == (arrayed ? 1u : 0u) && + type->getImmediateOperand(4) == ( ms ? 1u : 0u) && + type->getImmediateOperand(5) == sampled && + type->getImmediateOperand(6) == (unsigned int)format) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeImage); + type->addIdOperand(sampledType); + type->addImmediateOperand( dim); + type->addImmediateOperand( depth ? 1 : 0); + type->addImmediateOperand(arrayed ? 1 : 0); + type->addImmediateOperand( ms ? 
1 : 0); + type->addImmediateOperand(sampled); + type->addImmediateOperand((unsigned int)format); + + groupedTypes[OpTypeImage].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + +#ifndef GLSLANG_WEB + // deal with capabilities + switch (dim) { + case DimBuffer: + if (sampled == 1) + addCapability(CapabilitySampledBuffer); + else + addCapability(CapabilityImageBuffer); + break; + case Dim1D: + if (sampled == 1) + addCapability(CapabilitySampled1D); + else + addCapability(CapabilityImage1D); + break; + case DimCube: + if (arrayed) { + if (sampled == 1) + addCapability(CapabilitySampledCubeArray); + else + addCapability(CapabilityImageCubeArray); + } + break; + case DimRect: + if (sampled == 1) + addCapability(CapabilitySampledRect); + else + addCapability(CapabilityImageRect); + break; + case DimSubpassData: + addCapability(CapabilityInputAttachment); + break; + default: + break; + } + + if (ms) { + if (sampled == 2) { + // Images used with subpass data are not storage + // images, so don't require the capability for them. + if (dim != Dim::DimSubpassData) + addCapability(CapabilityStorageImageMultisample); + if (arrayed) + addCapability(CapabilityImageMSArray); + } + } +#endif + + return type->getResultId(); +} + +Id Builder::makeSampledImageType(Id imageType) +{ + // try to find it + Instruction* type; + for (int t = 0; t < (int)groupedTypes[OpTypeSampledImage].size(); ++t) { + type = groupedTypes[OpTypeSampledImage][t]; + if (type->getIdOperand(0) == imageType) + return type->getResultId(); + } + + // not found, make it + type = new Instruction(getUniqueId(), NoType, OpTypeSampledImage); + type->addIdOperand(imageType); + + groupedTypes[OpTypeSampledImage].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + + return type->getResultId(); +} + +#ifndef GLSLANG_WEB +Id Builder::makeAccelerationStructureType() +{ + Instruction *type; + if (groupedTypes[OpTypeAccelerationStructureKHR].size() == 0) { + type = new Instruction(getUniqueId(), NoType, OpTypeAccelerationStructureKHR); + groupedTypes[OpTypeAccelerationStructureKHR].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + } else { + type = groupedTypes[OpTypeAccelerationStructureKHR].back(); + } + + return type->getResultId(); +} + +Id Builder::makeRayQueryType() +{ + Instruction *type; + if (groupedTypes[OpTypeRayQueryProvisionalKHR].size() == 0) { + type = new Instruction(getUniqueId(), NoType, OpTypeRayQueryProvisionalKHR); + groupedTypes[OpTypeRayQueryProvisionalKHR].push_back(type); + constantsTypesGlobals.push_back(std::unique_ptr(type)); + module.mapInstruction(type); + } else { + type = groupedTypes[OpTypeRayQueryProvisionalKHR].back(); + } + + return type->getResultId(); +} +#endif + +Id Builder::getDerefTypeId(Id resultId) const +{ + Id typeId = getTypeId(resultId); + assert(isPointerType(typeId)); + + return module.getInstruction(typeId)->getIdOperand(1); +} + +Op Builder::getMostBasicTypeClass(Id typeId) const +{ + Instruction* instr = module.getInstruction(typeId); + + Op typeClass = instr->getOpCode(); + switch (typeClass) + { + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + return getMostBasicTypeClass(instr->getIdOperand(0)); + case OpTypePointer: + return getMostBasicTypeClass(instr->getIdOperand(1)); + default: + return typeClass; + } +} + +int Builder::getNumTypeConstituents(Id typeId) const +{ + Instruction* 
instr = module.getInstruction(typeId); + + switch (instr->getOpCode()) + { + case OpTypeBool: + case OpTypeInt: + case OpTypeFloat: + case OpTypePointer: + return 1; + case OpTypeVector: + case OpTypeMatrix: + return instr->getImmediateOperand(1); + case OpTypeArray: + { + Id lengthId = instr->getIdOperand(1); + return module.getInstruction(lengthId)->getImmediateOperand(0); + } + case OpTypeStruct: + return instr->getNumOperands(); + case OpTypeCooperativeMatrixNV: + // has only one constituent when used with OpCompositeConstruct. + return 1; + default: + assert(0); + return 1; + } +} + +// Return the lowest-level type of scalar that an homogeneous composite is made out of. +// Typically, this is just to find out if something is made out of ints or floats. +// However, it includes returning a structure, if say, it is an array of structure. +Id Builder::getScalarTypeId(Id typeId) const +{ + Instruction* instr = module.getInstruction(typeId); + + Op typeClass = instr->getOpCode(); + switch (typeClass) + { + case OpTypeVoid: + case OpTypeBool: + case OpTypeInt: + case OpTypeFloat: + case OpTypeStruct: + return instr->getResultId(); + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + case OpTypePointer: + return getScalarTypeId(getContainedTypeId(typeId)); + default: + assert(0); + return NoResult; + } +} + +// Return the type of 'member' of a composite. +Id Builder::getContainedTypeId(Id typeId, int member) const +{ + Instruction* instr = module.getInstruction(typeId); + + Op typeClass = instr->getOpCode(); + switch (typeClass) + { + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + case OpTypeCooperativeMatrixNV: + return instr->getIdOperand(0); + case OpTypePointer: + return instr->getIdOperand(1); + case OpTypeStruct: + return instr->getIdOperand(member); + default: + assert(0); + return NoResult; + } +} + +// Return the immediately contained type of a given composite type. +Id Builder::getContainedTypeId(Id typeId) const +{ + return getContainedTypeId(typeId, 0); +} + +// Returns true if 'typeId' is or contains a scalar type declared with 'typeOp' +// of width 'width'. The 'width' is only consumed for int and float types. +// Returns false otherwise. +bool Builder::containsType(Id typeId, spv::Op typeOp, unsigned int width) const +{ + const Instruction& instr = *module.getInstruction(typeId); + + Op typeClass = instr.getOpCode(); + switch (typeClass) + { + case OpTypeInt: + case OpTypeFloat: + return typeClass == typeOp && instr.getImmediateOperand(0) == width; + case OpTypeStruct: + for (int m = 0; m < instr.getNumOperands(); ++m) { + if (containsType(instr.getIdOperand(m), typeOp, width)) + return true; + } + return false; + case OpTypePointer: + return false; + case OpTypeVector: + case OpTypeMatrix: + case OpTypeArray: + case OpTypeRuntimeArray: + return containsType(getContainedTypeId(typeId), typeOp, width); + default: + return typeClass == typeOp; + } +} + +// return true if the type is a pointer to PhysicalStorageBufferEXT or an +// array of such pointers. These require restrict/aliased decorations. 
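// (Presumably this is what lets the builder spot variables and parameters
// whose type involves such a pointer and give them the required
// aliased/restrict decoration; pointers in other storage classes do not need
// it, hence the narrow check below.)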
+bool Builder::containsPhysicalStorageBufferOrArray(Id typeId) const +{ + const Instruction& instr = *module.getInstruction(typeId); + + Op typeClass = instr.getOpCode(); + switch (typeClass) + { + case OpTypePointer: + return getTypeStorageClass(typeId) == StorageClassPhysicalStorageBufferEXT; + case OpTypeArray: + return containsPhysicalStorageBufferOrArray(getContainedTypeId(typeId)); + default: + return false; + } +} + +// See if a scalar constant of this type has already been created, so it +// can be reused rather than duplicated. (Required by the specification). +Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value) +{ + Instruction* constant; + for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) { + constant = groupedConstants[typeClass][i]; + if (constant->getOpCode() == opcode && + constant->getTypeId() == typeId && + constant->getImmediateOperand(0) == value) + return constant->getResultId(); + } + + return 0; +} + +// Version of findScalarConstant (see above) for scalars that take two operands (e.g. a 'double' or 'int64'). +Id Builder::findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2) +{ + Instruction* constant; + for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) { + constant = groupedConstants[typeClass][i]; + if (constant->getOpCode() == opcode && + constant->getTypeId() == typeId && + constant->getImmediateOperand(0) == v1 && + constant->getImmediateOperand(1) == v2) + return constant->getResultId(); + } + + return 0; +} + +// Return true if consuming 'opcode' means consuming a constant. +// "constant" here means after final transform to executable code, +// the value consumed will be a constant, so includes specialization. +bool Builder::isConstantOpCode(Op opcode) const +{ + switch (opcode) { + case OpUndef: + case OpConstantTrue: + case OpConstantFalse: + case OpConstant: + case OpConstantComposite: + case OpConstantSampler: + case OpConstantNull: + case OpSpecConstantTrue: + case OpSpecConstantFalse: + case OpSpecConstant: + case OpSpecConstantComposite: + case OpSpecConstantOp: + return true; + default: + return false; + } +} + +// Return true if consuming 'opcode' means consuming a specialization constant. +bool Builder::isSpecConstantOpCode(Op opcode) const +{ + switch (opcode) { + case OpSpecConstantTrue: + case OpSpecConstantFalse: + case OpSpecConstant: + case OpSpecConstantComposite: + case OpSpecConstantOp: + return true; + default: + return false; + } +} + +Id Builder::makeBoolConstant(bool b, bool specConstant) +{ + Id typeId = makeBoolType(); + Instruction* constant; + Op opcode = specConstant ? (b ? OpSpecConstantTrue : OpSpecConstantFalse) : (b ? OpConstantTrue : OpConstantFalse); + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! 
specConstant) { + Id existing = 0; + for (int i = 0; i < (int)groupedConstants[OpTypeBool].size(); ++i) { + constant = groupedConstants[OpTypeBool][i]; + if (constant->getTypeId() == typeId && constant->getOpCode() == opcode) + existing = constant->getResultId(); + } + + if (existing) + return existing; + } + + // Make it + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeBool].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeIntConstant(Id typeId, unsigned value, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstant : OpConstant; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! specConstant) { + Id existing = findScalarConstant(OpTypeInt, opcode, typeId, value); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(value); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeInt].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeInt64Constant(Id typeId, unsigned long long value, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstant : OpConstant; + + unsigned op1 = value & 0xFFFFFFFF; + unsigned op2 = value >> 32; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! specConstant) { + Id existing = findScalarConstant(OpTypeInt, opcode, typeId, op1, op2); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(op1); + c->addImmediateOperand(op2); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeInt].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeFloatConstant(float f, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstant : OpConstant; + Id typeId = makeFloatType(32); + union { float fl; unsigned int ui; } u; + u.fl = f; + unsigned value = u.ui; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! specConstant) { + Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(value); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeFloat].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Id Builder::makeDoubleConstant(double d, bool specConstant) +{ +#ifdef GLSLANG_WEB + assert(0); + return NoResult; +#else + Op opcode = specConstant ? OpSpecConstant : OpConstant; + Id typeId = makeFloatType(64); + union { double db; unsigned long long ull; } u; + u.db = d; + unsigned long long value = u.ull; + unsigned op1 = value & 0xFFFFFFFF; + unsigned op2 = value >> 32; + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (! 
specConstant) { + Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, op1, op2); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(op1); + c->addImmediateOperand(op2); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeFloat].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +#endif +} + +Id Builder::makeFloat16Constant(float f16, bool specConstant) +{ +#ifdef GLSLANG_WEB + assert(0); + return NoResult; +#else + Op opcode = specConstant ? OpSpecConstant : OpConstant; + Id typeId = makeFloatType(16); + + spvutils::HexFloat> fVal(f16); + spvutils::HexFloat> f16Val(0); + fVal.castTo(f16Val, spvutils::kRoundToZero); + + unsigned value = f16Val.value().getAsFloat().get_value(); + + // See if we already made it. Applies only to regular constants, because specialization constants + // must remain distinct for the purpose of applying a SpecId decoration. + if (!specConstant) { + Id existing = findScalarConstant(OpTypeFloat, opcode, typeId, value); + if (existing) + return existing; + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + c->addImmediateOperand(value); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + groupedConstants[OpTypeFloat].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +#endif +} + +Id Builder::makeFpConstant(Id type, double d, bool specConstant) +{ +#ifdef GLSLANG_WEB + const int width = 32; + assert(width == getScalarTypeWidth(type)); +#else + const int width = getScalarTypeWidth(type); +#endif + + assert(isFloatType(type)); + + switch (width) { + case 16: + return makeFloat16Constant((float)d, specConstant); + case 32: + return makeFloatConstant((float)d, specConstant); + case 64: + return makeDoubleConstant(d, specConstant); + default: + break; + } + + assert(false); + return NoResult; +} + +Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector& comps) +{ + Instruction* constant = 0; + bool found = false; + for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) { + constant = groupedConstants[typeClass][i]; + + if (constant->getTypeId() != typeId) + continue; + + // same contents? + bool mismatch = false; + for (int op = 0; op < constant->getNumOperands(); ++op) { + if (constant->getIdOperand(op) != comps[op]) { + mismatch = true; + break; + } + } + if (! mismatch) { + found = true; + break; + } + } + + return found ? constant->getResultId() : NoResult; +} + +Id Builder::findStructConstant(Id typeId, const std::vector& comps) +{ + Instruction* constant = 0; + bool found = false; + for (int i = 0; i < (int)groupedStructConstants[typeId].size(); ++i) { + constant = groupedStructConstants[typeId][i]; + + // same contents? + bool mismatch = false; + for (int op = 0; op < constant->getNumOperands(); ++op) { + if (constant->getIdOperand(op) != comps[op]) { + mismatch = true; + break; + } + } + if (! mismatch) { + found = true; + break; + } + } + + return found ? constant->getResultId() : NoResult; +} + +// Comments in header +Id Builder::makeCompositeConstant(Id typeId, const std::vector& members, bool specConstant) +{ + Op opcode = specConstant ? OpSpecConstantComposite : OpConstantComposite; + assert(typeId); + Op typeClass = getTypeClass(typeId); + + switch (typeClass) { + case OpTypeVector: + case OpTypeArray: + case OpTypeMatrix: + case OpTypeCooperativeMatrixNV: + if (! 
specConstant) { + Id existing = findCompositeConstant(typeClass, typeId, members); + if (existing) + return existing; + } + break; + case OpTypeStruct: + if (! specConstant) { + Id existing = findStructConstant(typeId, members); + if (existing) + return existing; + } + break; + default: + assert(0); + return makeFloatConstant(0.0); + } + + Instruction* c = new Instruction(getUniqueId(), typeId, opcode); + for (int op = 0; op < (int)members.size(); ++op) + c->addIdOperand(members[op]); + constantsTypesGlobals.push_back(std::unique_ptr(c)); + if (typeClass == OpTypeStruct) + groupedStructConstants[typeId].push_back(c); + else + groupedConstants[typeClass].push_back(c); + module.mapInstruction(c); + + return c->getResultId(); +} + +Instruction* Builder::addEntryPoint(ExecutionModel model, Function* function, const char* name) +{ + Instruction* entryPoint = new Instruction(OpEntryPoint); + entryPoint->addImmediateOperand(model); + entryPoint->addIdOperand(function->getId()); + entryPoint->addStringOperand(name); + + entryPoints.push_back(std::unique_ptr(entryPoint)); + + return entryPoint; +} + +// Currently relying on the fact that all 'value' of interest are small non-negative values. +void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, int value1, int value2, int value3) +{ + Instruction* instr = new Instruction(OpExecutionMode); + instr->addIdOperand(entryPoint->getId()); + instr->addImmediateOperand(mode); + if (value1 >= 0) + instr->addImmediateOperand(value1); + if (value2 >= 0) + instr->addImmediateOperand(value2); + if (value3 >= 0) + instr->addImmediateOperand(value3); + + executionModes.push_back(std::unique_ptr(instr)); +} + +void Builder::addExecutionMode(Function* entryPoint, ExecutionMode mode, const std::vector& literals) +{ + Instruction* instr = new Instruction(OpExecutionMode); + instr->addIdOperand(entryPoint->getId()); + instr->addImmediateOperand(mode); + for (auto literal : literals) + instr->addImmediateOperand(literal); + + executionModes.push_back(std::unique_ptr(instr)); +} + +void Builder::addExecutionModeId(Function* entryPoint, ExecutionMode mode, const std::vector& operandIds) +{ + Instruction* instr = new Instruction(OpExecutionModeId); + instr->addIdOperand(entryPoint->getId()); + instr->addImmediateOperand(mode); + for (auto operandId : operandIds) + instr->addIdOperand(operandId); + + executionModes.push_back(std::unique_ptr(instr)); +} + +void Builder::addName(Id id, const char* string) +{ + Instruction* name = new Instruction(OpName); + name->addIdOperand(id); + name->addStringOperand(string); + + names.push_back(std::unique_ptr(name)); +} + +void Builder::addMemberName(Id id, int memberNumber, const char* string) +{ + Instruction* name = new Instruction(OpMemberName); + name->addIdOperand(id); + name->addImmediateOperand(memberNumber); + name->addStringOperand(string); + + names.push_back(std::unique_ptr(name)); +} + +void Builder::addDecoration(Id id, Decoration decoration, int num) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorate); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + if (num >= 0) + dec->addImmediateOperand(num); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecoration(Id id, Decoration decoration, const char* s) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorateString); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + dec->addStringOperand(s); + + 
decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecoration(Id id, Decoration decoration, const std::vector& literals) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorate); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + for (auto literal : literals) + dec->addImmediateOperand(literal); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecoration(Id id, Decoration decoration, const std::vector& strings) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorateString); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + for (auto string : strings) + dec->addStringOperand(string); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecorationId(Id id, Decoration decoration, Id idDecoration) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorateId); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + dec->addIdOperand(idDecoration); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addDecorationId(Id id, Decoration decoration, const std::vector& operandIds) +{ + if(decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpDecorateId); + dec->addIdOperand(id); + dec->addImmediateOperand(decoration); + + for (auto operandId : operandIds) + dec->addIdOperand(operandId); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, int num) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpMemberDecorate); + dec->addIdOperand(id); + dec->addImmediateOperand(member); + dec->addImmediateOperand(decoration); + if (num >= 0) + dec->addImmediateOperand(num); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const char *s) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpMemberDecorateStringGOOGLE); + dec->addIdOperand(id); + dec->addImmediateOperand(member); + dec->addImmediateOperand(decoration); + dec->addStringOperand(s); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const std::vector& literals) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpMemberDecorate); + dec->addIdOperand(id); + dec->addImmediateOperand(member); + dec->addImmediateOperand(decoration); + for (auto literal : literals) + dec->addImmediateOperand(literal); + + decorations.push_back(std::unique_ptr(dec)); +} + +void Builder::addMemberDecoration(Id id, unsigned int member, Decoration decoration, const std::vector& strings) +{ + if (decoration == spv::DecorationMax) + return; + + Instruction* dec = new Instruction(OpMemberDecorateString); + dec->addIdOperand(id); + dec->addImmediateOperand(member); + dec->addImmediateOperand(decoration); + for (auto string : strings) + dec->addStringOperand(string); + + decorations.push_back(std::unique_ptr(dec)); +} + +// Comments in header +Function* Builder::makeEntryPoint(const char* entryPoint) +{ + assert(! 
entryPointFunction); + + Block* entry; + std::vector params; + std::vector> decorations; + + entryPointFunction = makeFunctionEntry(NoPrecision, makeVoidType(), entryPoint, params, decorations, &entry); + + return entryPointFunction; +} + +// Comments in header +Function* Builder::makeFunctionEntry(Decoration precision, Id returnType, const char* name, + const std::vector& paramTypes, + const std::vector>& decorations, Block **entry) +{ + // Make the function and initial instructions in it + Id typeId = makeFunctionType(returnType, paramTypes); + Id firstParamId = paramTypes.size() == 0 ? 0 : getUniqueIds((int)paramTypes.size()); + Function* function = new Function(getUniqueId(), returnType, typeId, firstParamId, module); + + // Set up the precisions + setPrecision(function->getId(), precision); + function->setReturnPrecision(precision); + for (unsigned p = 0; p < (unsigned)decorations.size(); ++p) { + for (int d = 0; d < (int)decorations[p].size(); ++d) { + addDecoration(firstParamId + p, decorations[p][d]); + function->addParamPrecision(p, decorations[p][d]); + } + } + + // CFG + if (entry) { + *entry = new Block(getUniqueId(), *function); + function->addBlock(*entry); + setBuildPoint(*entry); + } + + if (name) + addName(function->getId(), name); + + functions.push_back(std::unique_ptr(function)); + + return function; +} + +// Comments in header +void Builder::makeReturn(bool implicit, Id retVal) +{ + if (retVal) { + Instruction* inst = new Instruction(NoResult, NoType, OpReturnValue); + inst->addIdOperand(retVal); + buildPoint->addInstruction(std::unique_ptr(inst)); + } else + buildPoint->addInstruction(std::unique_ptr(new Instruction(NoResult, NoType, OpReturn))); + + if (! implicit) + createAndSetNoPredecessorBlock("post-return"); +} + +// Comments in header +void Builder::leaveFunction() +{ + Block* block = buildPoint; + Function& function = buildPoint->getParent(); + assert(block); + + // If our function did not contain a return, add a return void now. + if (! block->isTerminated()) { + if (function.getReturnType() == makeVoidType()) + makeReturn(true); + else { + makeReturn(true, createUndefined(function.getReturnType())); + } + } +} + +// Comments in header +void Builder::makeDiscard() +{ + buildPoint->addInstruction(std::unique_ptr(new Instruction(OpKill))); + createAndSetNoPredecessorBlock("post-discard"); +} + +// Comments in header +Id Builder::createVariable(Decoration precision, StorageClass storageClass, Id type, const char* name, Id initializer) +{ + Id pointerType = makePointer(storageClass, type); + Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable); + inst->addImmediateOperand(storageClass); + if (initializer != NoResult) + inst->addIdOperand(initializer); + + switch (storageClass) { + case StorageClassFunction: + // Validation rules require the declaration in the entry block + buildPoint->getParent().addLocalVariable(std::unique_ptr(inst)); + break; + + default: + constantsTypesGlobals.push_back(std::unique_ptr(inst)); + module.mapInstruction(inst); + break; + } + + if (name) + addName(inst->getResultId(), name); + setPrecision(inst->getResultId(), precision); + + return inst->getResultId(); +} + +// Comments in header +Id Builder::createUndefined(Id type) +{ + Instruction* inst = new Instruction(getUniqueId(), type, OpUndef); + buildPoint->addInstruction(std::unique_ptr(inst)); + return inst->getResultId(); +} + +// av/vis/nonprivate are unnecessary and illegal for some storage classes. 
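+// (Storage classes other than Uniform/Workgroup/StorageBuffer/PhysicalStorageBuffer
+//  cannot legally carry the KHR availability/visibility/non-private bits, so they are
+//  masked off below.)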
+spv::MemoryAccessMask Builder::sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc)
+    const
+{
+    switch (sc) {
+    case spv::StorageClassUniform:
+    case spv::StorageClassWorkgroup:
+    case spv::StorageClassStorageBuffer:
+    case spv::StorageClassPhysicalStorageBufferEXT:
+        break;
+    default:
+        memoryAccess = spv::MemoryAccessMask(memoryAccess &
+            ~(spv::MemoryAccessMakePointerAvailableKHRMask |
+              spv::MemoryAccessMakePointerVisibleKHRMask |
+              spv::MemoryAccessNonPrivatePointerKHRMask));
+        break;
+    }
+    return memoryAccess;
+}
+
+// Comments in header
+void Builder::createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess, spv::Scope scope,
+    unsigned int alignment)
+{
+    Instruction* store = new Instruction(OpStore);
+    store->addIdOperand(lValue);
+    store->addIdOperand(rValue);
+
+    memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
+
+    if (memoryAccess != MemoryAccessMaskNone) {
+        store->addImmediateOperand(memoryAccess);
+        if (memoryAccess & spv::MemoryAccessAlignedMask) {
+            store->addImmediateOperand(alignment);
+        }
+        if (memoryAccess & spv::MemoryAccessMakePointerAvailableKHRMask) {
+            store->addIdOperand(makeUintConstant(scope));
+        }
+    }
+
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(store));
+}
+
+// Comments in header
+Id Builder::createLoad(Id lValue, spv::Decoration precision, spv::MemoryAccessMask memoryAccess,
+    spv::Scope scope, unsigned int alignment)
+{
+    Instruction* load = new Instruction(getUniqueId(), getDerefTypeId(lValue), OpLoad);
+    load->addIdOperand(lValue);
+
+    memoryAccess = sanitizeMemoryAccessForStorageClass(memoryAccess, getStorageClass(lValue));
+
+    if (memoryAccess != MemoryAccessMaskNone) {
+        load->addImmediateOperand(memoryAccess);
+        if (memoryAccess & spv::MemoryAccessAlignedMask) {
+            load->addImmediateOperand(alignment);
+        }
+        if (memoryAccess & spv::MemoryAccessMakePointerVisibleKHRMask) {
+            load->addIdOperand(makeUintConstant(scope));
+        }
+    }
+
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(load));
+    setPrecision(load->getResultId(), precision);
+
+    return load->getResultId();
+}
+
+// Comments in header
+Id Builder::createAccessChain(StorageClass storageClass, Id base, const std::vector<Id>& offsets)
+{
+    // Figure out the final resulting type.
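+    // (Each offset is walked through the pointee type: struct members need a
+    //  compile-time constant index, other composites are indexed by the id directly;
+    //  the final type is re-wrapped as a pointer in the requested storage class.)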
+    spv::Id typeId = getTypeId(base);
+    assert(isPointerType(typeId) && offsets.size() > 0);
+    typeId = getContainedTypeId(typeId);
+    for (int i = 0; i < (int)offsets.size(); ++i) {
+        if (isStructType(typeId)) {
+            assert(isConstantScalar(offsets[i]));
+            typeId = getContainedTypeId(typeId, getConstantScalar(offsets[i]));
+        } else
+            typeId = getContainedTypeId(typeId, offsets[i]);
+    }
+    typeId = makePointer(storageClass, typeId);
+
+    // Make the instruction
+    Instruction* chain = new Instruction(getUniqueId(), typeId, OpAccessChain);
+    chain->addIdOperand(base);
+    for (int i = 0; i < (int)offsets.size(); ++i)
+        chain->addIdOperand(offsets[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(chain));
+
+    return chain->getResultId();
+}
+
+Id Builder::createArrayLength(Id base, unsigned int member)
+{
+    spv::Id intType = makeUintType(32);
+    Instruction* length = new Instruction(getUniqueId(), intType, OpArrayLength);
+    length->addIdOperand(base);
+    length->addImmediateOperand(member);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
+
+    return length->getResultId();
+}
+
+Id Builder::createCooperativeMatrixLength(Id type)
+{
+    spv::Id intType = makeUintType(32);
+
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        return createSpecConstantOp(OpCooperativeMatrixLengthNV, intType, std::vector<Id>(1, type), std::vector<Id>());
+    }
+
+    Instruction* length = new Instruction(getUniqueId(), intType, OpCooperativeMatrixLengthNV);
+    length->addIdOperand(type);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
+
+    return length->getResultId();
+}
+
+Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite),
+            std::vector<Id>(1, index));
+    }
+    Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+    extract->addIdOperand(composite);
+    extract->addImmediateOperand(index);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+    return extract->getResultId();
+}
+
+Id Builder::createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes)
+{
+    // Generate code for spec constants if in spec constant operation
+    // generation mode.
+    if (generatingOpCodeForSpecConst) {
+        return createSpecConstantOp(OpCompositeExtract, typeId, std::vector<Id>(1, composite), indexes);
+    }
+    Instruction* extract = new Instruction(getUniqueId(), typeId, OpCompositeExtract);
+    extract->addIdOperand(composite);
+    for (int i = 0; i < (int)indexes.size(); ++i)
+        extract->addImmediateOperand(indexes[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+    return extract->getResultId();
+}
+
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, unsigned index)
+{
+    Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+    insert->addIdOperand(object);
+    insert->addIdOperand(composite);
+    insert->addImmediateOperand(index);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+    return insert->getResultId();
+}
+
+Id Builder::createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes)
+{
+    Instruction* insert = new Instruction(getUniqueId(), typeId, OpCompositeInsert);
+    insert->addIdOperand(object);
+    insert->addIdOperand(composite);
+    for (int i = 0; i < (int)indexes.size(); ++i)
+        insert->addImmediateOperand(indexes[i]);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+    return insert->getResultId();
+}
+
+Id Builder::createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex)
+{
+    Instruction* extract = new Instruction(getUniqueId(), typeId, OpVectorExtractDynamic);
+    extract->addIdOperand(vector);
+    extract->addIdOperand(componentIndex);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(extract));
+
+    return extract->getResultId();
+}
+
+Id Builder::createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex)
+{
+    Instruction* insert = new Instruction(getUniqueId(), typeId, OpVectorInsertDynamic);
+    insert->addIdOperand(vector);
+    insert->addIdOperand(component);
+    insert->addIdOperand(componentIndex);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(insert));
+
+    return insert->getResultId();
+}
+
+// An opcode that has no operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode)
+{
+    Instruction* op = new Instruction(opCode);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one id operand, no result id, and no type
+void Builder::createNoResultOp(Op opCode, Id operand)
+{
+    Instruction* op = new Instruction(opCode);
+    op->addIdOperand(operand);
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has one or more operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<Id>& operands)
+{
+    Instruction* op = new Instruction(opCode);
+    for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+        op->addIdOperand(*it);
+    }
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+// An opcode that has multiple operands, no result id, and no type
+void Builder::createNoResultOp(Op opCode, const std::vector<IdImmediate>& operands)
+{
+    Instruction* op = new Instruction(opCode);
+    for (auto it = operands.cbegin(); it != operands.cend(); ++it) {
+        if (it->isId)
+            op->addIdOperand(it->word);
+        else
+            op->addImmediateOperand(it->word);
+    }
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+void Builder::createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask semantics)
+{
+    Instruction* op = new Instruction(OpControlBarrier);
+    op->addIdOperand(makeUintConstant(execution));
+    op->addIdOperand(makeUintConstant(memory));
+    op->addIdOperand(makeUintConstant(semantics));
+    buildPoint->addInstruction(std::unique_ptr<Instruction>(op));
+}
+
+void
Builder::createMemoryBarrier(unsigned executionScope, unsigned memorySemantics) +{ + Instruction* op = new Instruction(OpMemoryBarrier); + op->addIdOperand(makeUintConstant(executionScope)); + op->addIdOperand(makeUintConstant(memorySemantics)); + buildPoint->addInstruction(std::unique_ptr(op)); +} + +// An opcode that has one operands, a result id, and a type +Id Builder::createUnaryOp(Op opCode, Id typeId, Id operand) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + return createSpecConstantOp(opCode, typeId, std::vector(1, operand), std::vector()); + } + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + op->addIdOperand(operand); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createBinOp(Op opCode, Id typeId, Id left, Id right) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + std::vector operands(2); + operands[0] = left; operands[1] = right; + return createSpecConstantOp(opCode, typeId, operands, std::vector()); + } + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + op->addIdOperand(left); + op->addIdOperand(right); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createTriOp(Op opCode, Id typeId, Id op1, Id op2, Id op3) +{ + // Generate code for spec constants if in spec constant operation + // generation mode. + if (generatingOpCodeForSpecConst) { + std::vector operands(3); + operands[0] = op1; + operands[1] = op2; + operands[2] = op3; + return createSpecConstantOp( + opCode, typeId, operands, std::vector()); + } + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + op->addIdOperand(op1); + op->addIdOperand(op2); + op->addIdOperand(op3); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createOp(Op opCode, Id typeId, const std::vector& operands) +{ + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) + op->addIdOperand(*it); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createOp(Op opCode, Id typeId, const std::vector& operands) +{ + Instruction* op = new Instruction(getUniqueId(), typeId, opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) { + if (it->isId) + op->addIdOperand(it->word); + else + op->addImmediateOperand(it->word); + } + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createSpecConstantOp(Op opCode, Id typeId, const std::vector& operands, + const std::vector& literals) +{ + Instruction* op = new Instruction(getUniqueId(), typeId, OpSpecConstantOp); + op->addImmediateOperand((unsigned) opCode); + for (auto it = operands.cbegin(); it != operands.cend(); ++it) + op->addIdOperand(*it); + for (auto it = literals.cbegin(); it != literals.cend(); ++it) + op->addImmediateOperand(*it); + module.mapInstruction(op); + constantsTypesGlobals.push_back(std::unique_ptr(op)); + + return op->getResultId(); +} + +Id Builder::createFunctionCall(spv::Function* function, const std::vector& args) +{ + Instruction* op = new Instruction(getUniqueId(), function->getReturnType(), OpFunctionCall); + op->addIdOperand(function->getId()); + for (int a = 0; a < (int)args.size(); ++a) + op->addIdOperand(args[a]); + 
buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +// Comments in header +Id Builder::createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector& channels) +{ + if (channels.size() == 1) + return setPrecision(createCompositeExtract(source, typeId, channels.front()), precision); + + if (generatingOpCodeForSpecConst) { + std::vector operands(2); + operands[0] = operands[1] = source; + return setPrecision(createSpecConstantOp(OpVectorShuffle, typeId, operands, channels), precision); + } + Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle); + assert(isVector(source)); + swizzle->addIdOperand(source); + swizzle->addIdOperand(source); + for (int i = 0; i < (int)channels.size(); ++i) + swizzle->addImmediateOperand(channels[i]); + buildPoint->addInstruction(std::unique_ptr(swizzle)); + + return setPrecision(swizzle->getResultId(), precision); +} + +// Comments in header +Id Builder::createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector& channels) +{ + if (channels.size() == 1 && getNumComponents(source) == 1) + return createCompositeInsert(source, target, typeId, channels.front()); + + Instruction* swizzle = new Instruction(getUniqueId(), typeId, OpVectorShuffle); + + assert(isVector(target)); + swizzle->addIdOperand(target); + + assert(getNumComponents(source) == (int)channels.size()); + assert(isVector(source)); + swizzle->addIdOperand(source); + + // Set up an identity shuffle from the base value to the result value + unsigned int components[4]; + int numTargetComponents = getNumComponents(target); + for (int i = 0; i < numTargetComponents; ++i) + components[i] = i; + + // Punch in the l-value swizzle + for (int i = 0; i < (int)channels.size(); ++i) + components[channels[i]] = numTargetComponents + i; + + // finish the instruction with these components selectors + for (int i = 0; i < numTargetComponents; ++i) + swizzle->addImmediateOperand(components[i]); + buildPoint->addInstruction(std::unique_ptr(swizzle)); + + return swizzle->getResultId(); +} + +// Comments in header +void Builder::promoteScalar(Decoration precision, Id& left, Id& right) +{ + int direction = getNumComponents(right) - getNumComponents(left); + + if (direction > 0) + left = smearScalar(precision, left, makeVectorType(getTypeId(left), getNumComponents(right))); + else if (direction < 0) + right = smearScalar(precision, right, makeVectorType(getTypeId(right), getNumComponents(left))); + + return; +} + +// Comments in header +Id Builder::smearScalar(Decoration precision, Id scalar, Id vectorType) +{ + assert(getNumComponents(scalar) == 1); + assert(getTypeId(scalar) == getScalarTypeId(vectorType)); + + int numComponents = getNumTypeComponents(vectorType); + if (numComponents == 1) + return scalar; + + Instruction* smear = nullptr; + if (generatingOpCodeForSpecConst) { + auto members = std::vector(numComponents, scalar); + // Sometime even in spec-constant-op mode, the temporary vector created by + // promoting a scalar might not be a spec constant. This should depend on + // the scalar. + // e.g.: + // const vec2 spec_const_result = a_spec_const_vec2 + a_front_end_const_scalar; + // In such cases, the temporary vector created from a_front_end_const_scalar + // is not a spec constant vector, even though the binary operation node is marked + // as 'specConstant' and we are in spec-constant-op mode. 
+ auto result_id = makeCompositeConstant(vectorType, members, isSpecConstant(scalar)); + smear = module.getInstruction(result_id); + } else { + smear = new Instruction(getUniqueId(), vectorType, OpCompositeConstruct); + for (int c = 0; c < numComponents; ++c) + smear->addIdOperand(scalar); + buildPoint->addInstruction(std::unique_ptr(smear)); + } + + return setPrecision(smear->getResultId(), precision); +} + +// Comments in header +Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector& args) +{ + Instruction* inst = new Instruction(getUniqueId(), resultType, OpExtInst); + inst->addIdOperand(builtins); + inst->addImmediateOperand(entryPoint); + for (int arg = 0; arg < (int)args.size(); ++arg) + inst->addIdOperand(args[arg]); + + buildPoint->addInstruction(std::unique_ptr(inst)); + + return inst->getResultId(); +} + +// Accept all parameters needed to create a texture instruction. +// Create the correct instruction based on the inputs, and make the call. +Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, + bool noImplicitLod, const TextureParameters& parameters, ImageOperandsMask signExtensionMask) +{ + static const int maxTextureArgs = 10; + Id texArgs[maxTextureArgs] = {}; + + // + // Set up the fixed arguments + // + int numArgs = 0; + bool explicitLod = false; + texArgs[numArgs++] = parameters.sampler; + texArgs[numArgs++] = parameters.coords; + if (parameters.Dref != NoResult) + texArgs[numArgs++] = parameters.Dref; + if (parameters.component != NoResult) + texArgs[numArgs++] = parameters.component; + +#ifndef GLSLANG_WEB + if (parameters.granularity != NoResult) + texArgs[numArgs++] = parameters.granularity; + if (parameters.coarse != NoResult) + texArgs[numArgs++] = parameters.coarse; +#endif + + // + // Set up the optional arguments + // + int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments + ++numArgs; // speculatively make room for the mask operand + ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand + if (parameters.bias) { + mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask); + texArgs[numArgs++] = parameters.bias; + } + if (parameters.lod) { + mask = (ImageOperandsMask)(mask | ImageOperandsLodMask); + texArgs[numArgs++] = parameters.lod; + explicitLod = true; + } else if (parameters.gradX) { + mask = (ImageOperandsMask)(mask | ImageOperandsGradMask); + texArgs[numArgs++] = parameters.gradX; + texArgs[numArgs++] = parameters.gradY; + explicitLod = true; + } else if (noImplicitLod && ! fetch && ! 
gather) { + // have to explicitly use lod of 0 if not allowed to have them be implicit, and + // we would otherwise be about to issue an implicit instruction + mask = (ImageOperandsMask)(mask | ImageOperandsLodMask); + texArgs[numArgs++] = makeFloatConstant(0.0); + explicitLod = true; + } + if (parameters.offset) { + if (isConstant(parameters.offset)) + mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetMask); + else { + addCapability(CapabilityImageGatherExtended); + mask = (ImageOperandsMask)(mask | ImageOperandsOffsetMask); + } + texArgs[numArgs++] = parameters.offset; + } + if (parameters.offsets) { + addCapability(CapabilityImageGatherExtended); + mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask); + texArgs[numArgs++] = parameters.offsets; + } +#ifndef GLSLANG_WEB + if (parameters.sample) { + mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask); + texArgs[numArgs++] = parameters.sample; + } + if (parameters.lodClamp) { + // capability if this bit is used + addCapability(CapabilityMinLod); + + mask = (ImageOperandsMask)(mask | ImageOperandsMinLodMask); + texArgs[numArgs++] = parameters.lodClamp; + } + if (parameters.nonprivate) { + mask = mask | ImageOperandsNonPrivateTexelKHRMask; + } + if (parameters.volatil) { + mask = mask | ImageOperandsVolatileTexelKHRMask; + } +#endif + mask = mask | signExtensionMask; + if (mask == ImageOperandsMaskNone) + --numArgs; // undo speculative reservation for the mask argument + else + texArgs[optArgNum] = mask; + + // + // Set up the instruction + // + Op opCode = OpNop; // All paths below need to set this + if (fetch) { + if (sparse) + opCode = OpImageSparseFetch; + else + opCode = OpImageFetch; +#ifndef GLSLANG_WEB + } else if (parameters.granularity && parameters.coarse) { + opCode = OpImageSampleFootprintNV; + } else if (gather) { + if (parameters.Dref) + if (sparse) + opCode = OpImageSparseDrefGather; + else + opCode = OpImageDrefGather; + else + if (sparse) + opCode = OpImageSparseGather; + else + opCode = OpImageGather; +#endif + } else if (explicitLod) { + if (parameters.Dref) { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjDrefExplicitLod; + else + opCode = OpImageSampleProjDrefExplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleDrefExplicitLod; + else + opCode = OpImageSampleDrefExplicitLod; + } else { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjExplicitLod; + else + opCode = OpImageSampleProjExplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleExplicitLod; + else + opCode = OpImageSampleExplicitLod; + } + } else { + if (parameters.Dref) { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjDrefImplicitLod; + else + opCode = OpImageSampleProjDrefImplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleDrefImplicitLod; + else + opCode = OpImageSampleDrefImplicitLod; + } else { + if (proj) + if (sparse) + opCode = OpImageSparseSampleProjImplicitLod; + else + opCode = OpImageSampleProjImplicitLod; + else + if (sparse) + opCode = OpImageSparseSampleImplicitLod; + else + opCode = OpImageSampleImplicitLod; + } + } + + // See if the result type is expecting a smeared result. + // This happens when a legacy shadow*() call is made, which + // gets a vec4 back instead of a float. + Id smearedType = resultType; + if (! 
isScalarType(resultType)) { + switch (opCode) { + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + resultType = getScalarTypeId(resultType); + break; + default: + break; + } + } + + Id typeId0 = 0; + Id typeId1 = 0; + + if (sparse) { + typeId0 = resultType; + typeId1 = getDerefTypeId(parameters.texelOut); + resultType = makeStructResultType(typeId0, typeId1); + } + + // Build the SPIR-V instruction + Instruction* textureInst = new Instruction(getUniqueId(), resultType, opCode); + for (int op = 0; op < optArgNum; ++op) + textureInst->addIdOperand(texArgs[op]); + if (optArgNum < numArgs) + textureInst->addImmediateOperand(texArgs[optArgNum]); + for (int op = optArgNum + 1; op < numArgs; ++op) + textureInst->addIdOperand(texArgs[op]); + setPrecision(textureInst->getResultId(), precision); + buildPoint->addInstruction(std::unique_ptr(textureInst)); + + Id resultId = textureInst->getResultId(); + + if (sparse) { + // set capability + addCapability(CapabilitySparseResidency); + + // Decode the return type that was a special structure + createStore(createCompositeExtract(resultId, typeId1, 1), parameters.texelOut); + resultId = createCompositeExtract(resultId, typeId0, 0); + setPrecision(resultId, precision); + } else { + // When a smear is needed, do it, as per what was computed + // above when resultType was changed to a scalar type. + if (resultType != smearedType) + resultId = smearScalar(precision, resultId, smearedType); + } + + return resultId; +} + +// Comments in header +Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameters, bool isUnsignedResult) +{ + // Figure out the result type + Id resultType = 0; + switch (opCode) { + case OpImageQuerySize: + case OpImageQuerySizeLod: + { + int numComponents = 0; + switch (getTypeDimensionality(getImageType(parameters.sampler))) { + case Dim1D: + case DimBuffer: + numComponents = 1; + break; + case Dim2D: + case DimCube: + case DimRect: + case DimSubpassData: + numComponents = 2; + break; + case Dim3D: + numComponents = 3; + break; + + default: + assert(0); + break; + } + if (isArrayedImageType(getImageType(parameters.sampler))) + ++numComponents; + + Id intType = isUnsignedResult ? makeUintType(32) : makeIntType(32); + if (numComponents == 1) + resultType = intType; + else + resultType = makeVectorType(intType, numComponents); + + break; + } + case OpImageQueryLod: + resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2); + break; + case OpImageQueryLevels: + case OpImageQuerySamples: + resultType = isUnsignedResult ? makeUintType(32) : makeIntType(32); + break; + default: + assert(0); + break; + } + + Instruction* query = new Instruction(getUniqueId(), resultType, opCode); + query->addIdOperand(parameters.sampler); + if (parameters.coords) + query->addIdOperand(parameters.coords); + if (parameters.lod) + query->addIdOperand(parameters.lod); + buildPoint->addInstruction(std::unique_ptr(query)); + addCapability(CapabilityImageQuery); + + return query->getResultId(); +} + +// External comments in header. +// Operates recursively to visit the composite's hierarchy. 
+Id Builder::createCompositeCompare(Decoration precision, Id value1, Id value2, bool equal) +{ + Id boolType = makeBoolType(); + Id valueType = getTypeId(value1); + + Id resultId = NoResult; + + int numConstituents = getNumTypeConstituents(valueType); + + // Scalars and Vectors + + if (isScalarType(valueType) || isVectorType(valueType)) { + assert(valueType == getTypeId(value2)); + // These just need a single comparison, just have + // to figure out what it is. + Op op; + switch (getMostBasicTypeClass(valueType)) { + case OpTypeFloat: + op = equal ? OpFOrdEqual : OpFUnordNotEqual; + break; + case OpTypeInt: + default: + op = equal ? OpIEqual : OpINotEqual; + break; + case OpTypeBool: + op = equal ? OpLogicalEqual : OpLogicalNotEqual; + precision = NoPrecision; + break; + } + + if (isScalarType(valueType)) { + // scalar + resultId = createBinOp(op, boolType, value1, value2); + } else { + // vector + resultId = createBinOp(op, makeVectorType(boolType, numConstituents), value1, value2); + setPrecision(resultId, precision); + // reduce vector compares... + resultId = createUnaryOp(equal ? OpAll : OpAny, boolType, resultId); + } + + return setPrecision(resultId, precision); + } + + // Only structs, arrays, and matrices should be left. + // They share in common the reduction operation across their constituents. + assert(isAggregateType(valueType) || isMatrixType(valueType)); + + // Compare each pair of constituents + for (int constituent = 0; constituent < numConstituents; ++constituent) { + std::vector indexes(1, constituent); + Id constituentType1 = getContainedTypeId(getTypeId(value1), constituent); + Id constituentType2 = getContainedTypeId(getTypeId(value2), constituent); + Id constituent1 = createCompositeExtract(value1, constituentType1, indexes); + Id constituent2 = createCompositeExtract(value2, constituentType2, indexes); + + Id subResultId = createCompositeCompare(precision, constituent1, constituent2, equal); + + if (constituent == 0) + resultId = subResultId; + else + resultId = setPrecision(createBinOp(equal ? OpLogicalAnd : OpLogicalOr, boolType, resultId, subResultId), + precision); + } + + return resultId; +} + +// OpCompositeConstruct +Id Builder::createCompositeConstruct(Id typeId, const std::vector& constituents) +{ + assert(isAggregateType(typeId) || (getNumTypeConstituents(typeId) > 1 && + getNumTypeConstituents(typeId) == (int)constituents.size())); + + if (generatingOpCodeForSpecConst) { + // Sometime, even in spec-constant-op mode, the constant composite to be + // constructed may not be a specialization constant. + // e.g.: + // const mat2 m2 = mat2(a_spec_const, a_front_end_const, another_front_end_const, third_front_end_const); + // The first column vector should be a spec constant one, as a_spec_const is a spec constant. + // The second column vector should NOT be spec constant, as it does not contain any spec constants. + // To handle such cases, we check the constituents of the constant vector to determine whether this + // vector should be created as a spec constant. 
+ return makeCompositeConstant(typeId, constituents, + std::any_of(constituents.begin(), constituents.end(), + [&](spv::Id id) { return isSpecConstant(id); })); + } + + Instruction* op = new Instruction(getUniqueId(), typeId, OpCompositeConstruct); + for (int c = 0; c < (int)constituents.size(); ++c) + op->addIdOperand(constituents[c]); + buildPoint->addInstruction(std::unique_ptr(op)); + + return op->getResultId(); +} + +// Vector or scalar constructor +Id Builder::createConstructor(Decoration precision, const std::vector& sources, Id resultTypeId) +{ + Id result = NoResult; + unsigned int numTargetComponents = getNumTypeComponents(resultTypeId); + unsigned int targetComponent = 0; + + // Special case: when calling a vector constructor with a single scalar + // argument, smear the scalar + if (sources.size() == 1 && isScalar(sources[0]) && numTargetComponents > 1) + return smearScalar(precision, sources[0], resultTypeId); + + // accumulate the arguments for OpCompositeConstruct + std::vector constituents; + Id scalarTypeId = getScalarTypeId(resultTypeId); + + // lambda to store the result of visiting an argument component + const auto latchResult = [&](Id comp) { + if (numTargetComponents > 1) + constituents.push_back(comp); + else + result = comp; + ++targetComponent; + }; + + // lambda to visit a vector argument's components + const auto accumulateVectorConstituents = [&](Id sourceArg) { + unsigned int sourceSize = getNumComponents(sourceArg); + unsigned int sourcesToUse = sourceSize; + if (sourcesToUse + targetComponent > numTargetComponents) + sourcesToUse = numTargetComponents - targetComponent; + + for (unsigned int s = 0; s < sourcesToUse; ++s) { + std::vector swiz; + swiz.push_back(s); + latchResult(createRvalueSwizzle(precision, scalarTypeId, sourceArg, swiz)); + } + }; + + // lambda to visit a matrix argument's components + const auto accumulateMatrixConstituents = [&](Id sourceArg) { + unsigned int sourceSize = getNumColumns(sourceArg) * getNumRows(sourceArg); + unsigned int sourcesToUse = sourceSize; + if (sourcesToUse + targetComponent > numTargetComponents) + sourcesToUse = numTargetComponents - targetComponent; + + int col = 0; + int row = 0; + for (unsigned int s = 0; s < sourcesToUse; ++s) { + if (row >= getNumRows(sourceArg)) { + row = 0; + col++; + } + std::vector indexes; + indexes.push_back(col); + indexes.push_back(row); + latchResult(createCompositeExtract(sourceArg, scalarTypeId, indexes)); + row++; + } + }; + + // Go through the source arguments, each one could have either + // a single or multiple components to contribute. + for (unsigned int i = 0; i < sources.size(); ++i) { + + if (isScalar(sources[i]) || isPointer(sources[i])) + latchResult(sources[i]); + else if (isVector(sources[i])) + accumulateVectorConstituents(sources[i]); + else if (isMatrix(sources[i])) + accumulateMatrixConstituents(sources[i]); + else + assert(0); + + if (targetComponent >= numTargetComponents) + break; + } + + // If the result is a vector, make it from the gathered constituents. 
+ if (constituents.size() > 0) + result = createCompositeConstruct(resultTypeId, constituents); + + return setPrecision(result, precision); +} + +// Comments in header +Id Builder::createMatrixConstructor(Decoration precision, const std::vector& sources, Id resultTypeId) +{ + Id componentTypeId = getScalarTypeId(resultTypeId); + int numCols = getTypeNumColumns(resultTypeId); + int numRows = getTypeNumRows(resultTypeId); + + Instruction* instr = module.getInstruction(componentTypeId); +#ifdef GLSLANG_WEB + const unsigned bitCount = 32; + assert(bitCount == instr->getImmediateOperand(0)); +#else + const unsigned bitCount = instr->getImmediateOperand(0); +#endif + + // Optimize matrix constructed from a bigger matrix + if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) { + // To truncate the matrix to a smaller number of rows/columns, we need to: + // 1. For each column, extract the column and truncate it to the required size using shuffle + // 2. Assemble the resulting matrix from all columns + Id matrix = sources[0]; + Id columnTypeId = getContainedTypeId(resultTypeId); + Id sourceColumnTypeId = getContainedTypeId(getTypeId(matrix)); + + std::vector channels; + for (int row = 0; row < numRows; ++row) + channels.push_back(row); + + std::vector matrixColumns; + for (int col = 0; col < numCols; ++col) { + std::vector indexes; + indexes.push_back(col); + Id colv = createCompositeExtract(matrix, sourceColumnTypeId, indexes); + setPrecision(colv, precision); + + if (numRows != getNumRows(matrix)) { + matrixColumns.push_back(createRvalueSwizzle(precision, columnTypeId, colv, channels)); + } else { + matrixColumns.push_back(colv); + } + } + + return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision); + } + + // Otherwise, will use a two step process + // 1. make a compile-time 2D array of values + // 2. construct a matrix from that array + + // Step 1. + + // initialize the array to the identity matrix + Id ids[maxMatrixSize][maxMatrixSize]; + Id one = (bitCount == 64 ? makeDoubleConstant(1.0) : makeFloatConstant(1.0)); + Id zero = (bitCount == 64 ? 
makeDoubleConstant(0.0) : makeFloatConstant(0.0)); + for (int col = 0; col < 4; ++col) { + for (int row = 0; row < 4; ++row) { + if (col == row) + ids[col][row] = one; + else + ids[col][row] = zero; + } + } + + // modify components as dictated by the arguments + if (sources.size() == 1 && isScalar(sources[0])) { + // a single scalar; resets the diagonals + for (int col = 0; col < 4; ++col) + ids[col][col] = sources[0]; + } else if (isMatrix(sources[0])) { + // constructing from another matrix; copy over the parts that exist in both the argument and constructee + Id matrix = sources[0]; + int minCols = std::min(numCols, getNumColumns(matrix)); + int minRows = std::min(numRows, getNumRows(matrix)); + for (int col = 0; col < minCols; ++col) { + std::vector indexes; + indexes.push_back(col); + for (int row = 0; row < minRows; ++row) { + indexes.push_back(row); + ids[col][row] = createCompositeExtract(matrix, componentTypeId, indexes); + indexes.pop_back(); + setPrecision(ids[col][row], precision); + } + } + } else { + // fill in the matrix in column-major order with whatever argument components are available + int row = 0; + int col = 0; + + for (int arg = 0; arg < (int)sources.size(); ++arg) { + Id argComp = sources[arg]; + for (int comp = 0; comp < getNumComponents(sources[arg]); ++comp) { + if (getNumComponents(sources[arg]) > 1) { + argComp = createCompositeExtract(sources[arg], componentTypeId, comp); + setPrecision(argComp, precision); + } + ids[col][row++] = argComp; + if (row == numRows) { + row = 0; + col++; + } + } + } + } + + // Step 2: Construct a matrix from that array. + // First make the column vectors, then make the matrix. + + // make the column vectors + Id columnTypeId = getContainedTypeId(resultTypeId); + std::vector matrixColumns; + for (int col = 0; col < numCols; ++col) { + std::vector vectorComponents; + for (int row = 0; row < numRows; ++row) + vectorComponents.push_back(ids[col][row]); + Id column = createCompositeConstruct(columnTypeId, vectorComponents); + setPrecision(column, precision); + matrixColumns.push_back(column); + } + + // make the matrix + return setPrecision(createCompositeConstruct(resultTypeId, matrixColumns), precision); +} + +// Comments in header +Builder::If::If(Id cond, unsigned int ctrl, Builder& gb) : + builder(gb), + condition(cond), + control(ctrl), + elseBlock(0) +{ + function = &builder.getBuildPoint()->getParent(); + + // make the blocks, but only put the then-block into the function, + // the else-block and merge-block will be added later, in order, after + // earlier code is emitted + thenBlock = new Block(builder.getUniqueId(), *function); + mergeBlock = new Block(builder.getUniqueId(), *function); + + // Save the current block, so that we can add in the flow control split when + // makeEndIf is called. 
+ headerBlock = builder.getBuildPoint(); + + function->addBlock(thenBlock); + builder.setBuildPoint(thenBlock); +} + +// Comments in header +void Builder::If::makeBeginElse() +{ + // Close out the "then" by having it jump to the mergeBlock + builder.createBranch(mergeBlock); + + // Make the first else block and add it to the function + elseBlock = new Block(builder.getUniqueId(), *function); + function->addBlock(elseBlock); + + // Start building the else block + builder.setBuildPoint(elseBlock); +} + +// Comments in header +void Builder::If::makeEndIf() +{ + // jump to the merge block + builder.createBranch(mergeBlock); + + // Go back to the headerBlock and make the flow control split + builder.setBuildPoint(headerBlock); + builder.createSelectionMerge(mergeBlock, control); + if (elseBlock) + builder.createConditionalBranch(condition, thenBlock, elseBlock); + else + builder.createConditionalBranch(condition, thenBlock, mergeBlock); + + // add the merge block to the function + function->addBlock(mergeBlock); + builder.setBuildPoint(mergeBlock); +} + +// Comments in header +void Builder::makeSwitch(Id selector, unsigned int control, int numSegments, const std::vector& caseValues, + const std::vector& valueIndexToSegment, int defaultSegment, + std::vector& segmentBlocks) +{ + Function& function = buildPoint->getParent(); + + // make all the blocks + for (int s = 0; s < numSegments; ++s) + segmentBlocks.push_back(new Block(getUniqueId(), function)); + + Block* mergeBlock = new Block(getUniqueId(), function); + + // make and insert the switch's selection-merge instruction + createSelectionMerge(mergeBlock, control); + + // make the switch instruction + Instruction* switchInst = new Instruction(NoResult, NoType, OpSwitch); + switchInst->addIdOperand(selector); + auto defaultOrMerge = (defaultSegment >= 0) ? segmentBlocks[defaultSegment] : mergeBlock; + switchInst->addIdOperand(defaultOrMerge->getId()); + defaultOrMerge->addPredecessor(buildPoint); + for (int i = 0; i < (int)caseValues.size(); ++i) { + switchInst->addImmediateOperand(caseValues[i]); + switchInst->addIdOperand(segmentBlocks[valueIndexToSegment[i]]->getId()); + segmentBlocks[valueIndexToSegment[i]]->addPredecessor(buildPoint); + } + buildPoint->addInstruction(std::unique_ptr(switchInst)); + + // push the merge block + switchMerges.push(mergeBlock); +} + +// Comments in header +void Builder::addSwitchBreak() +{ + // branch to the top of the merge block stack + createBranch(switchMerges.top()); + createAndSetNoPredecessorBlock("post-switch-break"); +} + +// Comments in header +void Builder::nextSwitchSegment(std::vector& segmentBlock, int nextSegment) +{ + int lastSegment = nextSegment - 1; + if (lastSegment >= 0) { + // Close out previous segment by jumping, if necessary, to next segment + if (! buildPoint->isTerminated()) + createBranch(segmentBlock[nextSegment]); + } + Block* block = segmentBlock[nextSegment]; + block->getParent().addBlock(block); + setBuildPoint(block); +} + +// Comments in header +void Builder::endSwitch(std::vector& /*segmentBlock*/) +{ + // Close out previous segment by jumping, if necessary, to next segment + if (! 
+        addSwitchBreak();
+
+    switchMerges.top()->getParent().addBlock(switchMerges.top());
+    setBuildPoint(switchMerges.top());
+
+    switchMerges.pop();
+}
+
+Block& Builder::makeNewBlock()
+{
+    Function& function = buildPoint->getParent();
+    auto block = new Block(getUniqueId(), function);
+    function.addBlock(block);
+    return *block;
+}
+
+Builder::LoopBlocks& Builder::makeNewLoop()
+{
+    // This verbosity is needed to simultaneously get the same behavior
+    // everywhere (id's in the same order), have a syntax that works
+    // across lots of versions of C++, have no warnings from pedantic
+    // compilation modes, and leave the rest of the code alone.
+    Block& head = makeNewBlock();
+    Block& body = makeNewBlock();
+    Block& merge = makeNewBlock();
+    Block& continue_target = makeNewBlock();
+    LoopBlocks blocks(head, body, merge, continue_target);
+    loops.push(blocks);
+    return loops.top();
+}
+
+void Builder::createLoopContinue()
+{
+    createBranch(&loops.top().continue_target);
+    // Set up a block for dead code.
+    createAndSetNoPredecessorBlock("post-loop-continue");
+}
+
+void Builder::createLoopExit()
+{
+    createBranch(&loops.top().merge);
+    // Set up a block for dead code.
+    createAndSetNoPredecessorBlock("post-loop-break");
+}
+
+void Builder::closeLoop()
+{
+    loops.pop();
+}
+
+void Builder::clearAccessChain()
+{
+    accessChain.base = NoResult;
+    accessChain.indexChain.clear();
+    accessChain.instr = NoResult;
+    accessChain.swizzle.clear();
+    accessChain.component = NoResult;
+    accessChain.preSwizzleBaseType = NoType;
+    accessChain.isRValue = false;
+    accessChain.coherentFlags.clear();
+    accessChain.alignment = 0;
+}
+
+// Comments in header
+void Builder::accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType,
+    AccessChain::CoherentFlags coherentFlags, unsigned int alignment)
+{
+    accessChain.coherentFlags |= coherentFlags;
+    accessChain.alignment |= alignment;
+
+    // swizzles can be stacked in GLSL, but simplified to a single
+    // one here; the base type doesn't change
+    if (accessChain.preSwizzleBaseType == NoType)
+        accessChain.preSwizzleBaseType = preSwizzleBaseType;
+
+    // if needed, propagate the swizzle for the current access chain
+    if (accessChain.swizzle.size() > 0) {
+        std::vector<unsigned> oldSwizzle = accessChain.swizzle;
+        accessChain.swizzle.resize(0);
+        for (unsigned int i = 0; i < swizzle.size(); ++i) {
+            assert(swizzle[i] < oldSwizzle.size());
+            accessChain.swizzle.push_back(oldSwizzle[swizzle[i]]);
+        }
+    } else
+        accessChain.swizzle = swizzle;
+
+    // determine if we need to track this swizzle anymore
+    simplifyAccessChainSwizzle();
+}
+
+// Comments in header
+void Builder::accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment)
+{
+    assert(accessChain.isRValue == false);
+
+    transferAccessChainSwizzle(true);
+    Id base = collapseAccessChain();
+    Id source = rvalue;
+
+    // dynamic component should be gone
+    assert(accessChain.component == NoResult);
+
+    // If swizzle still exists, it is out-of-order or not full, we must load the target vector,
+    // extract and insert elements to perform writeMask and/or swizzle.
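+    // (For example, a write-masked or reordered store such as "v.zx = u" cannot be
+    // done as a plain store through the pointer: the target vector is loaded below,
+    // the new components are shuffled in via createLvalueSwizzle, and the whole
+    // vector is then stored back.)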
+ if (accessChain.swizzle.size() > 0) { + Id tempBaseId = createLoad(base, spv::NoPrecision); + source = createLvalueSwizzle(getTypeId(tempBaseId), tempBaseId, source, accessChain.swizzle); + } + + // take LSB of alignment + alignment = alignment & ~(alignment & (alignment-1)); + if (getStorageClass(base) == StorageClassPhysicalStorageBufferEXT) { + memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask); + } + + createStore(source, base, memoryAccess, scope, alignment); +} + +// Comments in header +Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resultType, + spv::MemoryAccessMask memoryAccess, spv::Scope scope, unsigned int alignment) +{ + Id id; + + if (accessChain.isRValue) { + // transfer access chain, but try to stay in registers + transferAccessChainSwizzle(false); + if (accessChain.indexChain.size() > 0) { + Id swizzleBase = accessChain.preSwizzleBaseType != NoType ? accessChain.preSwizzleBaseType : resultType; + + // if all the accesses are constants, we can use OpCompositeExtract + std::vector indexes; + bool constant = true; + for (int i = 0; i < (int)accessChain.indexChain.size(); ++i) { + if (isConstantScalar(accessChain.indexChain[i])) + indexes.push_back(getConstantScalar(accessChain.indexChain[i])); + else { + constant = false; + break; + } + } + + if (constant) { + id = createCompositeExtract(accessChain.base, swizzleBase, indexes); + setPrecision(id, precision); + } else { + Id lValue = NoResult; + if (spvVersion >= Spv_1_4 && isValidInitializer(accessChain.base)) { + // make a new function variable for this r-value, using an initializer, + // and mark it as NonWritable so that downstream it can be detected as a lookup + // table + lValue = createVariable(NoPrecision, StorageClassFunction, getTypeId(accessChain.base), + "indexable", accessChain.base); + addDecoration(lValue, DecorationNonWritable); + } else { + lValue = createVariable(NoPrecision, StorageClassFunction, getTypeId(accessChain.base), + "indexable"); + // store into it + createStore(accessChain.base, lValue); + } + // move base to the new variable + accessChain.base = lValue; + accessChain.isRValue = false; + + // load through the access chain + id = createLoad(collapseAccessChain(), precision); + } + } else + id = accessChain.base; // no precision, it was set when this was defined + } else { + transferAccessChainSwizzle(true); + + // take LSB of alignment + alignment = alignment & ~(alignment & (alignment-1)); + if (getStorageClass(accessChain.base) == StorageClassPhysicalStorageBufferEXT) { + memoryAccess = (spv::MemoryAccessMask)(memoryAccess | spv::MemoryAccessAlignedMask); + } + + // load through the access chain + id = collapseAccessChain(); + // Apply nonuniform both to the access chain and the loaded value. + // Buffer accesses need the access chain decorated, and this is where + // loaded image types get decorated. TODO: This should maybe move to + // createImageTextureFunctionCall. 
+ addDecoration(id, nonUniform); + id = createLoad(id, precision, memoryAccess, scope, alignment); + addDecoration(id, nonUniform); + } + + // Done, unless there are swizzles to do + if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult) + return id; + + // Do remaining swizzling + + // Do the basic swizzle + if (accessChain.swizzle.size() > 0) { + Id swizzledType = getScalarTypeId(getTypeId(id)); + if (accessChain.swizzle.size() > 1) + swizzledType = makeVectorType(swizzledType, (int)accessChain.swizzle.size()); + id = createRvalueSwizzle(precision, swizzledType, id, accessChain.swizzle); + } + + // Do the dynamic component + if (accessChain.component != NoResult) + id = setPrecision(createVectorExtractDynamic(id, resultType, accessChain.component), precision); + + addDecoration(id, nonUniform); + return id; +} + +Id Builder::accessChainGetLValue() +{ + assert(accessChain.isRValue == false); + + transferAccessChainSwizzle(true); + Id lvalue = collapseAccessChain(); + + // If swizzle exists, it is out-of-order or not full, we must load the target vector, + // extract and insert elements to perform writeMask and/or swizzle. This does not + // go with getting a direct l-value pointer. + assert(accessChain.swizzle.size() == 0); + assert(accessChain.component == NoResult); + + return lvalue; +} + +// comment in header +Id Builder::accessChainGetInferredType() +{ + // anything to operate on? + if (accessChain.base == NoResult) + return NoType; + Id type = getTypeId(accessChain.base); + + // do initial dereference + if (! accessChain.isRValue) + type = getContainedTypeId(type); + + // dereference each index + for (auto it = accessChain.indexChain.cbegin(); it != accessChain.indexChain.cend(); ++it) { + if (isStructType(type)) + type = getContainedTypeId(type, getConstantScalar(*it)); + else + type = getContainedTypeId(type); + } + + // dereference swizzle + if (accessChain.swizzle.size() == 1) + type = getContainedTypeId(type); + else if (accessChain.swizzle.size() > 1) + type = makeVectorType(getContainedTypeId(type), (int)accessChain.swizzle.size()); + + // dereference component selection + if (accessChain.component) + type = getContainedTypeId(type); + + return type; +} + +void Builder::dump(std::vector& out) const +{ + // Header, before first instructions: + out.push_back(MagicNumber); + out.push_back(spvVersion); + out.push_back(builderNumber); + out.push_back(uniqueId + 1); + out.push_back(0); + + // Capabilities + for (auto it = capabilities.cbegin(); it != capabilities.cend(); ++it) { + Instruction capInst(0, 0, OpCapability); + capInst.addImmediateOperand(*it); + capInst.dump(out); + } + + for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) { + Instruction extInst(0, 0, OpExtension); + extInst.addStringOperand(it->c_str()); + extInst.dump(out); + } + + dumpInstructions(out, imports); + Instruction memInst(0, 0, OpMemoryModel); + memInst.addImmediateOperand(addressModel); + memInst.addImmediateOperand(memoryModel); + memInst.dump(out); + + // Instructions saved up while building: + dumpInstructions(out, entryPoints); + dumpInstructions(out, executionModes); + + // Debug instructions + dumpInstructions(out, strings); + dumpSourceInstructions(out); + for (int e = 0; e < (int)sourceExtensions.size(); ++e) { + Instruction sourceExtInst(0, 0, OpSourceExtension); + sourceExtInst.addStringOperand(sourceExtensions[e]); + sourceExtInst.dump(out); + } + dumpInstructions(out, names); + dumpModuleProcesses(out); + + // Annotation instructions + dumpInstructions(out, 
decorations); + + dumpInstructions(out, constantsTypesGlobals); + dumpInstructions(out, externals); + + // The functions + module.dump(out); +} + +// +// Protected methods. +// + +// Turn the described access chain in 'accessChain' into an instruction(s) +// computing its address. This *cannot* include complex swizzles, which must +// be handled after this is called. +// +// Can generate code. +Id Builder::collapseAccessChain() +{ + assert(accessChain.isRValue == false); + + // did we already emit an access chain for this? + if (accessChain.instr != NoResult) + return accessChain.instr; + + // If we have a dynamic component, we can still transfer + // that into a final operand to the access chain. We need to remap the + // dynamic component through the swizzle to get a new dynamic component to + // update. + // + // This was not done in transferAccessChainSwizzle() because it might + // generate code. + remapDynamicSwizzle(); + if (accessChain.component != NoResult) { + // transfer the dynamic component to the access chain + accessChain.indexChain.push_back(accessChain.component); + accessChain.component = NoResult; + } + + // note that non-trivial swizzling is left pending + + // do we have an access chain? + if (accessChain.indexChain.size() == 0) + return accessChain.base; + + // emit the access chain + StorageClass storageClass = (StorageClass)module.getStorageClass(getTypeId(accessChain.base)); + accessChain.instr = createAccessChain(storageClass, accessChain.base, accessChain.indexChain); + + return accessChain.instr; +} + +// For a dynamic component selection of a swizzle. +// +// Turn the swizzle and dynamic component into just a dynamic component. +// +// Generates code. +void Builder::remapDynamicSwizzle() +{ + // do we have a swizzle to remap a dynamic component through? + if (accessChain.component != NoResult && accessChain.swizzle.size() > 1) { + // build a vector of the swizzle for the component to map into + std::vector components; + for (int c = 0; c < (int)accessChain.swizzle.size(); ++c) + components.push_back(makeUintConstant(accessChain.swizzle[c])); + Id mapType = makeVectorType(makeUintType(32), (int)accessChain.swizzle.size()); + Id map = makeCompositeConstant(mapType, components); + + // use it + accessChain.component = createVectorExtractDynamic(map, makeUintType(32), accessChain.component); + accessChain.swizzle.clear(); + } +} + +// clear out swizzle if it is redundant, that is reselecting the same components +// that would be present without the swizzle. +void Builder::simplifyAccessChainSwizzle() +{ + // If the swizzle has fewer components than the vector, it is subsetting, and must stay + // to preserve that fact. + if (getNumTypeComponents(accessChain.preSwizzleBaseType) > (int)accessChain.swizzle.size()) + return; + + // if components are out of order, it is a swizzle + for (unsigned int i = 0; i < accessChain.swizzle.size(); ++i) { + if (i != accessChain.swizzle[i]) + return; + } + + // otherwise, there is no need to track this swizzle + accessChain.swizzle.clear(); + if (accessChain.component == NoResult) + accessChain.preSwizzleBaseType = NoType; +} + +// To the extent any swizzling can become part of the chain +// of accesses instead of a post operation, make it so. +// If 'dynamic' is true, include transferring the dynamic component, +// otherwise, leave it pending. +// +// Does not generate code. just updates the access chain. +void Builder::transferAccessChainSwizzle(bool dynamic) +{ + // non existent? 
+ if (accessChain.swizzle.size() == 0 && accessChain.component == NoResult) + return; + + // too complex? + // (this requires either a swizzle, or generating code for a dynamic component) + if (accessChain.swizzle.size() > 1) + return; + + // single component, either in the swizzle and/or dynamic component + if (accessChain.swizzle.size() == 1) { + assert(accessChain.component == NoResult); + // handle static component selection + accessChain.indexChain.push_back(makeUintConstant(accessChain.swizzle.front())); + accessChain.swizzle.clear(); + accessChain.preSwizzleBaseType = NoType; + } else if (dynamic && accessChain.component != NoResult) { + assert(accessChain.swizzle.size() == 0); + // handle dynamic component + accessChain.indexChain.push_back(accessChain.component); + accessChain.preSwizzleBaseType = NoType; + accessChain.component = NoResult; + } +} + +// Utility method for creating a new block and setting the insert point to +// be in it. This is useful for flow-control operations that need a "dummy" +// block proceeding them (e.g. instructions after a discard, etc). +void Builder::createAndSetNoPredecessorBlock(const char* /*name*/) +{ + Block* block = new Block(getUniqueId(), buildPoint->getParent()); + block->setUnreachable(); + buildPoint->getParent().addBlock(block); + setBuildPoint(block); + + // if (name) + // addName(block->getId(), name); +} + +// Comments in header +void Builder::createBranch(Block* block) +{ + Instruction* branch = new Instruction(OpBranch); + branch->addIdOperand(block->getId()); + buildPoint->addInstruction(std::unique_ptr(branch)); + block->addPredecessor(buildPoint); +} + +void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control) +{ + Instruction* merge = new Instruction(OpSelectionMerge); + merge->addIdOperand(mergeBlock->getId()); + merge->addImmediateOperand(control); + buildPoint->addInstruction(std::unique_ptr(merge)); +} + +void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, + const std::vector& operands) +{ + Instruction* merge = new Instruction(OpLoopMerge); + merge->addIdOperand(mergeBlock->getId()); + merge->addIdOperand(continueBlock->getId()); + merge->addImmediateOperand(control); + for (int op = 0; op < (int)operands.size(); ++op) + merge->addImmediateOperand(operands[op]); + buildPoint->addInstruction(std::unique_ptr(merge)); +} + +void Builder::createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock) +{ + Instruction* branch = new Instruction(OpBranchConditional); + branch->addIdOperand(condition); + branch->addIdOperand(thenBlock->getId()); + branch->addIdOperand(elseBlock->getId()); + buildPoint->addInstruction(std::unique_ptr(branch)); + thenBlock->addPredecessor(buildPoint); + elseBlock->addPredecessor(buildPoint); +} + +// OpSource +// [OpSourceContinued] +// ... 
+void Builder::dumpSourceInstructions(const spv::Id fileId, const std::string& text, + std::vector& out) const +{ + const int maxWordCount = 0xFFFF; + const int opSourceWordCount = 4; + const int nonNullBytesPerInstruction = 4 * (maxWordCount - opSourceWordCount) - 1; + + if (source != SourceLanguageUnknown) { + // OpSource Language Version File Source + Instruction sourceInst(NoResult, NoType, OpSource); + sourceInst.addImmediateOperand(source); + sourceInst.addImmediateOperand(sourceVersion); + // File operand + if (fileId != NoResult) { + sourceInst.addIdOperand(fileId); + // Source operand + if (text.size() > 0) { + int nextByte = 0; + std::string subString; + while ((int)text.size() - nextByte > 0) { + subString = text.substr(nextByte, nonNullBytesPerInstruction); + if (nextByte == 0) { + // OpSource + sourceInst.addStringOperand(subString.c_str()); + sourceInst.dump(out); + } else { + // OpSourcContinued + Instruction sourceContinuedInst(OpSourceContinued); + sourceContinuedInst.addStringOperand(subString.c_str()); + sourceContinuedInst.dump(out); + } + nextByte += nonNullBytesPerInstruction; + } + } else + sourceInst.dump(out); + } else + sourceInst.dump(out); + } +} + +// Dump an OpSource[Continued] sequence for the source and every include file +void Builder::dumpSourceInstructions(std::vector& out) const +{ + dumpSourceInstructions(sourceFileStringId, sourceText, out); + for (auto iItr = includeFiles.begin(); iItr != includeFiles.end(); ++iItr) + dumpSourceInstructions(iItr->first, *iItr->second, out); +} + +void Builder::dumpInstructions(std::vector& out, + const std::vector >& instructions) const +{ + for (int i = 0; i < (int)instructions.size(); ++i) { + instructions[i]->dump(out); + } +} + +void Builder::dumpModuleProcesses(std::vector& out) const +{ + for (int i = 0; i < (int)moduleProcesses.size(); ++i) { + Instruction moduleProcessed(OpModuleProcessed); + moduleProcessed.addStringOperand(moduleProcesses[i]); + moduleProcessed.dump(out); + } +} + +}; // end spv namespace diff --git a/src/third_party/glslang/SPIRV/SpvBuilder.h b/src/third_party/glslang/SPIRV/SpvBuilder.h new file mode 100644 index 0000000..077945e --- /dev/null +++ b/src/third_party/glslang/SPIRV/SpvBuilder.h @@ -0,0 +1,854 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// "Builder" is an interface to fully build SPIR-V IR. Allocate one of +// these to build (a thread safe) internal SPIR-V representation (IR), +// and then dump it as a binary stream according to the SPIR-V specification. +// +// A Builder has a 1:1 relationship with a SPIR-V module. +// + +#pragma once +#ifndef SpvBuilder_H +#define SpvBuilder_H + +#include "Logger.h" +#include "spirv.hpp" +#include "spvIR.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace spv { + +typedef enum { + Spv_1_0 = (1 << 16), + Spv_1_1 = (1 << 16) | (1 << 8), + Spv_1_2 = (1 << 16) | (2 << 8), + Spv_1_3 = (1 << 16) | (3 << 8), + Spv_1_4 = (1 << 16) | (4 << 8), + Spv_1_5 = (1 << 16) | (5 << 8), +} SpvVersion; + +class Builder { +public: + Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger); + virtual ~Builder(); + + static const int maxMatrixSize = 4; + + unsigned int getSpvVersion() const { return spvVersion; } + + void setSource(spv::SourceLanguage lang, int version) + { + source = lang; + sourceVersion = version; + } + spv::Id getStringId(const std::string& str) + { + auto sItr = stringIds.find(str); + if (sItr != stringIds.end()) + return sItr->second; + spv::Id strId = getUniqueId(); + Instruction* fileString = new Instruction(strId, NoType, OpString); + const char* file_c_str = str.c_str(); + fileString->addStringOperand(file_c_str); + strings.push_back(std::unique_ptr(fileString)); + module.mapInstruction(fileString); + stringIds[file_c_str] = strId; + return strId; + } + void setSourceFile(const std::string& file) + { + sourceFileStringId = getStringId(file); + } + void setSourceText(const std::string& text) { sourceText = text; } + void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); } + void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); } + void setEmitOpLines() { emitOpLines = true; } + void addExtension(const char* ext) { extensions.insert(ext); } + void removeExtension(const char* ext) + { + extensions.erase(ext); + } + void addIncorporatedExtension(const char* ext, SpvVersion incorporatedVersion) + { + if (getSpvVersion() < static_cast(incorporatedVersion)) + addExtension(ext); + } + void promoteIncorporatedExtension(const char* baseExt, const char* promoExt, SpvVersion incorporatedVersion) + { + removeExtension(baseExt); + addIncorporatedExtension(promoExt, incorporatedVersion); + } + void addInclude(const std::string& name, const std::string& text) + { + spv::Id incId = getStringId(name); + includeFiles[incId] = &text; + } + Id import(const char*); + void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem) + { + addressModel = addr; + memoryModel = mem; + } + + void addCapability(spv::Capability cap) { capabilities.insert(cap); } + + // To get a new for anything needing a new one. 
+ Id getUniqueId() { return ++uniqueId; } + + // To get a set of new s, e.g., for a set of function parameters + Id getUniqueIds(int numIds) + { + Id id = uniqueId + 1; + uniqueId += numIds; + return id; + } + + // Generate OpLine for non-filename-based #line directives (ie no filename + // seen yet): Log the current line, and if different than the last one, + // issue a new OpLine using the new line and current source file name. + void setLine(int line); + + // If filename null, generate OpLine for non-filename-based line directives, + // else do filename-based: Log the current line and file, and if different + // than the last one, issue a new OpLine using the new line and file + // name. + void setLine(int line, const char* filename); + // Low-level OpLine. See setLine() for a layered helper. + void addLine(Id fileName, int line, int column); + + // For creating new types (will return old type if the requested one was already made). + Id makeVoidType(); + Id makeBoolType(); + Id makePointer(StorageClass, Id pointee); + Id makeForwardPointer(StorageClass); + Id makePointerFromForwardPointer(StorageClass, Id forwardPointerType, Id pointee); + Id makeIntegerType(int width, bool hasSign); // generic + Id makeIntType(int width) { return makeIntegerType(width, true); } + Id makeUintType(int width) { return makeIntegerType(width, false); } + Id makeFloatType(int width); + Id makeStructType(const std::vector& members, const char*); + Id makeStructResultType(Id type0, Id type1); + Id makeVectorType(Id component, int size); + Id makeMatrixType(Id component, int cols, int rows); + Id makeArrayType(Id element, Id sizeId, int stride); // 0 stride means no stride decoration + Id makeRuntimeArray(Id element); + Id makeFunctionType(Id returnType, const std::vector& paramTypes); + Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format); + Id makeSamplerType(); + Id makeSampledImageType(Id imageType); + Id makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols); + + // accelerationStructureNV type + Id makeAccelerationStructureType(); + // rayQueryEXT type + Id makeRayQueryType(); + + // For querying about types. 
+ Id getTypeId(Id resultId) const { return module.getTypeId(resultId); } + Id getDerefTypeId(Id resultId) const; + Op getOpCode(Id id) const { return module.getInstruction(id)->getOpCode(); } + Op getTypeClass(Id typeId) const { return getOpCode(typeId); } + Op getMostBasicTypeClass(Id typeId) const; + int getNumComponents(Id resultId) const { return getNumTypeComponents(getTypeId(resultId)); } + int getNumTypeConstituents(Id typeId) const; + int getNumTypeComponents(Id typeId) const { return getNumTypeConstituents(typeId); } + Id getScalarTypeId(Id typeId) const; + Id getContainedTypeId(Id typeId) const; + Id getContainedTypeId(Id typeId, int) const; + StorageClass getTypeStorageClass(Id typeId) const { return module.getStorageClass(typeId); } + ImageFormat getImageTypeFormat(Id typeId) const + { return (ImageFormat)module.getInstruction(typeId)->getImmediateOperand(6); } + + bool isPointer(Id resultId) const { return isPointerType(getTypeId(resultId)); } + bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); } + bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); } + bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); } + bool isCooperativeMatrix(Id resultId)const { return isCooperativeMatrixType(getTypeId(resultId)); } + bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); } + bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); } + + bool isBoolType(Id typeId) + { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); } + bool isIntType(Id typeId) const + { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) != 0; } + bool isUintType(Id typeId) const + { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) == 0; } + bool isFloatType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat; } + bool isPointerType(Id typeId) const { return getTypeClass(typeId) == OpTypePointer; } + bool isScalarType(Id typeId) const + { return getTypeClass(typeId) == OpTypeFloat || getTypeClass(typeId) == OpTypeInt || + getTypeClass(typeId) == OpTypeBool; } + bool isVectorType(Id typeId) const { return getTypeClass(typeId) == OpTypeVector; } + bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; } + bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; } + bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; } +#ifdef GLSLANG_WEB + bool isCooperativeMatrixType(Id typeId)const { return false; } +#else + bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; } +#endif + bool isAggregateType(Id typeId) const + { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); } + bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; } + bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; } + bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; } + bool containsType(Id typeId, Op typeOp, unsigned int width) const; + bool containsPhysicalStorageBufferOrArray(Id typeId) const; + + bool isConstantOpCode(Op opcode) const; + bool isSpecConstantOpCode(Op opcode) const; + bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); } + bool isConstantScalar(Id 
resultId) const { return getOpCode(resultId) == OpConstant; } + bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); } + unsigned int getConstantScalar(Id resultId) const + { return module.getInstruction(resultId)->getImmediateOperand(0); } + StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); } + + bool isVariableOpCode(Op opcode) const { return opcode == OpVariable; } + bool isVariable(Id resultId) const { return isVariableOpCode(getOpCode(resultId)); } + bool isGlobalStorage(Id resultId) const { return getStorageClass(resultId) != StorageClassFunction; } + bool isGlobalVariable(Id resultId) const { return isVariable(resultId) && isGlobalStorage(resultId); } + // See if a resultId is valid for use as an initializer. + bool isValidInitializer(Id resultId) const { return isConstant(resultId) || isGlobalVariable(resultId); } + + int getScalarTypeWidth(Id typeId) const + { + Id scalarTypeId = getScalarTypeId(typeId); + assert(getTypeClass(scalarTypeId) == OpTypeInt || getTypeClass(scalarTypeId) == OpTypeFloat); + return module.getInstruction(scalarTypeId)->getImmediateOperand(0); + } + + int getTypeNumColumns(Id typeId) const + { + assert(isMatrixType(typeId)); + return getNumTypeConstituents(typeId); + } + int getNumColumns(Id resultId) const { return getTypeNumColumns(getTypeId(resultId)); } + int getTypeNumRows(Id typeId) const + { + assert(isMatrixType(typeId)); + return getNumTypeComponents(getContainedTypeId(typeId)); + } + int getNumRows(Id resultId) const { return getTypeNumRows(getTypeId(resultId)); } + + Dim getTypeDimensionality(Id typeId) const + { + assert(isImageType(typeId)); + return (Dim)module.getInstruction(typeId)->getImmediateOperand(1); + } + Id getImageType(Id resultId) const + { + Id typeId = getTypeId(resultId); + assert(isImageType(typeId) || isSampledImageType(typeId)); + return isSampledImageType(typeId) ? module.getInstruction(typeId)->getIdOperand(0) : typeId; + } + bool isArrayedImageType(Id typeId) const + { + assert(isImageType(typeId)); + return module.getInstruction(typeId)->getImmediateOperand(3) != 0; + } + + // For making new constants (will return old constant if the requested one was already made). 
+ Id makeBoolConstant(bool b, bool specConstant = false); + Id makeInt8Constant(int i, bool specConstant = false) + { return makeIntConstant(makeIntType(8), (unsigned)i, specConstant); } + Id makeUint8Constant(unsigned u, bool specConstant = false) + { return makeIntConstant(makeUintType(8), u, specConstant); } + Id makeInt16Constant(int i, bool specConstant = false) + { return makeIntConstant(makeIntType(16), (unsigned)i, specConstant); } + Id makeUint16Constant(unsigned u, bool specConstant = false) + { return makeIntConstant(makeUintType(16), u, specConstant); } + Id makeIntConstant(int i, bool specConstant = false) + { return makeIntConstant(makeIntType(32), (unsigned)i, specConstant); } + Id makeUintConstant(unsigned u, bool specConstant = false) + { return makeIntConstant(makeUintType(32), u, specConstant); } + Id makeInt64Constant(long long i, bool specConstant = false) + { return makeInt64Constant(makeIntType(64), (unsigned long long)i, specConstant); } + Id makeUint64Constant(unsigned long long u, bool specConstant = false) + { return makeInt64Constant(makeUintType(64), u, specConstant); } + Id makeFloatConstant(float f, bool specConstant = false); + Id makeDoubleConstant(double d, bool specConstant = false); + Id makeFloat16Constant(float f16, bool specConstant = false); + Id makeFpConstant(Id type, double d, bool specConstant = false); + + // Turn the array of constants into a proper spv constant of the requested type. + Id makeCompositeConstant(Id type, const std::vector& comps, bool specConst = false); + + // Methods for adding information outside the CFG. + Instruction* addEntryPoint(ExecutionModel, Function*, const char* name); + void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1); + void addExecutionMode(Function*, ExecutionMode mode, const std::vector& literals); + void addExecutionModeId(Function*, ExecutionMode mode, const std::vector& operandIds); + void addName(Id, const char* name); + void addMemberName(Id, int member, const char* name); + void addDecoration(Id, Decoration, int num = -1); + void addDecoration(Id, Decoration, const char*); + void addDecoration(Id, Decoration, const std::vector& literals); + void addDecoration(Id, Decoration, const std::vector& strings); + void addDecorationId(Id id, Decoration, Id idDecoration); + void addDecorationId(Id id, Decoration, const std::vector& operandIds); + void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1); + void addMemberDecoration(Id, unsigned int member, Decoration, const char*); + void addMemberDecoration(Id, unsigned int member, Decoration, const std::vector& literals); + void addMemberDecoration(Id, unsigned int member, Decoration, const std::vector& strings); + + // At the end of what block do the next create*() instructions go? + void setBuildPoint(Block* bp) { buildPoint = bp; } + Block* getBuildPoint() const { return buildPoint; } + + // Make the entry-point function. The returned pointer is only valid + // for the lifetime of this builder. + Function* makeEntryPoint(const char*); + + // Make a shader-style function, and create its entry block if entry is non-zero. + // Return the function, pass back the entry. + // The returned pointer is only valid for the lifetime of this builder. + Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name, + const std::vector& paramTypes, const std::vector>& precisions, Block **entry = 0); + + // Create a return. 
An 'implicit' return is one not appearing in the source + // code. In the case of an implicit return, no post-return block is inserted. + void makeReturn(bool implicit, Id retVal = 0); + + // Generate all the code needed to finish up a function. + void leaveFunction(); + + // Create a discard. + void makeDiscard(); + + // Create a global or function local or IO variable. + Id createVariable(Decoration precision, StorageClass, Id type, const char* name = nullptr, + Id initializer = NoResult); + + // Create an intermediate with an undefined value. + Id createUndefined(Id type); + + // Store into an Id and return the l-value + void createStore(Id rValue, Id lValue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, + spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0); + + // Load from an Id and return it + Id createLoad(Id lValue, spv::Decoration precision, + spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, + spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0); + + // Create an OpAccessChain instruction + Id createAccessChain(StorageClass, Id base, const std::vector& offsets); + + // Create an OpArrayLength instruction + Id createArrayLength(Id base, unsigned int member); + + // Create an OpCooperativeMatrixLengthNV instruction + Id createCooperativeMatrixLength(Id type); + + // Create an OpCompositeExtract instruction + Id createCompositeExtract(Id composite, Id typeId, unsigned index); + Id createCompositeExtract(Id composite, Id typeId, const std::vector& indexes); + Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index); + Id createCompositeInsert(Id object, Id composite, Id typeId, const std::vector& indexes); + + Id createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex); + Id createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex); + + void createNoResultOp(Op); + void createNoResultOp(Op, Id operand); + void createNoResultOp(Op, const std::vector& operands); + void createNoResultOp(Op, const std::vector& operands); + void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask); + void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics); + Id createUnaryOp(Op, Id typeId, Id operand); + Id createBinOp(Op, Id typeId, Id operand1, Id operand2); + Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3); + Id createOp(Op, Id typeId, const std::vector& operands); + Id createOp(Op, Id typeId, const std::vector& operands); + Id createFunctionCall(spv::Function*, const std::vector&); + Id createSpecConstantOp(Op, Id typeId, const std::vector& operands, const std::vector& literals); + + // Take an rvalue (source) and a set of channels to extract from it to + // make a new rvalue, which is returned. + Id createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector& channels); + + // Take a copy of an lvalue (target) and a source of components, and set the + // source components into the lvalue where the 'channels' say to put them. + // An updated version of the target is returned. + // (No true lvalue or stores are used.) + Id createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector& channels); + + // If both the id and precision are valid, the id + // gets tagged with the requested precision. + // The passed in id is always the returned id, to simplify use patterns. 
+ Id setPrecision(Id id, Decoration precision) + { + if (precision != NoPrecision && id != NoResult) + addDecoration(id, precision); + + return id; + } + + // Can smear a scalar to a vector for the following forms: + // - promoteScalar(scalar, vector) // smear scalar to width of vector + // - promoteScalar(vector, scalar) // smear scalar to width of vector + // - promoteScalar(pointer, scalar) // smear scalar to width of what pointer points to + // - promoteScalar(scalar, scalar) // do nothing + // Other forms are not allowed. + // + // Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'. + // The type of the created vector is a vector of components of the same type as the scalar. + // + // Note: One of the arguments will change, with the result coming back that way rather than + // through the return value. + void promoteScalar(Decoration precision, Id& left, Id& right); + + // Make a value by smearing the scalar to fill the type. + // vectorType should be the correct type for making a vector of scalarVal. + // (No conversions are done.) + Id smearScalar(Decoration precision, Id scalarVal, Id vectorType); + + // Create a call to a built-in function. + Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector& args); + + // List of parameters used to create a texture operation + struct TextureParameters { + Id sampler; + Id coords; + Id bias; + Id lod; + Id Dref; + Id offset; + Id offsets; + Id gradX; + Id gradY; + Id sample; + Id component; + Id texelOut; + Id lodClamp; + Id granularity; + Id coarse; + bool nonprivate; + bool volatil; + }; + + // Select the correct texture operation based on all inputs, and emit the correct instruction + Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, + bool noImplicit, const TextureParameters&, ImageOperandsMask); + + // Emit the OpTextureQuery* instruction that was passed in. + // Figure out the right return value and type, and return it. + Id createTextureQueryCall(Op, const TextureParameters&, bool isUnsignedResult); + + Id createSamplePositionCall(Decoration precision, Id, Id); + + Id createBitFieldExtractCall(Decoration precision, Id, Id, Id, bool isSigned); + Id createBitFieldInsertCall(Decoration precision, Id, Id, Id, Id); + + // Reduction comparison for composites: For equal and not-equal resulting in a scalar. + Id createCompositeCompare(Decoration precision, Id, Id, bool /* true if for equal, false if for not-equal */); + + // OpCompositeConstruct + Id createCompositeConstruct(Id typeId, const std::vector& constituents); + + // vector or scalar constructor + Id createConstructor(Decoration precision, const std::vector& sources, Id resultTypeId); + + // matrix constructor + Id createMatrixConstructor(Decoration precision, const std::vector& sources, Id constructee); + + // Helper to use for building nested control flow with if-then-else. + class If { + public: + If(Id condition, unsigned int ctrl, Builder& builder); + ~If() {} + + void makeBeginElse(); + void makeEndIf(); + + private: + If(const If&); + If& operator=(If&); + + Builder& builder; + Id condition; + unsigned int control; + Function* function; + Block* headerBlock; + Block* thenBlock; + Block* elseBlock; + Block* mergeBlock; + }; + + // Make a switch statement. A switch has 'numSegments' of pieces of code, not containing + // any case/default labels, all separated by one or more case/default labels. 
Each possible + // case value v is a jump to the caseValues[v] segment. The defaultSegment is also in this + // number space. How to compute the value is given by 'condition', as in switch(condition). + // + // The SPIR-V Builder will maintain the stack of post-switch merge blocks for nested switches. + // + // Use a defaultSegment < 0 if there is no default segment (to branch to post switch). + // + // Returns the right set of basic blocks to start each code segment with, so that the caller's + // recursion stack can hold the memory for it. + // + void makeSwitch(Id condition, unsigned int control, int numSegments, const std::vector& caseValues, + const std::vector& valueToSegment, int defaultSegment, std::vector& segmentBB); + + // Add a branch to the innermost switch's merge block. + void addSwitchBreak(); + + // Move to the next code segment, passing in the return argument in makeSwitch() + void nextSwitchSegment(std::vector& segmentBB, int segment); + + // Finish off the innermost switch. + void endSwitch(std::vector& segmentBB); + + struct LoopBlocks { + LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) : + head(head), body(body), merge(merge), continue_target(continue_target) { } + Block &head, &body, &merge, &continue_target; + private: + LoopBlocks(); + LoopBlocks& operator=(const LoopBlocks&) = delete; + }; + + // Start a new loop and prepare the builder to generate code for it. Until + // closeLoop() is called for this loop, createLoopContinue() and + // createLoopExit() will target its corresponding blocks. + LoopBlocks& makeNewLoop(); + + // Create a new block in the function containing the build point. Memory is + // owned by the function object. + Block& makeNewBlock(); + + // Add a branch to the continue_target of the current (innermost) loop. + void createLoopContinue(); + + // Add an exit (e.g. "break") from the innermost loop that we're currently + // in. + void createLoopExit(); + + // Close the innermost loop that you're in + void closeLoop(); + + // + // Access chain design for an R-Value vs. L-Value: + // + // There is a single access chain the builder is building at + // any particular time. Such a chain can be used to either to a load or + // a store, when desired. + // + // Expressions can be r-values, l-values, or both, or only r-values: + // a[b.c].d = .... // l-value + // ... = a[b.c].d; // r-value, that also looks like an l-value + // ++a[b.c].d; // r-value and l-value + // (x + y)[2]; // r-value only, can't possibly be l-value + // + // Computing an r-value means generating code. Hence, + // r-values should only be computed when they are needed, not speculatively. + // + // Computing an l-value means saving away information for later use in the compiler, + // no code is generated until the l-value is later dereferenced. It is okay + // to speculatively generate an l-value, just not okay to speculatively dereference it. + // + // The base of the access chain (the left-most variable or expression + // from which everything is based) can be set either as an l-value + // or as an r-value. Most efficient would be to set an l-value if one + // is available. If an expression was evaluated, the resulting r-value + // can be set as the chain base. + // + // The users of this single access chain can save and restore if they + // want to nest or manage multiple chains. 
+ // + + struct AccessChain { + Id base; // for l-values, pointer to the base object, for r-values, the base object + std::vector indexChain; + Id instr; // cache the instruction that generates this access chain + std::vector swizzle; // each std::vector element selects the next GLSL component number + Id component; // a dynamic component index, can coexist with a swizzle, + // done after the swizzle, NoResult if not present + Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; + // NoType unless a swizzle or component is present + bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value + unsigned int alignment; // bitwise OR of alignment values passed in. Accumulates worst alignment. + // Only tracks base and (optional) component selection alignment. + + // Accumulate whether anything in the chain of structures has coherent decorations. + struct CoherentFlags { + CoherentFlags() { clear(); } +#ifdef GLSLANG_WEB + void clear() { } + bool isVolatile() const { return false; } + CoherentFlags operator |=(const CoherentFlags &other) { return *this; } +#else + bool isVolatile() const { return volatil; } + bool anyCoherent() const { + return coherent || devicecoherent || queuefamilycoherent || workgroupcoherent || + subgroupcoherent || shadercallcoherent; + } + + unsigned coherent : 1; + unsigned devicecoherent : 1; + unsigned queuefamilycoherent : 1; + unsigned workgroupcoherent : 1; + unsigned subgroupcoherent : 1; + unsigned shadercallcoherent : 1; + unsigned nonprivate : 1; + unsigned volatil : 1; + unsigned isImage : 1; + + void clear() { + coherent = 0; + devicecoherent = 0; + queuefamilycoherent = 0; + workgroupcoherent = 0; + subgroupcoherent = 0; + shadercallcoherent = 0; + nonprivate = 0; + volatil = 0; + isImage = 0; + } + + CoherentFlags operator |=(const CoherentFlags &other) { + coherent |= other.coherent; + devicecoherent |= other.devicecoherent; + queuefamilycoherent |= other.queuefamilycoherent; + workgroupcoherent |= other.workgroupcoherent; + subgroupcoherent |= other.subgroupcoherent; + shadercallcoherent |= other.shadercallcoherent; + nonprivate |= other.nonprivate; + volatil |= other.volatil; + isImage |= other.isImage; + return *this; + } +#endif + }; + CoherentFlags coherentFlags; + }; + + // + // the SPIR-V builder maintains a single active chain that + // the following methods operate on + // + + // for external save and restore + AccessChain getAccessChain() { return accessChain; } + void setAccessChain(AccessChain newChain) { accessChain = newChain; } + + // clear accessChain + void clearAccessChain(); + + // set new base as an l-value base + void setAccessChainLValue(Id lValue) + { + assert(isPointer(lValue)); + accessChain.base = lValue; + } + + // set new base value as an r-value + void setAccessChainRValue(Id rValue) + { + accessChain.isRValue = true; + accessChain.base = rValue; + } + + // push offset onto the end of the chain + void accessChainPush(Id offset, AccessChain::CoherentFlags coherentFlags, unsigned int alignment) + { + accessChain.indexChain.push_back(offset); + accessChain.coherentFlags |= coherentFlags; + accessChain.alignment |= alignment; + } + + // push new swizzle onto the end of any existing swizzle, merging into a single swizzle + void accessChainPushSwizzle(std::vector& swizzle, Id preSwizzleBaseType, + AccessChain::CoherentFlags coherentFlags, unsigned int alignment); + + // push a dynamic component selection onto the access chain, only applicable with a + // non-trivial swizzle or no 
swizzle + void accessChainPushComponent(Id component, Id preSwizzleBaseType, AccessChain::CoherentFlags coherentFlags, + unsigned int alignment) + { + if (accessChain.swizzle.size() != 1) { + accessChain.component = component; + if (accessChain.preSwizzleBaseType == NoType) + accessChain.preSwizzleBaseType = preSwizzleBaseType; + } + accessChain.coherentFlags |= coherentFlags; + accessChain.alignment |= alignment; + } + + // use accessChain and swizzle to store value + void accessChainStore(Id rvalue, spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, + spv::Scope scope = spv::ScopeMax, unsigned int alignment = 0); + + // use accessChain and swizzle to load an r-value + Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType, + spv::MemoryAccessMask memoryAccess = spv::MemoryAccessMaskNone, spv::Scope scope = spv::ScopeMax, + unsigned int alignment = 0); + + // Return whether or not the access chain can be represented in SPIR-V + // as an l-value. + // E.g., a[3].yx cannot be, while a[3].y and a[3].y[x] can be. + bool isSpvLvalue() const { return accessChain.swizzle.size() <= 1; } + + // get the direct pointer for an l-value + Id accessChainGetLValue(); + + // Get the inferred SPIR-V type of the result of the current access chain, + // based on the type of the base and the chain of dereferences. + Id accessChainGetInferredType(); + + // Add capabilities, extensions, remove unneeded decorations, etc., + // based on the resulting SPIR-V. + void postProcess(); + + // Prune unreachable blocks in the CFG and remove unneeded decorations. + void postProcessCFG(); + +#ifndef GLSLANG_WEB + // Add capabilities, extensions based on instructions in the module. + void postProcessFeatures(); + // Hook to visit each instruction in a block in a function + void postProcess(Instruction&); + // Hook to visit each non-32-bit sized float/int operation in a block. + void postProcessType(const Instruction&, spv::Id typeId); +#endif + + void dump(std::vector&) const; + + void createBranch(Block* block); + void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock); + void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, + const std::vector& operands); + + // Sets to generate opcode for specialization constants. + void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; } + // Sets to generate opcode for non-specialization constants (normal mode). + void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; } + // Check if the builder is generating code for spec constants. 
+ bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; } + + protected: + Id makeIntConstant(Id typeId, unsigned value, bool specConstant); + Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant); + Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value); + Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2); + Id findCompositeConstant(Op typeClass, Id typeId, const std::vector& comps); + Id findStructConstant(Id typeId, const std::vector& comps); + Id collapseAccessChain(); + void remapDynamicSwizzle(); + void transferAccessChainSwizzle(bool dynamic); + void simplifyAccessChainSwizzle(); + void createAndSetNoPredecessorBlock(const char*); + void createSelectionMerge(Block* mergeBlock, unsigned int control); + void dumpSourceInstructions(std::vector&) const; + void dumpSourceInstructions(const spv::Id fileId, const std::string& text, std::vector&) const; + void dumpInstructions(std::vector&, const std::vector >&) const; + void dumpModuleProcesses(std::vector&) const; + spv::MemoryAccessMask sanitizeMemoryAccessForStorageClass(spv::MemoryAccessMask memoryAccess, StorageClass sc) + const; + + unsigned int spvVersion; // the version of SPIR-V to emit in the header + SourceLanguage source; + int sourceVersion; + spv::Id sourceFileStringId; + std::string sourceText; + int currentLine; + const char* currentFile; + bool emitOpLines; + std::set extensions; + std::vector sourceExtensions; + std::vector moduleProcesses; + AddressingModel addressModel; + MemoryModel memoryModel; + std::set capabilities; + int builderNumber; + Module module; + Block* buildPoint; + Id uniqueId; + Function* entryPointFunction; + bool generatingOpCodeForSpecConst; + AccessChain accessChain; + + // special blocks of instructions for output + std::vector > strings; + std::vector > imports; + std::vector > entryPoints; + std::vector > executionModes; + std::vector > names; + std::vector > decorations; + std::vector > constantsTypesGlobals; + std::vector > externals; + std::vector > functions; + + // not output, internally used for quick & dirty canonical (unique) creation + + // map type opcodes to constant inst. + std::unordered_map> groupedConstants; + // map struct-id to constant instructions + std::unordered_map> groupedStructConstants; + // map type opcodes to type instructions + std::unordered_map> groupedTypes; + + // stack of switches + std::stack switchMerges; + + // Our loop stack. + std::stack loops; + + // map from strings to their string ids + std::unordered_map stringIds; + + // map from include file name ids to their contents + std::map includeFiles; + + // The stream for outputting warnings and errors. + SpvBuildLogger* logger; +}; // end Builder class + +}; // end spv namespace + +#endif // SpvBuilder_H diff --git a/src/third_party/glslang/SPIRV/SpvPostProcess.cpp b/src/third_party/glslang/SPIRV/SpvPostProcess.cpp new file mode 100644 index 0000000..d40174d --- /dev/null +++ b/src/third_party/glslang/SPIRV/SpvPostProcess.cpp @@ -0,0 +1,450 @@ +// +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Post-processing for SPIR-V IR, in internal form, not standard binary form. +// + +#include +#include + +#include +#include +#include + +#include "SpvBuilder.h" + +#include "spirv.hpp" +#include "GlslangToSpv.h" +#include "SpvBuilder.h" +namespace spv { + #include "GLSL.std.450.h" + #include "GLSL.ext.KHR.h" + #include "GLSL.ext.EXT.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" +} + +namespace spv { + +#ifndef GLSLANG_WEB +// Hook to visit each operand type and result type of an instruction. +// Will be called multiple times for one instruction, once for each typed +// operand and the result. +void Builder::postProcessType(const Instruction& inst, Id typeId) +{ + // Characterize the type being questioned + Id basicTypeOp = getMostBasicTypeClass(typeId); + int width = 0; + if (basicTypeOp == OpTypeFloat || basicTypeOp == OpTypeInt) + width = getScalarTypeWidth(typeId); + + // Do opcode-specific checks + switch (inst.getOpCode()) { + case OpLoad: + case OpStore: + if (basicTypeOp == OpTypeStruct) { + if (containsType(typeId, OpTypeInt, 8)) + addCapability(CapabilityInt8); + if (containsType(typeId, OpTypeInt, 16)) + addCapability(CapabilityInt16); + if (containsType(typeId, OpTypeFloat, 16)) + addCapability(CapabilityFloat16); + } else { + StorageClass storageClass = getStorageClass(inst.getIdOperand(0)); + if (width == 8) { + switch (storageClass) { + case StorageClassPhysicalStorageBufferEXT: + case StorageClassUniform: + case StorageClassStorageBuffer: + case StorageClassPushConstant: + break; + default: + addCapability(CapabilityInt8); + break; + } + } else if (width == 16) { + switch (storageClass) { + case StorageClassPhysicalStorageBufferEXT: + case StorageClassUniform: + case StorageClassStorageBuffer: + case StorageClassPushConstant: + case StorageClassInput: + case StorageClassOutput: + break; + default: + if (basicTypeOp == OpTypeInt) + addCapability(CapabilityInt16); + if (basicTypeOp == OpTypeFloat) + addCapability(CapabilityFloat16); + break; + } + } + } + break; + case OpAccessChain: + case OpPtrAccessChain: + case OpCopyObject: + break; + case OpFConvert: + case OpSConvert: + case OpUConvert: + // Look for any 8/16-bit storage capabilities. If there are none, assume that + // the convert instruction requires the Float16/Int8/16 capability. 
+ if (containsType(typeId, OpTypeFloat, 16) || containsType(typeId, OpTypeInt, 16)) { + bool foundStorage = false; + for (auto it = capabilities.begin(); it != capabilities.end(); ++it) { + spv::Capability cap = *it; + if (cap == spv::CapabilityStorageInputOutput16 || + cap == spv::CapabilityStoragePushConstant16 || + cap == spv::CapabilityStorageUniformBufferBlock16 || + cap == spv::CapabilityStorageUniform16) { + foundStorage = true; + break; + } + } + if (!foundStorage) { + if (containsType(typeId, OpTypeFloat, 16)) + addCapability(CapabilityFloat16); + if (containsType(typeId, OpTypeInt, 16)) + addCapability(CapabilityInt16); + } + } + if (containsType(typeId, OpTypeInt, 8)) { + bool foundStorage = false; + for (auto it = capabilities.begin(); it != capabilities.end(); ++it) { + spv::Capability cap = *it; + if (cap == spv::CapabilityStoragePushConstant8 || + cap == spv::CapabilityUniformAndStorageBuffer8BitAccess || + cap == spv::CapabilityStorageBuffer8BitAccess) { + foundStorage = true; + break; + } + } + if (!foundStorage) { + addCapability(CapabilityInt8); + } + } + break; + case OpExtInst: + switch (inst.getImmediateOperand(1)) { + case GLSLstd450Frexp: + case GLSLstd450FrexpStruct: + if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeInt, 16)) + addExtension(spv::E_SPV_AMD_gpu_shader_int16); + break; + case GLSLstd450InterpolateAtCentroid: + case GLSLstd450InterpolateAtSample: + case GLSLstd450InterpolateAtOffset: + if (getSpvVersion() < glslang::EShTargetSpv_1_3 && containsType(typeId, OpTypeFloat, 16)) + addExtension(spv::E_SPV_AMD_gpu_shader_half_float); + break; + default: + break; + } + break; + default: + if (basicTypeOp == OpTypeFloat && width == 16) + addCapability(CapabilityFloat16); + if (basicTypeOp == OpTypeInt && width == 16) + addCapability(CapabilityInt16); + if (basicTypeOp == OpTypeInt && width == 8) + addCapability(CapabilityInt8); + break; + } +} + +// Called for each instruction that resides in a block. +void Builder::postProcess(Instruction& inst) +{ + // Add capabilities based simply on the opcode. + switch (inst.getOpCode()) { + case OpExtInst: + switch (inst.getImmediateOperand(1)) { + case GLSLstd450InterpolateAtCentroid: + case GLSLstd450InterpolateAtSample: + case GLSLstd450InterpolateAtOffset: + addCapability(CapabilityInterpolationFunction); + break; + default: + break; + } + break; + case OpDPdxFine: + case OpDPdyFine: + case OpFwidthFine: + case OpDPdxCoarse: + case OpDPdyCoarse: + case OpFwidthCoarse: + addCapability(CapabilityDerivativeControl); + break; + + case OpImageQueryLod: + case OpImageQuerySize: + case OpImageQuerySizeLod: + case OpImageQuerySamples: + case OpImageQueryLevels: + addCapability(CapabilityImageQuery); + break; + + case OpGroupNonUniformPartitionNV: + addExtension(E_SPV_NV_shader_subgroup_partitioned); + addCapability(CapabilityGroupNonUniformPartitionedNV); + break; + + case OpLoad: + case OpStore: + { + // For any load/store to a PhysicalStorageBufferEXT, walk the accesschain + // index list to compute the misalignment. The pre-existing alignment value + // (set via Builder::AccessChain::alignment) only accounts for the base of + // the reference type and any scalar component selection in the accesschain, + // and this function computes the rest from the SPIR-V Offset decorations. 
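// Illustrative aside, not part of the glslang sources: the walk below ORs together
// every Offset/ArrayStride/MatrixStride it meets and then keeps only the lowest set
// bit of the result. That bit is the largest power of two dividing all of them,
// i.e. the strictest alignment that can safely be reported. A minimal standalone
// sketch of that arithmetic (the helper name is hypothetical):
#include <initializer_list>
static unsigned MergeAlignment(std::initializer_list<unsigned> strides, unsigned existing)
{
    unsigned bits = existing;
    for (unsigned s : strides)
        bits |= s;                       // accumulate every candidate misalignment
    return bits & ~(bits & (bits - 1));  // isolate the lowest set bit
}
// e.g. MergeAlignment({16, 24}, /*existing Aligned operand*/ 8) == 8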
+ Instruction *accessChain = module.getInstruction(inst.getIdOperand(0)); + if (accessChain->getOpCode() == OpAccessChain) { + Instruction *base = module.getInstruction(accessChain->getIdOperand(0)); + // Get the type of the base of the access chain. It must be a pointer type. + Id typeId = base->getTypeId(); + Instruction *type = module.getInstruction(typeId); + assert(type->getOpCode() == OpTypePointer); + if (type->getImmediateOperand(0) != StorageClassPhysicalStorageBufferEXT) { + break; + } + // Get the pointee type. + typeId = type->getIdOperand(1); + type = module.getInstruction(typeId); + // Walk the index list for the access chain. For each index, find any + // misalignment that can apply when accessing the member/element via + // Offset/ArrayStride/MatrixStride decorations, and bitwise OR them all + // together. + int alignment = 0; + for (int i = 1; i < accessChain->getNumOperands(); ++i) { + Instruction *idx = module.getInstruction(accessChain->getIdOperand(i)); + if (type->getOpCode() == OpTypeStruct) { + assert(idx->getOpCode() == OpConstant); + unsigned int c = idx->getImmediateOperand(0); + + const auto function = [&](const std::unique_ptr& decoration) { + if (decoration.get()->getOpCode() == OpMemberDecorate && + decoration.get()->getIdOperand(0) == typeId && + decoration.get()->getImmediateOperand(1) == c && + (decoration.get()->getImmediateOperand(2) == DecorationOffset || + decoration.get()->getImmediateOperand(2) == DecorationMatrixStride)) { + alignment |= decoration.get()->getImmediateOperand(3); + } + }; + std::for_each(decorations.begin(), decorations.end(), function); + // get the next member type + typeId = type->getIdOperand(c); + type = module.getInstruction(typeId); + } else if (type->getOpCode() == OpTypeArray || + type->getOpCode() == OpTypeRuntimeArray) { + const auto function = [&](const std::unique_ptr& decoration) { + if (decoration.get()->getOpCode() == OpDecorate && + decoration.get()->getIdOperand(0) == typeId && + decoration.get()->getImmediateOperand(1) == DecorationArrayStride) { + alignment |= decoration.get()->getImmediateOperand(2); + } + }; + std::for_each(decorations.begin(), decorations.end(), function); + // Get the element type + typeId = type->getIdOperand(0); + type = module.getInstruction(typeId); + } else { + // Once we get to any non-aggregate type, we're done. + break; + } + } + assert(inst.getNumOperands() >= 3); + unsigned int memoryAccess = inst.getImmediateOperand((inst.getOpCode() == OpStore) ? 2 : 1); + assert(memoryAccess & MemoryAccessAlignedMask); + static_cast(memoryAccess); + // Compute the index of the alignment operand. + int alignmentIdx = 2; + if (inst.getOpCode() == OpStore) + alignmentIdx++; + // Merge new and old (mis)alignment + alignment |= inst.getImmediateOperand(alignmentIdx); + // Pick the LSB + alignment = alignment & ~(alignment & (alignment-1)); + // update the Aligned operand + inst.setImmediateOperand(alignmentIdx, alignment); + } + break; + } + + default: + break; + } + + // Checks based on type + if (inst.getTypeId() != NoType) + postProcessType(inst, inst.getTypeId()); + for (int op = 0; op < inst.getNumOperands(); ++op) { + if (inst.isIdOperand(op)) { + // In blocks, these are always result ids, but we are relying on + // getTypeId() to return NoType for things like OpLabel. 
+ if (getTypeId(inst.getIdOperand(op)) != NoType) + postProcessType(inst, getTypeId(inst.getIdOperand(op))); + } + } +} +#endif + +// comment in header +void Builder::postProcessCFG() +{ + // reachableBlocks is the set of blockss reached via control flow, or which are + // unreachable continue targert or unreachable merge. + std::unordered_set reachableBlocks; + std::unordered_map headerForUnreachableContinue; + std::unordered_set unreachableMerges; + std::unordered_set unreachableDefinitions; + // Collect IDs defined in unreachable blocks. For each function, label the + // reachable blocks first. Then for each unreachable block, collect the + // result IDs of the instructions in it. + for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) { + Function* f = *fi; + Block* entry = f->getEntryBlock(); + inReadableOrder(entry, + [&reachableBlocks, &unreachableMerges, &headerForUnreachableContinue] + (Block* b, ReachReason why, Block* header) { + reachableBlocks.insert(b); + if (why == ReachDeadContinue) headerForUnreachableContinue[b] = header; + if (why == ReachDeadMerge) unreachableMerges.insert(b); + }); + for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) { + Block* b = *bi; + if (unreachableMerges.count(b) != 0 || headerForUnreachableContinue.count(b) != 0) { + auto ii = b->getInstructions().cbegin(); + ++ii; // Keep potential decorations on the label. + for (; ii != b->getInstructions().cend(); ++ii) + unreachableDefinitions.insert(ii->get()->getResultId()); + } else if (reachableBlocks.count(b) == 0) { + // The normal case for unreachable code. All definitions are considered dead. + for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ++ii) + unreachableDefinitions.insert(ii->get()->getResultId()); + } + } + } + + // Modify unreachable merge blocks and unreachable continue targets. + // Delete their contents. + for (auto mergeIter = unreachableMerges.begin(); mergeIter != unreachableMerges.end(); ++mergeIter) { + (*mergeIter)->rewriteAsCanonicalUnreachableMerge(); + } + for (auto continueIter = headerForUnreachableContinue.begin(); + continueIter != headerForUnreachableContinue.end(); + ++continueIter) { + Block* continue_target = continueIter->first; + Block* header = continueIter->second; + continue_target->rewriteAsCanonicalUnreachableContinue(header); + } + + // Remove unneeded decorations, for unreachable instructions + decorations.erase(std::remove_if(decorations.begin(), decorations.end(), + [&unreachableDefinitions](std::unique_ptr& I) -> bool { + Id decoration_id = I.get()->getIdOperand(0); + return unreachableDefinitions.count(decoration_id) != 0; + }), + decorations.end()); +} + +#ifndef GLSLANG_WEB +// comment in header +void Builder::postProcessFeatures() { + // Add per-instruction capabilities, extensions, etc., + + // Look for any 8/16 bit type in physical storage buffer class, and set the + // appropriate capability. This happens in createSpvVariable for other storage + // classes, but there isn't always a variable for physical storage buffer. 
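// Illustrative aside, not part of the glslang sources: the decoration pruning in
// postProcessCFG() above relies on the standard erase/remove_if idiom; the same
// pattern in isolation looks like this (names are hypothetical):
#include <algorithm>
#include <unordered_set>
#include <vector>
static void PruneDeadIds(std::vector<unsigned>& ids, const std::unordered_set<unsigned>& dead)
{
    ids.erase(std::remove_if(ids.begin(), ids.end(),
                             [&dead](unsigned id) { return dead.count(id) != 0; }),
              ids.end());  // remove_if compacts the survivors, erase drops the tail
}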
+ for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) { + Instruction* type = groupedTypes[OpTypePointer][t]; + if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) { + if (containsType(type->getIdOperand(1), OpTypeInt, 8)) { + addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5); + addCapability(spv::CapabilityStorageBuffer8BitAccess); + } + if (containsType(type->getIdOperand(1), OpTypeInt, 16) || + containsType(type->getIdOperand(1), OpTypeFloat, 16)) { + addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3); + addCapability(spv::CapabilityStorageBuffer16BitAccess); + } + } + } + + // process all block-contained instructions + for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) { + Function* f = *fi; + for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) { + Block* b = *bi; + for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++) + postProcess(*ii->get()); + + // For all local variables that contain pointers to PhysicalStorageBufferEXT, check whether + // there is an existing restrict/aliased decoration. If we don't find one, add Aliased as the + // default. + for (auto vi = b->getLocalVariables().cbegin(); vi != b->getLocalVariables().cend(); vi++) { + const Instruction& inst = *vi->get(); + Id resultId = inst.getResultId(); + if (containsPhysicalStorageBufferOrArray(getDerefTypeId(resultId))) { + bool foundDecoration = false; + const auto function = [&](const std::unique_ptr& decoration) { + if (decoration.get()->getIdOperand(0) == resultId && + decoration.get()->getOpCode() == OpDecorate && + (decoration.get()->getImmediateOperand(1) == spv::DecorationAliasedPointerEXT || + decoration.get()->getImmediateOperand(1) == spv::DecorationRestrictPointerEXT)) { + foundDecoration = true; + } + }; + std::for_each(decorations.begin(), decorations.end(), function); + if (!foundDecoration) { + addDecoration(resultId, spv::DecorationAliasedPointerEXT); + } + } + } + } + } +} +#endif + +// comment in header +void Builder::postProcess() { + postProcessCFG(); +#ifndef GLSLANG_WEB + postProcessFeatures(); +#endif +} + +}; // end spv namespace diff --git a/src/third_party/glslang/SPIRV/SpvTools.cpp b/src/third_party/glslang/SPIRV/SpvTools.cpp new file mode 100644 index 0000000..112ac33 --- /dev/null +++ b/src/third_party/glslang/SPIRV/SpvTools.cpp @@ -0,0 +1,241 @@ +// +// Copyright (C) 2014-2016 LunarG, Inc. +// Copyright (C) 2018-2020 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Call into SPIRV-Tools to disassemble, validate, and optimize. +// + +#if ENABLE_OPT + +#include +#include + +#include "SpvTools.h" +#include "spirv-tools/optimizer.hpp" +#include "spirv-tools/libspirv.h" + +namespace glslang { + +// Translate glslang's view of target versioning to what SPIRV-Tools uses. +spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger) +{ + switch (spvVersion.vulkan) { + case glslang::EShTargetVulkan_1_0: + return spv_target_env::SPV_ENV_VULKAN_1_0; + case glslang::EShTargetVulkan_1_1: + switch (spvVersion.spv) { + case EShTargetSpv_1_0: + case EShTargetSpv_1_1: + case EShTargetSpv_1_2: + case EShTargetSpv_1_3: + return spv_target_env::SPV_ENV_VULKAN_1_1; + case EShTargetSpv_1_4: + return spv_target_env::SPV_ENV_VULKAN_1_1_SPIRV_1_4; + default: + logger->missingFunctionality("Target version for SPIRV-Tools validator"); + return spv_target_env::SPV_ENV_VULKAN_1_1; + } + case glslang::EShTargetVulkan_1_2: + return spv_target_env::SPV_ENV_VULKAN_1_2; + default: + break; + } + + if (spvVersion.openGl > 0) + return spv_target_env::SPV_ENV_OPENGL_4_5; + + logger->missingFunctionality("Target version for SPIRV-Tools validator"); + return spv_target_env::SPV_ENV_UNIVERSAL_1_0; +} + +// Callback passed to spvtools::Optimizer::SetMessageConsumer +void OptimizerMesssageConsumer(spv_message_level_t level, const char *source, + const spv_position_t &position, const char *message) +{ + auto &out = std::cerr; + switch (level) + { + case SPV_MSG_FATAL: + case SPV_MSG_INTERNAL_ERROR: + case SPV_MSG_ERROR: + out << "error: "; + break; + case SPV_MSG_WARNING: + out << "warning: "; + break; + case SPV_MSG_INFO: + case SPV_MSG_DEBUG: + out << "info: "; + break; + default: + break; + } + if (source) + { + out << source << ":"; + } + out << position.line << ":" << position.column << ":" << position.index << ":"; + if (message) + { + out << " " << message; + } + out << std::endl; +} + +// Use the SPIRV-Tools disassembler to print SPIR-V. +void SpirvToolsDisassemble(std::ostream& out, const std::vector& spirv) +{ + // disassemble + spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3); + spv_text text; + spv_diagnostic diagnostic = nullptr; + spvBinaryToText(context, spirv.data(), spirv.size(), + SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT, + &text, &diagnostic); + + // dump + if (diagnostic == nullptr) + out << text->str; + else + spvDiagnosticPrint(diagnostic); + + // teardown + spvDiagnosticDestroy(diagnostic); + spvContextDestroy(context); +} + +// Apply the SPIRV-Tools validator to generated SPIR-V. 
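// Illustrative aside, not part of the glslang sources: a typical driver pairs the
// helpers in this file right after SPIR-V generation. A sketch, assuming ENABLE_OPT
// is defined, <iostream> is available, and `intermediate` and `spirv` were produced
// by glslang::GlslangToSpv(); the function name is hypothetical.
static void ValidateAndDump(const glslang::TIntermediate& intermediate,
                            std::vector<unsigned int>& spirv)
{
    spv::SpvBuildLogger logger;
    // run the SPIRV-Tools validator defined below; false = not pre-legalization
    glslang::SpirvToolsValidate(intermediate, spirv, &logger, /*prelegalization=*/false);
    // print any validator messages, then the disassembly
    std::cerr << logger.getAllMessages();
    glslang::SpirvToolsDisassemble(std::cout, spirv);
}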
+void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger* logger, bool prelegalization) +{ + // validate + spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger)); + spv_const_binary_t binary = { spirv.data(), spirv.size() }; + spv_diagnostic diagnostic = nullptr; + spv_validator_options options = spvValidatorOptionsCreate(); + spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets()); + spvValidatorOptionsSetBeforeHlslLegalization(options, prelegalization); + spvValidateWithOptions(context, options, &binary, &diagnostic); + + // report + if (diagnostic != nullptr) { + logger->error("SPIRV-Tools Validation Errors"); + logger->error(diagnostic->error); + } + + // tear down + spvValidatorOptionsDestroy(options); + spvDiagnosticDestroy(diagnostic); + spvContextDestroy(context); +} + +// Apply the SPIRV-Tools optimizer to generated SPIR-V. HLSL SPIR-V is legalized in the process. +void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger* logger, const SpvOptions* options) +{ + spv_target_env target_env = MapToSpirvToolsEnv(intermediate.getSpv(), logger); + + spvtools::Optimizer optimizer(target_env); + optimizer.SetMessageConsumer(OptimizerMesssageConsumer); + + // If debug (specifically source line info) is being generated, propagate + // line information into all SPIR-V instructions. This avoids loss of + // information when instructions are deleted or moved. Later, remove + // redundant information to minimize final SPRIR-V size. + if (options->generateDebugInfo) { + optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass()); + } + else if (options->stripDebugInfo) { + optimizer.RegisterPass(spvtools::CreateStripDebugInfoPass()); + } + optimizer.RegisterPass(spvtools::CreateWrapOpKillPass()); + optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass()); + optimizer.RegisterPass(spvtools::CreateMergeReturnPass()); + optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass()); + optimizer.RegisterPass(spvtools::CreateEliminateDeadFunctionsPass()); + optimizer.RegisterPass(spvtools::CreateScalarReplacementPass()); + optimizer.RegisterPass(spvtools::CreateLocalAccessChainConvertPass()); + optimizer.RegisterPass(spvtools::CreateLocalSingleBlockLoadStoreElimPass()); + optimizer.RegisterPass(spvtools::CreateLocalSingleStoreElimPass()); + optimizer.RegisterPass(spvtools::CreateSimplificationPass()); + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateVectorDCEPass()); + optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass()); + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass()); + optimizer.RegisterPass(spvtools::CreateBlockMergePass()); + optimizer.RegisterPass(spvtools::CreateLocalMultiStoreElimPass()); + optimizer.RegisterPass(spvtools::CreateIfConversionPass()); + optimizer.RegisterPass(spvtools::CreateSimplificationPass()); + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateVectorDCEPass()); + optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass()); + if (options->optimizeSize) { + optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass()); + } + optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass()); + optimizer.RegisterPass(spvtools::CreateCFGCleanupPass()); + if (options->generateDebugInfo) { + 
optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass()); + } + + spvtools::OptimizerOptions spvOptOptions; + optimizer.SetTargetEnv(MapToSpirvToolsEnv(intermediate.getSpv(), logger)); + spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on + optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions); +} + +// Apply the SPIRV-Tools optimizer to strip debug info from SPIR-V. This is implicitly done by +// SpirvToolsTransform if spvOptions->stripDebugInfo is set, but can be called separately if +// optimization is disabled. +void SpirvToolsStripDebugInfo(const glslang::TIntermediate& intermediate, + std::vector& spirv, spv::SpvBuildLogger* logger) +{ + spv_target_env target_env = MapToSpirvToolsEnv(intermediate.getSpv(), logger); + + spvtools::Optimizer optimizer(target_env); + optimizer.SetMessageConsumer(OptimizerMesssageConsumer); + + optimizer.RegisterPass(spvtools::CreateStripDebugInfoPass()); + + spvtools::OptimizerOptions spvOptOptions; + optimizer.SetTargetEnv(MapToSpirvToolsEnv(intermediate.getSpv(), logger)); + spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on + optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions); +} + +}; // end namespace glslang + +#endif diff --git a/src/third_party/glslang/SPIRV/SpvTools.h b/src/third_party/glslang/SPIRV/SpvTools.h new file mode 100644 index 0000000..49449cc --- /dev/null +++ b/src/third_party/glslang/SPIRV/SpvTools.h @@ -0,0 +1,88 @@ +// +// Copyright (C) 2014-2016 LunarG, Inc. +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Call into SPIRV-Tools to disassemble, validate, and optimize. 
+// + +#pragma once +#ifndef GLSLANG_SPV_TOOLS_H +#define GLSLANG_SPV_TOOLS_H + +#ifdef ENABLE_OPT +#include +#include +#endif + +#include "../glslang/MachineIndependent/localintermediate.h" +#include "Logger.h" + +namespace glslang { + +struct SpvOptions { + SpvOptions() : generateDebugInfo(false), stripDebugInfo(false), disableOptimizer(true), + optimizeSize(false), disassemble(false), validate(false) { } + bool generateDebugInfo; + bool stripDebugInfo; + bool disableOptimizer; + bool optimizeSize; + bool disassemble; + bool validate; +}; + +#ifdef ENABLE_OPT + +// Use the SPIRV-Tools disassembler to print SPIR-V. +void SpirvToolsDisassemble(std::ostream& out, const std::vector& spirv); + +// Apply the SPIRV-Tools validator to generated SPIR-V. +void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger*, bool prelegalization); + +// Apply the SPIRV-Tools optimizer to generated SPIR-V. HLSL SPIR-V is legalized in the process. +void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector& spirv, + spv::SpvBuildLogger*, const SpvOptions*); + +// Apply the SPIRV-Tools optimizer to strip debug info from SPIR-V. This is implicitly done by +// SpirvToolsTransform if spvOptions->stripDebugInfo is set, but can be called separately if +// optimization is disabled. +void SpirvToolsStripDebugInfo(const glslang::TIntermediate& intermediate, + std::vector& spirv, spv::SpvBuildLogger*); + +#endif + +} // end namespace glslang + +#endif // GLSLANG_SPV_TOOLS_H diff --git a/src/third_party/glslang/SPIRV/bitutils.h b/src/third_party/glslang/SPIRV/bitutils.h new file mode 100644 index 0000000..22e44ce --- /dev/null +++ b/src/third_party/glslang/SPIRV/bitutils.h @@ -0,0 +1,81 @@ +// Copyright (c) 2015-2016 The Khronos Group Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIBSPIRV_UTIL_BITUTILS_H_ +#define LIBSPIRV_UTIL_BITUTILS_H_ + +#include +#include + +namespace spvutils { + +// Performs a bitwise copy of source to the destination type Dest. +template +Dest BitwiseCast(Src source) { + Dest dest; + static_assert(sizeof(source) == sizeof(dest), + "BitwiseCast: Source and destination must have the same size"); + std::memcpy(static_cast(&dest), &source, sizeof(dest)); + return dest; +} + +// SetBits returns an integer of type with bits set +// for position through , counting from the least +// significant bit. In particular when Num == 0, no positions are set to 1. +// A static assert will be triggered if First + Num > sizeof(T) * 8, that is, +// a bit that will not fit in the underlying type is set. +template +struct SetBits { + static_assert(First < sizeof(T) * 8, + "Tried to set a bit that is shifted too far."); + const static T get = (T(1) << First) | SetBits::get; +}; + +template +struct SetBits { + const static T get = T(0); +}; + +// This is all compile-time so we can put our tests right here. 
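// Illustrative aside, not part of the SPIRV-Tools sources: SetBits is parameterized
// as SetBits<T, First, Num> -- the integer type, the first bit position, and the
// number of bits -- so concrete compile-time uses of the self-tests below look like:
static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001), "bit 0 set");
static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006), "bits 1 and 2 set");
static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL), "top bit set");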
+static_assert(SetBits::get == uint32_t(0x00000000), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0x00000001), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0x80000000), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0x00000006), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0xc0000000), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0x7FFFFFFF), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0xFFFFFFFF), + "SetBits failed"); +static_assert(SetBits::get == uint32_t(0xFFFF0000), + "SetBits failed"); + +static_assert(SetBits::get == uint64_t(0x0000000000000001LL), + "SetBits failed"); +static_assert(SetBits::get == uint64_t(0x8000000000000000LL), + "SetBits failed"); +static_assert(SetBits::get == uint64_t(0xc000000000000000LL), + "SetBits failed"); +static_assert(SetBits::get == uint64_t(0x0000000080000000LL), + "SetBits failed"); +static_assert(SetBits::get == uint64_t(0x00000000FFFF0000LL), + "SetBits failed"); + +} // namespace spvutils + +#endif // LIBSPIRV_UTIL_BITUTILS_H_ diff --git a/src/third_party/glslang/SPIRV/disassemble.cpp b/src/third_party/glslang/SPIRV/disassemble.cpp new file mode 100644 index 0000000..a95ded4 --- /dev/null +++ b/src/third_party/glslang/SPIRV/disassemble.cpp @@ -0,0 +1,747 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Disassembler for SPIR-V. 
+// + +#include +#include +#include +#include +#include +#include +#include + +#include "disassemble.h" +#include "doc.h" +#include "SpvTools.h" + +namespace spv { + extern "C" { + // Include C-based headers that don't have a namespace + #include "GLSL.std.450.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" + } +} +const char* GlslStd450DebugNames[spv::GLSLstd450Count]; + +namespace spv { + +static const char* GLSLextAMDGetDebugNames(const char*, unsigned); +static const char* GLSLextNVGetDebugNames(const char*, unsigned); + +static void Kill(std::ostream& out, const char* message) +{ + out << std::endl << "Disassembly failed: " << message << std::endl; + exit(1); +} + +// used to identify the extended instruction library imported when printing +enum ExtInstSet { + GLSL450Inst, + GLSLextAMDInst, + GLSLextNVInst, + OpenCLExtInst, + NonSemanticDebugPrintfExtInst, +}; + +// Container class for a single instance of a SPIR-V stream, with methods for disassembly. +class SpirvStream { +public: + SpirvStream(std::ostream& out, const std::vector& stream) : out(out), stream(stream), word(0), nextNestedControl(0) { } + virtual ~SpirvStream() { } + + void validate(); + void processInstructions(); + +protected: + SpirvStream(const SpirvStream&); + SpirvStream& operator=(const SpirvStream&); + Op getOpCode(int id) const { return idInstruction[id] ? (Op)(stream[idInstruction[id]] & OpCodeMask) : OpNop; } + + // Output methods + void outputIndent(); + void formatId(Id id, std::stringstream&); + void outputResultId(Id id); + void outputTypeId(Id id); + void outputId(Id id); + void outputMask(OperandClass operandClass, unsigned mask); + void disassembleImmediates(int numOperands); + void disassembleIds(int numOperands); + int disassembleString(); + void disassembleInstruction(Id resultId, Id typeId, Op opCode, int numOperands); + + // Data + std::ostream& out; // where to write the disassembly + const std::vector& stream; // the actual word stream + int size; // the size of the word stream + int word; // the next word of the stream to read + + // map each to the instruction that created it + Id bound; + std::vector idInstruction; // the word offset into the stream where the instruction for result [id] starts; 0 if not yet seen (forward reference or function parameter) + + std::vector idDescriptor; // the best text string known for explaining the + + // schema + unsigned int schema; + + // stack of structured-merge points + std::stack nestedControl; + Id nextNestedControl; // need a slight delay for when we are nested +}; + +void SpirvStream::validate() +{ + size = (int)stream.size(); + if (size < 4) + Kill(out, "stream is too short"); + + // Magic number + if (stream[word++] != MagicNumber) { + out << "Bad magic number"; + return; + } + + // Version + out << "// Module Version " << std::hex << stream[word++] << std::endl; + + // Generator's magic number + out << "// Generated by (magic number): " << std::hex << stream[word++] << std::dec << std::endl; + + // Result bound + bound = stream[word++]; + idInstruction.resize(bound); + idDescriptor.resize(bound); + out << "// Id's are bound by " << bound << std::endl; + out << std::endl; + + // Reserved schema, must be 0 for now + schema = stream[word++]; + if (schema != 0) + Kill(out, "bad schema, must be 0"); +} + +// Loop over all the instructions, in order, processing each. +// Boiler plate for each is handled here directly, the rest is dispatched. 
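// Illustrative aside, not part of the glslang sources: every SPIR-V instruction
// begins with one word whose high 16 bits hold the word count and whose low 16
// bits hold the opcode; that is what the loop below unpacks. A standalone sketch:
unsigned int exampleFirstWord = 0x0004003D;                 // a 4-word instruction, opcode 61
unsigned int exampleWordCount = exampleFirstWord >> 16;     // 4 (WordCountShift == 16)
Op exampleOpCode = (Op)(exampleFirstWord & 0xFFFF);         // OpLoad (OpCodeMask == 0xFFFF)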
+void SpirvStream::processInstructions() +{ + // Instructions + while (word < size) { + int instructionStart = word; + + // Instruction wordCount and opcode + unsigned int firstWord = stream[word]; + unsigned wordCount = firstWord >> WordCountShift; + Op opCode = (Op)(firstWord & OpCodeMask); + int nextInst = word + wordCount; + ++word; + + // Presence of full instruction + if (nextInst > size) + Kill(out, "stream instruction terminated too early"); + + // Base for computing number of operands; will be updated as more is learned + unsigned numOperands = wordCount - 1; + + // Type + Id typeId = 0; + if (InstructionDesc[opCode].hasType()) { + typeId = stream[word++]; + --numOperands; + } + + // Result + Id resultId = 0; + if (InstructionDesc[opCode].hasResult()) { + resultId = stream[word++]; + --numOperands; + + // save instruction for future reference + idInstruction[resultId] = instructionStart; + } + + outputResultId(resultId); + outputTypeId(typeId); + outputIndent(); + + // Hand off the Op and all its operands + disassembleInstruction(resultId, typeId, opCode, numOperands); + if (word != nextInst) { + out << " ERROR, incorrect number of operands consumed. At " << word << " instead of " << nextInst << " instruction start was " << instructionStart; + word = nextInst; + } + out << std::endl; + } +} + +void SpirvStream::outputIndent() +{ + for (int i = 0; i < (int)nestedControl.size(); ++i) + out << " "; +} + +void SpirvStream::formatId(Id id, std::stringstream& idStream) +{ + if (id != 0) { + // On instructions with no IDs, this is called with "0", which does not + // have to be within ID bounds on null shaders. + if (id >= bound) + Kill(out, "Bad "); + + idStream << id; + if (idDescriptor[id].size() > 0) + idStream << "(" << idDescriptor[id] << ")"; + } +} + +void SpirvStream::outputResultId(Id id) +{ + const int width = 16; + std::stringstream idStream; + formatId(id, idStream); + out << std::setw(width) << std::right << idStream.str(); + if (id != 0) + out << ":"; + else + out << " "; + + if (nestedControl.size() && id == nestedControl.top()) + nestedControl.pop(); +} + +void SpirvStream::outputTypeId(Id id) +{ + const int width = 12; + std::stringstream idStream; + formatId(id, idStream); + out << std::setw(width) << std::right << idStream.str() << " "; +} + +void SpirvStream::outputId(Id id) +{ + if (id >= bound) + Kill(out, "Bad "); + + out << id; + if (idDescriptor[id].size() > 0) + out << "(" << idDescriptor[id] << ")"; +} + +void SpirvStream::outputMask(OperandClass operandClass, unsigned mask) +{ + if (mask == 0) + out << "None"; + else { + for (int m = 0; m < OperandClassParams[operandClass].ceiling; ++m) { + if (mask & (1 << m)) + out << OperandClassParams[operandClass].getName(m) << " "; + } + } +} + +void SpirvStream::disassembleImmediates(int numOperands) +{ + for (int i = 0; i < numOperands; ++i) { + out << stream[word++]; + if (i < numOperands - 1) + out << " "; + } +} + +void SpirvStream::disassembleIds(int numOperands) +{ + for (int i = 0; i < numOperands; ++i) { + outputId(stream[word++]); + if (i < numOperands - 1) + out << " "; + } +} + +// return the number of operands consumed by the string +int SpirvStream::disassembleString() +{ + int startWord = word; + + out << " \""; + + const char* wordString; + bool done = false; + do { + unsigned int content = stream[word]; + wordString = (const char*)&content; + for (int charCount = 0; charCount < 4; ++charCount) { + if (*wordString == 0) { + done = true; + break; + } + out << *(wordString++); + } + ++word; + } while (! 
done); + + out << "\""; + + return word - startWord; +} + +void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode, int numOperands) +{ + // Process the opcode + + out << (OpcodeString(opCode) + 2); // leave out the "Op" + + if (opCode == OpLoopMerge || opCode == OpSelectionMerge) + nextNestedControl = stream[word]; + else if (opCode == OpBranchConditional || opCode == OpSwitch) { + if (nextNestedControl) { + nestedControl.push(nextNestedControl); + nextNestedControl = 0; + } + } else if (opCode == OpExtInstImport) { + idDescriptor[resultId] = (const char*)(&stream[word]); + } + else { + if (resultId != 0 && idDescriptor[resultId].size() == 0) { + switch (opCode) { + case OpTypeInt: + switch (stream[word]) { + case 8: idDescriptor[resultId] = "int8_t"; break; + case 16: idDescriptor[resultId] = "int16_t"; break; + default: assert(0); // fallthrough + case 32: idDescriptor[resultId] = "int"; break; + case 64: idDescriptor[resultId] = "int64_t"; break; + } + break; + case OpTypeFloat: + switch (stream[word]) { + case 16: idDescriptor[resultId] = "float16_t"; break; + default: assert(0); // fallthrough + case 32: idDescriptor[resultId] = "float"; break; + case 64: idDescriptor[resultId] = "float64_t"; break; + } + break; + case OpTypeBool: + idDescriptor[resultId] = "bool"; + break; + case OpTypeStruct: + idDescriptor[resultId] = "struct"; + break; + case OpTypePointer: + idDescriptor[resultId] = "ptr"; + break; + case OpTypeVector: + if (idDescriptor[stream[word]].size() > 0) { + idDescriptor[resultId].append(idDescriptor[stream[word]].begin(), idDescriptor[stream[word]].begin() + 1); + if (strstr(idDescriptor[stream[word]].c_str(), "8")) { + idDescriptor[resultId].append("8"); + } + if (strstr(idDescriptor[stream[word]].c_str(), "16")) { + idDescriptor[resultId].append("16"); + } + if (strstr(idDescriptor[stream[word]].c_str(), "64")) { + idDescriptor[resultId].append("64"); + } + } + idDescriptor[resultId].append("vec"); + switch (stream[word + 1]) { + case 2: idDescriptor[resultId].append("2"); break; + case 3: idDescriptor[resultId].append("3"); break; + case 4: idDescriptor[resultId].append("4"); break; + case 8: idDescriptor[resultId].append("8"); break; + case 16: idDescriptor[resultId].append("16"); break; + case 32: idDescriptor[resultId].append("32"); break; + default: break; + } + break; + default: + break; + } + } + } + + // Process the operands. Note, a new context-dependent set could be + // swapped in mid-traversal. + + // Handle images specially, so can put out helpful strings. + if (opCode == OpTypeImage) { + out << " "; + disassembleIds(1); + out << " " << DimensionString((Dim)stream[word++]); + out << (stream[word++] != 0 ? " depth" : ""); + out << (stream[word++] != 0 ? " array" : ""); + out << (stream[word++] != 0 ? 
" multi-sampled" : ""); + switch (stream[word++]) { + case 0: out << " runtime"; break; + case 1: out << " sampled"; break; + case 2: out << " nonsampled"; break; + } + out << " format:" << ImageFormatString((ImageFormat)stream[word++]); + + if (numOperands == 8) { + out << " " << AccessQualifierString(stream[word++]); + } + return; + } + + // Handle all the parameterized operands + for (int op = 0; op < InstructionDesc[opCode].operands.getNum() && numOperands > 0; ++op) { + out << " "; + OperandClass operandClass = InstructionDesc[opCode].operands.getClass(op); + switch (operandClass) { + case OperandId: + case OperandScope: + case OperandMemorySemantics: + disassembleIds(1); + --numOperands; + // Get names for printing "(XXX)" for readability, *after* this id + if (opCode == OpName) + idDescriptor[stream[word - 1]] = (const char*)(&stream[word]); + break; + case OperandVariableIds: + disassembleIds(numOperands); + return; + case OperandImageOperands: + outputMask(OperandImageOperands, stream[word++]); + --numOperands; + disassembleIds(numOperands); + return; + case OperandOptionalLiteral: + case OperandVariableLiterals: + if ((opCode == OpDecorate && stream[word - 1] == DecorationBuiltIn) || + (opCode == OpMemberDecorate && stream[word - 1] == DecorationBuiltIn)) { + out << BuiltInString(stream[word++]); + --numOperands; + ++op; + } + disassembleImmediates(numOperands); + return; + case OperandVariableIdLiteral: + while (numOperands > 0) { + out << std::endl; + outputResultId(0); + outputTypeId(0); + outputIndent(); + out << " Type "; + disassembleIds(1); + out << ", member "; + disassembleImmediates(1); + numOperands -= 2; + } + return; + case OperandVariableLiteralId: + while (numOperands > 0) { + out << std::endl; + outputResultId(0); + outputTypeId(0); + outputIndent(); + out << " case "; + disassembleImmediates(1); + out << ": "; + disassembleIds(1); + numOperands -= 2; + } + return; + case OperandLiteralNumber: + disassembleImmediates(1); + --numOperands; + if (opCode == OpExtInst) { + ExtInstSet extInstSet = GLSL450Inst; + const char* name = idDescriptor[stream[word - 2]].c_str(); + if (strcmp("OpenCL.std", name) == 0) { + extInstSet = OpenCLExtInst; + } else if (strcmp("OpenCL.DebugInfo.100", name) == 0) { + extInstSet = OpenCLExtInst; + } else if (strcmp("NonSemantic.DebugPrintf", name) == 0) { + extInstSet = NonSemanticDebugPrintfExtInst; + } else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 || + strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 || + strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 || + strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) { + extInstSet = GLSLextAMDInst; + } else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 || + strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 || + strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 || + strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 || + strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 || + strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) { + extInstSet = GLSLextNVInst; + } + unsigned entrypoint = stream[word - 1]; + if (extInstSet == GLSL450Inst) { + if (entrypoint < GLSLstd450Count) { + out << "(" << GlslStd450DebugNames[entrypoint] << ")"; + } + } else if (extInstSet == GLSLextAMDInst) { + out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")"; + } + else if (extInstSet == GLSLextNVInst) { + out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")"; + } else if (extInstSet == NonSemanticDebugPrintfExtInst) { + out 
<< "(DebugPrintf)"; + } + } + break; + case OperandOptionalLiteralString: + case OperandLiteralString: + numOperands -= disassembleString(); + break; + case OperandVariableLiteralStrings: + while (numOperands > 0) + numOperands -= disassembleString(); + return; + case OperandMemoryAccess: + outputMask(OperandMemoryAccess, stream[word++]); + --numOperands; + // Aligned is the only memory access operand that uses an immediate + // value, and it is also the first operand that uses a value at all. + if (stream[word-1] & MemoryAccessAlignedMask) { + disassembleImmediates(1); + numOperands--; + if (numOperands) + out << " "; + } + disassembleIds(numOperands); + return; + default: + assert(operandClass >= OperandSource && operandClass < OperandOpcode); + + if (OperandClassParams[operandClass].bitmask) + outputMask(operandClass, stream[word++]); + else + out << OperandClassParams[operandClass].getName(stream[word++]); + --numOperands; + + break; + } + } + + return; +} + +static void GLSLstd450GetDebugNames(const char** names) +{ + for (int i = 0; i < GLSLstd450Count; ++i) + names[i] = "Unknown"; + + names[GLSLstd450Round] = "Round"; + names[GLSLstd450RoundEven] = "RoundEven"; + names[GLSLstd450Trunc] = "Trunc"; + names[GLSLstd450FAbs] = "FAbs"; + names[GLSLstd450SAbs] = "SAbs"; + names[GLSLstd450FSign] = "FSign"; + names[GLSLstd450SSign] = "SSign"; + names[GLSLstd450Floor] = "Floor"; + names[GLSLstd450Ceil] = "Ceil"; + names[GLSLstd450Fract] = "Fract"; + names[GLSLstd450Radians] = "Radians"; + names[GLSLstd450Degrees] = "Degrees"; + names[GLSLstd450Sin] = "Sin"; + names[GLSLstd450Cos] = "Cos"; + names[GLSLstd450Tan] = "Tan"; + names[GLSLstd450Asin] = "Asin"; + names[GLSLstd450Acos] = "Acos"; + names[GLSLstd450Atan] = "Atan"; + names[GLSLstd450Sinh] = "Sinh"; + names[GLSLstd450Cosh] = "Cosh"; + names[GLSLstd450Tanh] = "Tanh"; + names[GLSLstd450Asinh] = "Asinh"; + names[GLSLstd450Acosh] = "Acosh"; + names[GLSLstd450Atanh] = "Atanh"; + names[GLSLstd450Atan2] = "Atan2"; + names[GLSLstd450Pow] = "Pow"; + names[GLSLstd450Exp] = "Exp"; + names[GLSLstd450Log] = "Log"; + names[GLSLstd450Exp2] = "Exp2"; + names[GLSLstd450Log2] = "Log2"; + names[GLSLstd450Sqrt] = "Sqrt"; + names[GLSLstd450InverseSqrt] = "InverseSqrt"; + names[GLSLstd450Determinant] = "Determinant"; + names[GLSLstd450MatrixInverse] = "MatrixInverse"; + names[GLSLstd450Modf] = "Modf"; + names[GLSLstd450ModfStruct] = "ModfStruct"; + names[GLSLstd450FMin] = "FMin"; + names[GLSLstd450SMin] = "SMin"; + names[GLSLstd450UMin] = "UMin"; + names[GLSLstd450FMax] = "FMax"; + names[GLSLstd450SMax] = "SMax"; + names[GLSLstd450UMax] = "UMax"; + names[GLSLstd450FClamp] = "FClamp"; + names[GLSLstd450SClamp] = "SClamp"; + names[GLSLstd450UClamp] = "UClamp"; + names[GLSLstd450FMix] = "FMix"; + names[GLSLstd450Step] = "Step"; + names[GLSLstd450SmoothStep] = "SmoothStep"; + names[GLSLstd450Fma] = "Fma"; + names[GLSLstd450Frexp] = "Frexp"; + names[GLSLstd450FrexpStruct] = "FrexpStruct"; + names[GLSLstd450Ldexp] = "Ldexp"; + names[GLSLstd450PackSnorm4x8] = "PackSnorm4x8"; + names[GLSLstd450PackUnorm4x8] = "PackUnorm4x8"; + names[GLSLstd450PackSnorm2x16] = "PackSnorm2x16"; + names[GLSLstd450PackUnorm2x16] = "PackUnorm2x16"; + names[GLSLstd450PackHalf2x16] = "PackHalf2x16"; + names[GLSLstd450PackDouble2x32] = "PackDouble2x32"; + names[GLSLstd450UnpackSnorm2x16] = "UnpackSnorm2x16"; + names[GLSLstd450UnpackUnorm2x16] = "UnpackUnorm2x16"; + names[GLSLstd450UnpackHalf2x16] = "UnpackHalf2x16"; + names[GLSLstd450UnpackSnorm4x8] = "UnpackSnorm4x8"; + 
names[GLSLstd450UnpackUnorm4x8] = "UnpackUnorm4x8"; + names[GLSLstd450UnpackDouble2x32] = "UnpackDouble2x32"; + names[GLSLstd450Length] = "Length"; + names[GLSLstd450Distance] = "Distance"; + names[GLSLstd450Cross] = "Cross"; + names[GLSLstd450Normalize] = "Normalize"; + names[GLSLstd450FaceForward] = "FaceForward"; + names[GLSLstd450Reflect] = "Reflect"; + names[GLSLstd450Refract] = "Refract"; + names[GLSLstd450FindILsb] = "FindILsb"; + names[GLSLstd450FindSMsb] = "FindSMsb"; + names[GLSLstd450FindUMsb] = "FindUMsb"; + names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid"; + names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample"; + names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset"; + names[GLSLstd450NMin] = "NMin"; + names[GLSLstd450NMax] = "NMax"; + names[GLSLstd450NClamp] = "NClamp"; +} + +static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint) +{ + if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) { + switch (entrypoint) { + case SwizzleInvocationsAMD: return "SwizzleInvocationsAMD"; + case SwizzleInvocationsMaskedAMD: return "SwizzleInvocationsMaskedAMD"; + case WriteInvocationAMD: return "WriteInvocationAMD"; + case MbcntAMD: return "MbcntAMD"; + default: return "Bad"; + } + } else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) { + switch (entrypoint) { + case FMin3AMD: return "FMin3AMD"; + case UMin3AMD: return "UMin3AMD"; + case SMin3AMD: return "SMin3AMD"; + case FMax3AMD: return "FMax3AMD"; + case UMax3AMD: return "UMax3AMD"; + case SMax3AMD: return "SMax3AMD"; + case FMid3AMD: return "FMid3AMD"; + case UMid3AMD: return "UMid3AMD"; + case SMid3AMD: return "SMid3AMD"; + default: return "Bad"; + } + } else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) { + switch (entrypoint) { + case InterpolateAtVertexAMD: return "InterpolateAtVertexAMD"; + default: return "Bad"; + } + } + else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) { + switch (entrypoint) { + case CubeFaceIndexAMD: return "CubeFaceIndexAMD"; + case CubeFaceCoordAMD: return "CubeFaceCoordAMD"; + case TimeAMD: return "TimeAMD"; + default: + break; + } + } + + return "Bad"; +} + +static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint) +{ + if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 || + strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 || + strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 || + strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 || + strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 || + strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 || + strcmp(name, spv::E_SPV_NV_mesh_shader) == 0 || + strcmp(name, spv::E_SPV_NV_shader_image_footprint) == 0) { + switch (entrypoint) { + // NV builtins + case BuiltInViewportMaskNV: return "ViewportMaskNV"; + case BuiltInSecondaryPositionNV: return "SecondaryPositionNV"; + case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV"; + case BuiltInPositionPerViewNV: return "PositionPerViewNV"; + case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV"; + case BuiltInBaryCoordNV: return "BaryCoordNV"; + case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV"; + case BuiltInTaskCountNV: return "TaskCountNV"; + case BuiltInPrimitiveCountNV: return "PrimitiveCountNV"; + case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV"; + case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV"; + case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV"; + 
case BuiltInLayerPerViewNV: return "LayerPerViewNV"; + case BuiltInMeshViewCountNV: return "MeshViewCountNV"; + case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV"; + + // NV Capabilities + case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV"; + case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV"; + case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV"; + case CapabilityPerViewAttributesNV: return "PerViewAttributesNV"; + case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV"; + case CapabilityMeshShadingNV: return "MeshShadingNV"; + case CapabilityImageFootprintNV: return "ImageFootprintNV"; + case CapabilitySampleMaskOverrideCoverageNV:return "SampleMaskOverrideCoverageNV"; + + // NV Decorations + case DecorationOverrideCoverageNV: return "OverrideCoverageNV"; + case DecorationPassthroughNV: return "PassthroughNV"; + case DecorationViewportRelativeNV: return "ViewportRelativeNV"; + case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV"; + case DecorationPerVertexNV: return "PerVertexNV"; + case DecorationPerPrimitiveNV: return "PerPrimitiveNV"; + case DecorationPerViewNV: return "PerViewNV"; + case DecorationPerTaskNV: return "PerTaskNV"; + + default: return "Bad"; + } + } + return "Bad"; +} + +void Disassemble(std::ostream& out, const std::vector& stream) +{ + SpirvStream SpirvStream(out, stream); + spv::Parameterize(); + GLSLstd450GetDebugNames(GlslStd450DebugNames); + SpirvStream.validate(); + SpirvStream.processInstructions(); +} + +}; // end namespace spv diff --git a/src/third_party/glslang/SPIRV/disassemble.h b/src/third_party/glslang/SPIRV/disassemble.h new file mode 100644 index 0000000..b6a4635 --- /dev/null +++ b/src/third_party/glslang/SPIRV/disassemble.h @@ -0,0 +1,53 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Disassembler for SPIR-V. 
+// + +#pragma once +#ifndef disassembler_H +#define disassembler_H + +#include +#include + +namespace spv { + + // disassemble with glslang custom disassembler + void Disassemble(std::ostream& out, const std::vector&); + +} // end namespace spv + +#endif // disassembler_H diff --git a/src/third_party/glslang/SPIRV/doc.cpp b/src/third_party/glslang/SPIRV/doc.cpp new file mode 100644 index 0000000..1d052f8 --- /dev/null +++ b/src/third_party/glslang/SPIRV/doc.cpp @@ -0,0 +1,2923 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// 1) Programmatically fill in instruction/operand information. +// This can be used for disassembly, printing documentation, etc. +// +// 2) Print documentation from this parameterization. +// + +#include "doc.h" + +#include +#include +#include + +namespace spv { + extern "C" { + // Include C-based headers that don't have a namespace + #include "GLSL.ext.KHR.h" + #include "GLSL.ext.EXT.h" + #include "GLSL.ext.AMD.h" + #include "GLSL.ext.NV.h" + } +} + +namespace spv { + +// +// Whole set of functions that translate enumerants to their text strings for +// the specification (or their sanitized versions for auto-generating the +// spirv headers. +// +// Also, for masks the ceilings are declared next to these, to help keep them in sync. 
+// Ceilings should be +// - one more than the maximum value an enumerant takes on, for non-mask enumerants +// (for non-sparse enums, this is the number of enumerants) +// - the number of bits consumed by the set of masks +// (for non-sparse mask enums, this is the number of enumerants) +// + +const char* SourceString(int source) +{ + switch (source) { + case 0: return "Unknown"; + case 1: return "ESSL"; + case 2: return "GLSL"; + case 3: return "OpenCL_C"; + case 4: return "OpenCL_CPP"; + case 5: return "HLSL"; + + default: return "Bad"; + } +} + +const char* ExecutionModelString(int model) +{ + switch (model) { + case 0: return "Vertex"; + case 1: return "TessellationControl"; + case 2: return "TessellationEvaluation"; + case 3: return "Geometry"; + case 4: return "Fragment"; + case 5: return "GLCompute"; + case 6: return "Kernel"; + case ExecutionModelTaskNV: return "TaskNV"; + case ExecutionModelMeshNV: return "MeshNV"; + + default: return "Bad"; + + case ExecutionModelRayGenerationKHR: return "RayGenerationKHR"; + case ExecutionModelIntersectionKHR: return "IntersectionKHR"; + case ExecutionModelAnyHitKHR: return "AnyHitKHR"; + case ExecutionModelClosestHitKHR: return "ClosestHitKHR"; + case ExecutionModelMissKHR: return "MissKHR"; + case ExecutionModelCallableKHR: return "CallableKHR"; + } +} + +const char* AddressingString(int addr) +{ + switch (addr) { + case 0: return "Logical"; + case 1: return "Physical32"; + case 2: return "Physical64"; + + case AddressingModelPhysicalStorageBuffer64EXT: return "PhysicalStorageBuffer64EXT"; + + default: return "Bad"; + } +} + +const char* MemoryString(int mem) +{ + switch (mem) { + case MemoryModelSimple: return "Simple"; + case MemoryModelGLSL450: return "GLSL450"; + case MemoryModelOpenCL: return "OpenCL"; + case MemoryModelVulkanKHR: return "VulkanKHR"; + + default: return "Bad"; + } +} + +const int ExecutionModeCeiling = 40; + +const char* ExecutionModeString(int mode) +{ + switch (mode) { + case 0: return "Invocations"; + case 1: return "SpacingEqual"; + case 2: return "SpacingFractionalEven"; + case 3: return "SpacingFractionalOdd"; + case 4: return "VertexOrderCw"; + case 5: return "VertexOrderCcw"; + case 6: return "PixelCenterInteger"; + case 7: return "OriginUpperLeft"; + case 8: return "OriginLowerLeft"; + case 9: return "EarlyFragmentTests"; + case 10: return "PointMode"; + case 11: return "Xfb"; + case 12: return "DepthReplacing"; + case 13: return "Bad"; + case 14: return "DepthGreater"; + case 15: return "DepthLess"; + case 16: return "DepthUnchanged"; + case 17: return "LocalSize"; + case 18: return "LocalSizeHint"; + case 19: return "InputPoints"; + case 20: return "InputLines"; + case 21: return "InputLinesAdjacency"; + case 22: return "Triangles"; + case 23: return "InputTrianglesAdjacency"; + case 24: return "Quads"; + case 25: return "Isolines"; + case 26: return "OutputVertices"; + case 27: return "OutputPoints"; + case 28: return "OutputLineStrip"; + case 29: return "OutputTriangleStrip"; + case 30: return "VecTypeHint"; + case 31: return "ContractionOff"; + case 32: return "Bad"; + + case ExecutionModeInitializer: return "Initializer"; + case ExecutionModeFinalizer: return "Finalizer"; + case ExecutionModeSubgroupSize: return "SubgroupSize"; + case ExecutionModeSubgroupsPerWorkgroup: return "SubgroupsPerWorkgroup"; + case ExecutionModeSubgroupsPerWorkgroupId: return "SubgroupsPerWorkgroupId"; + case ExecutionModeLocalSizeId: return "LocalSizeId"; + case ExecutionModeLocalSizeHintId: return "LocalSizeHintId"; + + case 
ExecutionModePostDepthCoverage: return "PostDepthCoverage"; + case ExecutionModeDenormPreserve: return "DenormPreserve"; + case ExecutionModeDenormFlushToZero: return "DenormFlushToZero"; + case ExecutionModeSignedZeroInfNanPreserve: return "SignedZeroInfNanPreserve"; + case ExecutionModeRoundingModeRTE: return "RoundingModeRTE"; + case ExecutionModeRoundingModeRTZ: return "RoundingModeRTZ"; + case ExecutionModeStencilRefReplacingEXT: return "StencilRefReplacingEXT"; + + case ExecutionModeOutputLinesNV: return "OutputLinesNV"; + case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV"; + case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV"; + case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV"; + case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV"; + + case ExecutionModePixelInterlockOrderedEXT: return "PixelInterlockOrderedEXT"; + case ExecutionModePixelInterlockUnorderedEXT: return "PixelInterlockUnorderedEXT"; + case ExecutionModeSampleInterlockOrderedEXT: return "SampleInterlockOrderedEXT"; + case ExecutionModeSampleInterlockUnorderedEXT: return "SampleInterlockUnorderedEXT"; + case ExecutionModeShadingRateInterlockOrderedEXT: return "ShadingRateInterlockOrderedEXT"; + case ExecutionModeShadingRateInterlockUnorderedEXT: return "ShadingRateInterlockUnorderedEXT"; + + case ExecutionModeMaxWorkgroupSizeINTEL: return "MaxWorkgroupSizeINTEL"; + case ExecutionModeMaxWorkDimINTEL: return "MaxWorkDimINTEL"; + case ExecutionModeNoGlobalOffsetINTEL: return "NoGlobalOffsetINTEL"; + case ExecutionModeNumSIMDWorkitemsINTEL: return "NumSIMDWorkitemsINTEL"; + + case ExecutionModeCeiling: + default: return "Bad"; + } +} + +const char* StorageClassString(int StorageClass) +{ + switch (StorageClass) { + case 0: return "UniformConstant"; + case 1: return "Input"; + case 2: return "Uniform"; + case 3: return "Output"; + case 4: return "Workgroup"; + case 5: return "CrossWorkgroup"; + case 6: return "Private"; + case 7: return "Function"; + case 8: return "Generic"; + case 9: return "PushConstant"; + case 10: return "AtomicCounter"; + case 11: return "Image"; + case 12: return "StorageBuffer"; + + case StorageClassRayPayloadKHR: return "RayPayloadKHR"; + case StorageClassHitAttributeKHR: return "HitAttributeKHR"; + case StorageClassIncomingRayPayloadKHR: return "IncomingRayPayloadKHR"; + case StorageClassShaderRecordBufferKHR: return "ShaderRecordBufferKHR"; + case StorageClassCallableDataKHR: return "CallableDataKHR"; + case StorageClassIncomingCallableDataKHR: return "IncomingCallableDataKHR"; + + case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT"; + + default: return "Bad"; + } +} + +const int DecorationCeiling = 45; + +const char* DecorationString(int decoration) +{ + switch (decoration) { + case 0: return "RelaxedPrecision"; + case 1: return "SpecId"; + case 2: return "Block"; + case 3: return "BufferBlock"; + case 4: return "RowMajor"; + case 5: return "ColMajor"; + case 6: return "ArrayStride"; + case 7: return "MatrixStride"; + case 8: return "GLSLShared"; + case 9: return "GLSLPacked"; + case 10: return "CPacked"; + case 11: return "BuiltIn"; + case 12: return "Bad"; + case 13: return "NoPerspective"; + case 14: return "Flat"; + case 15: return "Patch"; + case 16: return "Centroid"; + case 17: return "Sample"; + case 18: return "Invariant"; + case 19: return "Restrict"; + case 20: return "Aliased"; + case 21: return "Volatile"; + case 22: return "Constant"; + case 23: return "Coherent"; + case 24: return 
"NonWritable"; + case 25: return "NonReadable"; + case 26: return "Uniform"; + case 27: return "Bad"; + case 28: return "SaturatedConversion"; + case 29: return "Stream"; + case 30: return "Location"; + case 31: return "Component"; + case 32: return "Index"; + case 33: return "Binding"; + case 34: return "DescriptorSet"; + case 35: return "Offset"; + case 36: return "XfbBuffer"; + case 37: return "XfbStride"; + case 38: return "FuncParamAttr"; + case 39: return "FP Rounding Mode"; + case 40: return "FP Fast Math Mode"; + case 41: return "Linkage Attributes"; + case 42: return "NoContraction"; + case 43: return "InputAttachmentIndex"; + case 44: return "Alignment"; + + case DecorationCeiling: + default: return "Bad"; + + case DecorationExplicitInterpAMD: return "ExplicitInterpAMD"; + case DecorationOverrideCoverageNV: return "OverrideCoverageNV"; + case DecorationPassthroughNV: return "PassthroughNV"; + case DecorationViewportRelativeNV: return "ViewportRelativeNV"; + case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV"; + case DecorationPerPrimitiveNV: return "PerPrimitiveNV"; + case DecorationPerViewNV: return "PerViewNV"; + case DecorationPerTaskNV: return "PerTaskNV"; + case DecorationPerVertexNV: return "PerVertexNV"; + + case DecorationNonUniformEXT: return "DecorationNonUniformEXT"; + case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE"; + case DecorationHlslSemanticGOOGLE: return "DecorationHlslSemanticGOOGLE"; + case DecorationRestrictPointerEXT: return "DecorationRestrictPointerEXT"; + case DecorationAliasedPointerEXT: return "DecorationAliasedPointerEXT"; + } +} + +const char* BuiltInString(int builtIn) +{ + switch (builtIn) { + case 0: return "Position"; + case 1: return "PointSize"; + case 2: return "Bad"; + case 3: return "ClipDistance"; + case 4: return "CullDistance"; + case 5: return "VertexId"; + case 6: return "InstanceId"; + case 7: return "PrimitiveId"; + case 8: return "InvocationId"; + case 9: return "Layer"; + case 10: return "ViewportIndex"; + case 11: return "TessLevelOuter"; + case 12: return "TessLevelInner"; + case 13: return "TessCoord"; + case 14: return "PatchVertices"; + case 15: return "FragCoord"; + case 16: return "PointCoord"; + case 17: return "FrontFacing"; + case 18: return "SampleId"; + case 19: return "SamplePosition"; + case 20: return "SampleMask"; + case 21: return "Bad"; + case 22: return "FragDepth"; + case 23: return "HelperInvocation"; + case 24: return "NumWorkgroups"; + case 25: return "WorkgroupSize"; + case 26: return "WorkgroupId"; + case 27: return "LocalInvocationId"; + case 28: return "GlobalInvocationId"; + case 29: return "LocalInvocationIndex"; + case 30: return "WorkDim"; + case 31: return "GlobalSize"; + case 32: return "EnqueuedWorkgroupSize"; + case 33: return "GlobalOffset"; + case 34: return "GlobalLinearId"; + case 35: return "Bad"; + case 36: return "SubgroupSize"; + case 37: return "SubgroupMaxSize"; + case 38: return "NumSubgroups"; + case 39: return "NumEnqueuedSubgroups"; + case 40: return "SubgroupId"; + case 41: return "SubgroupLocalInvocationId"; + case 42: return "VertexIndex"; // TBD: put next to VertexId? + case 43: return "InstanceIndex"; // TBD: put next to InstanceId? 
+ + case 4416: return "SubgroupEqMaskKHR"; + case 4417: return "SubgroupGeMaskKHR"; + case 4418: return "SubgroupGtMaskKHR"; + case 4419: return "SubgroupLeMaskKHR"; + case 4420: return "SubgroupLtMaskKHR"; + case 4438: return "DeviceIndex"; + case 4440: return "ViewIndex"; + case 4424: return "BaseVertex"; + case 4425: return "BaseInstance"; + case 4426: return "DrawIndex"; + case 5014: return "FragStencilRefEXT"; + + case 4992: return "BaryCoordNoPerspAMD"; + case 4993: return "BaryCoordNoPerspCentroidAMD"; + case 4994: return "BaryCoordNoPerspSampleAMD"; + case 4995: return "BaryCoordSmoothAMD"; + case 4996: return "BaryCoordSmoothCentroidAMD"; + case 4997: return "BaryCoordSmoothSampleAMD"; + case 4998: return "BaryCoordPullModelAMD"; + case BuiltInLaunchIdKHR: return "LaunchIdKHR"; + case BuiltInLaunchSizeKHR: return "LaunchSizeKHR"; + case BuiltInWorldRayOriginKHR: return "WorldRayOriginKHR"; + case BuiltInWorldRayDirectionKHR: return "WorldRayDirectionKHR"; + case BuiltInObjectRayOriginKHR: return "ObjectRayOriginKHR"; + case BuiltInObjectRayDirectionKHR: return "ObjectRayDirectionKHR"; + case BuiltInRayTminKHR: return "RayTminKHR"; + case BuiltInRayTmaxKHR: return "RayTmaxKHR"; + case BuiltInInstanceCustomIndexKHR: return "InstanceCustomIndexKHR"; + case BuiltInRayGeometryIndexKHR: return "RayGeometryIndexKHR"; + case BuiltInObjectToWorldKHR: return "ObjectToWorldKHR"; + case BuiltInWorldToObjectKHR: return "WorldToObjectKHR"; + case BuiltInHitTKHR: return "HitTKHR"; + case BuiltInHitKindKHR: return "HitKindKHR"; + case BuiltInIncomingRayFlagsKHR: return "IncomingRayFlagsKHR"; + case BuiltInViewportMaskNV: return "ViewportMaskNV"; + case BuiltInSecondaryPositionNV: return "SecondaryPositionNV"; + case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV"; + case BuiltInPositionPerViewNV: return "PositionPerViewNV"; + case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV"; +// case BuiltInFragmentSizeNV: return "FragmentSizeNV"; // superseded by BuiltInFragSizeEXT +// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT + case BuiltInBaryCoordNV: return "BaryCoordNV"; + case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV"; + + case BuiltInFragSizeEXT: return "FragSizeEXT"; + case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT"; + + case 5264: return "FullyCoveredEXT"; + + case BuiltInTaskCountNV: return "TaskCountNV"; + case BuiltInPrimitiveCountNV: return "PrimitiveCountNV"; + case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV"; + case BuiltInClipDistancePerViewNV: return "ClipDistancePerViewNV"; + case BuiltInCullDistancePerViewNV: return "CullDistancePerViewNV"; + case BuiltInLayerPerViewNV: return "LayerPerViewNV"; + case BuiltInMeshViewCountNV: return "MeshViewCountNV"; + case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV"; + case BuiltInWarpsPerSMNV: return "WarpsPerSMNV"; + case BuiltInSMCountNV: return "SMCountNV"; + case BuiltInWarpIDNV: return "WarpIDNV"; + case BuiltInSMIDNV: return "SMIDNV"; + + default: return "Bad"; + } +} + +const char* DimensionString(int dim) +{ + switch (dim) { + case 0: return "1D"; + case 1: return "2D"; + case 2: return "3D"; + case 3: return "Cube"; + case 4: return "Rect"; + case 5: return "Buffer"; + case 6: return "SubpassData"; + + default: return "Bad"; + } +} + +const char* SamplerAddressingModeString(int mode) +{ + switch (mode) { + case 0: return "None"; + case 1: return "ClampToEdge"; + case 2: return "Clamp"; + case 
3: return "Repeat"; + case 4: return "RepeatMirrored"; + + default: return "Bad"; + } +} + +const char* SamplerFilterModeString(int mode) +{ + switch (mode) { + case 0: return "Nearest"; + case 1: return "Linear"; + + default: return "Bad"; + } +} + +const char* ImageFormatString(int format) +{ + switch (format) { + case 0: return "Unknown"; + + // ES/Desktop float + case 1: return "Rgba32f"; + case 2: return "Rgba16f"; + case 3: return "R32f"; + case 4: return "Rgba8"; + case 5: return "Rgba8Snorm"; + + // Desktop float + case 6: return "Rg32f"; + case 7: return "Rg16f"; + case 8: return "R11fG11fB10f"; + case 9: return "R16f"; + case 10: return "Rgba16"; + case 11: return "Rgb10A2"; + case 12: return "Rg16"; + case 13: return "Rg8"; + case 14: return "R16"; + case 15: return "R8"; + case 16: return "Rgba16Snorm"; + case 17: return "Rg16Snorm"; + case 18: return "Rg8Snorm"; + case 19: return "R16Snorm"; + case 20: return "R8Snorm"; + + // ES/Desktop int + case 21: return "Rgba32i"; + case 22: return "Rgba16i"; + case 23: return "Rgba8i"; + case 24: return "R32i"; + + // Desktop int + case 25: return "Rg32i"; + case 26: return "Rg16i"; + case 27: return "Rg8i"; + case 28: return "R16i"; + case 29: return "R8i"; + + // ES/Desktop uint + case 30: return "Rgba32ui"; + case 31: return "Rgba16ui"; + case 32: return "Rgba8ui"; + case 33: return "R32ui"; + + // Desktop uint + case 34: return "Rgb10a2ui"; + case 35: return "Rg32ui"; + case 36: return "Rg16ui"; + case 37: return "Rg8ui"; + case 38: return "R16ui"; + case 39: return "R8ui"; + + default: + return "Bad"; + } +} + +const char* ImageChannelOrderString(int format) +{ + switch (format) { + case 0: return "R"; + case 1: return "A"; + case 2: return "RG"; + case 3: return "RA"; + case 4: return "RGB"; + case 5: return "RGBA"; + case 6: return "BGRA"; + case 7: return "ARGB"; + case 8: return "Intensity"; + case 9: return "Luminance"; + case 10: return "Rx"; + case 11: return "RGx"; + case 12: return "RGBx"; + case 13: return "Depth"; + case 14: return "DepthStencil"; + case 15: return "sRGB"; + case 16: return "sRGBx"; + case 17: return "sRGBA"; + case 18: return "sBGRA"; + + default: + return "Bad"; + } +} + +const char* ImageChannelDataTypeString(int type) +{ + switch (type) + { + case 0: return "SnormInt8"; + case 1: return "SnormInt16"; + case 2: return "UnormInt8"; + case 3: return "UnormInt16"; + case 4: return "UnormShort565"; + case 5: return "UnormShort555"; + case 6: return "UnormInt101010"; + case 7: return "SignedInt8"; + case 8: return "SignedInt16"; + case 9: return "SignedInt32"; + case 10: return "UnsignedInt8"; + case 11: return "UnsignedInt16"; + case 12: return "UnsignedInt32"; + case 13: return "HalfFloat"; + case 14: return "Float"; + case 15: return "UnormInt24"; + case 16: return "UnormInt101010_2"; + + default: + return "Bad"; + } +} + +const int ImageOperandsCeiling = 14; + +const char* ImageOperandsString(int format) +{ + switch (format) { + case ImageOperandsBiasShift: return "Bias"; + case ImageOperandsLodShift: return "Lod"; + case ImageOperandsGradShift: return "Grad"; + case ImageOperandsConstOffsetShift: return "ConstOffset"; + case ImageOperandsOffsetShift: return "Offset"; + case ImageOperandsConstOffsetsShift: return "ConstOffsets"; + case ImageOperandsSampleShift: return "Sample"; + case ImageOperandsMinLodShift: return "MinLod"; + case ImageOperandsMakeTexelAvailableKHRShift: return "MakeTexelAvailableKHR"; + case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR"; + case 
ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR"; + case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR"; + case ImageOperandsSignExtendShift: return "SignExtend"; + case ImageOperandsZeroExtendShift: return "ZeroExtend"; + + case ImageOperandsCeiling: + default: + return "Bad"; + } +} + +const char* FPFastMathString(int mode) +{ + switch (mode) { + case 0: return "NotNaN"; + case 1: return "NotInf"; + case 2: return "NSZ"; + case 3: return "AllowRecip"; + case 4: return "Fast"; + + default: return "Bad"; + } +} + +const char* FPRoundingModeString(int mode) +{ + switch (mode) { + case 0: return "RTE"; + case 1: return "RTZ"; + case 2: return "RTP"; + case 3: return "RTN"; + + default: return "Bad"; + } +} + +const char* LinkageTypeString(int type) +{ + switch (type) { + case 0: return "Export"; + case 1: return "Import"; + + default: return "Bad"; + } +} + +const char* FuncParamAttrString(int attr) +{ + switch (attr) { + case 0: return "Zext"; + case 1: return "Sext"; + case 2: return "ByVal"; + case 3: return "Sret"; + case 4: return "NoAlias"; + case 5: return "NoCapture"; + case 6: return "NoWrite"; + case 7: return "NoReadWrite"; + + default: return "Bad"; + } +} + +const char* AccessQualifierString(int attr) +{ + switch (attr) { + case 0: return "ReadOnly"; + case 1: return "WriteOnly"; + case 2: return "ReadWrite"; + + default: return "Bad"; + } +} + +const int SelectControlCeiling = 2; + +const char* SelectControlString(int cont) +{ + switch (cont) { + case 0: return "Flatten"; + case 1: return "DontFlatten"; + + case SelectControlCeiling: + default: return "Bad"; + } +} + +const int LoopControlCeiling = LoopControlPartialCountShift + 1; + +const char* LoopControlString(int cont) +{ + switch (cont) { + case LoopControlUnrollShift: return "Unroll"; + case LoopControlDontUnrollShift: return "DontUnroll"; + case LoopControlDependencyInfiniteShift: return "DependencyInfinite"; + case LoopControlDependencyLengthShift: return "DependencyLength"; + case LoopControlMinIterationsShift: return "MinIterations"; + case LoopControlMaxIterationsShift: return "MaxIterations"; + case LoopControlIterationMultipleShift: return "IterationMultiple"; + case LoopControlPeelCountShift: return "PeelCount"; + case LoopControlPartialCountShift: return "PartialCount"; + + case LoopControlCeiling: + default: return "Bad"; + } +} + +const int FunctionControlCeiling = 4; + +const char* FunctionControlString(int cont) +{ + switch (cont) { + case 0: return "Inline"; + case 1: return "DontInline"; + case 2: return "Pure"; + case 3: return "Const"; + + case FunctionControlCeiling: + default: return "Bad"; + } +} + +const char* MemorySemanticsString(int mem) +{ + // Note: No bits set (None) means "Relaxed" + switch (mem) { + case 0: return "Bad"; // Note: this is a placeholder for 'Consume' + case 1: return "Acquire"; + case 2: return "Release"; + case 3: return "AcquireRelease"; + case 4: return "SequentiallyConsistent"; + case 5: return "Bad"; // Note: reserved for future expansion + case 6: return "UniformMemory"; + case 7: return "SubgroupMemory"; + case 8: return "WorkgroupMemory"; + case 9: return "CrossWorkgroupMemory"; + case 10: return "AtomicCounterMemory"; + case 11: return "ImageMemory"; + + default: return "Bad"; + } +} + +const int MemoryAccessCeiling = 6; + +const char* MemoryAccessString(int mem) +{ + switch (mem) { + case MemoryAccessVolatileShift: return "Volatile"; + case MemoryAccessAlignedShift: return "Aligned"; + case MemoryAccessNontemporalShift: return 
"Nontemporal"; + case MemoryAccessMakePointerAvailableKHRShift: return "MakePointerAvailableKHR"; + case MemoryAccessMakePointerVisibleKHRShift: return "MakePointerVisibleKHR"; + case MemoryAccessNonPrivatePointerKHRShift: return "NonPrivatePointerKHR"; + + default: return "Bad"; + } +} + +const char* ScopeString(int mem) +{ + switch (mem) { + case 0: return "CrossDevice"; + case 1: return "Device"; + case 2: return "Workgroup"; + case 3: return "Subgroup"; + case 4: return "Invocation"; + + default: return "Bad"; + } +} + +const char* GroupOperationString(int gop) +{ + + switch (gop) + { + case GroupOperationReduce: return "Reduce"; + case GroupOperationInclusiveScan: return "InclusiveScan"; + case GroupOperationExclusiveScan: return "ExclusiveScan"; + case GroupOperationClusteredReduce: return "ClusteredReduce"; + case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV"; + case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV"; + case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV"; + + default: return "Bad"; + } +} + +const char* KernelEnqueueFlagsString(int flag) +{ + switch (flag) + { + case 0: return "NoWait"; + case 1: return "WaitKernel"; + case 2: return "WaitWorkGroup"; + + default: return "Bad"; + } +} + +const char* KernelProfilingInfoString(int info) +{ + switch (info) + { + case 0: return "CmdExecTime"; + + default: return "Bad"; + } +} + +const char* CapabilityString(int info) +{ + switch (info) + { + case 0: return "Matrix"; + case 1: return "Shader"; + case 2: return "Geometry"; + case 3: return "Tessellation"; + case 4: return "Addresses"; + case 5: return "Linkage"; + case 6: return "Kernel"; + case 7: return "Vector16"; + case 8: return "Float16Buffer"; + case 9: return "Float16"; + case 10: return "Float64"; + case 11: return "Int64"; + case 12: return "Int64Atomics"; + case 13: return "ImageBasic"; + case 14: return "ImageReadWrite"; + case 15: return "ImageMipmap"; + case 16: return "Bad"; + case 17: return "Pipes"; + case 18: return "Groups"; + case 19: return "DeviceEnqueue"; + case 20: return "LiteralSampler"; + case 21: return "AtomicStorage"; + case 22: return "Int16"; + case 23: return "TessellationPointSize"; + case 24: return "GeometryPointSize"; + case 25: return "ImageGatherExtended"; + case 26: return "Bad"; + case 27: return "StorageImageMultisample"; + case 28: return "UniformBufferArrayDynamicIndexing"; + case 29: return "SampledImageArrayDynamicIndexing"; + case 30: return "StorageBufferArrayDynamicIndexing"; + case 31: return "StorageImageArrayDynamicIndexing"; + case 32: return "ClipDistance"; + case 33: return "CullDistance"; + case 34: return "ImageCubeArray"; + case 35: return "SampleRateShading"; + case 36: return "ImageRect"; + case 37: return "SampledRect"; + case 38: return "GenericPointer"; + case 39: return "Int8"; + case 40: return "InputAttachment"; + case 41: return "SparseResidency"; + case 42: return "MinLod"; + case 43: return "Sampled1D"; + case 44: return "Image1D"; + case 45: return "SampledCubeArray"; + case 46: return "SampledBuffer"; + case 47: return "ImageBuffer"; + case 48: return "ImageMSArray"; + case 49: return "StorageImageExtendedFormats"; + case 50: return "ImageQuery"; + case 51: return "DerivativeControl"; + case 52: return "InterpolationFunction"; + case 53: return "TransformFeedback"; + case 54: return "GeometryStreams"; + case 55: return "StorageImageReadWithoutFormat"; + case 56: return "StorageImageWriteWithoutFormat"; + case 57: return 
"MultiViewport"; + case 61: return "GroupNonUniform"; + case 62: return "GroupNonUniformVote"; + case 63: return "GroupNonUniformArithmetic"; + case 64: return "GroupNonUniformBallot"; + case 65: return "GroupNonUniformShuffle"; + case 66: return "GroupNonUniformShuffleRelative"; + case 67: return "GroupNonUniformClustered"; + case 68: return "GroupNonUniformQuad"; + + case CapabilitySubgroupBallotKHR: return "SubgroupBallotKHR"; + case CapabilityDrawParameters: return "DrawParameters"; + case CapabilitySubgroupVoteKHR: return "SubgroupVoteKHR"; + + case CapabilityStorageUniformBufferBlock16: return "StorageUniformBufferBlock16"; + case CapabilityStorageUniform16: return "StorageUniform16"; + case CapabilityStoragePushConstant16: return "StoragePushConstant16"; + case CapabilityStorageInputOutput16: return "StorageInputOutput16"; + + case CapabilityStorageBuffer8BitAccess: return "StorageBuffer8BitAccess"; + case CapabilityUniformAndStorageBuffer8BitAccess: return "UniformAndStorageBuffer8BitAccess"; + case CapabilityStoragePushConstant8: return "StoragePushConstant8"; + + case CapabilityDeviceGroup: return "DeviceGroup"; + case CapabilityMultiView: return "MultiView"; + + case CapabilityStencilExportEXT: return "StencilExportEXT"; + + case CapabilityFloat16ImageAMD: return "Float16ImageAMD"; + case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD"; + case CapabilityFragmentMaskAMD: return "FragmentMaskAMD"; + case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD"; + + case CapabilityAtomicStorageOps: return "AtomicStorageOps"; + + case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage"; + case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV"; + case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV"; + case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV"; + case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV"; + case CapabilityPerViewAttributesNV: return "PerViewAttributesNV"; + case CapabilityGroupNonUniformPartitionedNV: return "GroupNonUniformPartitionedNV"; + case CapabilityRayTracingNV: return "RayTracingNV"; + case CapabilityRayTracingProvisionalKHR: return "RayTracingProvisionalKHR"; + case CapabilityRayQueryProvisionalKHR: return "RayQueryProvisionalKHR"; + case CapabilityRayTraversalPrimitiveCullingProvisionalKHR: return "RayTraversalPrimitiveCullingProvisionalKHR"; + case CapabilityComputeDerivativeGroupQuadsNV: return "ComputeDerivativeGroupQuadsNV"; + case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV"; + case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV"; + case CapabilityMeshShadingNV: return "MeshShadingNV"; + case CapabilityImageFootprintNV: return "ImageFootprintNV"; +// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by FragmentDensityEXT + case CapabilitySampleMaskOverrideCoverageNV: return "SampleMaskOverrideCoverageNV"; + case CapabilityFragmentDensityEXT: return "FragmentDensityEXT"; + + case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT"; + + case CapabilityShaderNonUniformEXT: return "ShaderNonUniformEXT"; + case CapabilityRuntimeDescriptorArrayEXT: return "RuntimeDescriptorArrayEXT"; + case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "InputAttachmentArrayDynamicIndexingEXT"; + case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "UniformTexelBufferArrayDynamicIndexingEXT"; + case 
CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "StorageTexelBufferArrayDynamicIndexingEXT"; + case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "UniformBufferArrayNonUniformIndexingEXT"; + case CapabilitySampledImageArrayNonUniformIndexingEXT: return "SampledImageArrayNonUniformIndexingEXT"; + case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "StorageBufferArrayNonUniformIndexingEXT"; + case CapabilityStorageImageArrayNonUniformIndexingEXT: return "StorageImageArrayNonUniformIndexingEXT"; + case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "InputAttachmentArrayNonUniformIndexingEXT"; + case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "UniformTexelBufferArrayNonUniformIndexingEXT"; + case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "StorageTexelBufferArrayNonUniformIndexingEXT"; + + case CapabilityVulkanMemoryModelKHR: return "VulkanMemoryModelKHR"; + case CapabilityVulkanMemoryModelDeviceScopeKHR: return "VulkanMemoryModelDeviceScopeKHR"; + + case CapabilityPhysicalStorageBufferAddressesEXT: return "PhysicalStorageBufferAddressesEXT"; + + case CapabilityVariablePointers: return "VariablePointers"; + + case CapabilityCooperativeMatrixNV: return "CooperativeMatrixNV"; + case CapabilityShaderSMBuiltinsNV: return "ShaderSMBuiltinsNV"; + + case CapabilityFragmentShaderSampleInterlockEXT: return "CapabilityFragmentShaderSampleInterlockEXT"; + case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT"; + case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT"; + + case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT"; + case CapabilityShaderClockKHR: return "ShaderClockKHR"; + + case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL"; + + case CapabilityAtomicFloat32AddEXT: return "AtomicFloat32AddEXT"; + case CapabilityAtomicFloat64AddEXT: return "AtomicFloat64AddEXT"; + + default: return "Bad"; + } +} + +const char* OpcodeString(int op) +{ + switch (op) { + case 0: return "OpNop"; + case 1: return "OpUndef"; + case 2: return "OpSourceContinued"; + case 3: return "OpSource"; + case 4: return "OpSourceExtension"; + case 5: return "OpName"; + case 6: return "OpMemberName"; + case 7: return "OpString"; + case 8: return "OpLine"; + case 9: return "Bad"; + case 10: return "OpExtension"; + case 11: return "OpExtInstImport"; + case 12: return "OpExtInst"; + case 13: return "Bad"; + case 14: return "OpMemoryModel"; + case 15: return "OpEntryPoint"; + case 16: return "OpExecutionMode"; + case 17: return "OpCapability"; + case 18: return "Bad"; + case 19: return "OpTypeVoid"; + case 20: return "OpTypeBool"; + case 21: return "OpTypeInt"; + case 22: return "OpTypeFloat"; + case 23: return "OpTypeVector"; + case 24: return "OpTypeMatrix"; + case 25: return "OpTypeImage"; + case 26: return "OpTypeSampler"; + case 27: return "OpTypeSampledImage"; + case 28: return "OpTypeArray"; + case 29: return "OpTypeRuntimeArray"; + case 30: return "OpTypeStruct"; + case 31: return "OpTypeOpaque"; + case 32: return "OpTypePointer"; + case 33: return "OpTypeFunction"; + case 34: return "OpTypeEvent"; + case 35: return "OpTypeDeviceEvent"; + case 36: return "OpTypeReserveId"; + case 37: return "OpTypeQueue"; + case 38: return "OpTypePipe"; + case 39: return "OpTypeForwardPointer"; + case 40: return "Bad"; + case 41: return "OpConstantTrue"; + case 42: return "OpConstantFalse"; + case 43: 
return "OpConstant"; + case 44: return "OpConstantComposite"; + case 45: return "OpConstantSampler"; + case 46: return "OpConstantNull"; + case 47: return "Bad"; + case 48: return "OpSpecConstantTrue"; + case 49: return "OpSpecConstantFalse"; + case 50: return "OpSpecConstant"; + case 51: return "OpSpecConstantComposite"; + case 52: return "OpSpecConstantOp"; + case 53: return "Bad"; + case 54: return "OpFunction"; + case 55: return "OpFunctionParameter"; + case 56: return "OpFunctionEnd"; + case 57: return "OpFunctionCall"; + case 58: return "Bad"; + case 59: return "OpVariable"; + case 60: return "OpImageTexelPointer"; + case 61: return "OpLoad"; + case 62: return "OpStore"; + case 63: return "OpCopyMemory"; + case 64: return "OpCopyMemorySized"; + case 65: return "OpAccessChain"; + case 66: return "OpInBoundsAccessChain"; + case 67: return "OpPtrAccessChain"; + case 68: return "OpArrayLength"; + case 69: return "OpGenericPtrMemSemantics"; + case 70: return "OpInBoundsPtrAccessChain"; + case 71: return "OpDecorate"; + case 72: return "OpMemberDecorate"; + case 73: return "OpDecorationGroup"; + case 74: return "OpGroupDecorate"; + case 75: return "OpGroupMemberDecorate"; + case 76: return "Bad"; + case 77: return "OpVectorExtractDynamic"; + case 78: return "OpVectorInsertDynamic"; + case 79: return "OpVectorShuffle"; + case 80: return "OpCompositeConstruct"; + case 81: return "OpCompositeExtract"; + case 82: return "OpCompositeInsert"; + case 83: return "OpCopyObject"; + case 84: return "OpTranspose"; + case OpCopyLogical: return "OpCopyLogical"; + case 85: return "Bad"; + case 86: return "OpSampledImage"; + case 87: return "OpImageSampleImplicitLod"; + case 88: return "OpImageSampleExplicitLod"; + case 89: return "OpImageSampleDrefImplicitLod"; + case 90: return "OpImageSampleDrefExplicitLod"; + case 91: return "OpImageSampleProjImplicitLod"; + case 92: return "OpImageSampleProjExplicitLod"; + case 93: return "OpImageSampleProjDrefImplicitLod"; + case 94: return "OpImageSampleProjDrefExplicitLod"; + case 95: return "OpImageFetch"; + case 96: return "OpImageGather"; + case 97: return "OpImageDrefGather"; + case 98: return "OpImageRead"; + case 99: return "OpImageWrite"; + case 100: return "OpImage"; + case 101: return "OpImageQueryFormat"; + case 102: return "OpImageQueryOrder"; + case 103: return "OpImageQuerySizeLod"; + case 104: return "OpImageQuerySize"; + case 105: return "OpImageQueryLod"; + case 106: return "OpImageQueryLevels"; + case 107: return "OpImageQuerySamples"; + case 108: return "Bad"; + case 109: return "OpConvertFToU"; + case 110: return "OpConvertFToS"; + case 111: return "OpConvertSToF"; + case 112: return "OpConvertUToF"; + case 113: return "OpUConvert"; + case 114: return "OpSConvert"; + case 115: return "OpFConvert"; + case 116: return "OpQuantizeToF16"; + case 117: return "OpConvertPtrToU"; + case 118: return "OpSatConvertSToU"; + case 119: return "OpSatConvertUToS"; + case 120: return "OpConvertUToPtr"; + case 121: return "OpPtrCastToGeneric"; + case 122: return "OpGenericCastToPtr"; + case 123: return "OpGenericCastToPtrExplicit"; + case 124: return "OpBitcast"; + case 125: return "Bad"; + case 126: return "OpSNegate"; + case 127: return "OpFNegate"; + case 128: return "OpIAdd"; + case 129: return "OpFAdd"; + case 130: return "OpISub"; + case 131: return "OpFSub"; + case 132: return "OpIMul"; + case 133: return "OpFMul"; + case 134: return "OpUDiv"; + case 135: return "OpSDiv"; + case 136: return "OpFDiv"; + case 137: return "OpUMod"; + case 138: return 
"OpSRem"; + case 139: return "OpSMod"; + case 140: return "OpFRem"; + case 141: return "OpFMod"; + case 142: return "OpVectorTimesScalar"; + case 143: return "OpMatrixTimesScalar"; + case 144: return "OpVectorTimesMatrix"; + case 145: return "OpMatrixTimesVector"; + case 146: return "OpMatrixTimesMatrix"; + case 147: return "OpOuterProduct"; + case 148: return "OpDot"; + case 149: return "OpIAddCarry"; + case 150: return "OpISubBorrow"; + case 151: return "OpUMulExtended"; + case 152: return "OpSMulExtended"; + case 153: return "Bad"; + case 154: return "OpAny"; + case 155: return "OpAll"; + case 156: return "OpIsNan"; + case 157: return "OpIsInf"; + case 158: return "OpIsFinite"; + case 159: return "OpIsNormal"; + case 160: return "OpSignBitSet"; + case 161: return "OpLessOrGreater"; + case 162: return "OpOrdered"; + case 163: return "OpUnordered"; + case 164: return "OpLogicalEqual"; + case 165: return "OpLogicalNotEqual"; + case 166: return "OpLogicalOr"; + case 167: return "OpLogicalAnd"; + case 168: return "OpLogicalNot"; + case 169: return "OpSelect"; + case 170: return "OpIEqual"; + case 171: return "OpINotEqual"; + case 172: return "OpUGreaterThan"; + case 173: return "OpSGreaterThan"; + case 174: return "OpUGreaterThanEqual"; + case 175: return "OpSGreaterThanEqual"; + case 176: return "OpULessThan"; + case 177: return "OpSLessThan"; + case 178: return "OpULessThanEqual"; + case 179: return "OpSLessThanEqual"; + case 180: return "OpFOrdEqual"; + case 181: return "OpFUnordEqual"; + case 182: return "OpFOrdNotEqual"; + case 183: return "OpFUnordNotEqual"; + case 184: return "OpFOrdLessThan"; + case 185: return "OpFUnordLessThan"; + case 186: return "OpFOrdGreaterThan"; + case 187: return "OpFUnordGreaterThan"; + case 188: return "OpFOrdLessThanEqual"; + case 189: return "OpFUnordLessThanEqual"; + case 190: return "OpFOrdGreaterThanEqual"; + case 191: return "OpFUnordGreaterThanEqual"; + case 192: return "Bad"; + case 193: return "Bad"; + case 194: return "OpShiftRightLogical"; + case 195: return "OpShiftRightArithmetic"; + case 196: return "OpShiftLeftLogical"; + case 197: return "OpBitwiseOr"; + case 198: return "OpBitwiseXor"; + case 199: return "OpBitwiseAnd"; + case 200: return "OpNot"; + case 201: return "OpBitFieldInsert"; + case 202: return "OpBitFieldSExtract"; + case 203: return "OpBitFieldUExtract"; + case 204: return "OpBitReverse"; + case 205: return "OpBitCount"; + case 206: return "Bad"; + case 207: return "OpDPdx"; + case 208: return "OpDPdy"; + case 209: return "OpFwidth"; + case 210: return "OpDPdxFine"; + case 211: return "OpDPdyFine"; + case 212: return "OpFwidthFine"; + case 213: return "OpDPdxCoarse"; + case 214: return "OpDPdyCoarse"; + case 215: return "OpFwidthCoarse"; + case 216: return "Bad"; + case 217: return "Bad"; + case 218: return "OpEmitVertex"; + case 219: return "OpEndPrimitive"; + case 220: return "OpEmitStreamVertex"; + case 221: return "OpEndStreamPrimitive"; + case 222: return "Bad"; + case 223: return "Bad"; + case 224: return "OpControlBarrier"; + case 225: return "OpMemoryBarrier"; + case 226: return "Bad"; + case 227: return "OpAtomicLoad"; + case 228: return "OpAtomicStore"; + case 229: return "OpAtomicExchange"; + case 230: return "OpAtomicCompareExchange"; + case 231: return "OpAtomicCompareExchangeWeak"; + case 232: return "OpAtomicIIncrement"; + case 233: return "OpAtomicIDecrement"; + case 234: return "OpAtomicIAdd"; + case 235: return "OpAtomicISub"; + case 236: return "OpAtomicSMin"; + case 237: return "OpAtomicUMin"; + case 238: 
return "OpAtomicSMax"; + case 239: return "OpAtomicUMax"; + case 240: return "OpAtomicAnd"; + case 241: return "OpAtomicOr"; + case 242: return "OpAtomicXor"; + case 243: return "Bad"; + case 244: return "Bad"; + case 245: return "OpPhi"; + case 246: return "OpLoopMerge"; + case 247: return "OpSelectionMerge"; + case 248: return "OpLabel"; + case 249: return "OpBranch"; + case 250: return "OpBranchConditional"; + case 251: return "OpSwitch"; + case 252: return "OpKill"; + case 253: return "OpReturn"; + case 254: return "OpReturnValue"; + case 255: return "OpUnreachable"; + case 256: return "OpLifetimeStart"; + case 257: return "OpLifetimeStop"; + case 258: return "Bad"; + case 259: return "OpGroupAsyncCopy"; + case 260: return "OpGroupWaitEvents"; + case 261: return "OpGroupAll"; + case 262: return "OpGroupAny"; + case 263: return "OpGroupBroadcast"; + case 264: return "OpGroupIAdd"; + case 265: return "OpGroupFAdd"; + case 266: return "OpGroupFMin"; + case 267: return "OpGroupUMin"; + case 268: return "OpGroupSMin"; + case 269: return "OpGroupFMax"; + case 270: return "OpGroupUMax"; + case 271: return "OpGroupSMax"; + case 272: return "Bad"; + case 273: return "Bad"; + case 274: return "OpReadPipe"; + case 275: return "OpWritePipe"; + case 276: return "OpReservedReadPipe"; + case 277: return "OpReservedWritePipe"; + case 278: return "OpReserveReadPipePackets"; + case 279: return "OpReserveWritePipePackets"; + case 280: return "OpCommitReadPipe"; + case 281: return "OpCommitWritePipe"; + case 282: return "OpIsValidReserveId"; + case 283: return "OpGetNumPipePackets"; + case 284: return "OpGetMaxPipePackets"; + case 285: return "OpGroupReserveReadPipePackets"; + case 286: return "OpGroupReserveWritePipePackets"; + case 287: return "OpGroupCommitReadPipe"; + case 288: return "OpGroupCommitWritePipe"; + case 289: return "Bad"; + case 290: return "Bad"; + case 291: return "OpEnqueueMarker"; + case 292: return "OpEnqueueKernel"; + case 293: return "OpGetKernelNDrangeSubGroupCount"; + case 294: return "OpGetKernelNDrangeMaxSubGroupSize"; + case 295: return "OpGetKernelWorkGroupSize"; + case 296: return "OpGetKernelPreferredWorkGroupSizeMultiple"; + case 297: return "OpRetainEvent"; + case 298: return "OpReleaseEvent"; + case 299: return "OpCreateUserEvent"; + case 300: return "OpIsValidEvent"; + case 301: return "OpSetUserEventStatus"; + case 302: return "OpCaptureEventProfilingInfo"; + case 303: return "OpGetDefaultQueue"; + case 304: return "OpBuildNDRange"; + case 305: return "OpImageSparseSampleImplicitLod"; + case 306: return "OpImageSparseSampleExplicitLod"; + case 307: return "OpImageSparseSampleDrefImplicitLod"; + case 308: return "OpImageSparseSampleDrefExplicitLod"; + case 309: return "OpImageSparseSampleProjImplicitLod"; + case 310: return "OpImageSparseSampleProjExplicitLod"; + case 311: return "OpImageSparseSampleProjDrefImplicitLod"; + case 312: return "OpImageSparseSampleProjDrefExplicitLod"; + case 313: return "OpImageSparseFetch"; + case 314: return "OpImageSparseGather"; + case 315: return "OpImageSparseDrefGather"; + case 316: return "OpImageSparseTexelsResident"; + case 317: return "OpNoLine"; + case 318: return "OpAtomicFlagTestAndSet"; + case 319: return "OpAtomicFlagClear"; + case 320: return "OpImageSparseRead"; + + case OpModuleProcessed: return "OpModuleProcessed"; + case OpExecutionModeId: return "OpExecutionModeId"; + case OpDecorateId: return "OpDecorateId"; + + case 333: return "OpGroupNonUniformElect"; + case 334: return "OpGroupNonUniformAll"; + case 335: return 
"OpGroupNonUniformAny"; + case 336: return "OpGroupNonUniformAllEqual"; + case 337: return "OpGroupNonUniformBroadcast"; + case 338: return "OpGroupNonUniformBroadcastFirst"; + case 339: return "OpGroupNonUniformBallot"; + case 340: return "OpGroupNonUniformInverseBallot"; + case 341: return "OpGroupNonUniformBallotBitExtract"; + case 342: return "OpGroupNonUniformBallotBitCount"; + case 343: return "OpGroupNonUniformBallotFindLSB"; + case 344: return "OpGroupNonUniformBallotFindMSB"; + case 345: return "OpGroupNonUniformShuffle"; + case 346: return "OpGroupNonUniformShuffleXor"; + case 347: return "OpGroupNonUniformShuffleUp"; + case 348: return "OpGroupNonUniformShuffleDown"; + case 349: return "OpGroupNonUniformIAdd"; + case 350: return "OpGroupNonUniformFAdd"; + case 351: return "OpGroupNonUniformIMul"; + case 352: return "OpGroupNonUniformFMul"; + case 353: return "OpGroupNonUniformSMin"; + case 354: return "OpGroupNonUniformUMin"; + case 355: return "OpGroupNonUniformFMin"; + case 356: return "OpGroupNonUniformSMax"; + case 357: return "OpGroupNonUniformUMax"; + case 358: return "OpGroupNonUniformFMax"; + case 359: return "OpGroupNonUniformBitwiseAnd"; + case 360: return "OpGroupNonUniformBitwiseOr"; + case 361: return "OpGroupNonUniformBitwiseXor"; + case 362: return "OpGroupNonUniformLogicalAnd"; + case 363: return "OpGroupNonUniformLogicalOr"; + case 364: return "OpGroupNonUniformLogicalXor"; + case 365: return "OpGroupNonUniformQuadBroadcast"; + case 366: return "OpGroupNonUniformQuadSwap"; + + case 4421: return "OpSubgroupBallotKHR"; + case 4422: return "OpSubgroupFirstInvocationKHR"; + case 4428: return "OpSubgroupAllKHR"; + case 4429: return "OpSubgroupAnyKHR"; + case 4430: return "OpSubgroupAllEqualKHR"; + case 4432: return "OpSubgroupReadInvocationKHR"; + + case OpAtomicFAddEXT: return "OpAtomicFAddEXT"; + + case 5000: return "OpGroupIAddNonUniformAMD"; + case 5001: return "OpGroupFAddNonUniformAMD"; + case 5002: return "OpGroupFMinNonUniformAMD"; + case 5003: return "OpGroupUMinNonUniformAMD"; + case 5004: return "OpGroupSMinNonUniformAMD"; + case 5005: return "OpGroupFMaxNonUniformAMD"; + case 5006: return "OpGroupUMaxNonUniformAMD"; + case 5007: return "OpGroupSMaxNonUniformAMD"; + + case 5011: return "OpFragmentMaskFetchAMD"; + case 5012: return "OpFragmentFetchAMD"; + + case OpReadClockKHR: return "OpReadClockKHR"; + + case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE"; + case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE"; + + case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV"; + case OpReportIntersectionKHR: return "OpReportIntersectionKHR"; + case OpIgnoreIntersectionKHR: return "OpIgnoreIntersectionKHR"; + case OpTerminateRayKHR: return "OpTerminateRayKHR"; + case OpTraceRayKHR: return "OpTraceRayKHR"; + case OpTypeAccelerationStructureKHR: return "OpTypeAccelerationStructureKHR"; + case OpExecuteCallableKHR: return "OpExecuteCallableKHR"; + case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV"; + case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV"; + + case OpTypeRayQueryProvisionalKHR: return "OpTypeRayQueryProvisionalKHR"; + case OpRayQueryInitializeKHR: return "OpRayQueryInitializeKHR"; + case OpRayQueryTerminateKHR: return "OpRayQueryTerminateKHR"; + case OpRayQueryGenerateIntersectionKHR: return "OpRayQueryGenerateIntersectionKHR"; + case OpRayQueryConfirmIntersectionKHR: return "OpRayQueryConfirmIntersectionKHR"; + case OpRayQueryProceedKHR: return 
"OpRayQueryProceedKHR"; + case OpRayQueryGetIntersectionTypeKHR: return "OpRayQueryGetIntersectionTypeKHR"; + case OpRayQueryGetRayTMinKHR: return "OpRayQueryGetRayTMinKHR"; + case OpRayQueryGetRayFlagsKHR: return "OpRayQueryGetRayFlagsKHR"; + case OpRayQueryGetIntersectionTKHR: return "OpRayQueryGetIntersectionTKHR"; + case OpRayQueryGetIntersectionInstanceCustomIndexKHR: return "OpRayQueryGetIntersectionInstanceCustomIndexKHR"; + case OpRayQueryGetIntersectionInstanceIdKHR: return "OpRayQueryGetIntersectionInstanceIdKHR"; + case OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: return "OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR"; + case OpRayQueryGetIntersectionGeometryIndexKHR: return "OpRayQueryGetIntersectionGeometryIndexKHR"; + case OpRayQueryGetIntersectionPrimitiveIndexKHR: return "OpRayQueryGetIntersectionPrimitiveIndexKHR"; + case OpRayQueryGetIntersectionBarycentricsKHR: return "OpRayQueryGetIntersectionBarycentricsKHR"; + case OpRayQueryGetIntersectionFrontFaceKHR: return "OpRayQueryGetIntersectionFrontFaceKHR"; + case OpRayQueryGetIntersectionCandidateAABBOpaqueKHR: return "OpRayQueryGetIntersectionCandidateAABBOpaqueKHR"; + case OpRayQueryGetIntersectionObjectRayDirectionKHR: return "OpRayQueryGetIntersectionObjectRayDirectionKHR"; + case OpRayQueryGetIntersectionObjectRayOriginKHR: return "OpRayQueryGetIntersectionObjectRayOriginKHR"; + case OpRayQueryGetWorldRayDirectionKHR: return "OpRayQueryGetWorldRayDirectionKHR"; + case OpRayQueryGetWorldRayOriginKHR: return "OpRayQueryGetWorldRayOriginKHR"; + case OpRayQueryGetIntersectionObjectToWorldKHR: return "OpRayQueryGetIntersectionObjectToWorldKHR"; + case OpRayQueryGetIntersectionWorldToObjectKHR: return "OpRayQueryGetIntersectionWorldToObjectKHR"; + + case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV"; + case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV"; + case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV"; + case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV"; + case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV"; + case OpDemoteToHelperInvocationEXT: return "OpDemoteToHelperInvocationEXT"; + case OpIsHelperInvocationEXT: return "OpIsHelperInvocationEXT"; + + case OpBeginInvocationInterlockEXT: return "OpBeginInvocationInterlockEXT"; + case OpEndInvocationInterlockEXT: return "OpEndInvocationInterlockEXT"; + + default: + return "Bad"; + } +} + +// The set of objects that hold all the instruction/operand +// parameterization information. +InstructionParameters InstructionDesc[OpCodeMask + 1]; +OperandParameters ExecutionModeOperands[ExecutionModeCeiling]; +OperandParameters DecorationOperands[DecorationCeiling]; + +EnumDefinition OperandClassParams[OperandCount]; +EnumParameters ExecutionModeParams[ExecutionModeCeiling]; +EnumParameters ImageOperandsParams[ImageOperandsCeiling]; +EnumParameters DecorationParams[DecorationCeiling]; +EnumParameters LoopControlParams[FunctionControlCeiling]; +EnumParameters SelectionControlParams[SelectControlCeiling]; +EnumParameters FunctionControlParams[FunctionControlCeiling]; +EnumParameters MemoryAccessParams[MemoryAccessCeiling]; + +// Set up all the parameterizing descriptions of the opcodes, operands, etc. +void Parameterize() +{ + // only do this once. + static bool initialized = false; + if (initialized) + return; + initialized = true; + + // Exceptions to having a result and a resulting type . + // (Everything is initialized to have both). 
+ + InstructionDesc[OpNop].setResultAndType(false, false); + InstructionDesc[OpSource].setResultAndType(false, false); + InstructionDesc[OpSourceContinued].setResultAndType(false, false); + InstructionDesc[OpSourceExtension].setResultAndType(false, false); + InstructionDesc[OpExtension].setResultAndType(false, false); + InstructionDesc[OpExtInstImport].setResultAndType(true, false); + InstructionDesc[OpCapability].setResultAndType(false, false); + InstructionDesc[OpMemoryModel].setResultAndType(false, false); + InstructionDesc[OpEntryPoint].setResultAndType(false, false); + InstructionDesc[OpExecutionMode].setResultAndType(false, false); + InstructionDesc[OpExecutionModeId].setResultAndType(false, false); + InstructionDesc[OpTypeVoid].setResultAndType(true, false); + InstructionDesc[OpTypeBool].setResultAndType(true, false); + InstructionDesc[OpTypeInt].setResultAndType(true, false); + InstructionDesc[OpTypeFloat].setResultAndType(true, false); + InstructionDesc[OpTypeVector].setResultAndType(true, false); + InstructionDesc[OpTypeMatrix].setResultAndType(true, false); + InstructionDesc[OpTypeImage].setResultAndType(true, false); + InstructionDesc[OpTypeSampler].setResultAndType(true, false); + InstructionDesc[OpTypeSampledImage].setResultAndType(true, false); + InstructionDesc[OpTypeArray].setResultAndType(true, false); + InstructionDesc[OpTypeRuntimeArray].setResultAndType(true, false); + InstructionDesc[OpTypeStruct].setResultAndType(true, false); + InstructionDesc[OpTypeOpaque].setResultAndType(true, false); + InstructionDesc[OpTypePointer].setResultAndType(true, false); + InstructionDesc[OpTypeForwardPointer].setResultAndType(false, false); + InstructionDesc[OpTypeFunction].setResultAndType(true, false); + InstructionDesc[OpTypeEvent].setResultAndType(true, false); + InstructionDesc[OpTypeDeviceEvent].setResultAndType(true, false); + InstructionDesc[OpTypeReserveId].setResultAndType(true, false); + InstructionDesc[OpTypeQueue].setResultAndType(true, false); + InstructionDesc[OpTypePipe].setResultAndType(true, false); + InstructionDesc[OpFunctionEnd].setResultAndType(false, false); + InstructionDesc[OpStore].setResultAndType(false, false); + InstructionDesc[OpImageWrite].setResultAndType(false, false); + InstructionDesc[OpDecorationGroup].setResultAndType(true, false); + InstructionDesc[OpDecorate].setResultAndType(false, false); + InstructionDesc[OpDecorateId].setResultAndType(false, false); + InstructionDesc[OpDecorateStringGOOGLE].setResultAndType(false, false); + InstructionDesc[OpMemberDecorate].setResultAndType(false, false); + InstructionDesc[OpMemberDecorateStringGOOGLE].setResultAndType(false, false); + InstructionDesc[OpGroupDecorate].setResultAndType(false, false); + InstructionDesc[OpGroupMemberDecorate].setResultAndType(false, false); + InstructionDesc[OpName].setResultAndType(false, false); + InstructionDesc[OpMemberName].setResultAndType(false, false); + InstructionDesc[OpString].setResultAndType(true, false); + InstructionDesc[OpLine].setResultAndType(false, false); + InstructionDesc[OpNoLine].setResultAndType(false, false); + InstructionDesc[OpCopyMemory].setResultAndType(false, false); + InstructionDesc[OpCopyMemorySized].setResultAndType(false, false); + InstructionDesc[OpEmitVertex].setResultAndType(false, false); + InstructionDesc[OpEndPrimitive].setResultAndType(false, false); + InstructionDesc[OpEmitStreamVertex].setResultAndType(false, false); + InstructionDesc[OpEndStreamPrimitive].setResultAndType(false, false); + 
InstructionDesc[OpControlBarrier].setResultAndType(false, false); + InstructionDesc[OpMemoryBarrier].setResultAndType(false, false); + InstructionDesc[OpAtomicStore].setResultAndType(false, false); + InstructionDesc[OpLoopMerge].setResultAndType(false, false); + InstructionDesc[OpSelectionMerge].setResultAndType(false, false); + InstructionDesc[OpLabel].setResultAndType(true, false); + InstructionDesc[OpBranch].setResultAndType(false, false); + InstructionDesc[OpBranchConditional].setResultAndType(false, false); + InstructionDesc[OpSwitch].setResultAndType(false, false); + InstructionDesc[OpKill].setResultAndType(false, false); + InstructionDesc[OpReturn].setResultAndType(false, false); + InstructionDesc[OpReturnValue].setResultAndType(false, false); + InstructionDesc[OpUnreachable].setResultAndType(false, false); + InstructionDesc[OpLifetimeStart].setResultAndType(false, false); + InstructionDesc[OpLifetimeStop].setResultAndType(false, false); + InstructionDesc[OpCommitReadPipe].setResultAndType(false, false); + InstructionDesc[OpCommitWritePipe].setResultAndType(false, false); + InstructionDesc[OpGroupCommitWritePipe].setResultAndType(false, false); + InstructionDesc[OpGroupCommitReadPipe].setResultAndType(false, false); + InstructionDesc[OpCaptureEventProfilingInfo].setResultAndType(false, false); + InstructionDesc[OpSetUserEventStatus].setResultAndType(false, false); + InstructionDesc[OpRetainEvent].setResultAndType(false, false); + InstructionDesc[OpReleaseEvent].setResultAndType(false, false); + InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false); + InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false); + InstructionDesc[OpModuleProcessed].setResultAndType(false, false); + InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false); + InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false); + InstructionDesc[OpBeginInvocationInterlockEXT].setResultAndType(false, false); + InstructionDesc[OpEndInvocationInterlockEXT].setResultAndType(false, false); + + // Specific additional context-dependent operands + + ExecutionModeOperands[ExecutionModeInvocations].push(OperandLiteralNumber, "'Number of <>'"); + + ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'x size'"); + ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'y size'"); + ExecutionModeOperands[ExecutionModeLocalSize].push(OperandLiteralNumber, "'z size'"); + + ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'x size'"); + ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'y size'"); + ExecutionModeOperands[ExecutionModeLocalSizeHint].push(OperandLiteralNumber, "'z size'"); + + ExecutionModeOperands[ExecutionModeOutputVertices].push(OperandLiteralNumber, "'Vertex count'"); + ExecutionModeOperands[ExecutionModeVecTypeHint].push(OperandLiteralNumber, "'Vector type'"); + + DecorationOperands[DecorationStream].push(OperandLiteralNumber, "'Stream Number'"); + DecorationOperands[DecorationLocation].push(OperandLiteralNumber, "'Location'"); + DecorationOperands[DecorationComponent].push(OperandLiteralNumber, "'Component'"); + DecorationOperands[DecorationIndex].push(OperandLiteralNumber, "'Index'"); + DecorationOperands[DecorationBinding].push(OperandLiteralNumber, "'Binding Point'"); + DecorationOperands[DecorationDescriptorSet].push(OperandLiteralNumber, "'Descriptor Set'"); + DecorationOperands[DecorationOffset].push(OperandLiteralNumber, "'Byte Offset'"); + 
DecorationOperands[DecorationXfbBuffer].push(OperandLiteralNumber, "'XFB Buffer Number'"); + DecorationOperands[DecorationXfbStride].push(OperandLiteralNumber, "'XFB Stride'"); + DecorationOperands[DecorationArrayStride].push(OperandLiteralNumber, "'Array Stride'"); + DecorationOperands[DecorationMatrixStride].push(OperandLiteralNumber, "'Matrix Stride'"); + DecorationOperands[DecorationBuiltIn].push(OperandLiteralNumber, "See <>"); + DecorationOperands[DecorationFPRoundingMode].push(OperandFPRoundingMode, "'Floating-Point Rounding Mode'"); + DecorationOperands[DecorationFPFastMathMode].push(OperandFPFastMath, "'Fast-Math Mode'"); + DecorationOperands[DecorationLinkageAttributes].push(OperandLiteralString, "'Name'"); + DecorationOperands[DecorationLinkageAttributes].push(OperandLinkageType, "'Linkage Type'"); + DecorationOperands[DecorationFuncParamAttr].push(OperandFuncParamAttr, "'Function Parameter Attribute'"); + DecorationOperands[DecorationSpecId].push(OperandLiteralNumber, "'Specialization Constant ID'"); + DecorationOperands[DecorationInputAttachmentIndex].push(OperandLiteralNumber, "'Attachment Index'"); + DecorationOperands[DecorationAlignment].push(OperandLiteralNumber, "'Alignment'"); + + OperandClassParams[OperandSource].set(0, SourceString, 0); + OperandClassParams[OperandExecutionModel].set(0, ExecutionModelString, nullptr); + OperandClassParams[OperandAddressing].set(0, AddressingString, nullptr); + OperandClassParams[OperandMemory].set(0, MemoryString, nullptr); + OperandClassParams[OperandExecutionMode].set(ExecutionModeCeiling, ExecutionModeString, ExecutionModeParams); + OperandClassParams[OperandExecutionMode].setOperands(ExecutionModeOperands); + OperandClassParams[OperandStorage].set(0, StorageClassString, nullptr); + OperandClassParams[OperandDimensionality].set(0, DimensionString, nullptr); + OperandClassParams[OperandSamplerAddressingMode].set(0, SamplerAddressingModeString, nullptr); + OperandClassParams[OperandSamplerFilterMode].set(0, SamplerFilterModeString, nullptr); + OperandClassParams[OperandSamplerImageFormat].set(0, ImageFormatString, nullptr); + OperandClassParams[OperandImageChannelOrder].set(0, ImageChannelOrderString, nullptr); + OperandClassParams[OperandImageChannelDataType].set(0, ImageChannelDataTypeString, nullptr); + OperandClassParams[OperandImageOperands].set(ImageOperandsCeiling, ImageOperandsString, ImageOperandsParams, true); + OperandClassParams[OperandFPFastMath].set(0, FPFastMathString, nullptr, true); + OperandClassParams[OperandFPRoundingMode].set(0, FPRoundingModeString, nullptr); + OperandClassParams[OperandLinkageType].set(0, LinkageTypeString, nullptr); + OperandClassParams[OperandFuncParamAttr].set(0, FuncParamAttrString, nullptr); + OperandClassParams[OperandAccessQualifier].set(0, AccessQualifierString, nullptr); + OperandClassParams[OperandDecoration].set(DecorationCeiling, DecorationString, DecorationParams); + OperandClassParams[OperandDecoration].setOperands(DecorationOperands); + OperandClassParams[OperandBuiltIn].set(0, BuiltInString, nullptr); + OperandClassParams[OperandSelect].set(SelectControlCeiling, SelectControlString, SelectionControlParams, true); + OperandClassParams[OperandLoop].set(LoopControlCeiling, LoopControlString, LoopControlParams, true); + OperandClassParams[OperandFunction].set(FunctionControlCeiling, FunctionControlString, FunctionControlParams, true); + OperandClassParams[OperandMemorySemantics].set(0, MemorySemanticsString, nullptr, true); + 
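// [Added note, not in upstream glslang] Each OperandClassParams[...].set(...) call in this group records, for one
// operand class: a ceiling constant sized to the per-enumerant parameter table (0 where no such table exists), the
// enumerant-to-string function defined earlier in this file, the optional per-enumerant EnumParameters array, and a
// trailing bool that appears to flag the class as a bit mask (e.g. image operands, loop/selection/function control,
// memory access and semantics) rather than a plain enumeration.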
OperandClassParams[OperandMemoryAccess].set(MemoryAccessCeiling, MemoryAccessString, MemoryAccessParams, true); + OperandClassParams[OperandScope].set(0, ScopeString, nullptr); + OperandClassParams[OperandGroupOperation].set(0, GroupOperationString, nullptr); + OperandClassParams[OperandKernelEnqueueFlags].set(0, KernelEnqueueFlagsString, nullptr); + OperandClassParams[OperandKernelProfilingInfo].set(0, KernelProfilingInfoString, nullptr, true); + OperandClassParams[OperandCapability].set(0, CapabilityString, nullptr); + OperandClassParams[OperandOpcode].set(OpCodeMask + 1, OpcodeString, 0); + + // set name of operator, an initial set of style operands, and the description + + InstructionDesc[OpSource].operands.push(OperandSource, ""); + InstructionDesc[OpSource].operands.push(OperandLiteralNumber, "'Version'"); + InstructionDesc[OpSource].operands.push(OperandId, "'File'", true); + InstructionDesc[OpSource].operands.push(OperandLiteralString, "'Source'", true); + + InstructionDesc[OpSourceContinued].operands.push(OperandLiteralString, "'Continued Source'"); + + InstructionDesc[OpSourceExtension].operands.push(OperandLiteralString, "'Extension'"); + + InstructionDesc[OpName].operands.push(OperandId, "'Target'"); + InstructionDesc[OpName].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpMemberName].operands.push(OperandId, "'Type'"); + InstructionDesc[OpMemberName].operands.push(OperandLiteralNumber, "'Member'"); + InstructionDesc[OpMemberName].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpString].operands.push(OperandLiteralString, "'String'"); + + InstructionDesc[OpLine].operands.push(OperandId, "'File'"); + InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Line'"); + InstructionDesc[OpLine].operands.push(OperandLiteralNumber, "'Column'"); + + InstructionDesc[OpExtension].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpExtInstImport].operands.push(OperandLiteralString, "'Name'"); + + InstructionDesc[OpCapability].operands.push(OperandCapability, "'Capability'"); + + InstructionDesc[OpMemoryModel].operands.push(OperandAddressing, ""); + InstructionDesc[OpMemoryModel].operands.push(OperandMemory, ""); + + InstructionDesc[OpEntryPoint].operands.push(OperandExecutionModel, ""); + InstructionDesc[OpEntryPoint].operands.push(OperandId, "'Entry Point'"); + InstructionDesc[OpEntryPoint].operands.push(OperandLiteralString, "'Name'"); + InstructionDesc[OpEntryPoint].operands.push(OperandVariableIds, "'Interface'"); + + InstructionDesc[OpExecutionMode].operands.push(OperandId, "'Entry Point'"); + InstructionDesc[OpExecutionMode].operands.push(OperandExecutionMode, "'Mode'"); + InstructionDesc[OpExecutionMode].operands.push(OperandOptionalLiteral, "See <>"); + + InstructionDesc[OpExecutionModeId].operands.push(OperandId, "'Entry Point'"); + InstructionDesc[OpExecutionModeId].operands.push(OperandExecutionMode, "'Mode'"); + InstructionDesc[OpExecutionModeId].operands.push(OperandVariableIds, "See <>"); + + InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Width'"); + InstructionDesc[OpTypeInt].operands.push(OperandLiteralNumber, "'Signedness'"); + + InstructionDesc[OpTypeFloat].operands.push(OperandLiteralNumber, "'Width'"); + + InstructionDesc[OpTypeVector].operands.push(OperandId, "'Component Type'"); + InstructionDesc[OpTypeVector].operands.push(OperandLiteralNumber, "'Component Count'"); + + InstructionDesc[OpTypeMatrix].operands.push(OperandId, "'Column Type'"); + 
InstructionDesc[OpTypeMatrix].operands.push(OperandLiteralNumber, "'Column Count'"); + + InstructionDesc[OpTypeImage].operands.push(OperandId, "'Sampled Type'"); + InstructionDesc[OpTypeImage].operands.push(OperandDimensionality, ""); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Depth'"); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Arrayed'"); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'MS'"); + InstructionDesc[OpTypeImage].operands.push(OperandLiteralNumber, "'Sampled'"); + InstructionDesc[OpTypeImage].operands.push(OperandSamplerImageFormat, ""); + InstructionDesc[OpTypeImage].operands.push(OperandAccessQualifier, "", true); + + InstructionDesc[OpTypeSampledImage].operands.push(OperandId, "'Image Type'"); + + InstructionDesc[OpTypeArray].operands.push(OperandId, "'Element Type'"); + InstructionDesc[OpTypeArray].operands.push(OperandId, "'Length'"); + + InstructionDesc[OpTypeRuntimeArray].operands.push(OperandId, "'Element Type'"); + + InstructionDesc[OpTypeStruct].operands.push(OperandVariableIds, "'Member 0 type', +\n'member 1 type', +\n..."); + + InstructionDesc[OpTypeOpaque].operands.push(OperandLiteralString, "The name of the opaque type."); + + InstructionDesc[OpTypePointer].operands.push(OperandStorage, ""); + InstructionDesc[OpTypePointer].operands.push(OperandId, "'Type'"); + + InstructionDesc[OpTypeForwardPointer].operands.push(OperandId, "'Pointer Type'"); + InstructionDesc[OpTypeForwardPointer].operands.push(OperandStorage, ""); + + InstructionDesc[OpTypePipe].operands.push(OperandAccessQualifier, "'Qualifier'"); + + InstructionDesc[OpTypeFunction].operands.push(OperandId, "'Return Type'"); + InstructionDesc[OpTypeFunction].operands.push(OperandVariableIds, "'Parameter 0 Type', +\n'Parameter 1 Type', +\n..."); + + InstructionDesc[OpConstant].operands.push(OperandVariableLiterals, "'Value'"); + + InstructionDesc[OpConstantComposite].operands.push(OperandVariableIds, "'Constituents'"); + + InstructionDesc[OpConstantSampler].operands.push(OperandSamplerAddressingMode, ""); + InstructionDesc[OpConstantSampler].operands.push(OperandLiteralNumber, "'Param'"); + InstructionDesc[OpConstantSampler].operands.push(OperandSamplerFilterMode, ""); + + InstructionDesc[OpSpecConstant].operands.push(OperandVariableLiterals, "'Value'"); + + InstructionDesc[OpSpecConstantComposite].operands.push(OperandVariableIds, "'Constituents'"); + + InstructionDesc[OpSpecConstantOp].operands.push(OperandLiteralNumber, "'Opcode'"); + InstructionDesc[OpSpecConstantOp].operands.push(OperandVariableIds, "'Operands'"); + + InstructionDesc[OpVariable].operands.push(OperandStorage, ""); + InstructionDesc[OpVariable].operands.push(OperandId, "'Initializer'", true); + + InstructionDesc[OpFunction].operands.push(OperandFunction, ""); + InstructionDesc[OpFunction].operands.push(OperandId, "'Function Type'"); + + InstructionDesc[OpFunctionCall].operands.push(OperandId, "'Function'"); + InstructionDesc[OpFunctionCall].operands.push(OperandVariableIds, "'Argument 0', +\n'Argument 1', +\n..."); + + InstructionDesc[OpExtInst].operands.push(OperandId, "'Set'"); + InstructionDesc[OpExtInst].operands.push(OperandLiteralNumber, "'Instruction'"); + InstructionDesc[OpExtInst].operands.push(OperandVariableIds, "'Operand 1', +\n'Operand 2', +\n..."); + + InstructionDesc[OpLoad].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpLoad].operands.push(OperandMemoryAccess, "", true); + InstructionDesc[OpLoad].operands.push(OperandLiteralNumber, "", true); 
+ InstructionDesc[OpLoad].operands.push(OperandId, "", true); + + InstructionDesc[OpStore].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpStore].operands.push(OperandId, "'Object'"); + InstructionDesc[OpStore].operands.push(OperandMemoryAccess, "", true); + InstructionDesc[OpStore].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpStore].operands.push(OperandId, "", true); + + InstructionDesc[OpPhi].operands.push(OperandVariableIds, "'Variable, Parent, ...'"); + + InstructionDesc[OpDecorate].operands.push(OperandId, "'Target'"); + InstructionDesc[OpDecorate].operands.push(OperandDecoration, ""); + InstructionDesc[OpDecorate].operands.push(OperandVariableLiterals, "See <>."); + + InstructionDesc[OpDecorateId].operands.push(OperandId, "'Target'"); + InstructionDesc[OpDecorateId].operands.push(OperandDecoration, ""); + InstructionDesc[OpDecorateId].operands.push(OperandVariableIds, "See <>."); + + InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandId, "'Target'"); + InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandDecoration, ""); + InstructionDesc[OpDecorateStringGOOGLE].operands.push(OperandVariableLiteralStrings, "'Literal Strings'"); + + InstructionDesc[OpMemberDecorate].operands.push(OperandId, "'Structure Type'"); + InstructionDesc[OpMemberDecorate].operands.push(OperandLiteralNumber, "'Member'"); + InstructionDesc[OpMemberDecorate].operands.push(OperandDecoration, ""); + InstructionDesc[OpMemberDecorate].operands.push(OperandVariableLiterals, "See <>."); + + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandId, "'Structure Type'"); + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandLiteralNumber, "'Member'"); + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandDecoration, ""); + InstructionDesc[OpMemberDecorateStringGOOGLE].operands.push(OperandVariableLiteralStrings, "'Literal Strings'"); + + InstructionDesc[OpGroupDecorate].operands.push(OperandId, "'Decoration Group'"); + InstructionDesc[OpGroupDecorate].operands.push(OperandVariableIds, "'Targets'"); + + InstructionDesc[OpGroupMemberDecorate].operands.push(OperandId, "'Decoration Group'"); + InstructionDesc[OpGroupMemberDecorate].operands.push(OperandVariableIdLiteral, "'Targets'"); + + InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorExtractDynamic].operands.push(OperandId, "'Index'"); + + InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Component'"); + InstructionDesc[OpVectorInsertDynamic].operands.push(OperandId, "'Index'"); + + InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 1'"); + InstructionDesc[OpVectorShuffle].operands.push(OperandId, "'Vector 2'"); + InstructionDesc[OpVectorShuffle].operands.push(OperandVariableLiterals, "'Components'"); + + InstructionDesc[OpCompositeConstruct].operands.push(OperandVariableIds, "'Constituents'"); + + InstructionDesc[OpCompositeExtract].operands.push(OperandId, "'Composite'"); + InstructionDesc[OpCompositeExtract].operands.push(OperandVariableLiterals, "'Indexes'"); + + InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Object'"); + InstructionDesc[OpCompositeInsert].operands.push(OperandId, "'Composite'"); + InstructionDesc[OpCompositeInsert].operands.push(OperandVariableLiterals, "'Indexes'"); + + InstructionDesc[OpCopyObject].operands.push(OperandId, "'Operand'"); + + 
InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Target'"); + InstructionDesc[OpCopyMemory].operands.push(OperandId, "'Source'"); + InstructionDesc[OpCopyMemory].operands.push(OperandMemoryAccess, "", true); + + InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Target'"); + InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Source'"); + InstructionDesc[OpCopyMemorySized].operands.push(OperandId, "'Size'"); + InstructionDesc[OpCopyMemorySized].operands.push(OperandMemoryAccess, "", true); + + InstructionDesc[OpSampledImage].operands.push(OperandId, "'Image'"); + InstructionDesc[OpSampledImage].operands.push(OperandId, "'Sampler'"); + + InstructionDesc[OpImage].operands.push(OperandId, "'Sampled Image'"); + + InstructionDesc[OpImageRead].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageRead].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageRead].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageRead].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageWrite].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageWrite].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageWrite].operands.push(OperandId, "'Texel'"); + InstructionDesc[OpImageWrite].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageWrite].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'"); + 
InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageFetch].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageFetch].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageFetch].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageFetch].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageGather].operands.push(OperandId, "'Component'"); + InstructionDesc[OpImageGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageDrefGather].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageDrefGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageDrefGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandImageOperands, "", true); + 
InstructionDesc[OpImageSparseSampleDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjDrefImplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseSampleProjDrefExplicitLod].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageSparseFetch].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseFetch].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseFetch].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseGather].operands.push(OperandId, "'Component'"); + InstructionDesc[OpImageSparseGather].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandId, "'D~ref~'"); + InstructionDesc[OpImageSparseDrefGather].operands.push(OperandImageOperands, "", true); + 
InstructionDesc[OpImageSparseDrefGather].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageSparseRead].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSparseRead].operands.push(OperandImageOperands, "", true); + InstructionDesc[OpImageSparseRead].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpImageSparseTexelsResident].operands.push(OperandId, "'Resident Code'"); + + InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageQuerySizeLod].operands.push(OperandId, "'Level of Detail'"); + + InstructionDesc[OpImageQuerySize].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageQueryLod].operands.push(OperandId, "'Coordinate'"); + + InstructionDesc[OpImageQueryLevels].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQuerySamples].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQueryFormat].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpImageQueryOrder].operands.push(OperandId, "'Image'"); + + InstructionDesc[OpAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpInBoundsAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpInBoundsAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpPtrAccessChain].operands.push(OperandId, "'Element'"); + InstructionDesc[OpPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Base'"); + InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandId, "'Element'"); + InstructionDesc[OpInBoundsPtrAccessChain].operands.push(OperandVariableIds, "'Indexes'"); + + InstructionDesc[OpSNegate].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpFNegate].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpNot].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpAny].operands.push(OperandId, "'Vector'"); + + InstructionDesc[OpAll].operands.push(OperandId, "'Vector'"); + + InstructionDesc[OpConvertFToU].operands.push(OperandId, "'Float Value'"); + + InstructionDesc[OpConvertFToS].operands.push(OperandId, "'Float Value'"); + + InstructionDesc[OpConvertSToF].operands.push(OperandId, "'Signed Value'"); + + InstructionDesc[OpConvertUToF].operands.push(OperandId, "'Unsigned Value'"); + + InstructionDesc[OpUConvert].operands.push(OperandId, "'Unsigned Value'"); + + InstructionDesc[OpSConvert].operands.push(OperandId, "'Signed Value'"); + + InstructionDesc[OpFConvert].operands.push(OperandId, "'Float Value'"); + + InstructionDesc[OpSatConvertSToU].operands.push(OperandId, "'Signed Value'"); + + InstructionDesc[OpSatConvertUToS].operands.push(OperandId, "'Unsigned Value'"); + + InstructionDesc[OpConvertPtrToU].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpConvertUToPtr].operands.push(OperandId, "'Integer Value'"); + + InstructionDesc[OpPtrCastToGeneric].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpGenericCastToPtr].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpGenericCastToPtrExplicit].operands.push(OperandStorage, "'Storage'"); + + 
InstructionDesc[OpGenericPtrMemSemantics].operands.push(OperandId, "'Pointer'"); + + InstructionDesc[OpBitcast].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpQuantizeToF16].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'"); + + InstructionDesc[OpCopyLogical].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpIsNan].operands.push(OperandId, "'x'"); + + InstructionDesc[OpIsInf].operands.push(OperandId, "'x'"); + + InstructionDesc[OpIsFinite].operands.push(OperandId, "'x'"); + + InstructionDesc[OpIsNormal].operands.push(OperandId, "'x'"); + + InstructionDesc[OpSignBitSet].operands.push(OperandId, "'x'"); + + InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'x'"); + InstructionDesc[OpLessOrGreater].operands.push(OperandId, "'y'"); + + InstructionDesc[OpOrdered].operands.push(OperandId, "'x'"); + InstructionDesc[OpOrdered].operands.push(OperandId, "'y'"); + + InstructionDesc[OpUnordered].operands.push(OperandId, "'x'"); + InstructionDesc[OpUnordered].operands.push(OperandId, "'y'"); + + InstructionDesc[OpArrayLength].operands.push(OperandId, "'Structure'"); + InstructionDesc[OpArrayLength].operands.push(OperandLiteralNumber, "'Array member'"); + + InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIAdd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFAdd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpISub].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpISub].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFSub].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIMul].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFMul].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUDiv].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSDiv].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFDiv].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUMod].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSRem].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSMod].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFRem].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFMod].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Vector'"); + InstructionDesc[OpVectorTimesScalar].operands.push(OperandId, "'Scalar'"); + + InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Matrix'"); + InstructionDesc[OpMatrixTimesScalar].operands.push(OperandId, "'Scalar'"); + + InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Vector'"); + 
InstructionDesc[OpVectorTimesMatrix].operands.push(OperandId, "'Matrix'"); + + InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Matrix'"); + InstructionDesc[OpMatrixTimesVector].operands.push(OperandId, "'Vector'"); + + InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'LeftMatrix'"); + InstructionDesc[OpMatrixTimesMatrix].operands.push(OperandId, "'RightMatrix'"); + + InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 1'"); + InstructionDesc[OpOuterProduct].operands.push(OperandId, "'Vector 2'"); + + InstructionDesc[OpDot].operands.push(OperandId, "'Vector 1'"); + InstructionDesc[OpDot].operands.push(OperandId, "'Vector 2'"); + + InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIAddCarry].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpISubBorrow].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUMulExtended].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSMulExtended].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Base'"); + InstructionDesc[OpShiftRightLogical].operands.push(OperandId, "'Shift'"); + + InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Base'"); + InstructionDesc[OpShiftRightArithmetic].operands.push(OperandId, "'Shift'"); + + InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Base'"); + InstructionDesc[OpShiftLeftLogical].operands.push(OperandId, "'Shift'"); + + InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalOr].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalAnd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpLogicalNotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpLogicalNot].operands.push(OperandId, "'Operand'"); + + InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpBitwiseOr].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpBitwiseXor].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpBitwiseAnd].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Base'"); + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Insert'"); + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Offset'"); + InstructionDesc[OpBitFieldInsert].operands.push(OperandId, "'Count'"); + + InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Base'"); + InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Offset'"); + InstructionDesc[OpBitFieldSExtract].operands.push(OperandId, "'Count'"); + + InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Base'"); + InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Offset'"); + 
InstructionDesc[OpBitFieldUExtract].operands.push(OperandId, "'Count'"); + + InstructionDesc[OpBitReverse].operands.push(OperandId, "'Base'"); + + InstructionDesc[OpBitCount].operands.push(OperandId, "'Base'"); + + InstructionDesc[OpSelect].operands.push(OperandId, "'Condition'"); + InstructionDesc[OpSelect].operands.push(OperandId, "'Object 1'"); + InstructionDesc[OpSelect].operands.push(OperandId, "'Object 2'"); + + InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpIEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpINotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdNotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordNotEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpULessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSLessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdLessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordLessThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordGreaterThan].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpULessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSLessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdLessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordLessThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpUGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpSGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + 
InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFOrdGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 1'"); + InstructionDesc[OpFUnordGreaterThanEqual].operands.push(OperandId, "'Operand 2'"); + + InstructionDesc[OpDPdx].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdy].operands.push(OperandId, "'P'"); + + InstructionDesc[OpFwidth].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdxFine].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdyFine].operands.push(OperandId, "'P'"); + + InstructionDesc[OpFwidthFine].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdxCoarse].operands.push(OperandId, "'P'"); + + InstructionDesc[OpDPdyCoarse].operands.push(OperandId, "'P'"); + + InstructionDesc[OpFwidthCoarse].operands.push(OperandId, "'P'"); + + InstructionDesc[OpEmitStreamVertex].operands.push(OperandId, "'Stream'"); + + InstructionDesc[OpEndStreamPrimitive].operands.push(OperandId, "'Stream'"); + + InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpControlBarrier].operands.push(OperandScope, "'Memory'"); + InstructionDesc[OpControlBarrier].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpMemoryBarrier].operands.push(OperandScope, "'Memory'"); + InstructionDesc[OpMemoryBarrier].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Image'"); + InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageTexelPointer].operands.push(OperandId, "'Sample'"); + + InstructionDesc[OpAtomicLoad].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicLoad].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicLoad].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicStore].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicStore].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicStore].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicExchange].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicExchange].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicExchange].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Equal'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandMemorySemantics, "'Unequal'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Value'"); + InstructionDesc[OpAtomicCompareExchange].operands.push(OperandId, "'Comparator'"); + + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Equal'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandMemorySemantics, "'Unequal'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, 
"'Value'"); + InstructionDesc[OpAtomicCompareExchangeWeak].operands.push(OperandId, "'Comparator'"); + + InstructionDesc[OpAtomicIIncrement].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicIIncrement].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicIIncrement].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicIDecrement].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicIDecrement].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicIDecrement].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicIAdd].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicIAdd].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicIAdd].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicFAddEXT].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicFAddEXT].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicFAddEXT].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicFAddEXT].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicISub].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicISub].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicISub].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicUMin].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicUMin].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicUMin].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicUMax].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicUMax].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicUMax].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicSMin].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicSMin].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicSMin].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicSMax].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicSMax].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicSMax].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicAnd].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicAnd].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicAnd].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicOr].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicOr].operands.push(OperandMemorySemantics, "'Semantics'"); + InstructionDesc[OpAtomicOr].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicXor].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicXor].operands.push(OperandMemorySemantics, "'Semantics'"); + 
InstructionDesc[OpAtomicXor].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicFlagTestAndSet].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpAtomicFlagClear].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpAtomicFlagClear].operands.push(OperandScope, "'Scope'"); + InstructionDesc[OpAtomicFlagClear].operands.push(OperandMemorySemantics, "'Semantics'"); + + InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Merge Block'"); + InstructionDesc[OpLoopMerge].operands.push(OperandId, "'Continue Target'"); + InstructionDesc[OpLoopMerge].operands.push(OperandLoop, ""); + InstructionDesc[OpLoopMerge].operands.push(OperandOptionalLiteral, ""); + + InstructionDesc[OpSelectionMerge].operands.push(OperandId, "'Merge Block'"); + InstructionDesc[OpSelectionMerge].operands.push(OperandSelect, ""); + + InstructionDesc[OpBranch].operands.push(OperandId, "'Target Label'"); + + InstructionDesc[OpBranchConditional].operands.push(OperandId, "'Condition'"); + InstructionDesc[OpBranchConditional].operands.push(OperandId, "'True Label'"); + InstructionDesc[OpBranchConditional].operands.push(OperandId, "'False Label'"); + InstructionDesc[OpBranchConditional].operands.push(OperandVariableLiterals, "'Branch weights'"); + + InstructionDesc[OpSwitch].operands.push(OperandId, "'Selector'"); + InstructionDesc[OpSwitch].operands.push(OperandId, "'Default'"); + InstructionDesc[OpSwitch].operands.push(OperandVariableLiteralId, "'Target'"); + + + InstructionDesc[OpReturnValue].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpLifetimeStart].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpLifetimeStart].operands.push(OperandLiteralNumber, "'Size'"); + + InstructionDesc[OpLifetimeStop].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpLifetimeStop].operands.push(OperandLiteralNumber, "'Size'"); + + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Destination'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Source'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Num Elements'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Stride'"); + InstructionDesc[OpGroupAsyncCopy].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpGroupWaitEvents].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Num Events'"); + InstructionDesc[OpGroupWaitEvents].operands.push(OperandId, "'Events List'"); + + InstructionDesc[OpGroupAll].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupAll].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpGroupAny].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupAny].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpGroupBroadcast].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'Value'"); + InstructionDesc[OpGroupBroadcast].operands.push(OperandId, "'LocalId'"); + + InstructionDesc[OpGroupIAdd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupIAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupIAdd].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupFAdd].operands.push(OperandScope, 
"'Execution'"); + InstructionDesc[OpGroupFAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFAdd].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupUMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMin].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupSMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMin].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMin].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupUMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMax].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupSMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMax].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMax].operands.push(OperandId, "X"); + + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Index'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReservedReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Index'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReservedWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Num 
Packets'"); + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpCommitReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpCommitWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpIsValidReserveId].operands.push(OperandId, "'Reserve Id'"); + + InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGetNumPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGetMaxPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupReserveReadPipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Num Packets'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupReserveWritePipePackets].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupCommitReadPipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Pipe'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Reserve Id'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Size'"); + InstructionDesc[OpGroupCommitWritePipe].operands.push(OperandId, "'Packet Alignment'"); + + InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkSize'"); + InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'LocalWorkSize'"); + InstructionDesc[OpBuildNDRange].operands.push(OperandId, "'GlobalWorkOffset'"); + + InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Event'"); + 
InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Profiling Info'"); + InstructionDesc[OpCaptureEventProfilingInfo].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Event'"); + InstructionDesc[OpSetUserEventStatus].operands.push(OperandId, "'Status'"); + + InstructionDesc[OpIsValidEvent].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpRetainEvent].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpReleaseEvent].operands.push(OperandId, "'Event'"); + + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelWorkGroupSize].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelPreferredWorkGroupSizeMultiple].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'ND Range'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelNDrangeSubGroupCount].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'ND Range'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpGetKernelNDrangeMaxSubGroupSize].operands.push(OperandId, "'Param Align'"); + + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Queue'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Flags'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'ND Range'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Num Events'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Wait Events'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Ret Event'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Invoke'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Size'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandId, "'Param Align'"); + InstructionDesc[OpEnqueueKernel].operands.push(OperandVariableIds, "'Local Size'"); + + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Queue'"); + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Num Events'"); + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Wait Events'"); + InstructionDesc[OpEnqueueMarker].operands.push(OperandId, "'Ret Event'"); + + InstructionDesc[OpGroupNonUniformElect].operands.push(OperandScope, "'Execution'"); + + InstructionDesc[OpGroupNonUniformAll].operands.push(OperandScope, "'Execution'"); + 
InstructionDesc[OpGroupNonUniformAll].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformAny].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformAny].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformAllEqual].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBroadcast].operands.push(OperandId, "ID"); + + InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBroadcastFirst].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallot].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformInverseBallot].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBallotBitExtract].operands.push(OperandId, "Bit"); + + InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBallotBitCount].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotFindLSB].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBallotFindMSB].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffle].operands.push(OperandId, "'Id'"); + + InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffleXor].operands.push(OperandId, "Mask"); + + InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffleUp].operands.push(OperandId, "Offset"); + + InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformShuffleDown].operands.push(OperandId, "Offset"); + + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformIAdd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandScope, "'Execution'"); + 
InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFAdd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformIMul].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFMul].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformSMin].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformUMin].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFMin].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformSMax].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformUMax].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformFMax].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBitwiseAnd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandGroupOperation, 
"'Operation'"); + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBitwiseOr].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformBitwiseXor].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformLogicalAnd].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformLogicalOr].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformLogicalXor].operands.push(OperandId, "'ClusterSize'", true); + + InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformQuadBroadcast].operands.push(OperandId, "'Id'"); + + InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandId, "X"); + InstructionDesc[OpGroupNonUniformQuadSwap].operands.push(OperandLiteralNumber, "'Direction'"); + + InstructionDesc[OpSubgroupBallotKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupFirstInvocationKHR].operands.push(OperandId, "'Value'"); + + InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpSubgroupAnyKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupAllKHR].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpSubgroupAllKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpSubgroupAllEqualKHR].operands.push(OperandId, "'Predicate'"); + + InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Value'"); + InstructionDesc[OpSubgroupReadInvocationKHR].operands.push(OperandId, "'Index'"); + + InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'"); + + InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + 
InstructionDesc[OpGroupFAddNonUniformAMD].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMinNonUniformAMD].operands.push(OperandId, "'X'"); + + InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMinNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMinNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupUMaxNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupSMaxNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandScope, "'Execution'"); + InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'"); + InstructionDesc[OpGroupFMaxNonUniformAMD].operands.push(OperandId, "X"); + + InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Image'"); + InstructionDesc[OpFragmentMaskFetchAMD].operands.push(OperandId, "'Coordinate'"); + + InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'"); + InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'"); + + InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X"); + + InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false); + + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'NV Acceleration Structure'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Flags'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Cull Mask'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'SBT Record Offset'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'SBT Record Stride'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Miss Index'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Origin'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'TMin'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Ray Direction'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'TMax'"); + InstructionDesc[OpTraceRayKHR].operands.push(OperandId, "'Payload'"); + InstructionDesc[OpTraceRayKHR].setResultAndType(false, false); + + InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Parameter'"); + InstructionDesc[OpReportIntersectionKHR].operands.push(OperandId, "'Hit Kind'"); + + InstructionDesc[OpIgnoreIntersectionKHR].setResultAndType(false, false); + + InstructionDesc[OpTerminateRayKHR].setResultAndType(false, false); + + InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "SBT Record Index"); + 
InstructionDesc[OpExecuteCallableKHR].operands.push(OperandId, "CallableData ID"); + InstructionDesc[OpExecuteCallableKHR].setResultAndType(false, false); + + // Ray Query + InstructionDesc[OpTypeAccelerationStructureKHR].setResultAndType(true, false); + InstructionDesc[OpTypeRayQueryProvisionalKHR].setResultAndType(true, false); + + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'AccelerationS'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'RayFlags'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'CullMask'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Origin'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Tmin'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Direction'"); + InstructionDesc[OpRayQueryInitializeKHR].operands.push(OperandId, "'Tmax'"); + InstructionDesc[OpRayQueryInitializeKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryTerminateKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryTerminateKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryGenerateIntersectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGenerateIntersectionKHR].operands.push(OperandId, "'THit'"); + InstructionDesc[OpRayQueryGenerateIntersectionKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryConfirmIntersectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryConfirmIntersectionKHR].setResultAndType(false, false); + + InstructionDesc[OpRayQueryProceedKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryProceedKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionTypeKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionTypeKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionTypeKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetRayTMinKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetRayTMinKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetRayFlagsKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetRayFlagsKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionTKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionTKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionTKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionInstanceCustomIndexKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceCustomIndexKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceCustomIndexKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionInstanceIdKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceIdKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceIdKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR].operands.push(OperandId, "'Committed'"); + 
InstructionDesc[OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionGeometryIndexKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionGeometryIndexKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionGeometryIndexKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionPrimitiveIndexKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionPrimitiveIndexKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionPrimitiveIndexKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionBarycentricsKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionBarycentricsKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionBarycentricsKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionFrontFaceKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionFrontFaceKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionFrontFaceKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionCandidateAABBOpaqueKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionCandidateAABBOpaqueKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionObjectRayDirectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayDirectionKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayDirectionKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionObjectRayOriginKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayOriginKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionObjectRayOriginKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetWorldRayDirectionKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetWorldRayDirectionKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetWorldRayOriginKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetWorldRayOriginKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionObjectToWorldKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionObjectToWorldKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionObjectToWorldKHR].setResultAndType(true, true); + + InstructionDesc[OpRayQueryGetIntersectionWorldToObjectKHR].operands.push(OperandId, "'RayQuery'"); + InstructionDesc[OpRayQueryGetIntersectionWorldToObjectKHR].operands.push(OperandId, "'Committed'"); + InstructionDesc[OpRayQueryGetIntersectionWorldToObjectKHR].setResultAndType(true, true); + + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Sampled Image'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coordinate'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Granularity'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandId, "'Coarse'"); + InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandImageOperands, "", true); + 
InstructionDesc[OpImageSampleFootprintNV].operands.push(OperandVariableIds, "", true); + + InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'"); + InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'"); + + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'"); + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'"); + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Rows'"); + InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Columns'"); + + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Stride'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Column Major'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandMemoryAccess, "'Memory Access'"); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "", true); + + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Pointer'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Object'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Stride'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Column Major'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandMemoryAccess, "'Memory Access'"); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandLiteralNumber, "", true); + InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "", true); + + InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'A'"); + InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'B'"); + InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'"); + + InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'"); + + InstructionDesc[OpDemoteToHelperInvocationEXT].setResultAndType(false, false); + + InstructionDesc[OpReadClockKHR].operands.push(OperandScope, "'Scope'"); +} + +}; // end spv namespace diff --git a/src/third_party/glslang/SPIRV/doc.h b/src/third_party/glslang/SPIRV/doc.h new file mode 100644 index 0000000..2a0b28c --- /dev/null +++ b/src/third_party/glslang/SPIRV/doc.h @@ -0,0 +1,259 @@ +// +// Copyright (C) 2014-2015 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Parameterize the SPIR-V enumerants. +// + +#pragma once + +#include "spirv.hpp" + +#include <vector> + +namespace spv { + +// Fill in all the parameters +void Parameterize(); + +// Return the English names of all the enums. +const char* SourceString(int); +const char* AddressingString(int); +const char* MemoryString(int); +const char* ExecutionModelString(int); +const char* ExecutionModeString(int); +const char* StorageClassString(int); +const char* DecorationString(int); +const char* BuiltInString(int); +const char* DimensionString(int); +const char* SelectControlString(int); +const char* LoopControlString(int); +const char* FunctionControlString(int); +const char* SamplerAddressingModeString(int); +const char* SamplerFilterModeString(int); +const char* ImageFormatString(int); +const char* ImageChannelOrderString(int); +const char* ImageChannelTypeString(int); +const char* ImageChannelDataTypeString(int type); +const char* ImageOperandsString(int format); +const char* ImageOperands(int); +const char* FPFastMathString(int); +const char* FPRoundingModeString(int); +const char* LinkageTypeString(int); +const char* FuncParamAttrString(int); +const char* AccessQualifierString(int); +const char* MemorySemanticsString(int); +const char* MemoryAccessString(int); +const char* ExecutionScopeString(int); +const char* GroupOperationString(int); +const char* KernelEnqueueFlagsString(int); +const char* KernelProfilingInfoString(int); +const char* CapabilityString(int); +const char* OpcodeString(int); +const char* ScopeString(int mem); + +// For grouping opcodes into subsections +enum OpcodeClass { + OpClassMisc, + OpClassDebug, + OpClassAnnotate, + OpClassExtension, + OpClassMode, + OpClassType, + OpClassConstant, + OpClassMemory, + OpClassFunction, + OpClassImage, + OpClassConvert, + OpClassComposite, + OpClassArithmetic, + OpClassBit, + OpClassRelationalLogical, + OpClassDerivative, + OpClassFlowControl, + OpClassAtomic, + OpClassPrimitive, + OpClassBarrier, + OpClassGroup, + OpClassDeviceSideEnqueue, + OpClassPipe, + + OpClassCount, + OpClassMissing // all instructions start out as missing +}; + +// For parameterizing operands.
+enum OperandClass { + OperandNone, + OperandId, + OperandVariableIds, + OperandOptionalLiteral, + OperandOptionalLiteralString, + OperandVariableLiterals, + OperandVariableIdLiteral, + OperandVariableLiteralId, + OperandLiteralNumber, + OperandLiteralString, + OperandVariableLiteralStrings, + OperandSource, + OperandExecutionModel, + OperandAddressing, + OperandMemory, + OperandExecutionMode, + OperandStorage, + OperandDimensionality, + OperandSamplerAddressingMode, + OperandSamplerFilterMode, + OperandSamplerImageFormat, + OperandImageChannelOrder, + OperandImageChannelDataType, + OperandImageOperands, + OperandFPFastMath, + OperandFPRoundingMode, + OperandLinkageType, + OperandAccessQualifier, + OperandFuncParamAttr, + OperandDecoration, + OperandBuiltIn, + OperandSelect, + OperandLoop, + OperandFunction, + OperandMemorySemantics, + OperandMemoryAccess, + OperandScope, + OperandGroupOperation, + OperandKernelEnqueueFlags, + OperandKernelProfilingInfo, + OperandCapability, + + OperandOpcode, + + OperandCount +}; + +// Any specific enum can have a set of capabilities that allow it: +typedef std::vector<Capability> EnumCaps; + +// Parameterize a set of operands with their OperandClass(es) and descriptions. +class OperandParameters { +public: + OperandParameters() { } + void push(OperandClass oc, const char* d, bool opt = false) + { + opClass.push_back(oc); + desc.push_back(d); + optional.push_back(opt); + } + void setOptional(); + OperandClass getClass(int op) const { return opClass[op]; } + const char* getDesc(int op) const { return desc[op]; } + bool isOptional(int op) const { return optional[op]; } + int getNum() const { return (int)opClass.size(); } + +protected: + std::vector<OperandClass> opClass; + std::vector<const char*> desc; + std::vector<bool> optional; +}; + +// Parameterize an enumerant +class EnumParameters { +public: + EnumParameters() : desc(0) { } + const char* desc; +}; + +// Parameterize a set of enumerants that form an enum +class EnumDefinition : public EnumParameters { +public: + EnumDefinition() : + ceiling(0), bitmask(false), getName(0), enumParams(0), operandParams(0) { } + void set(int ceil, const char* (*name)(int), EnumParameters* ep, bool mask = false) + { + ceiling = ceil; + getName = name; + bitmask = mask; + enumParams = ep; + } + void setOperands(OperandParameters* op) { operandParams = op; } + int ceiling; // ceiling of enumerants + bool bitmask; // true if these enumerants combine into a bitmask + const char* (*getName)(int); // a function that returns the name for each enumerant value (or shift) + EnumParameters* enumParams; // parameters for each individual enumerant + OperandParameters* operandParams; // sets of operands +}; + +// Parameterize an instruction's logical format, including its known set of operands, +// per OperandParameters above. +class InstructionParameters { +public: + InstructionParameters() : + opDesc("TBD"), + opClass(OpClassMissing), + typePresent(true), // most normal, only exceptions have to be spelled out + resultPresent(true) // most normal, only exceptions have to be spelled out + { } + + void setResultAndType(bool r, bool t) + { + resultPresent = r; + typePresent = t; + } + + bool hasResult() const { return resultPresent != 0; } + bool hasType() const { return typePresent != 0; } + + const char* opDesc; + OpcodeClass opClass; + OperandParameters operands; + +protected: + int typePresent : 1; + int resultPresent : 1; +}; + +// The set of objects that hold all the instruction/operand +// parameterization information.
+extern InstructionParameters InstructionDesc[]; + +// These hold definitions of the enumerants used for operands +extern EnumDefinition OperandClassParams[]; + +const char* GetOperandDesc(OperandClass operand); +void PrintImmediateRow(int imm, const char* name, const EnumParameters* enumParams, bool caps, bool hex = false); +const char* AccessQualifierString(int attr); + +void PrintOperands(const OperandParameters& operands, int reservedOperands); + +} // end namespace spv diff --git a/src/third_party/glslang/SPIRV/hex_float.h b/src/third_party/glslang/SPIRV/hex_float.h new file mode 100644 index 0000000..8be8e9f --- /dev/null +++ b/src/third_party/glslang/SPIRV/hex_float.h @@ -0,0 +1,1078 @@ +// Copyright (c) 2015-2016 The Khronos Group Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef LIBSPIRV_UTIL_HEX_FLOAT_H_ +#define LIBSPIRV_UTIL_HEX_FLOAT_H_ + +#include <cassert> +#include <cctype> +#include <cmath> +#include <cstdint> +#include <iomanip> +#include <limits> +#include <sstream> + +#if defined(_MSC_VER) && _MSC_VER < 1800 +namespace std { +bool isnan(double f) +{ + return ::_isnan(f) != 0; +} +bool isinf(double f) +{ + return ::_finite(f) == 0; +} +} +#endif + +#include "bitutils.h" + +namespace spvutils { + +class Float16 { + public: + Float16(uint16_t v) : val(v) {} + Float16() {} + static bool isNan(const Float16& val) { + return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) != 0); + } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(const Float16& val) { + return ((val.val & 0x7C00) == 0x7C00) && ((val.val & 0x3FF) == 0); + } + Float16(const Float16& other) { val = other.val; } + uint16_t get_value() const { return val; } + + // Returns the maximum normal value. + static Float16 max() { return Float16(0x7bff); } + // Returns the lowest normal value. + static Float16 lowest() { return Float16(0xfbff); } + + private: + uint16_t val; +}; + +// To specialize this type, you must override uint_type to define +// an unsigned integer that can fit your floating point type. +// You must also add a isNan function that returns true if +// a value is Nan. +template <typename T> +struct FloatProxyTraits { + typedef void uint_type; +}; + +template <> +struct FloatProxyTraits<float> { + typedef uint32_t uint_type; + static bool isNan(float f) { return std::isnan(f); } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(float f) { return std::isinf(f); } + // Returns the maximum normal value. + static float max() { return std::numeric_limits<float>::max(); } + // Returns the lowest normal value. + static float lowest() { return std::numeric_limits<float>::lowest(); } +}; + +template <> +struct FloatProxyTraits<double> { + typedef uint64_t uint_type; + static bool isNan(double f) { return std::isnan(f); } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(double f) { return std::isinf(f); } + // Returns the maximum normal value. + static double max() { return std::numeric_limits<double>::max(); } + // Returns the lowest normal value.
+ static double lowest() { return std::numeric_limits<double>::lowest(); } +}; + +template <> +struct FloatProxyTraits<Float16> { + typedef uint16_t uint_type; + static bool isNan(Float16 f) { return Float16::isNan(f); } + // Returns true if the given value is any kind of infinity. + static bool isInfinity(Float16 f) { return Float16::isInfinity(f); } + // Returns the maximum normal value. + static Float16 max() { return Float16::max(); } + // Returns the lowest normal value. + static Float16 lowest() { return Float16::lowest(); } +}; + +// Since copying a floating point number (especially if it is NaN) +// does not guarantee that bits are preserved, this class lets us +// store the type and use it as a float when necessary. +template <typename T> +class FloatProxy { + public: + typedef typename FloatProxyTraits<T>::uint_type uint_type; + + // Since this is to act similar to the normal floats, + // do not initialize the data by default. + FloatProxy() {} + + // Intentionally non-explicit. This is a proxy type so + // implicit conversions allow us to use it more transparently. + FloatProxy(T val) { data_ = BitwiseCast<uint_type>(val); } + + // Intentionally non-explicit. This is a proxy type so + // implicit conversions allow us to use it more transparently. + FloatProxy(uint_type val) { data_ = val; } + + // This is helpful to have and is guaranteed not to stomp bits. + FloatProxy<T> operator-() const { + return static_cast<uint_type>(data_ ^ + (uint_type(0x1) << (sizeof(T) * 8 - 1))); + } + + // Returns the data as a floating point value. + T getAsFloat() const { return BitwiseCast<T>(data_); } + + // Returns the raw data. + uint_type data() const { return data_; } + + // Returns true if the value represents any type of NaN. + bool isNan() { return FloatProxyTraits<T>::isNan(getAsFloat()); } + // Returns true if the value represents any type of infinity. + bool isInfinity() { return FloatProxyTraits<T>::isInfinity(getAsFloat()); } + + // Returns the maximum normal value. + static FloatProxy<T> max() { + return FloatProxy<T>(FloatProxyTraits<T>::max()); + } + // Returns the lowest normal value. + static FloatProxy<T> lowest() { + return FloatProxy<T>(FloatProxyTraits<T>::lowest()); + } + + private: + uint_type data_; +}; + +template <typename T> +bool operator==(const FloatProxy<T>& first, const FloatProxy<T>& second) { + return first.data() == second.data(); +} + +// Reads a FloatProxy value as a normal float from a stream. +template <typename T> +std::istream& operator>>(std::istream& is, FloatProxy<T>& value) { + T float_val; + is >> float_val; + value = FloatProxy<T>(float_val); + return is; +} + +// This is an example traits. It is not meant to be used in practice, but will +// be the default for any non-specialized type. +template <typename T> +struct HexFloatTraits { + // Integer type that can store this hex-float. + typedef void uint_type; + // Signed integer type that can store this hex-float. + typedef void int_type; + // The numerical type that this HexFloat represents. + typedef void underlying_type; + // The type needed to construct the underlying type. + typedef void native_type; + // The number of bits that are actually relevant in the uint_type. + // This allows us to deal with, for example, 24-bit values in a 32-bit + // integer. + static const uint32_t num_used_bits = 0; + // Number of bits that represent the exponent. + static const uint32_t num_exponent_bits = 0; + // Number of bits that represent the fractional part. + static const uint32_t num_fraction_bits = 0; + // The bias of the exponent. (How much we need to subtract from the stored + // value to get the correct value.)
+ static const uint32_t exponent_bias = 0; +}; + +// Traits for IEEE float. +// 1 sign bit, 8 exponent bits, 23 fractional bits. +template <> +struct HexFloatTraits> { + typedef uint32_t uint_type; + typedef int32_t int_type; + typedef FloatProxy underlying_type; + typedef float native_type; + static const uint_type num_used_bits = 32; + static const uint_type num_exponent_bits = 8; + static const uint_type num_fraction_bits = 23; + static const uint_type exponent_bias = 127; +}; + +// Traits for IEEE double. +// 1 sign bit, 11 exponent bits, 52 fractional bits. +template <> +struct HexFloatTraits> { + typedef uint64_t uint_type; + typedef int64_t int_type; + typedef FloatProxy underlying_type; + typedef double native_type; + static const uint_type num_used_bits = 64; + static const uint_type num_exponent_bits = 11; + static const uint_type num_fraction_bits = 52; + static const uint_type exponent_bias = 1023; +}; + +// Traits for IEEE half. +// 1 sign bit, 5 exponent bits, 10 fractional bits. +template <> +struct HexFloatTraits> { + typedef uint16_t uint_type; + typedef int16_t int_type; + typedef uint16_t underlying_type; + typedef uint16_t native_type; + static const uint_type num_used_bits = 16; + static const uint_type num_exponent_bits = 5; + static const uint_type num_fraction_bits = 10; + static const uint_type exponent_bias = 15; +}; + +enum round_direction { + kRoundToZero, + kRoundToNearestEven, + kRoundToPositiveInfinity, + kRoundToNegativeInfinity +}; + +// Template class that houses a floating pointer number. +// It exposes a number of constants based on the provided traits to +// assist in interpreting the bits of the value. +template > +class HexFloat { + public: + typedef typename Traits::uint_type uint_type; + typedef typename Traits::int_type int_type; + typedef typename Traits::underlying_type underlying_type; + typedef typename Traits::native_type native_type; + + explicit HexFloat(T f) : value_(f) {} + + T value() const { return value_; } + void set_value(T f) { value_ = f; } + + // These are all written like this because it is convenient to have + // compile-time constants for all of these values. + + // Pass-through values to save typing. + static const uint32_t num_used_bits = Traits::num_used_bits; + static const uint32_t exponent_bias = Traits::exponent_bias; + static const uint32_t num_exponent_bits = Traits::num_exponent_bits; + static const uint32_t num_fraction_bits = Traits::num_fraction_bits; + + // Number of bits to shift left to set the highest relevant bit. + static const uint32_t top_bit_left_shift = num_used_bits - 1; + // How many nibbles (hex characters) the fractional part takes up. + static const uint32_t fraction_nibbles = (num_fraction_bits + 3) / 4; + // If the fractional part does not fit evenly into a hex character (4-bits) + // then we have to left-shift to get rid of leading 0s. This is the amount + // we have to shift (might be 0). + static const uint32_t num_overflow_bits = + fraction_nibbles * 4 - num_fraction_bits; + + // The representation of the fraction, not the actual bits. This + // includes the leading bit that is usually implicit. + static const uint_type fraction_represent_mask = + spvutils::SetBits::get; + + // The topmost bit in the nibble-aligned fraction. + static const uint_type fraction_top_bit = + uint_type(1) << (num_fraction_bits + num_overflow_bits - 1); + + // The least significant bit in the exponent, which is also the bit + // immediately to the left of the significand. 
+ static const uint_type first_exponent_bit = uint_type(1) + << (num_fraction_bits); + + // The mask for the encoded fraction. It does not include the + // implicit bit. + static const uint_type fraction_encode_mask = + spvutils::SetBits::get; + + // The bit that is used as a sign. + static const uint_type sign_mask = uint_type(1) << top_bit_left_shift; + + // The bits that represent the exponent. + static const uint_type exponent_mask = + spvutils::SetBits::get; + + // How far left the exponent is shifted. + static const uint32_t exponent_left_shift = num_fraction_bits; + + // How far from the right edge the fraction is shifted. + static const uint32_t fraction_right_shift = + static_cast(sizeof(uint_type) * 8) - num_fraction_bits; + + // The maximum representable unbiased exponent. + static const int_type max_exponent = + (exponent_mask >> num_fraction_bits) - exponent_bias; + // The minimum representable exponent for normalized numbers. + static const int_type min_exponent = -static_cast(exponent_bias); + + // Returns the bits associated with the value. + uint_type getBits() const { return spvutils::BitwiseCast(value_); } + + // Returns the bits associated with the value, without the leading sign bit. + uint_type getUnsignedBits() const { + return static_cast(spvutils::BitwiseCast(value_) & + ~sign_mask); + } + + // Returns the bits associated with the exponent, shifted to start at the + // lsb of the type. + const uint_type getExponentBits() const { + return static_cast((getBits() & exponent_mask) >> + num_fraction_bits); + } + + // Returns the exponent in unbiased form. This is the exponent in the + // human-friendly form. + const int_type getUnbiasedExponent() const { + return static_cast(getExponentBits() - exponent_bias); + } + + // Returns just the significand bits from the value. + const uint_type getSignificandBits() const { + return getBits() & fraction_encode_mask; + } + + // If the number was normalized, returns the unbiased exponent. + // If the number was denormal, normalize the exponent first. + const int_type getUnbiasedNormalizedExponent() const { + if ((getBits() & ~sign_mask) == 0) { // special case if everything is 0 + return 0; + } + int_type exp = getUnbiasedExponent(); + if (exp == min_exponent) { // We are in denorm land. + uint_type significand_bits = getSignificandBits(); + while ((significand_bits & (first_exponent_bit >> 1)) == 0) { + significand_bits = static_cast(significand_bits << 1); + exp = static_cast(exp - 1); + } + significand_bits &= fraction_encode_mask; + } + return exp; + } + + // Returns the signficand after it has been normalized. + const uint_type getNormalizedSignificand() const { + int_type unbiased_exponent = getUnbiasedNormalizedExponent(); + uint_type significand = getSignificandBits(); + for (int_type i = unbiased_exponent; i <= min_exponent; ++i) { + significand = static_cast(significand << 1); + } + significand &= fraction_encode_mask; + return significand; + } + + // Returns true if this number represents a negative value. + bool isNegative() const { return (getBits() & sign_mask) != 0; } + + // Sets this HexFloat from the individual components. + // Note this assumes EVERY significand is normalized, and has an implicit + // leading one. This means that the only way that this method will set 0, + // is if you set a number so denormalized that it underflows. + // Do not use this method with raw bits extracted from a subnormal number, + // since subnormals do not have an implicit leading 1 in the significand. 
+ // The significand is also expected to be in the + // lowest-most num_fraction_bits of the uint_type. + // The exponent is expected to be unbiased, meaning an exponent of + // 0 actually means 0. + // If underflow_round_up is set, then on underflow, if a number is non-0 + // and would underflow, we round up to the smallest denorm. + void setFromSignUnbiasedExponentAndNormalizedSignificand( + bool negative, int_type exponent, uint_type significand, + bool round_denorm_up) { + bool significand_is_zero = significand == 0; + + if (exponent <= min_exponent) { + // If this was denormalized, then we have to shift the bit on, meaning + // the significand is not zero. + significand_is_zero = false; + significand |= first_exponent_bit; + significand = static_cast(significand >> 1); + } + + while (exponent < min_exponent) { + significand = static_cast(significand >> 1); + ++exponent; + } + + if (exponent == min_exponent) { + if (significand == 0 && !significand_is_zero && round_denorm_up) { + significand = static_cast(0x1); + } + } + + uint_type new_value = 0; + if (negative) { + new_value = static_cast(new_value | sign_mask); + } + exponent = static_cast(exponent + exponent_bias); + assert(exponent >= 0); + + // put it all together + exponent = static_cast((exponent << exponent_left_shift) & + exponent_mask); + significand = static_cast(significand & fraction_encode_mask); + new_value = static_cast(new_value | (exponent | significand)); + value_ = BitwiseCast(new_value); + } + + // Increments the significand of this number by the given amount. + // If this would spill the significand into the implicit bit, + // carry is set to true and the significand is shifted to fit into + // the correct location, otherwise carry is set to false. + // All significands and to_increment are assumed to be within the bounds + // for a valid significand. + static uint_type incrementSignificand(uint_type significand, + uint_type to_increment, bool* carry) { + significand = static_cast(significand + to_increment); + *carry = false; + if (significand & first_exponent_bit) { + *carry = true; + // The implicit 1-bit will have carried, so we should zero-out the + // top bit and shift back. + significand = static_cast(significand & ~first_exponent_bit); + significand = static_cast(significand >> 1); + } + return significand; + } + + // These exist because MSVC throws warnings on negative right-shifts + // even if they are not going to be executed. Eg: + // constant_number < 0? 0: constant_number + // These convert the negative left-shifts into right shifts. + + template + uint_type negatable_left_shift(int_type N, uint_type val) + { + if(N >= 0) + return val << N; + + return val >> -N; + } + + template + uint_type negatable_right_shift(int_type N, uint_type val) + { + if(N >= 0) + return val >> N; + + return val << -N; + } + + // Returns the significand, rounded to fit in a significand in + // other_T. This is shifted so that the most significant + // bit of the rounded number lines up with the most significant bit + // of the returned significand. + template + typename other_T::uint_type getRoundedNormalizedSignificand( + round_direction dir, bool* carry_bit) { + typedef typename other_T::uint_type other_uint_type; + static const int_type num_throwaway_bits = + static_cast(num_fraction_bits) - + static_cast(other_T::num_fraction_bits); + + static const uint_type last_significant_bit = + (num_throwaway_bits < 0) + ? 
0 + : negatable_left_shift(num_throwaway_bits, 1u); + static const uint_type first_rounded_bit = + (num_throwaway_bits < 1) + ? 0 + : negatable_left_shift(num_throwaway_bits - 1, 1u); + + static const uint_type throwaway_mask_bits = + num_throwaway_bits > 0 ? num_throwaway_bits : 0; + static const uint_type throwaway_mask = + spvutils::SetBits::get; + + *carry_bit = false; + other_uint_type out_val = 0; + uint_type significand = getNormalizedSignificand(); + // If we are up-casting, then we just have to shift to the right location. + if (num_throwaway_bits <= 0) { + out_val = static_cast(significand); + uint_type shift_amount = static_cast(-num_throwaway_bits); + out_val = static_cast(out_val << shift_amount); + return out_val; + } + + // If every non-representable bit is 0, then we don't have any casting to + // do. + if ((significand & throwaway_mask) == 0) { + return static_cast( + negatable_right_shift(num_throwaway_bits, significand)); + } + + bool round_away_from_zero = false; + // We actually have to narrow the significand here, so we have to follow the + // rounding rules. + switch (dir) { + case kRoundToZero: + break; + case kRoundToPositiveInfinity: + round_away_from_zero = !isNegative(); + break; + case kRoundToNegativeInfinity: + round_away_from_zero = isNegative(); + break; + case kRoundToNearestEven: + // Have to round down, round bit is 0 + if ((first_rounded_bit & significand) == 0) { + break; + } + if (((significand & throwaway_mask) & ~first_rounded_bit) != 0) { + // If any subsequent bit of the rounded portion is non-0 then we round + // up. + round_away_from_zero = true; + break; + } + // We are exactly half-way between 2 numbers, pick even. + if ((significand & last_significant_bit) != 0) { + // 1 for our last bit, round up. + round_away_from_zero = true; + break; + } + break; + } + + if (round_away_from_zero) { + return static_cast( + negatable_right_shift(num_throwaway_bits, incrementSignificand( + significand, last_significant_bit, carry_bit))); + } else { + return static_cast( + negatable_right_shift(num_throwaway_bits, significand)); + } + } + + // Casts this value to another HexFloat. If the cast is widening, + // then round_dir is ignored. If the cast is narrowing, then + // the result is rounded in the direction specified. + // This number will retain Nan and Inf values. + // It will also saturate to Inf if the number overflows, and + // underflow to (0 or min depending on rounding) if the number underflows. + template + void castTo(other_T& other, round_direction round_dir) { + other = other_T(static_cast(0)); + bool negate = isNegative(); + if (getUnsignedBits() == 0) { + if (negate) { + other.set_value(-other.value()); + } + return; + } + uint_type significand = getSignificandBits(); + bool carried = false; + typename other_T::uint_type rounded_significand = + getRoundedNormalizedSignificand(round_dir, &carried); + + int_type exponent = getUnbiasedExponent(); + if (exponent == min_exponent) { + // If we are denormal, normalize the exponent, so that we can encode + // easily. 
+ exponent = static_cast(exponent + 1); + for (uint_type check_bit = first_exponent_bit >> 1; check_bit != 0; + check_bit = static_cast(check_bit >> 1)) { + exponent = static_cast(exponent - 1); + if (check_bit & significand) break; + } + } + + bool is_nan = + (getBits() & exponent_mask) == exponent_mask && significand != 0; + bool is_inf = + !is_nan && + ((exponent + carried) > static_cast(other_T::exponent_bias) || + (significand == 0 && (getBits() & exponent_mask) == exponent_mask)); + + // If we are Nan or Inf we should pass that through. + if (is_inf) { + other.set_value(BitwiseCast( + static_cast( + (negate ? other_T::sign_mask : 0) | other_T::exponent_mask))); + return; + } + if (is_nan) { + typename other_T::uint_type shifted_significand; + shifted_significand = static_cast( + negatable_left_shift( + static_cast(other_T::num_fraction_bits) - + static_cast(num_fraction_bits), significand)); + + // We are some sort of Nan. We try to keep the bit-pattern of the Nan + // as close as possible. If we had to shift off bits so we are 0, then we + // just set the last bit. + other.set_value(BitwiseCast( + static_cast( + (negate ? other_T::sign_mask : 0) | other_T::exponent_mask | + (shifted_significand == 0 ? 0x1 : shifted_significand)))); + return; + } + + bool round_underflow_up = + isNegative() ? round_dir == kRoundToNegativeInfinity + : round_dir == kRoundToPositiveInfinity; + typedef typename other_T::int_type other_int_type; + // setFromSignUnbiasedExponentAndNormalizedSignificand will + // zero out any underflowing value (but retain the sign). + other.setFromSignUnbiasedExponentAndNormalizedSignificand( + negate, static_cast(exponent), rounded_significand, + round_underflow_up); + return; + } + + private: + T value_; + + static_assert(num_used_bits == + Traits::num_exponent_bits + Traits::num_fraction_bits + 1, + "The number of bits do not fit"); + static_assert(sizeof(T) == sizeof(uint_type), "The type sizes do not match"); +}; + +// Returns 4 bits represented by the hex character. +inline uint8_t get_nibble_from_character(int character) { + const char* dec = "0123456789"; + const char* lower = "abcdef"; + const char* upper = "ABCDEF"; + const char* p = nullptr; + if ((p = strchr(dec, character))) { + return static_cast(p - dec); + } else if ((p = strchr(lower, character))) { + return static_cast(p - lower + 0xa); + } else if ((p = strchr(upper, character))) { + return static_cast(p - upper + 0xa); + } + + assert(false && "This was called with a non-hex character"); + return 0; +} + +// Outputs the given HexFloat to the stream. +template +std::ostream& operator<<(std::ostream& os, const HexFloat& value) { + typedef HexFloat HF; + typedef typename HF::uint_type uint_type; + typedef typename HF::int_type int_type; + + static_assert(HF::num_used_bits != 0, + "num_used_bits must be non-zero for a valid float"); + static_assert(HF::num_exponent_bits != 0, + "num_exponent_bits must be non-zero for a valid float"); + static_assert(HF::num_fraction_bits != 0, + "num_fractin_bits must be non-zero for a valid float"); + + const uint_type bits = spvutils::BitwiseCast(value.value()); + const char* const sign = (bits & HF::sign_mask) ? 
"-" : ""; + const uint_type exponent = static_cast( + (bits & HF::exponent_mask) >> HF::num_fraction_bits); + + uint_type fraction = static_cast((bits & HF::fraction_encode_mask) + << HF::num_overflow_bits); + + const bool is_zero = exponent == 0 && fraction == 0; + const bool is_denorm = exponent == 0 && !is_zero; + + // exponent contains the biased exponent we have to convert it back into + // the normal range. + int_type int_exponent = static_cast(exponent - HF::exponent_bias); + // If the number is all zeros, then we actually have to NOT shift the + // exponent. + int_exponent = is_zero ? 0 : int_exponent; + + // If we are denorm, then start shifting, and decreasing the exponent until + // our leading bit is 1. + + if (is_denorm) { + while ((fraction & HF::fraction_top_bit) == 0) { + fraction = static_cast(fraction << 1); + int_exponent = static_cast(int_exponent - 1); + } + // Since this is denormalized, we have to consume the leading 1 since it + // will end up being implicit. + fraction = static_cast(fraction << 1); // eat the leading 1 + fraction &= HF::fraction_represent_mask; + } + + uint_type fraction_nibbles = HF::fraction_nibbles; + // We do not have to display any trailing 0s, since this represents the + // fractional part. + while (fraction_nibbles > 0 && (fraction & 0xF) == 0) { + // Shift off any trailing values; + fraction = static_cast(fraction >> 4); + --fraction_nibbles; + } + + const auto saved_flags = os.flags(); + const auto saved_fill = os.fill(); + + os << sign << "0x" << (is_zero ? '0' : '1'); + if (fraction_nibbles) { + // Make sure to keep the leading 0s in place, since this is the fractional + // part. + os << "." << std::setw(static_cast(fraction_nibbles)) + << std::setfill('0') << std::hex << fraction; + } + os << "p" << std::dec << (int_exponent >= 0 ? "+" : "") << int_exponent; + + os.flags(saved_flags); + os.fill(saved_fill); + + return os; +} + +// Returns true if negate_value is true and the next character on the +// input stream is a plus or minus sign. In that case we also set the fail bit +// on the stream and set the value to the zero value for its type. +template +inline bool RejectParseDueToLeadingSign(std::istream& is, bool negate_value, + HexFloat& value) { + if (negate_value) { + auto next_char = is.peek(); + if (next_char == '-' || next_char == '+') { + // Fail the parse. Emulate standard behaviour by setting the value to + // the zero value, and set the fail bit on the stream. + value = HexFloat(typename HexFloat::uint_type(0)); + is.setstate(std::ios_base::failbit); + return true; + } + } + return false; +} + +// Parses a floating point number from the given stream and stores it into the +// value parameter. +// If negate_value is true then the number may not have a leading minus or +// plus, and if it successfully parses, then the number is negated before +// being stored into the value parameter. +// If the value cannot be correctly parsed or overflows the target floating +// point type, then set the fail bit on the stream. +// TODO(dneto): Promise C++11 standard behavior in how the value is set in +// the error case, but only after all target platforms implement it correctly. +// In particular, the Microsoft C++ runtime appears to be out of spec. 
+template +inline std::istream& ParseNormalFloat(std::istream& is, bool negate_value, + HexFloat& value) { + if (RejectParseDueToLeadingSign(is, negate_value, value)) { + return is; + } + T val; + is >> val; + if (negate_value) { + val = -val; + } + value.set_value(val); + // In the failure case, map -0.0 to 0.0. + if (is.fail() && value.getUnsignedBits() == 0u) { + value = HexFloat(typename HexFloat::uint_type(0)); + } + if (val.isInfinity()) { + // Fail the parse. Emulate standard behaviour by setting the value to + // the closest normal value, and set the fail bit on the stream. + value.set_value((value.isNegative() || negate_value) ? T::lowest() + : T::max()); + is.setstate(std::ios_base::failbit); + } + return is; +} + +// Specialization of ParseNormalFloat for FloatProxy values. +// This will parse the float as it were a 32-bit floating point number, +// and then round it down to fit into a Float16 value. +// The number is rounded towards zero. +// If negate_value is true then the number may not have a leading minus or +// plus, and if it successfully parses, then the number is negated before +// being stored into the value parameter. +// If the value cannot be correctly parsed or overflows the target floating +// point type, then set the fail bit on the stream. +// TODO(dneto): Promise C++11 standard behavior in how the value is set in +// the error case, but only after all target platforms implement it correctly. +// In particular, the Microsoft C++ runtime appears to be out of spec. +template <> +inline std::istream& +ParseNormalFloat, HexFloatTraits>>( + std::istream& is, bool negate_value, + HexFloat, HexFloatTraits>>& value) { + // First parse as a 32-bit float. + HexFloat> float_val(0.0f); + ParseNormalFloat(is, negate_value, float_val); + + // Then convert to 16-bit float, saturating at infinities, and + // rounding toward zero. + float_val.castTo(value, kRoundToZero); + + // Overflow on 16-bit behaves the same as for 32- and 64-bit: set the + // fail bit and set the lowest or highest value. + if (Float16::isInfinity(value.value().getAsFloat())) { + value.set_value(value.isNegative() ? Float16::lowest() : Float16::max()); + is.setstate(std::ios_base::failbit); + } + return is; +} + +// Reads a HexFloat from the given stream. +// If the float is not encoded as a hex-float then it will be parsed +// as a regular float. +// This may fail if your stream does not support at least one unget. +// Nan values can be encoded with "0x1.p+exponent_bias". +// This would normally overflow a float and round to +// infinity but this special pattern is the exact representation for a NaN, +// and therefore is actually encoded as the correct NaN. To encode inf, +// either 0x0p+exponent_bias can be specified or any exponent greater than +// exponent_bias. +// Examples using IEEE 32-bit float encoding. +// 0x1.0p+128 (+inf) +// -0x1.0p-128 (-inf) +// +// 0x1.1p+128 (+Nan) +// -0x1.1p+128 (-Nan) +// +// 0x1p+129 (+inf) +// -0x1p+129 (-inf) +template +std::istream& operator>>(std::istream& is, HexFloat& value) { + using HF = HexFloat; + using uint_type = typename HF::uint_type; + using int_type = typename HF::int_type; + + value.set_value(static_cast(0.f)); + + if (is.flags() & std::ios::skipws) { + // If the user wants to skip whitespace , then we should obey that. 
+ while (std::isspace(is.peek())) { + is.get(); + } + } + + auto next_char = is.peek(); + bool negate_value = false; + + if (next_char != '-' && next_char != '0') { + return ParseNormalFloat(is, negate_value, value); + } + + if (next_char == '-') { + negate_value = true; + is.get(); + next_char = is.peek(); + } + + if (next_char == '0') { + is.get(); // We may have to unget this. + auto maybe_hex_start = is.peek(); + if (maybe_hex_start != 'x' && maybe_hex_start != 'X') { + is.unget(); + return ParseNormalFloat(is, negate_value, value); + } else { + is.get(); // Throw away the 'x'; + } + } else { + return ParseNormalFloat(is, negate_value, value); + } + + // This "looks" like a hex-float so treat it as one. + bool seen_p = false; + bool seen_dot = false; + uint_type fraction_index = 0; + + uint_type fraction = 0; + int_type exponent = HF::exponent_bias; + + // Strip off leading zeros so we don't have to special-case them later. + while ((next_char = is.peek()) == '0') { + is.get(); + } + + bool is_denorm = + true; // Assume denorm "representation" until we hear otherwise. + // NB: This does not mean the value is actually denorm, + // it just means that it was written 0. + bool bits_written = false; // Stays false until we write a bit. + while (!seen_p && !seen_dot) { + // Handle characters that are left of the fractional part. + if (next_char == '.') { + seen_dot = true; + } else if (next_char == 'p') { + seen_p = true; + } else if (::isxdigit(next_char)) { + // We know this is not denormalized since we have stripped all leading + // zeroes and we are not a ".". + is_denorm = false; + int number = get_nibble_from_character(next_char); + for (int i = 0; i < 4; ++i, number <<= 1) { + uint_type write_bit = (number & 0x8) ? 0x1 : 0x0; + if (bits_written) { + // If we are here the bits represented belong in the fractional + // part of the float, and we have to adjust the exponent accordingly. + fraction = static_cast( + fraction | + static_cast( + write_bit << (HF::top_bit_left_shift - fraction_index++))); + exponent = static_cast(exponent + 1); + } + bits_written |= write_bit != 0; + } + } else { + // We have not found our exponent yet, so we have to fail. + is.setstate(std::ios::failbit); + return is; + } + is.get(); + next_char = is.peek(); + } + bits_written = false; + while (seen_dot && !seen_p) { + // Handle only fractional parts now. + if (next_char == 'p') { + seen_p = true; + } else if (::isxdigit(next_char)) { + int number = get_nibble_from_character(next_char); + for (int i = 0; i < 4; ++i, number <<= 1) { + uint_type write_bit = (number & 0x8) ? 0x01 : 0x00; + bits_written |= write_bit != 0; + if (is_denorm && !bits_written) { + // Handle modifying the exponent here this way we can handle + // an arbitrary number of hex values without overflowing our + // integer. + exponent = static_cast(exponent - 1); + } else { + fraction = static_cast( + fraction | + static_cast( + write_bit << (HF::top_bit_left_shift - fraction_index++))); + } + } + } else { + // We still have not found our 'p' exponent yet, so this is not a valid + // hex-float. + is.setstate(std::ios::failbit); + return is; + } + is.get(); + next_char = is.peek(); + } + + bool seen_sign = false; + int8_t exponent_sign = 1; + int_type written_exponent = 0; + while (true) { + if ((next_char == '-' || next_char == '+')) { + if (seen_sign) { + is.setstate(std::ios::failbit); + return is; + } + seen_sign = true; + exponent_sign = (next_char == '-') ? 
+    } else if (::isdigit(next_char)) {
+      // Hex-floats express their exponent as decimal.
+      written_exponent = static_cast<int_type>(written_exponent * 10);
+      written_exponent =
+          static_cast<int_type>(written_exponent + (next_char - '0'));
+    } else {
+      break;
+    }
+    is.get();
+    next_char = is.peek();
+  }
+
+  written_exponent = static_cast<int_type>(written_exponent * exponent_sign);
+  exponent = static_cast<int_type>(exponent + written_exponent);
+
+  bool is_zero = is_denorm && (fraction == 0);
+  if (is_denorm && !is_zero) {
+    fraction = static_cast<uint_type>(fraction << 1);
+    exponent = static_cast<int_type>(exponent - 1);
+  } else if (is_zero) {
+    exponent = 0;
+  }
+
+  if (exponent <= 0 && !is_zero) {
+    fraction = static_cast<uint_type>(fraction >> 1);
+    fraction |= static_cast<uint_type>(1) << HF::top_bit_left_shift;
+  }
+
+  fraction = (fraction >> HF::fraction_right_shift) & HF::fraction_encode_mask;
+
+  const int_type max_exponent =
+      SetBits<uint_type, 0, HF::num_exponent_bits>::get;
+
+  // Handle actual denorm numbers
+  while (exponent < 0 && !is_zero) {
+    fraction = static_cast<uint_type>(fraction >> 1);
+    exponent = static_cast<int_type>(exponent + 1);
+
+    fraction &= HF::fraction_encode_mask;
+    if (fraction == 0) {
+      // We have underflowed our fraction. We should clamp to zero.
+      is_zero = true;
+      exponent = 0;
+    }
+  }
+
+  // We have overflowed so we should be inf/-inf.
+  if (exponent > max_exponent) {
+    exponent = max_exponent;
+    fraction = 0;
+  }
+
+  uint_type output_bits = static_cast<uint_type>(
+      static_cast<uint_type>(negate_value ? 1 : 0) << HF::top_bit_left_shift);
+  output_bits |= fraction;
+
+  uint_type shifted_exponent = static_cast<uint_type>(
+      static_cast<uint_type>(exponent << HF::exponent_left_shift) &
+      HF::exponent_mask);
+  output_bits |= shifted_exponent;
+
+  T output_float = spvutils::BitwiseCast<T>(output_bits);
+  value.set_value(output_float);
+
+  return is;
+}
+
+// Writes a FloatProxy value to a stream.
+// Zero and normal numbers are printed in the usual notation, but with
+// enough digits to fully reproduce the value. Other values (subnormal,
+// NaN, and infinity) are printed as a hex float.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const FloatProxy<T>& value) {
+  auto float_val = value.getAsFloat();
+  switch (std::fpclassify(float_val)) {
+    case FP_ZERO:
+    case FP_NORMAL: {
+      auto saved_precision = os.precision();
+      os.precision(std::numeric_limits<T>::digits10);
+      os << float_val;
+      os.precision(saved_precision);
+    } break;
+    default:
+      os << HexFloat<FloatProxy<T>>(value);
+      break;
+  }
+  return os;
+}
+
+template <>
+inline std::ostream& operator<<(std::ostream& os,
+                                const FloatProxy<Float16>& value) {
+  os << HexFloat<FloatProxy<Float16>>(value);
+  return os;
+}
+}
+
+#endif  // LIBSPIRV_UTIL_HEX_FLOAT_H_
diff --git a/src/third_party/glslang/SPIRV/spirv.hpp b/src/third_party/glslang/SPIRV/spirv.hpp
new file mode 100644
index 0000000..35482ea
--- /dev/null
+++ b/src/third_party/glslang/SPIRV/spirv.hpp
@@ -0,0 +1,2171 @@
+// Copyright (c) 2014-2020 The Khronos Group Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and/or associated documentation files (the "Materials"),
+// to deal in the Materials without restriction, including without limitation
+// the rights to use, copy, modify, merge, publish, distribute, sublicense,
+// and/or sell copies of the Materials, and to permit persons to whom the
+// Materials are furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Materials.
+// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +// IN THE MATERIALS. + +// This header is automatically generated by the same tool that creates +// the Binary Section of the SPIR-V specification. + +// Enumeration tokens for SPIR-V, in various styles: +// C, C++, C++11, JSON, Lua, Python, C#, D +// +// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +// - C# will use enum classes in the Specification class located in the "Spv" namespace, +// e.g.: Spv.Specification.SourceLanguage.GLSL +// - D will have tokens under the "spv" module, e.g: spv.SourceLanguage.GLSL +// +// Some tokens act like mask values, which can be OR'd together, +// while others are mutually exclusive. The mask-like ones have +// "Mask" in their name, and a parallel enum that has the shift +// amount (1 << x) for each corresponding enumerant. 
+ +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10500 +#define SPV_REVISION 3 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010500; +static const unsigned int Revision = 3; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageMax = 0x7fffffff, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelTaskNV = 5267, + ExecutionModelMeshNV = 5268, + ExecutionModelRayGenerationKHR = 5313, + ExecutionModelRayGenerationNV = 5313, + ExecutionModelIntersectionKHR = 5314, + ExecutionModelIntersectionNV = 5314, + ExecutionModelAnyHitKHR = 5315, + ExecutionModelAnyHitNV = 5315, + ExecutionModelClosestHitKHR = 5316, + ExecutionModelClosestHitNV = 5316, + ExecutionModelMissKHR = 5317, + ExecutionModelMissNV = 5317, + ExecutionModelCallableKHR = 5318, + ExecutionModelCallableNV = 5318, + ExecutionModelMax = 0x7fffffff, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelPhysicalStorageBuffer64 = 5348, + AddressingModelPhysicalStorageBuffer64EXT = 5348, + AddressingModelMax = 0x7fffffff, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelVulkan = 3, + MemoryModelVulkanKHR = 3, + MemoryModelMax = 0x7fffffff, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeDenormPreserve = 4459, + ExecutionModeDenormFlushToZero = 4460, + ExecutionModeSignedZeroInfNanPreserve = 4461, + ExecutionModeRoundingModeRTE = 4462, + ExecutionModeRoundingModeRTZ = 4463, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeOutputLinesNV = 5269, + 
ExecutionModeOutputPrimitivesNV = 5270, + ExecutionModeDerivativeGroupQuadsNV = 5289, + ExecutionModeDerivativeGroupLinearNV = 5290, + ExecutionModeOutputTrianglesNV = 5298, + ExecutionModePixelInterlockOrderedEXT = 5366, + ExecutionModePixelInterlockUnorderedEXT = 5367, + ExecutionModeSampleInterlockOrderedEXT = 5368, + ExecutionModeSampleInterlockUnorderedEXT = 5369, + ExecutionModeShadingRateInterlockOrderedEXT = 5370, + ExecutionModeShadingRateInterlockUnorderedEXT = 5371, + ExecutionModeMaxWorkgroupSizeINTEL = 5893, + ExecutionModeMaxWorkDimINTEL = 5894, + ExecutionModeNoGlobalOffsetINTEL = 5895, + ExecutionModeNumSIMDWorkitemsINTEL = 5896, + ExecutionModeMax = 0x7fffffff, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassCallableDataKHR = 5328, + StorageClassCallableDataNV = 5328, + StorageClassIncomingCallableDataKHR = 5329, + StorageClassIncomingCallableDataNV = 5329, + StorageClassRayPayloadKHR = 5338, + StorageClassRayPayloadNV = 5338, + StorageClassHitAttributeKHR = 5339, + StorageClassHitAttributeNV = 5339, + StorageClassIncomingRayPayloadKHR = 5342, + StorageClassIncomingRayPayloadNV = 5342, + StorageClassShaderRecordBufferKHR = 5343, + StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBuffer = 5349, + StorageClassPhysicalStorageBufferEXT = 5349, + StorageClassCodeSectionINTEL = 5605, + StorageClassMax = 0x7fffffff, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 0x7fffffff, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 0x7fffffff, +}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 0x7fffffff, +}; + +enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 0x7fffffff, +}; + +enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + 
ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 0x7fffffff, +}; + +enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 0x7fffffff, +}; + +enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMakeTexelAvailableShift = 8, + ImageOperandsMakeTexelAvailableKHRShift = 8, + ImageOperandsMakeTexelVisibleShift = 9, + ImageOperandsMakeTexelVisibleKHRShift = 9, + ImageOperandsNonPrivateTexelShift = 10, + ImageOperandsNonPrivateTexelKHRShift = 10, + ImageOperandsVolatileTexelShift = 11, + ImageOperandsVolatileTexelKHRShift = 11, + ImageOperandsSignExtendShift = 12, + ImageOperandsZeroExtendShift = 13, + ImageOperandsMax = 0x7fffffff, +}; + +enum ImageOperandsMask { + ImageOperandsMaskNone = 0, + ImageOperandsBiasMask = 0x00000001, + ImageOperandsLodMask = 0x00000002, + ImageOperandsGradMask = 0x00000004, + ImageOperandsConstOffsetMask = 0x00000008, + ImageOperandsOffsetMask = 0x00000010, + ImageOperandsConstOffsetsMask = 0x00000020, + ImageOperandsSampleMask = 0x00000040, + ImageOperandsMinLodMask = 0x00000080, + ImageOperandsMakeTexelAvailableMask = 0x00000100, + ImageOperandsMakeTexelAvailableKHRMask = 0x00000100, + ImageOperandsMakeTexelVisibleMask = 0x00000200, + ImageOperandsMakeTexelVisibleKHRMask = 0x00000200, + ImageOperandsNonPrivateTexelMask = 0x00000400, + ImageOperandsNonPrivateTexelKHRMask = 0x00000400, + ImageOperandsVolatileTexelMask = 0x00000800, + ImageOperandsVolatileTexelKHRMask = 0x00000800, + ImageOperandsSignExtendMask = 0x00001000, + ImageOperandsZeroExtendMask = 0x00002000, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 0x7fffffff, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 0x7fffffff, +}; + +enum 
LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 0x7fffffff, +}; + +enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 0x7fffffff, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 0x7fffffff, +}; + +enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationUniformId = 27, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationNoSignedWrap = 4469, + DecorationNoUnsignedWrap = 4470, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationPerPrimitiveNV = 5271, + DecorationPerViewNV = 5272, + DecorationPerTaskNV = 5273, + DecorationPerVertexNV = 5285, + DecorationNonUniform = 5300, + DecorationNonUniformEXT = 5300, + DecorationRestrictPointer = 5355, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointer = 5356, + DecorationAliasedPointerEXT = 5356, + DecorationReferencedIndirectlyINTEL = 5602, + DecorationCounterBuffer = 5634, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationUserSemantic = 5635, + DecorationUserTypeGOOGLE = 5636, + DecorationRegisterINTEL = 5825, + DecorationMemoryINTEL = 5826, + DecorationNumbanksINTEL = 5827, + DecorationBankwidthINTEL = 5828, + DecorationMaxPrivateCopiesINTEL = 5829, + DecorationSinglepumpINTEL = 5830, + DecorationDoublepumpINTEL = 5831, + DecorationMaxReplicatesINTEL = 5832, + DecorationSimpleDualPortINTEL = 5833, + DecorationMergeINTEL = 5834, + DecorationBankBitsINTEL = 5835, + DecorationForcePow2DepthINTEL = 5836, + DecorationMax = 0x7fffffff, +}; + +enum BuiltIn { + BuiltInPosition = 0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId 
= 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupEqMaskKHR = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGeMaskKHR = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupGtMaskKHR = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLeMaskKHR = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInSubgroupLtMaskKHR = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInTaskCountNV = 5274, + BuiltInPrimitiveCountNV = 5275, + BuiltInPrimitiveIndicesNV = 5276, + BuiltInClipDistancePerViewNV = 5277, + BuiltInCullDistancePerViewNV = 5278, + BuiltInLayerPerViewNV = 5279, + BuiltInMeshViewCountNV = 5280, + BuiltInMeshViewIndicesNV = 5281, + BuiltInBaryCoordNV = 5286, + BuiltInBaryCoordNoPerspNV = 5287, + BuiltInFragSizeEXT = 5292, + BuiltInFragmentSizeNV = 5292, + BuiltInFragInvocationCountEXT = 5293, + BuiltInInvocationsPerPixelNV = 5293, + BuiltInLaunchIdKHR = 5319, + BuiltInLaunchIdNV = 5319, + BuiltInLaunchSizeKHR = 5320, + BuiltInLaunchSizeNV = 5320, + BuiltInWorldRayOriginKHR = 5321, + BuiltInWorldRayOriginNV = 5321, + BuiltInWorldRayDirectionKHR = 5322, + BuiltInWorldRayDirectionNV = 5322, + BuiltInObjectRayOriginKHR = 5323, + BuiltInObjectRayOriginNV = 5323, + BuiltInObjectRayDirectionKHR = 5324, + BuiltInObjectRayDirectionNV = 5324, + BuiltInRayTminKHR = 5325, + BuiltInRayTminNV = 5325, + BuiltInRayTmaxKHR = 5326, + BuiltInRayTmaxNV = 5326, + BuiltInInstanceCustomIndexKHR = 5327, + BuiltInInstanceCustomIndexNV = 5327, + BuiltInObjectToWorldKHR = 5330, + BuiltInObjectToWorldNV = 5330, + BuiltInWorldToObjectKHR = 5331, + BuiltInWorldToObjectNV = 5331, + BuiltInHitTKHR = 5332, + BuiltInHitTNV = 5332, + BuiltInHitKindKHR = 5333, + BuiltInHitKindNV = 5333, + BuiltInIncomingRayFlagsKHR = 5351, + BuiltInIncomingRayFlagsNV = 5351, + BuiltInRayGeometryIndexKHR = 5352, + BuiltInWarpsPerSMNV = 5374, + BuiltInSMCountNV = 5375, + BuiltInWarpIDNV = 5376, + BuiltInSMIDNV = 5377, + BuiltInMax = 0x7fffffff, +}; + +enum 
SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 0x7fffffff, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMinIterationsShift = 4, + LoopControlMaxIterationsShift = 5, + LoopControlIterationMultipleShift = 6, + LoopControlPeelCountShift = 7, + LoopControlPartialCountShift = 8, + LoopControlInitiationIntervalINTELShift = 16, + LoopControlMaxConcurrencyINTELShift = 17, + LoopControlDependencyArrayINTELShift = 18, + LoopControlPipelineEnableINTELShift = 19, + LoopControlLoopCoalesceINTELShift = 20, + LoopControlMaxInterleavingINTELShift = 21, + LoopControlSpeculatedIterationsINTELShift = 22, + LoopControlMax = 0x7fffffff, +}; + +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, + LoopControlDependencyInfiniteMask = 0x00000004, + LoopControlDependencyLengthMask = 0x00000008, + LoopControlMinIterationsMask = 0x00000010, + LoopControlMaxIterationsMask = 0x00000020, + LoopControlIterationMultipleMask = 0x00000040, + LoopControlPeelCountMask = 0x00000080, + LoopControlPartialCountMask = 0x00000100, + LoopControlInitiationIntervalINTELMask = 0x00010000, + LoopControlMaxConcurrencyINTELMask = 0x00020000, + LoopControlDependencyArrayINTELMask = 0x00040000, + LoopControlPipelineEnableINTELMask = 0x00080000, + LoopControlLoopCoalesceINTELMask = 0x00100000, + LoopControlMaxInterleavingINTELMask = 0x00200000, + LoopControlSpeculatedIterationsINTELMask = 0x00400000, +}; + +enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 0x7fffffff, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, +}; + +enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsOutputMemoryShift = 12, + MemorySemanticsOutputMemoryKHRShift = 12, + MemorySemanticsMakeAvailableShift = 13, + MemorySemanticsMakeAvailableKHRShift = 13, + MemorySemanticsMakeVisibleShift = 14, + MemorySemanticsMakeVisibleKHRShift = 14, + MemorySemanticsVolatileShift = 15, + MemorySemanticsMax = 0x7fffffff, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsAcquireMask = 0x00000002, + MemorySemanticsReleaseMask = 0x00000004, + MemorySemanticsAcquireReleaseMask = 0x00000008, + MemorySemanticsSequentiallyConsistentMask = 0x00000010, + MemorySemanticsUniformMemoryMask = 0x00000040, + MemorySemanticsSubgroupMemoryMask = 0x00000080, + MemorySemanticsWorkgroupMemoryMask = 0x00000100, + MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + 
MemorySemanticsAtomicCounterMemoryMask = 0x00000400, + MemorySemanticsImageMemoryMask = 0x00000800, + MemorySemanticsOutputMemoryMask = 0x00001000, + MemorySemanticsOutputMemoryKHRMask = 0x00001000, + MemorySemanticsMakeAvailableMask = 0x00002000, + MemorySemanticsMakeAvailableKHRMask = 0x00002000, + MemorySemanticsMakeVisibleMask = 0x00004000, + MemorySemanticsMakeVisibleKHRMask = 0x00004000, + MemorySemanticsVolatileMask = 0x00008000, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMakePointerAvailableShift = 3, + MemoryAccessMakePointerAvailableKHRShift = 3, + MemoryAccessMakePointerVisibleShift = 4, + MemoryAccessMakePointerVisibleKHRShift = 4, + MemoryAccessNonPrivatePointerShift = 5, + MemoryAccessNonPrivatePointerKHRShift = 5, + MemoryAccessMax = 0x7fffffff, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, + MemoryAccessNontemporalMask = 0x00000004, + MemoryAccessMakePointerAvailableMask = 0x00000008, + MemoryAccessMakePointerAvailableKHRMask = 0x00000008, + MemoryAccessMakePointerVisibleMask = 0x00000010, + MemoryAccessMakePointerVisibleKHRMask = 0x00000010, + MemoryAccessNonPrivatePointerMask = 0x00000020, + MemoryAccessNonPrivatePointerKHRMask = 0x00000020, +}; + +enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeQueueFamily = 5, + ScopeQueueFamilyKHR = 5, + ScopeShaderCallKHR = 6, + ScopeMax = 0x7fffffff, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationPartitionedReduceNV = 6, + GroupOperationPartitionedInclusiveScanNV = 7, + GroupOperationPartitionedExclusiveScanNV = 8, + GroupOperationMax = 0x7fffffff, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, + KernelEnqueueFlagsMax = 0x7fffffff, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 0x7fffffff, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + 
CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilityShaderLayer = 69, + CapabilityShaderViewportIndex = 70, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniformBufferBlock16 = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityUniformAndStorageBuffer16BitAccess = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityStorageBuffer8BitAccess = 4448, + CapabilityUniformAndStorageBuffer8BitAccess = 4449, + CapabilityStoragePushConstant8 = 4450, + CapabilityDenormPreserve = 4464, + CapabilityDenormFlushToZero = 4465, + CapabilitySignedZeroInfNanPreserve = 4466, + CapabilityRoundingModeRTE = 4467, + CapabilityRoundingModeRTZ = 4468, + CapabilityRayQueryProvisionalKHR = 4471, + CapabilityRayTraversalPrimitiveCullingProvisionalKHR = 4478, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilityShaderClockKHR = 5055, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportIndexLayerNV = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilityMeshShadingNV = 5266, + CapabilityImageFootprintNV = 5282, + CapabilityFragmentBarycentricNV = 5284, + CapabilityComputeDerivativeGroupQuadsNV = 5288, + CapabilityFragmentDensityEXT = 5291, + CapabilityShadingRateNV = 5291, + CapabilityGroupNonUniformPartitionedNV = 5297, + CapabilityShaderNonUniform = 5301, + CapabilityShaderNonUniformEXT = 5301, + CapabilityRuntimeDescriptorArray = 5302, + CapabilityRuntimeDescriptorArrayEXT = 5302, + CapabilityInputAttachmentArrayDynamicIndexing = 5303, + CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303, + CapabilityUniformTexelBufferArrayDynamicIndexing = 5304, + CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304, + CapabilityStorageTexelBufferArrayDynamicIndexing = 5305, + 
CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305, + CapabilityUniformBufferArrayNonUniformIndexing = 5306, + CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306, + CapabilitySampledImageArrayNonUniformIndexing = 5307, + CapabilitySampledImageArrayNonUniformIndexingEXT = 5307, + CapabilityStorageBufferArrayNonUniformIndexing = 5308, + CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308, + CapabilityStorageImageArrayNonUniformIndexing = 5309, + CapabilityStorageImageArrayNonUniformIndexingEXT = 5309, + CapabilityInputAttachmentArrayNonUniformIndexing = 5310, + CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310, + CapabilityUniformTexelBufferArrayNonUniformIndexing = 5311, + CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311, + CapabilityStorageTexelBufferArrayNonUniformIndexing = 5312, + CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312, + CapabilityRayTracingNV = 5340, + CapabilityVulkanMemoryModel = 5345, + CapabilityVulkanMemoryModelKHR = 5345, + CapabilityVulkanMemoryModelDeviceScope = 5346, + CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddresses = 5347, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, + CapabilityComputeDerivativeGroupLinearNV = 5350, + CapabilityRayTracingProvisionalKHR = 5353, + CapabilityCooperativeMatrixNV = 5357, + CapabilityFragmentShaderSampleInterlockEXT = 5363, + CapabilityFragmentShaderShadingRateInterlockEXT = 5372, + CapabilityShaderSMBuiltinsNV = 5373, + CapabilityFragmentShaderPixelInterlockEXT = 5378, + CapabilityDemoteToHelperInvocationEXT = 5379, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilitySubgroupImageMediaBlockIOINTEL = 5579, + CapabilityIntegerFunctions2INTEL = 5584, + CapabilityFunctionPointersINTEL = 5603, + CapabilityIndirectReferencesINTEL = 5604, + CapabilitySubgroupAvcMotionEstimationINTEL = 5696, + CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697, + CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698, + CapabilityFPGAMemoryAttributesINTEL = 5824, + CapabilityUnstructuredLoopControlsINTEL = 5886, + CapabilityFPGALoopControlsINTEL = 5888, + CapabilityKernelAttributesINTEL = 5892, + CapabilityFPGAKernelAttributesINTEL = 5897, + CapabilityBlockingPipesINTEL = 5945, + CapabilityFPGARegINTEL = 5948, + CapabilityAtomicFloat32AddEXT = 6033, + CapabilityAtomicFloat64AddEXT = 6034, + CapabilityMax = 0x7fffffff, +}; + +enum RayFlagsShift { + RayFlagsOpaqueKHRShift = 0, + RayFlagsNoOpaqueKHRShift = 1, + RayFlagsTerminateOnFirstHitKHRShift = 2, + RayFlagsSkipClosestHitShaderKHRShift = 3, + RayFlagsCullBackFacingTrianglesKHRShift = 4, + RayFlagsCullFrontFacingTrianglesKHRShift = 5, + RayFlagsCullOpaqueKHRShift = 6, + RayFlagsCullNoOpaqueKHRShift = 7, + RayFlagsSkipTrianglesKHRShift = 8, + RayFlagsSkipAABBsKHRShift = 9, + RayFlagsMax = 0x7fffffff, +}; + +enum RayFlagsMask { + RayFlagsMaskNone = 0, + RayFlagsOpaqueKHRMask = 0x00000001, + RayFlagsNoOpaqueKHRMask = 0x00000002, + RayFlagsTerminateOnFirstHitKHRMask = 0x00000004, + RayFlagsSkipClosestHitShaderKHRMask = 0x00000008, + RayFlagsCullBackFacingTrianglesKHRMask = 0x00000010, + RayFlagsCullFrontFacingTrianglesKHRMask = 0x00000020, + RayFlagsCullOpaqueKHRMask = 0x00000040, + RayFlagsCullNoOpaqueKHRMask = 0x00000080, + RayFlagsSkipTrianglesKHRMask = 0x00000100, + RayFlagsSkipAABBsKHRMask = 0x00000200, +}; + +enum RayQueryIntersection { + RayQueryIntersectionRayQueryCandidateIntersectionKHR = 0, + 
RayQueryIntersectionRayQueryCommittedIntersectionKHR = 1, + RayQueryIntersectionMax = 0x7fffffff, +}; + +enum RayQueryCommittedIntersectionType { + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionNoneKHR = 0, + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionTriangleKHR = 1, + RayQueryCommittedIntersectionTypeRayQueryCommittedIntersectionGeneratedKHR = 2, + RayQueryCommittedIntersectionTypeMax = 0x7fffffff, +}; + +enum RayQueryCandidateIntersectionType { + RayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionTriangleKHR = 0, + RayQueryCandidateIntersectionTypeRayQueryCandidateIntersectionAABBKHR = 1, + RayQueryCandidateIntersectionTypeMax = 0x7fffffff, +}; + +enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 
128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + 
OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpCopyLogical = 400, + OpPtrEqual = 401, + OpPtrNotEqual = 402, + OpPtrDiff = 403, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpTypeRayQueryProvisionalKHR = 4472, + OpRayQueryInitializeKHR = 4473, + OpRayQueryTerminateKHR = 4474, + OpRayQueryGenerateIntersectionKHR = 4475, + OpRayQueryConfirmIntersectionKHR = 4476, + OpRayQueryProceedKHR = 4477, + OpRayQueryGetIntersectionTypeKHR = 4479, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpReadClockKHR = 5056, + OpImageSampleFootprintNV = 5283, + OpGroupNonUniformPartitionNV = 5296, + OpWritePackedPrimitiveIndices4x8NV = 5299, + OpReportIntersectionKHR = 5334, + OpReportIntersectionNV = 5334, + 
OpIgnoreIntersectionKHR = 5335, + OpIgnoreIntersectionNV = 5335, + OpTerminateRayKHR = 5336, + OpTerminateRayNV = 5336, + OpTraceNV = 5337, + OpTraceRayKHR = 5337, + OpTypeAccelerationStructureKHR = 5341, + OpTypeAccelerationStructureNV = 5341, + OpExecuteCallableKHR = 5344, + OpExecuteCallableNV = 5344, + OpTypeCooperativeMatrixNV = 5358, + OpCooperativeMatrixLoadNV = 5359, + OpCooperativeMatrixStoreNV = 5360, + OpCooperativeMatrixMulAddNV = 5361, + OpCooperativeMatrixLengthNV = 5362, + OpBeginInvocationInterlockEXT = 5364, + OpEndInvocationInterlockEXT = 5365, + OpDemoteToHelperInvocationEXT = 5380, + OpIsHelperInvocationEXT = 5381, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpSubgroupImageMediaBlockReadINTEL = 5580, + OpSubgroupImageMediaBlockWriteINTEL = 5581, + OpUCountLeadingZerosINTEL = 5585, + OpUCountTrailingZerosINTEL = 5586, + OpAbsISubINTEL = 5587, + OpAbsUSubINTEL = 5588, + OpIAddSatINTEL = 5589, + OpUAddSatINTEL = 5590, + OpIAverageINTEL = 5591, + OpUAverageINTEL = 5592, + OpIAverageRoundedINTEL = 5593, + OpUAverageRoundedINTEL = 5594, + OpISubSatINTEL = 5595, + OpUSubSatINTEL = 5596, + OpIMul32x16INTEL = 5597, + OpUMul32x16INTEL = 5598, + OpFunctionPointerINTEL = 5600, + OpFunctionPointerCallINTEL = 5601, + OpDecorateString = 5632, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateString = 5633, + OpMemberDecorateStringGOOGLE = 5633, + OpVmeImageINTEL = 5699, + OpTypeVmeImageINTEL = 5700, + OpTypeAvcImePayloadINTEL = 5701, + OpTypeAvcRefPayloadINTEL = 5702, + OpTypeAvcSicPayloadINTEL = 5703, + OpTypeAvcMcePayloadINTEL = 5704, + OpTypeAvcMceResultINTEL = 5705, + OpTypeAvcImeResultINTEL = 5706, + OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707, + OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708, + OpTypeAvcImeSingleReferenceStreaminINTEL = 5709, + OpTypeAvcImeDualReferenceStreaminINTEL = 5710, + OpTypeAvcRefResultINTEL = 5711, + OpTypeAvcSicResultINTEL = 5712, + OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713, + OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714, + OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715, + OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716, + OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717, + OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718, + OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719, + OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720, + OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721, + OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722, + OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723, + OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724, + OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725, + OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726, + OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727, + OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728, + OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729, + OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730, + OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731, + OpSubgroupAvcMceConvertToImePayloadINTEL = 5732, + OpSubgroupAvcMceConvertToImeResultINTEL = 5733, + OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734, + 
OpSubgroupAvcMceConvertToRefResultINTEL = 5735, + OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736, + OpSubgroupAvcMceConvertToSicResultINTEL = 5737, + OpSubgroupAvcMceGetMotionVectorsINTEL = 5738, + OpSubgroupAvcMceGetInterDistortionsINTEL = 5739, + OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740, + OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741, + OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742, + OpSubgroupAvcMceGetInterDirectionsINTEL = 5743, + OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744, + OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745, + OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746, + OpSubgroupAvcImeInitializeINTEL = 5747, + OpSubgroupAvcImeSetSingleReferenceINTEL = 5748, + OpSubgroupAvcImeSetDualReferenceINTEL = 5749, + OpSubgroupAvcImeRefWindowSizeINTEL = 5750, + OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751, + OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752, + OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753, + OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754, + OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755, + OpSubgroupAvcImeSetWeightedSadINTEL = 5756, + OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757, + OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761, + OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762, + OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763, + OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764, + OpSubgroupAvcImeConvertToMceResultINTEL = 5765, + OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766, + OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767, + OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768, + OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771, + OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774, + OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775, + OpSubgroupAvcImeGetBorderReachedINTEL = 5776, + OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777, + OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778, + OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779, + OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780, + OpSubgroupAvcFmeInitializeINTEL = 5781, + OpSubgroupAvcBmeInitializeINTEL = 5782, + OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783, + OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784, + OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785, + OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786, + OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787, + OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788, + OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789, + OpSubgroupAvcRefConvertToMceResultINTEL = 5790, + OpSubgroupAvcSicInitializeINTEL = 5791, + OpSubgroupAvcSicConfigureSkcINTEL = 5792, + OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793, + OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794, + OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795, + 
OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796, + OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797, + OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798, + OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799, + OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800, + OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801, + OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802, + OpSubgroupAvcSicEvaluateIpeINTEL = 5803, + OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804, + OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805, + OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806, + OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807, + OpSubgroupAvcSicConvertToMceResultINTEL = 5808, + OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809, + OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810, + OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811, + OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812, + OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813, + OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814, + OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815, + OpSubgroupAvcSicGetInterRawSadsINTEL = 5816, + OpLoopControlINTEL = 5887, + OpReadPipeBlockingINTEL = 5946, + OpWritePipeBlockingINTEL = 5947, + OpFPGARegINTEL = 5949, + OpRayQueryGetRayTMinKHR = 6016, + OpRayQueryGetRayFlagsKHR = 6017, + OpRayQueryGetIntersectionTKHR = 6018, + OpRayQueryGetIntersectionInstanceCustomIndexKHR = 6019, + OpRayQueryGetIntersectionInstanceIdKHR = 6020, + OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR = 6021, + OpRayQueryGetIntersectionGeometryIndexKHR = 6022, + OpRayQueryGetIntersectionPrimitiveIndexKHR = 6023, + OpRayQueryGetIntersectionBarycentricsKHR = 6024, + OpRayQueryGetIntersectionFrontFaceKHR = 6025, + OpRayQueryGetIntersectionCandidateAABBOpaqueKHR = 6026, + OpRayQueryGetIntersectionObjectRayDirectionKHR = 6027, + OpRayQueryGetIntersectionObjectRayOriginKHR = 6028, + OpRayQueryGetWorldRayDirectionKHR = 6029, + OpRayQueryGetWorldRayOriginKHR = 6030, + OpRayQueryGetIntersectionObjectToWorldKHR = 6031, + OpRayQueryGetIntersectionWorldToObjectKHR = 6032, + OpAtomicFAddEXT = 6035, + OpMax = 0x7fffffff, +}; + +#ifdef SPV_ENABLE_UTILITY_CODE +inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) { + *hasResult = *hasResultType = false; + switch (opcode) { + default: /* unknown opcode */ break; + case OpNop: *hasResult = false; *hasResultType = false; break; + case OpUndef: *hasResult = true; *hasResultType = true; break; + case OpSourceContinued: *hasResult = false; *hasResultType = false; break; + case OpSource: *hasResult = false; *hasResultType = false; break; + case OpSourceExtension: *hasResult = false; *hasResultType = false; break; + case OpName: *hasResult = false; *hasResultType = false; break; + case OpMemberName: *hasResult = false; *hasResultType = false; break; + case OpString: *hasResult = true; *hasResultType = false; break; + case OpLine: *hasResult = false; *hasResultType = false; break; + case OpExtension: *hasResult = false; *hasResultType = false; break; + case OpExtInstImport: *hasResult = true; *hasResultType = false; break; + case OpExtInst: *hasResult = true; *hasResultType = true; break; + case OpMemoryModel: *hasResult = false; *hasResultType = false; break; + case OpEntryPoint: *hasResult = false; *hasResultType = false; break; + case OpExecutionMode: *hasResult = false; *hasResultType = false; break; + case OpCapability: *hasResult = false; *hasResultType = false; break; + case OpTypeVoid: 
*hasResult = true; *hasResultType = false; break; + case OpTypeBool: *hasResult = true; *hasResultType = false; break; + case OpTypeInt: *hasResult = true; *hasResultType = false; break; + case OpTypeFloat: *hasResult = true; *hasResultType = false; break; + case OpTypeVector: *hasResult = true; *hasResultType = false; break; + case OpTypeMatrix: *hasResult = true; *hasResultType = false; break; + case OpTypeImage: *hasResult = true; *hasResultType = false; break; + case OpTypeSampler: *hasResult = true; *hasResultType = false; break; + case OpTypeSampledImage: *hasResult = true; *hasResultType = false; break; + case OpTypeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break; + case OpTypeStruct: *hasResult = true; *hasResultType = false; break; + case OpTypeOpaque: *hasResult = true; *hasResultType = false; break; + case OpTypePointer: *hasResult = true; *hasResultType = false; break; + case OpTypeFunction: *hasResult = true; *hasResultType = false; break; + case OpTypeEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break; + case OpTypeReserveId: *hasResult = true; *hasResultType = false; break; + case OpTypeQueue: *hasResult = true; *hasResultType = false; break; + case OpTypePipe: *hasResult = true; *hasResultType = false; break; + case OpTypeForwardPointer: *hasResult = false; *hasResultType = false; break; + case OpConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpConstant: *hasResult = true; *hasResultType = true; break; + case OpConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpConstantSampler: *hasResult = true; *hasResultType = true; break; + case OpConstantNull: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantTrue: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantFalse: *hasResult = true; *hasResultType = true; break; + case OpSpecConstant: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantComposite: *hasResult = true; *hasResultType = true; break; + case OpSpecConstantOp: *hasResult = true; *hasResultType = true; break; + case OpFunction: *hasResult = true; *hasResultType = true; break; + case OpFunctionParameter: *hasResult = true; *hasResultType = true; break; + case OpFunctionEnd: *hasResult = false; *hasResultType = false; break; + case OpFunctionCall: *hasResult = true; *hasResultType = true; break; + case OpVariable: *hasResult = true; *hasResultType = true; break; + case OpImageTexelPointer: *hasResult = true; *hasResultType = true; break; + case OpLoad: *hasResult = true; *hasResultType = true; break; + case OpStore: *hasResult = false; *hasResultType = false; break; + case OpCopyMemory: *hasResult = false; *hasResultType = false; break; + case OpCopyMemorySized: *hasResult = false; *hasResultType = false; break; + case OpAccessChain: *hasResult = true; *hasResultType = true; break; + case OpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break; + case OpPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpArrayLength: *hasResult = true; *hasResultType = true; break; + case OpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break; + case OpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break; + case OpDecorate: *hasResult = false; *hasResultType = false; break; + 
case OpMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpDecorationGroup: *hasResult = true; *hasResultType = false; break; + case OpGroupDecorate: *hasResult = false; *hasResultType = false; break; + case OpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break; + case OpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break; + case OpVectorShuffle: *hasResult = true; *hasResultType = true; break; + case OpCompositeConstruct: *hasResult = true; *hasResultType = true; break; + case OpCompositeExtract: *hasResult = true; *hasResultType = true; break; + case OpCompositeInsert: *hasResult = true; *hasResultType = true; break; + case OpCopyObject: *hasResult = true; *hasResultType = true; break; + case OpTranspose: *hasResult = true; *hasResultType = true; break; + case OpSampledImage: *hasResult = true; *hasResultType = true; break; + case OpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageFetch: *hasResult = true; *hasResultType = true; break; + case OpImageGather: *hasResult = true; *hasResultType = true; break; + case OpImageDrefGather: *hasResult = true; *hasResultType = true; break; + case OpImageRead: *hasResult = true; *hasResultType = true; break; + case OpImageWrite: *hasResult = false; *hasResultType = false; break; + case OpImage: *hasResult = true; *hasResultType = true; break; + case OpImageQueryFormat: *hasResult = true; *hasResultType = true; break; + case OpImageQueryOrder: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySize: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLod: *hasResult = true; *hasResultType = true; break; + case OpImageQueryLevels: *hasResult = true; *hasResultType = true; break; + case OpImageQuerySamples: *hasResult = true; *hasResultType = true; break; + case OpConvertFToU: *hasResult = true; *hasResultType = true; break; + case OpConvertFToS: *hasResult = true; *hasResultType = true; break; + case OpConvertSToF: *hasResult = true; *hasResultType = true; break; + case OpConvertUToF: *hasResult = true; *hasResultType = true; break; + case OpUConvert: *hasResult = true; *hasResultType = true; break; + case OpSConvert: *hasResult = true; *hasResultType = true; break; + case OpFConvert: *hasResult = true; *hasResultType = true; break; + case OpQuantizeToF16: *hasResult = true; *hasResultType = true; break; + case OpConvertPtrToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertSToU: *hasResult = true; *hasResultType = true; break; + case OpSatConvertUToS: *hasResult = true; *hasResultType = true; break; + case OpConvertUToPtr: *hasResult = true; *hasResultType = true; break; + case OpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break; + case 
OpGenericCastToPtr: *hasResult = true; *hasResultType = true; break; + case OpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break; + case OpBitcast: *hasResult = true; *hasResultType = true; break; + case OpSNegate: *hasResult = true; *hasResultType = true; break; + case OpFNegate: *hasResult = true; *hasResultType = true; break; + case OpIAdd: *hasResult = true; *hasResultType = true; break; + case OpFAdd: *hasResult = true; *hasResultType = true; break; + case OpISub: *hasResult = true; *hasResultType = true; break; + case OpFSub: *hasResult = true; *hasResultType = true; break; + case OpIMul: *hasResult = true; *hasResultType = true; break; + case OpFMul: *hasResult = true; *hasResultType = true; break; + case OpUDiv: *hasResult = true; *hasResultType = true; break; + case OpSDiv: *hasResult = true; *hasResultType = true; break; + case OpFDiv: *hasResult = true; *hasResultType = true; break; + case OpUMod: *hasResult = true; *hasResultType = true; break; + case OpSRem: *hasResult = true; *hasResultType = true; break; + case OpSMod: *hasResult = true; *hasResultType = true; break; + case OpFRem: *hasResult = true; *hasResultType = true; break; + case OpFMod: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break; + case OpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesVector: *hasResult = true; *hasResultType = true; break; + case OpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break; + case OpOuterProduct: *hasResult = true; *hasResultType = true; break; + case OpDot: *hasResult = true; *hasResultType = true; break; + case OpIAddCarry: *hasResult = true; *hasResultType = true; break; + case OpISubBorrow: *hasResult = true; *hasResultType = true; break; + case OpUMulExtended: *hasResult = true; *hasResultType = true; break; + case OpSMulExtended: *hasResult = true; *hasResultType = true; break; + case OpAny: *hasResult = true; *hasResultType = true; break; + case OpAll: *hasResult = true; *hasResultType = true; break; + case OpIsNan: *hasResult = true; *hasResultType = true; break; + case OpIsInf: *hasResult = true; *hasResultType = true; break; + case OpIsFinite: *hasResult = true; *hasResultType = true; break; + case OpIsNormal: *hasResult = true; *hasResultType = true; break; + case OpSignBitSet: *hasResult = true; *hasResultType = true; break; + case OpLessOrGreater: *hasResult = true; *hasResultType = true; break; + case OpOrdered: *hasResult = true; *hasResultType = true; break; + case OpUnordered: *hasResult = true; *hasResultType = true; break; + case OpLogicalEqual: *hasResult = true; *hasResultType = true; break; + case OpLogicalNotEqual: *hasResult = true; *hasResultType = true; break; + case OpLogicalOr: *hasResult = true; *hasResultType = true; break; + case OpLogicalAnd: *hasResult = true; *hasResultType = true; break; + case OpLogicalNot: *hasResult = true; *hasResultType = true; break; + case OpSelect: *hasResult = true; *hasResultType = true; break; + case OpIEqual: *hasResult = true; *hasResultType = true; break; + case OpINotEqual: *hasResult = true; *hasResultType = true; break; + case OpUGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpSGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpSGreaterThanEqual: *hasResult = true; 
*hasResultType = true; break; + case OpULessThan: *hasResult = true; *hasResultType = true; break; + case OpSLessThan: *hasResult = true; *hasResultType = true; break; + case OpULessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpSLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdNotEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordNotEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdLessThan: *hasResult = true; *hasResultType = true; break; + case OpFUnordLessThan: *hasResult = true; *hasResultType = true; break; + case OpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break; + case OpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break; + case OpShiftRightLogical: *hasResult = true; *hasResultType = true; break; + case OpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break; + case OpShiftLeftLogical: *hasResult = true; *hasResultType = true; break; + case OpBitwiseOr: *hasResult = true; *hasResultType = true; break; + case OpBitwiseXor: *hasResult = true; *hasResultType = true; break; + case OpBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case OpNot: *hasResult = true; *hasResultType = true; break; + case OpBitFieldInsert: *hasResult = true; *hasResultType = true; break; + case OpBitFieldSExtract: *hasResult = true; *hasResultType = true; break; + case OpBitFieldUExtract: *hasResult = true; *hasResultType = true; break; + case OpBitReverse: *hasResult = true; *hasResultType = true; break; + case OpBitCount: *hasResult = true; *hasResultType = true; break; + case OpDPdx: *hasResult = true; *hasResultType = true; break; + case OpDPdy: *hasResult = true; *hasResultType = true; break; + case OpFwidth: *hasResult = true; *hasResultType = true; break; + case OpDPdxFine: *hasResult = true; *hasResultType = true; break; + case OpDPdyFine: *hasResult = true; *hasResultType = true; break; + case OpFwidthFine: *hasResult = true; *hasResultType = true; break; + case OpDPdxCoarse: *hasResult = true; *hasResultType = true; break; + case OpDPdyCoarse: *hasResult = true; *hasResultType = true; break; + case OpFwidthCoarse: *hasResult = true; *hasResultType = true; break; + case OpEmitVertex: *hasResult = false; *hasResultType = false; break; + case OpEndPrimitive: *hasResult = false; *hasResultType = false; break; + case OpEmitStreamVertex: *hasResult = false; *hasResultType = false; break; + case OpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break; + case OpControlBarrier: *hasResult = false; *hasResultType = false; break; + case OpMemoryBarrier: *hasResult = false; *hasResultType = false; break; + case OpAtomicLoad: *hasResult = true; *hasResultType = true; break; + case OpAtomicStore: *hasResult = false; *hasResultType = false; break; + case OpAtomicExchange: *hasResult = true; *hasResultType = true; break; + case OpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break; + case OpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break; + case 
OpAtomicIIncrement: *hasResult = true; *hasResultType = true; break; + case OpAtomicIDecrement: *hasResult = true; *hasResultType = true; break; + case OpAtomicIAdd: *hasResult = true; *hasResultType = true; break; + case OpAtomicISub: *hasResult = true; *hasResultType = true; break; + case OpAtomicSMin: *hasResult = true; *hasResultType = true; break; + case OpAtomicUMin: *hasResult = true; *hasResultType = true; break; + case OpAtomicSMax: *hasResult = true; *hasResultType = true; break; + case OpAtomicUMax: *hasResult = true; *hasResultType = true; break; + case OpAtomicAnd: *hasResult = true; *hasResultType = true; break; + case OpAtomicOr: *hasResult = true; *hasResultType = true; break; + case OpAtomicXor: *hasResult = true; *hasResultType = true; break; + case OpPhi: *hasResult = true; *hasResultType = true; break; + case OpLoopMerge: *hasResult = false; *hasResultType = false; break; + case OpSelectionMerge: *hasResult = false; *hasResultType = false; break; + case OpLabel: *hasResult = true; *hasResultType = false; break; + case OpBranch: *hasResult = false; *hasResultType = false; break; + case OpBranchConditional: *hasResult = false; *hasResultType = false; break; + case OpSwitch: *hasResult = false; *hasResultType = false; break; + case OpKill: *hasResult = false; *hasResultType = false; break; + case OpReturn: *hasResult = false; *hasResultType = false; break; + case OpReturnValue: *hasResult = false; *hasResultType = false; break; + case OpUnreachable: *hasResult = false; *hasResultType = false; break; + case OpLifetimeStart: *hasResult = false; *hasResultType = false; break; + case OpLifetimeStop: *hasResult = false; *hasResultType = false; break; + case OpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break; + case OpGroupWaitEvents: *hasResult = false; *hasResultType = false; break; + case OpGroupAll: *hasResult = true; *hasResultType = true; break; + case OpGroupAny: *hasResult = true; *hasResultType = true; break; + case OpGroupBroadcast: *hasResult = true; *hasResultType = true; break; + case OpGroupIAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupFAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupFMin: *hasResult = true; *hasResultType = true; break; + case OpGroupUMin: *hasResult = true; *hasResultType = true; break; + case OpGroupSMin: *hasResult = true; *hasResultType = true; break; + case OpGroupFMax: *hasResult = true; *hasResultType = true; break; + case OpGroupUMax: *hasResult = true; *hasResultType = true; break; + case OpGroupSMax: *hasResult = true; *hasResultType = true; break; + case OpReadPipe: *hasResult = true; *hasResultType = true; break; + case OpWritePipe: *hasResult = true; *hasResultType = true; break; + case OpReservedReadPipe: *hasResult = true; *hasResultType = true; break; + case OpReservedWritePipe: *hasResult = true; *hasResultType = true; break; + case OpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case OpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case OpCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case OpCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case OpIsValidReserveId: *hasResult = true; *hasResultType = true; break; + case OpGetNumPipePackets: *hasResult = true; *hasResultType = true; break; + case OpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break; + case OpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break; + case 
OpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break; + case OpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break; + case OpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break; + case OpEnqueueMarker: *hasResult = true; *hasResultType = true; break; + case OpEnqueueKernel: *hasResult = true; *hasResultType = true; break; + case OpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break; + case OpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break; + case OpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break; + case OpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break; + case OpRetainEvent: *hasResult = false; *hasResultType = false; break; + case OpReleaseEvent: *hasResult = false; *hasResultType = false; break; + case OpCreateUserEvent: *hasResult = true; *hasResultType = true; break; + case OpIsValidEvent: *hasResult = true; *hasResultType = true; break; + case OpSetUserEventStatus: *hasResult = false; *hasResultType = false; break; + case OpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break; + case OpGetDefaultQueue: *hasResult = true; *hasResultType = true; break; + case OpBuildNDRange: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break; + case OpImageSparseFetch: *hasResult = true; *hasResultType = true; break; + case OpImageSparseGather: *hasResult = true; *hasResultType = true; break; + case OpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break; + case OpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break; + case OpNoLine: *hasResult = false; *hasResultType = false; break; + case OpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break; + case OpAtomicFlagClear: *hasResult = false; *hasResultType = false; break; + case OpImageSparseRead: *hasResult = true; *hasResultType = true; break; + case OpSizeOf: *hasResult = true; *hasResultType = true; break; + case OpTypePipeStorage: *hasResult = true; *hasResultType = false; break; + case OpConstantPipeStorage: *hasResult = true; *hasResultType = true; break; + case OpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break; + case OpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break; + case OpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break; + case OpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break; + case OpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break; + case OpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break; + case OpModuleProcessed: *hasResult = false; *hasResultType = false; break; + case OpExecutionModeId: *hasResult = 
false; *hasResultType = false; break; + case OpDecorateId: *hasResult = false; *hasResultType = false; break; + case OpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break; + case OpCopyLogical: *hasResult = true; *hasResultType = true; break; + case OpPtrEqual: *hasResult = true; *hasResultType = true; break; + case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break; + case OpPtrDiff: *hasResult = true; *hasResultType = true; break; + case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAllEqualKHR: 
*hasResult = true; *hasResultType = true; break; + case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break; + case OpTypeRayQueryProvisionalKHR: *hasResult = true; *hasResultType = false; break; + case OpRayQueryInitializeKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryTerminateKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryGenerateIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryConfirmIntersectionKHR: *hasResult = false; *hasResultType = false; break; + case OpRayQueryProceedKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionTypeKHR: *hasResult = true; *hasResultType = true; break; + case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break; + case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break; + case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break; + case OpReadClockKHR: *hasResult = true; *hasResultType = true; break; + case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break; + case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break; + case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break; + case OpReportIntersectionNV: *hasResult = true; *hasResultType = true; break; + case OpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break; + case OpTerminateRayNV: *hasResult = false; *hasResultType = false; break; + case OpTraceNV: *hasResult = false; *hasResultType = false; break; + case OpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break; + case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break; + case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break; + case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break; + case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break; + case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break; + case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break; + case OpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case OpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break; + case OpDemoteToHelperInvocationEXT: *hasResult = false; *hasResultType = false; break; + case OpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case 
OpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case OpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break; + case OpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case OpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break; + case OpAbsISubINTEL: *hasResult = true; *hasResultType = true; break; + case OpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break; + case OpIAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpUAddSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpIAverageINTEL: *hasResult = true; *hasResultType = true; break; + case OpUAverageINTEL: *hasResult = true; *hasResultType = true; break; + case OpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case OpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break; + case OpISubSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpUSubSatINTEL: *hasResult = true; *hasResultType = true; break; + case OpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case OpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break; + case OpFunctionPointerINTEL: *hasResult = true; *hasResultType = true; break; + case OpFunctionPointerCallINTEL: *hasResult = true; *hasResultType = true; break; + case OpDecorateString: *hasResult = false; *hasResultType = false; break; + case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break; + case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break; + case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break; + case OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; 
*hasResultType = true; break; + case OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break; + case 
OpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case 
OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break; + case OpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break; + case OpLoopControlINTEL: *hasResult = false; *hasResultType = false; break; + case OpReadPipeBlockingINTEL: *hasResult = true; *hasResultType = true; break; + case OpWritePipeBlockingINTEL: 
*hasResult = true; *hasResultType = true; break; + case OpFPGARegINTEL: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetRayTMinKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetRayFlagsKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionTKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionInstanceCustomIndexKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionInstanceIdKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionGeometryIndexKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionPrimitiveIndexKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionBarycentricsKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionFrontFaceKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionCandidateAABBOpaqueKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionObjectRayDirectionKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionObjectRayOriginKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetWorldRayDirectionKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetWorldRayOriginKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionObjectToWorldKHR: *hasResult = true; *hasResultType = true; break; + case OpRayQueryGetIntersectionWorldToObjectKHR: *hasResult = true; *hasResultType = true; break; + case OpAtomicFAddEXT: *hasResult = true; *hasResultType = true; break; + } +} +#endif /* SPV_ENABLE_UTILITY_CODE */ + +// Overload operator| for mask bit combining + +inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); } +inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); } +inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); } +inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); } +inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); } +inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); } +inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); } +inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); } +inline RayFlagsMask operator|(RayFlagsMask a, RayFlagsMask b) { return RayFlagsMask(unsigned(a) | unsigned(b)); } + +} // end namespace spv + +#endif // #ifndef spirv_HPP + diff --git a/src/third_party/glslang/SPIRV/spvIR.h b/src/third_party/glslang/SPIRV/spvIR.h new file mode 100644 index 0000000..868b9bf --- /dev/null +++ b/src/third_party/glslang/SPIRV/spvIR.h @@ -0,0 +1,507 @@ +// +// Copyright (C) 2014 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. 
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+//    Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+//
+//    Redistributions in binary form must reproduce the above
+//    copyright notice, this list of conditions and the following
+//    disclaimer in the documentation and/or other materials provided
+//    with the distribution.
+//
+//    Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+//    contributors may be used to endorse or promote products derived
+//    from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+// SPIRV-IR
+//
+// Simple in-memory representation (IR) of SPIRV. Just for holding
+// Each function's CFG of blocks. Has this hierarchy:
+//  - Module, which is a list of
+//    - Function, which is a list of
+//      - Block, which is a list of
+//        - Instruction
+//
+
+#pragma once
+#ifndef spvIR_H
+#define spvIR_H
+
+#include "spirv.hpp"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <set>
+#include <vector>
+
+namespace spv {
+
+class Block;
+class Function;
+class Module;
+
+const Id NoResult = 0;
+const Id NoType = 0;
+
+const Decoration NoPrecision = DecorationMax;
+
+#ifdef __GNUC__
+# define POTENTIALLY_UNUSED __attribute__((unused))
+#else
+# define POTENTIALLY_UNUSED
+#endif
+
+POTENTIALLY_UNUSED
+const MemorySemanticsMask MemorySemanticsAllMemory =
+    (MemorySemanticsMask)(MemorySemanticsUniformMemoryMask |
+                          MemorySemanticsWorkgroupMemoryMask |
+                          MemorySemanticsAtomicCounterMemoryMask |
+                          MemorySemanticsImageMemoryMask);
+
+struct IdImmediate {
+    bool isId;      // true if word is an Id, false if word is an immediate
+    unsigned word;
+    IdImmediate(bool i, unsigned w) : isId(i), word(w) {}
+};
+
+//
+// SPIR-V IR instruction.
+//
+
+class Instruction {
+public:
+    Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
+    explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
+    virtual ~Instruction() {}
+    void addIdOperand(Id id) {
+        operands.push_back(id);
+        idOperand.push_back(true);
+    }
+    void addImmediateOperand(unsigned int immediate) {
+        operands.push_back(immediate);
+        idOperand.push_back(false);
+    }
+    void setImmediateOperand(unsigned idx, unsigned int immediate) {
+        assert(!idOperand[idx]);
+        operands[idx] = immediate;
+    }
+
+    void addStringOperand(const char* str)
+    {
+        unsigned int word;
+        char* wordString = (char*)&word;
+        char* wordPtr = wordString;
+        int charCount = 0;
+        char c;
+        do {
+            c = *(str++);
+            *(wordPtr++) = c;
+            ++charCount;
+            if (charCount == 4) {
+                addImmediateOperand(word);
+                wordPtr = wordString;
+                charCount = 0;
+            }
+        } while (c != 0);
+
+        // deal with partial last word
+        if (charCount > 0) {
+            // pad with 0s
+            for (; charCount < 4; ++charCount)
+                *(wordPtr++) = 0;
+            addImmediateOperand(word);
+        }
+    }
+    bool isIdOperand(int op) const { return idOperand[op]; }
+    void setBlock(Block* b) { block = b; }
+    Block* getBlock() const { return block; }
+    Op getOpCode() const { return opCode; }
+    int getNumOperands() const
+    {
+        assert(operands.size() == idOperand.size());
+        return (int)operands.size();
+    }
+    Id getResultId() const { return resultId; }
+    Id getTypeId() const { return typeId; }
+    Id getIdOperand(int op) const {
+        assert(idOperand[op]);
+        return operands[op];
+    }
+    unsigned int getImmediateOperand(int op) const {
+        assert(!idOperand[op]);
+        return operands[op];
+    }
+
+    // Write out the binary form.
+    void dump(std::vector<unsigned int>& out) const
+    {
+        // Compute the wordCount
+        unsigned int wordCount = 1;
+        if (typeId)
+            ++wordCount;
+        if (resultId)
+            ++wordCount;
+        wordCount += (unsigned int)operands.size();
+
+        // Write out the beginning of the instruction
+        out.push_back(((wordCount) << WordCountShift) | opCode);
+        if (typeId)
+            out.push_back(typeId);
+        if (resultId)
+            out.push_back(resultId);
+
+        // Write out the operands
+        for (int op = 0; op < (int)operands.size(); ++op)
+            out.push_back(operands[op]);
+    }
+
+protected:
+    Instruction(const Instruction&);
+    Id resultId;
+    Id typeId;
+    Op opCode;
+    std::vector<unsigned int> operands; // operands, both <id> and immediates (both are unsigned int)
+    std::vector<bool> idOperand;        // true for operands that are <id>, false for immediates
+    Block* block;
+};
+
+//
+// SPIR-V IR block.
+//
+
+class Block {
+public:
+    Block(Id id, Function& parent);
+    virtual ~Block()
+    {
+    }
+
+    Id getId() { return instructions.front()->getResultId(); }
+
+    Function& getParent() const { return parent; }
+    void addInstruction(std::unique_ptr<Instruction> inst);
+    void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
+    void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
+    const std::vector<Block*>& getPredecessors() const { return predecessors; }
+    const std::vector<Block*>& getSuccessors() const { return successors; }
+    const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
+        return instructions;
+    }
+    const std::vector<std::unique_ptr<Instruction> >& getLocalVariables() const { return localVariables; }
+    void setUnreachable() { unreachable = true; }
+    bool isUnreachable() const { return unreachable; }
+    // Returns the block's merge instruction, if one exists (otherwise null).
+    const Instruction* getMergeInstruction() const {
+        if (instructions.size() < 2) return nullptr;
+        const Instruction* nextToLast = (instructions.cend() - 2)->get();
+        switch (nextToLast->getOpCode()) {
+            case OpSelectionMerge:
+            case OpLoopMerge:
+                return nextToLast;
+            default:
+                return nullptr;
+        }
+        return nullptr;
+    }
+
+    // Change this block into a canonical dead merge block. Delete instructions
+    // as necessary. A canonical dead merge block has only an OpLabel and an
+    // OpUnreachable.
+    void rewriteAsCanonicalUnreachableMerge() {
+        assert(localVariables.empty());
+        // Delete all instructions except for the label.
+        assert(instructions.size() > 0);
+        instructions.resize(1);
+        successors.clear();
+        addInstruction(std::unique_ptr<Instruction>(new Instruction(OpUnreachable)));
+    }
+    // Change this block into a canonical dead continue target branching to the
+    // given header ID. Delete instructions as necessary. A canonical dead continue
+    // target has only an OpLabel and an unconditional branch back to the corresponding
+    // header.
+    void rewriteAsCanonicalUnreachableContinue(Block* header) {
+        assert(localVariables.empty());
+        // Delete all instructions except for the label.
+        assert(instructions.size() > 0);
+        instructions.resize(1);
+        successors.clear();
+        // Add OpBranch back to the header.
+        assert(header != nullptr);
+        Instruction* branch = new Instruction(OpBranch);
+        branch->addIdOperand(header->getId());
+        addInstruction(std::unique_ptr<Instruction>(branch));
+        successors.push_back(header);
+    }
+
+    bool isTerminated() const
+    {
+        switch (instructions.back()->getOpCode()) {
+        case OpBranch:
+        case OpBranchConditional:
+        case OpSwitch:
+        case OpKill:
+        case OpReturn:
+        case OpReturnValue:
+        case OpUnreachable:
+            return true;
+        default:
+            return false;
+        }
+    }
+
+    void dump(std::vector<unsigned int>& out) const
+    {
+        instructions[0]->dump(out);
+        for (int i = 0; i < (int)localVariables.size(); ++i)
+            localVariables[i]->dump(out);
+        for (int i = 1; i < (int)instructions.size(); ++i)
+            instructions[i]->dump(out);
+    }
+
+protected:
+    Block(const Block&);
+    Block& operator=(Block&);
+
+    // To enforce keeping parent and ownership in sync:
+    friend Function;
+
+    std::vector<std::unique_ptr<Instruction> > instructions;
+    std::vector<Block*> predecessors, successors;
+    std::vector<std::unique_ptr<Instruction> > localVariables;
+    Function& parent;
+
+    // track whether this block is known to be uncreachable (not necessarily
+    // true for all unreachable blocks, but should be set at least
+    // for the extraneous ones introduced by the builder).
+    bool unreachable;
+};
+
+// The different reasons for reaching a block in the inReadableOrder traversal.
+enum ReachReason {
+    // Reachable from the entry block via transfers of control, i.e. branches.
+    ReachViaControlFlow = 0,
+    // A continue target that is not reachable via control flow.
+    ReachDeadContinue,
+    // A merge block that is not reachable via control flow.
+    ReachDeadMerge
+};
+
+// Traverses the control-flow graph rooted at root in an order suited for
+// readable code generation. Invokes callback at every node in the traversal
+// order. The callback arguments are:
+// - the block,
+// - the reason we reached the block,
+// - if the reason was that block is an unreachable continue or unreachable merge block
+//   then the last parameter is the corresponding header block.
+void inReadableOrder(Block* root, std::function<void(Block*, ReachReason, Block*)> callback);
+
+//
+// SPIR-V IR Function.
+//
+
+class Function {
+public:
+    Function(Id id, Id resultType, Id functionType, Id firstParam, Module& parent);
+    virtual ~Function()
+    {
+        for (int i = 0; i < (int)parameterInstructions.size(); ++i)
+            delete parameterInstructions[i];
+
+        for (int i = 0; i < (int)blocks.size(); ++i)
+            delete blocks[i];
+    }
+    Id getId() const { return functionInstruction.getResultId(); }
+    Id getParamId(int p) const { return parameterInstructions[p]->getResultId(); }
+    Id getParamType(int p) const { return parameterInstructions[p]->getTypeId(); }
+
+    void addBlock(Block* block) { blocks.push_back(block); }
+    void removeBlock(Block* block)
+    {
+        auto found = find(blocks.begin(), blocks.end(), block);
+        assert(found != blocks.end());
+        blocks.erase(found);
+        delete block;
+    }
+
+    Module& getParent() const { return parent; }
+    Block* getEntryBlock() const { return blocks.front(); }
+    Block* getLastBlock() const { return blocks.back(); }
+    const std::vector<Block*>& getBlocks() const { return blocks; }
+    void addLocalVariable(std::unique_ptr<Instruction> inst);
+    Id getReturnType() const { return functionInstruction.getTypeId(); }
+    void setReturnPrecision(Decoration precision)
+    {
+        if (precision == DecorationRelaxedPrecision)
+            reducedPrecisionReturn = true;
+    }
+    Decoration getReturnPrecision() const
+        { return reducedPrecisionReturn ? DecorationRelaxedPrecision : NoPrecision; }
+
+    void setImplicitThis() { implicitThis = true; }
+    bool hasImplicitThis() const { return implicitThis; }
+
+    void addParamPrecision(unsigned param, Decoration precision)
+    {
+        if (precision == DecorationRelaxedPrecision)
+            reducedPrecisionParams.insert(param);
+    }
+    Decoration getParamPrecision(unsigned param) const
+    {
+        return reducedPrecisionParams.find(param) != reducedPrecisionParams.end() ?
+            DecorationRelaxedPrecision : NoPrecision;
+    }
+
+    void dump(std::vector<unsigned int>& out) const
+    {
+        // OpFunction
+        functionInstruction.dump(out);
+
+        // OpFunctionParameter
+        for (int p = 0; p < (int)parameterInstructions.size(); ++p)
+            parameterInstructions[p]->dump(out);
+
+        // Blocks
+        inReadableOrder(blocks[0], [&out](const Block* b, ReachReason, Block*) { b->dump(out); });
+        Instruction end(0, 0, OpFunctionEnd);
+        end.dump(out);
+    }
+
+protected:
+    Function(const Function&);
+    Function& operator=(Function&);
+
+    Module& parent;
+    Instruction functionInstruction;
+    std::vector<Instruction*> parameterInstructions;
+    std::vector<Block*> blocks;
+    bool implicitThis;  // true if this is a member function expecting to be passed a 'this' as the first argument
+    bool reducedPrecisionReturn;
+    std::set<int> reducedPrecisionParams; // list of parameter indexes that need a relaxed precision arg
+};
+
+//
+// SPIR-V IR Module.
+//
+
+class Module {
+public:
+    Module() {}
+    virtual ~Module()
+    {
+        // TODO delete things
+    }
+
+    void addFunction(Function *fun) { functions.push_back(fun); }
+
+    void mapInstruction(Instruction *instruction)
+    {
+        spv::Id resultId = instruction->getResultId();
+        // map the instruction's result id
+        if (resultId >= idToInstruction.size())
+            idToInstruction.resize(resultId + 16);
+        idToInstruction[resultId] = instruction;
+    }
+
+    Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
+    const std::vector<Function*>& getFunctions() const { return functions; }
+    spv::Id getTypeId(Id resultId) const {
NoType : idToInstruction[resultId]->getTypeId(); + } + StorageClass getStorageClass(Id typeId) const + { + assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer); + return (StorageClass)idToInstruction[typeId]->getImmediateOperand(0); + } + + void dump(std::vector& out) const + { + for (int f = 0; f < (int)functions.size(); ++f) + functions[f]->dump(out); + } + +protected: + Module(const Module&); + std::vector functions; + + // map from result id to instruction having that result id + std::vector idToInstruction; + + // map from a result id to its type id +}; + +// +// Implementation (it's here due to circular type definitions). +// + +// Add both +// - the OpFunction instruction +// - all the OpFunctionParameter instructions +__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent) + : parent(parent), functionInstruction(id, resultType, OpFunction), implicitThis(false), + reducedPrecisionReturn(false) +{ + // OpFunction + functionInstruction.addImmediateOperand(FunctionControlMaskNone); + functionInstruction.addIdOperand(functionType); + parent.mapInstruction(&functionInstruction); + parent.addFunction(this); + + // OpFunctionParameter + Instruction* typeInst = parent.getInstruction(functionType); + int numParams = typeInst->getNumOperands() - 1; + for (int p = 0; p < numParams; ++p) { + Instruction* param = new Instruction(firstParamId + p, typeInst->getIdOperand(p + 1), OpFunctionParameter); + parent.mapInstruction(param); + parameterInstructions.push_back(param); + } +} + +__inline void Function::addLocalVariable(std::unique_ptr inst) +{ + Instruction* raw_instruction = inst.get(); + blocks[0]->addLocalVariable(std::move(inst)); + parent.mapInstruction(raw_instruction); +} + +__inline Block::Block(Id id, Function& parent) : parent(parent), unreachable(false) +{ + instructions.push_back(std::unique_ptr(new Instruction(id, NoType, OpLabel))); + instructions.back()->setBlock(this); + parent.getParent().mapInstruction(instructions.back().get()); +} + +__inline void Block::addInstruction(std::unique_ptr inst) +{ + Instruction* raw_instruction = inst.get(); + instructions.push_back(std::move(inst)); + raw_instruction->setBlock(this); + if (raw_instruction->getResultId()) + parent.getParent().mapInstruction(raw_instruction); +} + +} // end spv namespace + +#endif // spvIR_H diff --git a/src/third_party/glslang/StandAlone/DirStackFileIncluder.h b/src/third_party/glslang/StandAlone/DirStackFileIncluder.h new file mode 100644 index 0000000..1873413 --- /dev/null +++ b/src/third_party/glslang/StandAlone/DirStackFileIncluder.h @@ -0,0 +1,141 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2017 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#pragma once + +#include +#include +#include +#include + +#include "./../glslang/Public/ShaderLang.h" + +// Default include class for normal include convention of search backward +// through the stack of active include paths (for nested includes). +// Can be overridden to customize. +class DirStackFileIncluder : public glslang::TShader::Includer { +public: + DirStackFileIncluder() : externalLocalDirectoryCount(0) { } + + virtual IncludeResult* includeLocal(const char* headerName, + const char* includerName, + size_t inclusionDepth) override + { + return readLocalPath(headerName, includerName, (int)inclusionDepth); + } + + virtual IncludeResult* includeSystem(const char* headerName, + const char* /*includerName*/, + size_t /*inclusionDepth*/) override + { + return readSystemPath(headerName); + } + + // Externally set directories. E.g., from a command-line -I. + // - Most-recently pushed are checked first. + // - All these are checked after the parse-time stack of local directories + // is checked. + // - This only applies to the "local" form of #include. + // - Makes its own copy of the path. + virtual void pushExternalLocalDirectory(const std::string& dir) + { + directoryStack.push_back(dir); + externalLocalDirectoryCount = (int)directoryStack.size(); + } + + virtual void releaseInclude(IncludeResult* result) override + { + if (result != nullptr) { + delete [] static_cast(result->userData); + delete result; + } + } + + virtual ~DirStackFileIncluder() override { } + +protected: + typedef char tUserDataElement; + std::vector directoryStack; + int externalLocalDirectoryCount; + + // Search for a valid "local" path based on combining the stack of include + // directories and the nominal name of the header. + virtual IncludeResult* readLocalPath(const char* headerName, const char* includerName, int depth) + { + // Discard popped include directories, and + // initialize when at parse-time first level. + directoryStack.resize(depth + externalLocalDirectoryCount); + if (depth == 1) + directoryStack.back() = getDirectory(includerName); + + // Find a directory that works, using a reverse search of the include stack. + for (auto it = directoryStack.rbegin(); it != directoryStack.rend(); ++it) { + std::string path = *it + '/' + headerName; + std::replace(path.begin(), path.end(), '\\', '/'); + std::ifstream file(path, std::ios_base::binary | std::ios_base::ate); + if (file) { + directoryStack.push_back(getDirectory(path)); + return newIncludeResult(path, file, (int)file.tellg()); + } + } + + return nullptr; + } + + // Search for a valid path. + // Not implemented yet; returning nullptr signals failure to find. 
+ virtual IncludeResult* readSystemPath(const char* /*headerName*/) const + { + return nullptr; + } + + // Do actual reading of the file, filling in a new include result. + virtual IncludeResult* newIncludeResult(const std::string& path, std::ifstream& file, int length) const + { + char* content = new tUserDataElement [length]; + file.seekg(0, file.beg); + file.read(content, length); + return new IncludeResult(path, content, length, content); + } + + // If no path markers, return current working directory. + // Otherwise, strip file name and return path leading up to it. + virtual std::string getDirectory(const std::string path) const + { + size_t last = path.find_last_of("/\\"); + return last == std::string::npos ? "." : path.substr(0, last); + } +}; diff --git a/src/third_party/glslang/StandAlone/ResourceLimits.cpp b/src/third_party/glslang/StandAlone/ResourceLimits.cpp new file mode 100644 index 0000000..7c7f4c4 --- /dev/null +++ b/src/third_party/glslang/StandAlone/ResourceLimits.cpp @@ -0,0 +1,496 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
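For orientation only: the DirStackFileIncluder above and the DefaultTBuiltInResource table defined just below are what an embedding application normally hands to glslang's C++ front end. The following is a minimal sketch of that flow, assuming the TShader API from ShaderLang.h and the preprocess/parse signatures used by the C wrapper later in this patch; the include directory, default GLSL version, and shader source are placeholders, not part of the upstream sources:

    #include <cstdio>
    #include <string>

    #include "StandAlone/DirStackFileIncluder.h"   // include paths depend on the embedding build
    #include "StandAlone/ResourceLimits.h"
    #include "glslang/Public/ShaderLang.h"

    // Hypothetical helper: preprocess and parse one vertex shader with the default limits.
    bool CompileVertexShader(const char* glslSource)
    {
        glslang::InitializeProcess();

        glslang::TShader shader(EShLangVertex);
        shader.setStrings(&glslSource, 1);

        DirStackFileIncluder includer;
        includer.pushExternalLocalDirectory("shaders");   // placeholder -I directory

        std::string preprocessed;
        bool ok = shader.preprocess(&glslang::DefaultTBuiltInResource, /*defaultVersion=*/100,
                                    ENoProfile, /*forceDefaultVersionAndProfile=*/false,
                                    /*forwardCompatible=*/false, EShMsgDefault,
                                    &preprocessed, includer);
        if (ok) {
            const char* p = preprocessed.c_str();
            shader.setStrings(&p, 1);
            ok = shader.parse(&glslang::DefaultTBuiltInResource, 100, false, EShMsgDefault);
        }
        if (!ok)
            fprintf(stderr, "%s\n", shader.getInfoLog());

        glslang::FinalizeProcess();
        return ok;
    }

The C wrapper added further down in this patch (glslang_shader_create, glslang_shader_preprocess, glslang_shader_parse, glslang_program_link) exposes the same sequence to C callers.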
+ +#include +#include +#include +#include + +#include "ResourceLimits.h" + +namespace glslang { + +const TBuiltInResource DefaultTBuiltInResource = { + /* .MaxLights = */ 32, + /* .MaxClipPlanes = */ 6, + /* .MaxTextureUnits = */ 32, + /* .MaxTextureCoords = */ 32, + /* .MaxVertexAttribs = */ 64, + /* .MaxVertexUniformComponents = */ 4096, + /* .MaxVaryingFloats = */ 64, + /* .MaxVertexTextureImageUnits = */ 32, + /* .MaxCombinedTextureImageUnits = */ 80, + /* .MaxTextureImageUnits = */ 32, + /* .MaxFragmentUniformComponents = */ 4096, + /* .MaxDrawBuffers = */ 32, + /* .MaxVertexUniformVectors = */ 128, + /* .MaxVaryingVectors = */ 8, + /* .MaxFragmentUniformVectors = */ 16, + /* .MaxVertexOutputVectors = */ 16, + /* .MaxFragmentInputVectors = */ 15, + /* .MinProgramTexelOffset = */ -8, + /* .MaxProgramTexelOffset = */ 7, + /* .MaxClipDistances = */ 8, + /* .MaxComputeWorkGroupCountX = */ 65535, + /* .MaxComputeWorkGroupCountY = */ 65535, + /* .MaxComputeWorkGroupCountZ = */ 65535, + /* .MaxComputeWorkGroupSizeX = */ 1024, + /* .MaxComputeWorkGroupSizeY = */ 1024, + /* .MaxComputeWorkGroupSizeZ = */ 64, + /* .MaxComputeUniformComponents = */ 1024, + /* .MaxComputeTextureImageUnits = */ 16, + /* .MaxComputeImageUniforms = */ 8, + /* .MaxComputeAtomicCounters = */ 8, + /* .MaxComputeAtomicCounterBuffers = */ 1, + /* .MaxVaryingComponents = */ 60, + /* .MaxVertexOutputComponents = */ 64, + /* .MaxGeometryInputComponents = */ 64, + /* .MaxGeometryOutputComponents = */ 128, + /* .MaxFragmentInputComponents = */ 128, + /* .MaxImageUnits = */ 8, + /* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8, + /* .MaxCombinedShaderOutputResources = */ 8, + /* .MaxImageSamples = */ 0, + /* .MaxVertexImageUniforms = */ 0, + /* .MaxTessControlImageUniforms = */ 0, + /* .MaxTessEvaluationImageUniforms = */ 0, + /* .MaxGeometryImageUniforms = */ 0, + /* .MaxFragmentImageUniforms = */ 8, + /* .MaxCombinedImageUniforms = */ 8, + /* .MaxGeometryTextureImageUnits = */ 16, + /* .MaxGeometryOutputVertices = */ 256, + /* .MaxGeometryTotalOutputComponents = */ 1024, + /* .MaxGeometryUniformComponents = */ 1024, + /* .MaxGeometryVaryingComponents = */ 64, + /* .MaxTessControlInputComponents = */ 128, + /* .MaxTessControlOutputComponents = */ 128, + /* .MaxTessControlTextureImageUnits = */ 16, + /* .MaxTessControlUniformComponents = */ 1024, + /* .MaxTessControlTotalOutputComponents = */ 4096, + /* .MaxTessEvaluationInputComponents = */ 128, + /* .MaxTessEvaluationOutputComponents = */ 128, + /* .MaxTessEvaluationTextureImageUnits = */ 16, + /* .MaxTessEvaluationUniformComponents = */ 1024, + /* .MaxTessPatchComponents = */ 120, + /* .MaxPatchVertices = */ 32, + /* .MaxTessGenLevel = */ 64, + /* .MaxViewports = */ 16, + /* .MaxVertexAtomicCounters = */ 0, + /* .MaxTessControlAtomicCounters = */ 0, + /* .MaxTessEvaluationAtomicCounters = */ 0, + /* .MaxGeometryAtomicCounters = */ 0, + /* .MaxFragmentAtomicCounters = */ 8, + /* .MaxCombinedAtomicCounters = */ 8, + /* .MaxAtomicCounterBindings = */ 1, + /* .MaxVertexAtomicCounterBuffers = */ 0, + /* .MaxTessControlAtomicCounterBuffers = */ 0, + /* .MaxTessEvaluationAtomicCounterBuffers = */ 0, + /* .MaxGeometryAtomicCounterBuffers = */ 0, + /* .MaxFragmentAtomicCounterBuffers = */ 1, + /* .MaxCombinedAtomicCounterBuffers = */ 1, + /* .MaxAtomicCounterBufferSize = */ 16384, + /* .MaxTransformFeedbackBuffers = */ 4, + /* .MaxTransformFeedbackInterleavedComponents = */ 64, + /* .MaxCullDistances = */ 8, + /* .MaxCombinedClipAndCullDistances = */ 8, + /* .MaxSamples = */ 4, + 
/* .maxMeshOutputVerticesNV = */ 256, + /* .maxMeshOutputPrimitivesNV = */ 512, + /* .maxMeshWorkGroupSizeX_NV = */ 32, + /* .maxMeshWorkGroupSizeY_NV = */ 1, + /* .maxMeshWorkGroupSizeZ_NV = */ 1, + /* .maxTaskWorkGroupSizeX_NV = */ 32, + /* .maxTaskWorkGroupSizeY_NV = */ 1, + /* .maxTaskWorkGroupSizeZ_NV = */ 1, + /* .maxMeshViewCountNV = */ 4, + /* .maxDualSourceDrawBuffersEXT = */ 1, + + /* .limits = */ { + /* .nonInductiveForLoops = */ 1, + /* .whileLoops = */ 1, + /* .doWhileLoops = */ 1, + /* .generalUniformIndexing = */ 1, + /* .generalAttributeMatrixVectorIndexing = */ 1, + /* .generalVaryingIndexing = */ 1, + /* .generalSamplerIndexing = */ 1, + /* .generalVariableIndexing = */ 1, + /* .generalConstantMatrixVectorIndexing = */ 1, + }}; + +std::string GetDefaultTBuiltInResourceString() +{ + std::ostringstream ostream; + + ostream << "MaxLights " << DefaultTBuiltInResource.maxLights << "\n" + << "MaxClipPlanes " << DefaultTBuiltInResource.maxClipPlanes << "\n" + << "MaxTextureUnits " << DefaultTBuiltInResource.maxTextureUnits << "\n" + << "MaxTextureCoords " << DefaultTBuiltInResource.maxTextureCoords << "\n" + << "MaxVertexAttribs " << DefaultTBuiltInResource.maxVertexAttribs << "\n" + << "MaxVertexUniformComponents " << DefaultTBuiltInResource.maxVertexUniformComponents << "\n" + << "MaxVaryingFloats " << DefaultTBuiltInResource.maxVaryingFloats << "\n" + << "MaxVertexTextureImageUnits " << DefaultTBuiltInResource.maxVertexTextureImageUnits << "\n" + << "MaxCombinedTextureImageUnits " << DefaultTBuiltInResource.maxCombinedTextureImageUnits << "\n" + << "MaxTextureImageUnits " << DefaultTBuiltInResource.maxTextureImageUnits << "\n" + << "MaxFragmentUniformComponents " << DefaultTBuiltInResource.maxFragmentUniformComponents << "\n" + << "MaxDrawBuffers " << DefaultTBuiltInResource.maxDrawBuffers << "\n" + << "MaxVertexUniformVectors " << DefaultTBuiltInResource.maxVertexUniformVectors << "\n" + << "MaxVaryingVectors " << DefaultTBuiltInResource.maxVaryingVectors << "\n" + << "MaxFragmentUniformVectors " << DefaultTBuiltInResource.maxFragmentUniformVectors << "\n" + << "MaxVertexOutputVectors " << DefaultTBuiltInResource.maxVertexOutputVectors << "\n" + << "MaxFragmentInputVectors " << DefaultTBuiltInResource.maxFragmentInputVectors << "\n" + << "MinProgramTexelOffset " << DefaultTBuiltInResource.minProgramTexelOffset << "\n" + << "MaxProgramTexelOffset " << DefaultTBuiltInResource.maxProgramTexelOffset << "\n" + << "MaxClipDistances " << DefaultTBuiltInResource.maxClipDistances << "\n" + << "MaxComputeWorkGroupCountX " << DefaultTBuiltInResource.maxComputeWorkGroupCountX << "\n" + << "MaxComputeWorkGroupCountY " << DefaultTBuiltInResource.maxComputeWorkGroupCountY << "\n" + << "MaxComputeWorkGroupCountZ " << DefaultTBuiltInResource.maxComputeWorkGroupCountZ << "\n" + << "MaxComputeWorkGroupSizeX " << DefaultTBuiltInResource.maxComputeWorkGroupSizeX << "\n" + << "MaxComputeWorkGroupSizeY " << DefaultTBuiltInResource.maxComputeWorkGroupSizeY << "\n" + << "MaxComputeWorkGroupSizeZ " << DefaultTBuiltInResource.maxComputeWorkGroupSizeZ << "\n" + << "MaxComputeUniformComponents " << DefaultTBuiltInResource.maxComputeUniformComponents << "\n" + << "MaxComputeTextureImageUnits " << DefaultTBuiltInResource.maxComputeTextureImageUnits << "\n" + << "MaxComputeImageUniforms " << DefaultTBuiltInResource.maxComputeImageUniforms << "\n" + << "MaxComputeAtomicCounters " << DefaultTBuiltInResource.maxComputeAtomicCounters << "\n" + << "MaxComputeAtomicCounterBuffers " << 
DefaultTBuiltInResource.maxComputeAtomicCounterBuffers << "\n" + << "MaxVaryingComponents " << DefaultTBuiltInResource.maxVaryingComponents << "\n" + << "MaxVertexOutputComponents " << DefaultTBuiltInResource.maxVertexOutputComponents << "\n" + << "MaxGeometryInputComponents " << DefaultTBuiltInResource.maxGeometryInputComponents << "\n" + << "MaxGeometryOutputComponents " << DefaultTBuiltInResource.maxGeometryOutputComponents << "\n" + << "MaxFragmentInputComponents " << DefaultTBuiltInResource.maxFragmentInputComponents << "\n" + << "MaxImageUnits " << DefaultTBuiltInResource.maxImageUnits << "\n" + << "MaxCombinedImageUnitsAndFragmentOutputs " << DefaultTBuiltInResource.maxCombinedImageUnitsAndFragmentOutputs << "\n" + << "MaxCombinedShaderOutputResources " << DefaultTBuiltInResource.maxCombinedShaderOutputResources << "\n" + << "MaxImageSamples " << DefaultTBuiltInResource.maxImageSamples << "\n" + << "MaxVertexImageUniforms " << DefaultTBuiltInResource.maxVertexImageUniforms << "\n" + << "MaxTessControlImageUniforms " << DefaultTBuiltInResource.maxTessControlImageUniforms << "\n" + << "MaxTessEvaluationImageUniforms " << DefaultTBuiltInResource.maxTessEvaluationImageUniforms << "\n" + << "MaxGeometryImageUniforms " << DefaultTBuiltInResource.maxGeometryImageUniforms << "\n" + << "MaxFragmentImageUniforms " << DefaultTBuiltInResource.maxFragmentImageUniforms << "\n" + << "MaxCombinedImageUniforms " << DefaultTBuiltInResource.maxCombinedImageUniforms << "\n" + << "MaxGeometryTextureImageUnits " << DefaultTBuiltInResource.maxGeometryTextureImageUnits << "\n" + << "MaxGeometryOutputVertices " << DefaultTBuiltInResource.maxGeometryOutputVertices << "\n" + << "MaxGeometryTotalOutputComponents " << DefaultTBuiltInResource.maxGeometryTotalOutputComponents << "\n" + << "MaxGeometryUniformComponents " << DefaultTBuiltInResource.maxGeometryUniformComponents << "\n" + << "MaxGeometryVaryingComponents " << DefaultTBuiltInResource.maxGeometryVaryingComponents << "\n" + << "MaxTessControlInputComponents " << DefaultTBuiltInResource.maxTessControlInputComponents << "\n" + << "MaxTessControlOutputComponents " << DefaultTBuiltInResource.maxTessControlOutputComponents << "\n" + << "MaxTessControlTextureImageUnits " << DefaultTBuiltInResource.maxTessControlTextureImageUnits << "\n" + << "MaxTessControlUniformComponents " << DefaultTBuiltInResource.maxTessControlUniformComponents << "\n" + << "MaxTessControlTotalOutputComponents " << DefaultTBuiltInResource.maxTessControlTotalOutputComponents << "\n" + << "MaxTessEvaluationInputComponents " << DefaultTBuiltInResource.maxTessEvaluationInputComponents << "\n" + << "MaxTessEvaluationOutputComponents " << DefaultTBuiltInResource.maxTessEvaluationOutputComponents << "\n" + << "MaxTessEvaluationTextureImageUnits " << DefaultTBuiltInResource.maxTessEvaluationTextureImageUnits << "\n" + << "MaxTessEvaluationUniformComponents " << DefaultTBuiltInResource.maxTessEvaluationUniformComponents << "\n" + << "MaxTessPatchComponents " << DefaultTBuiltInResource.maxTessPatchComponents << "\n" + << "MaxPatchVertices " << DefaultTBuiltInResource.maxPatchVertices << "\n" + << "MaxTessGenLevel " << DefaultTBuiltInResource.maxTessGenLevel << "\n" + << "MaxViewports " << DefaultTBuiltInResource.maxViewports << "\n" + << "MaxVertexAtomicCounters " << DefaultTBuiltInResource.maxVertexAtomicCounters << "\n" + << "MaxTessControlAtomicCounters " << DefaultTBuiltInResource.maxTessControlAtomicCounters << "\n" + << "MaxTessEvaluationAtomicCounters " << 
DefaultTBuiltInResource.maxTessEvaluationAtomicCounters << "\n" + << "MaxGeometryAtomicCounters " << DefaultTBuiltInResource.maxGeometryAtomicCounters << "\n" + << "MaxFragmentAtomicCounters " << DefaultTBuiltInResource.maxFragmentAtomicCounters << "\n" + << "MaxCombinedAtomicCounters " << DefaultTBuiltInResource.maxCombinedAtomicCounters << "\n" + << "MaxAtomicCounterBindings " << DefaultTBuiltInResource.maxAtomicCounterBindings << "\n" + << "MaxVertexAtomicCounterBuffers " << DefaultTBuiltInResource.maxVertexAtomicCounterBuffers << "\n" + << "MaxTessControlAtomicCounterBuffers " << DefaultTBuiltInResource.maxTessControlAtomicCounterBuffers << "\n" + << "MaxTessEvaluationAtomicCounterBuffers " << DefaultTBuiltInResource.maxTessEvaluationAtomicCounterBuffers << "\n" + << "MaxGeometryAtomicCounterBuffers " << DefaultTBuiltInResource.maxGeometryAtomicCounterBuffers << "\n" + << "MaxFragmentAtomicCounterBuffers " << DefaultTBuiltInResource.maxFragmentAtomicCounterBuffers << "\n" + << "MaxCombinedAtomicCounterBuffers " << DefaultTBuiltInResource.maxCombinedAtomicCounterBuffers << "\n" + << "MaxAtomicCounterBufferSize " << DefaultTBuiltInResource.maxAtomicCounterBufferSize << "\n" + << "MaxTransformFeedbackBuffers " << DefaultTBuiltInResource.maxTransformFeedbackBuffers << "\n" + << "MaxTransformFeedbackInterleavedComponents " << DefaultTBuiltInResource.maxTransformFeedbackInterleavedComponents << "\n" + << "MaxCullDistances " << DefaultTBuiltInResource.maxCullDistances << "\n" + << "MaxCombinedClipAndCullDistances " << DefaultTBuiltInResource.maxCombinedClipAndCullDistances << "\n" + << "MaxSamples " << DefaultTBuiltInResource.maxSamples << "\n" + << "MaxMeshOutputVerticesNV " << DefaultTBuiltInResource.maxMeshOutputVerticesNV << "\n" + << "MaxMeshOutputPrimitivesNV " << DefaultTBuiltInResource.maxMeshOutputPrimitivesNV << "\n" + << "MaxMeshWorkGroupSizeX_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeX_NV << "\n" + << "MaxMeshWorkGroupSizeY_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeY_NV << "\n" + << "MaxMeshWorkGroupSizeZ_NV " << DefaultTBuiltInResource.maxMeshWorkGroupSizeZ_NV << "\n" + << "MaxTaskWorkGroupSizeX_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeX_NV << "\n" + << "MaxTaskWorkGroupSizeY_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeY_NV << "\n" + << "MaxTaskWorkGroupSizeZ_NV " << DefaultTBuiltInResource.maxTaskWorkGroupSizeZ_NV << "\n" + << "MaxMeshViewCountNV " << DefaultTBuiltInResource.maxMeshViewCountNV << "\n" + << "MaxDualSourceDrawBuffersEXT " << DefaultTBuiltInResource.maxDualSourceDrawBuffersEXT << "\n" + << "nonInductiveForLoops " << DefaultTBuiltInResource.limits.nonInductiveForLoops << "\n" + << "whileLoops " << DefaultTBuiltInResource.limits.whileLoops << "\n" + << "doWhileLoops " << DefaultTBuiltInResource.limits.doWhileLoops << "\n" + << "generalUniformIndexing " << DefaultTBuiltInResource.limits.generalUniformIndexing << "\n" + << "generalAttributeMatrixVectorIndexing " << DefaultTBuiltInResource.limits.generalAttributeMatrixVectorIndexing << "\n" + << "generalVaryingIndexing " << DefaultTBuiltInResource.limits.generalVaryingIndexing << "\n" + << "generalSamplerIndexing " << DefaultTBuiltInResource.limits.generalSamplerIndexing << "\n" + << "generalVariableIndexing " << DefaultTBuiltInResource.limits.generalVariableIndexing << "\n" + << "generalConstantMatrixVectorIndexing " << DefaultTBuiltInResource.limits.generalConstantMatrixVectorIndexing << "\n" + ; + + return ostream.str(); +} + +void DecodeResourceLimits(TBuiltInResource* 
resources, char* config) +{ + static const char* delims = " \t\n\r"; + + size_t pos = 0; + std::string configStr(config); + + while ((pos = configStr.find_first_not_of(delims, pos)) != std::string::npos) { + const size_t token_s = pos; + const size_t token_e = configStr.find_first_of(delims, token_s); + const size_t value_s = configStr.find_first_not_of(delims, token_e); + const size_t value_e = configStr.find_first_of(delims, value_s); + pos = value_e; + + // Faster to use compare(), but prefering readability. + const std::string tokenStr = configStr.substr(token_s, token_e-token_s); + const std::string valueStr = configStr.substr(value_s, value_e-value_s); + + if (value_s == std::string::npos || ! (valueStr[0] == '-' || isdigit(valueStr[0]))) { + printf("Error: '%s' bad .conf file. Each name must be followed by one number.\n", + valueStr.c_str()); + return; + } + + const int value = std::atoi(valueStr.c_str()); + + if (tokenStr == "MaxLights") + resources->maxLights = value; + else if (tokenStr == "MaxClipPlanes") + resources->maxClipPlanes = value; + else if (tokenStr == "MaxTextureUnits") + resources->maxTextureUnits = value; + else if (tokenStr == "MaxTextureCoords") + resources->maxTextureCoords = value; + else if (tokenStr == "MaxVertexAttribs") + resources->maxVertexAttribs = value; + else if (tokenStr == "MaxVertexUniformComponents") + resources->maxVertexUniformComponents = value; + else if (tokenStr == "MaxVaryingFloats") + resources->maxVaryingFloats = value; + else if (tokenStr == "MaxVertexTextureImageUnits") + resources->maxVertexTextureImageUnits = value; + else if (tokenStr == "MaxCombinedTextureImageUnits") + resources->maxCombinedTextureImageUnits = value; + else if (tokenStr == "MaxTextureImageUnits") + resources->maxTextureImageUnits = value; + else if (tokenStr == "MaxFragmentUniformComponents") + resources->maxFragmentUniformComponents = value; + else if (tokenStr == "MaxDrawBuffers") + resources->maxDrawBuffers = value; + else if (tokenStr == "MaxVertexUniformVectors") + resources->maxVertexUniformVectors = value; + else if (tokenStr == "MaxVaryingVectors") + resources->maxVaryingVectors = value; + else if (tokenStr == "MaxFragmentUniformVectors") + resources->maxFragmentUniformVectors = value; + else if (tokenStr == "MaxVertexOutputVectors") + resources->maxVertexOutputVectors = value; + else if (tokenStr == "MaxFragmentInputVectors") + resources->maxFragmentInputVectors = value; + else if (tokenStr == "MinProgramTexelOffset") + resources->minProgramTexelOffset = value; + else if (tokenStr == "MaxProgramTexelOffset") + resources->maxProgramTexelOffset = value; + else if (tokenStr == "MaxClipDistances") + resources->maxClipDistances = value; + else if (tokenStr == "MaxComputeWorkGroupCountX") + resources->maxComputeWorkGroupCountX = value; + else if (tokenStr == "MaxComputeWorkGroupCountY") + resources->maxComputeWorkGroupCountY = value; + else if (tokenStr == "MaxComputeWorkGroupCountZ") + resources->maxComputeWorkGroupCountZ = value; + else if (tokenStr == "MaxComputeWorkGroupSizeX") + resources->maxComputeWorkGroupSizeX = value; + else if (tokenStr == "MaxComputeWorkGroupSizeY") + resources->maxComputeWorkGroupSizeY = value; + else if (tokenStr == "MaxComputeWorkGroupSizeZ") + resources->maxComputeWorkGroupSizeZ = value; + else if (tokenStr == "MaxComputeUniformComponents") + resources->maxComputeUniformComponents = value; + else if (tokenStr == "MaxComputeTextureImageUnits") + resources->maxComputeTextureImageUnits = value; + else if (tokenStr == 
"MaxComputeImageUniforms") + resources->maxComputeImageUniforms = value; + else if (tokenStr == "MaxComputeAtomicCounters") + resources->maxComputeAtomicCounters = value; + else if (tokenStr == "MaxComputeAtomicCounterBuffers") + resources->maxComputeAtomicCounterBuffers = value; + else if (tokenStr == "MaxVaryingComponents") + resources->maxVaryingComponents = value; + else if (tokenStr == "MaxVertexOutputComponents") + resources->maxVertexOutputComponents = value; + else if (tokenStr == "MaxGeometryInputComponents") + resources->maxGeometryInputComponents = value; + else if (tokenStr == "MaxGeometryOutputComponents") + resources->maxGeometryOutputComponents = value; + else if (tokenStr == "MaxFragmentInputComponents") + resources->maxFragmentInputComponents = value; + else if (tokenStr == "MaxImageUnits") + resources->maxImageUnits = value; + else if (tokenStr == "MaxCombinedImageUnitsAndFragmentOutputs") + resources->maxCombinedImageUnitsAndFragmentOutputs = value; + else if (tokenStr == "MaxCombinedShaderOutputResources") + resources->maxCombinedShaderOutputResources = value; + else if (tokenStr == "MaxImageSamples") + resources->maxImageSamples = value; + else if (tokenStr == "MaxVertexImageUniforms") + resources->maxVertexImageUniforms = value; + else if (tokenStr == "MaxTessControlImageUniforms") + resources->maxTessControlImageUniforms = value; + else if (tokenStr == "MaxTessEvaluationImageUniforms") + resources->maxTessEvaluationImageUniforms = value; + else if (tokenStr == "MaxGeometryImageUniforms") + resources->maxGeometryImageUniforms = value; + else if (tokenStr == "MaxFragmentImageUniforms") + resources->maxFragmentImageUniforms = value; + else if (tokenStr == "MaxCombinedImageUniforms") + resources->maxCombinedImageUniforms = value; + else if (tokenStr == "MaxGeometryTextureImageUnits") + resources->maxGeometryTextureImageUnits = value; + else if (tokenStr == "MaxGeometryOutputVertices") + resources->maxGeometryOutputVertices = value; + else if (tokenStr == "MaxGeometryTotalOutputComponents") + resources->maxGeometryTotalOutputComponents = value; + else if (tokenStr == "MaxGeometryUniformComponents") + resources->maxGeometryUniformComponents = value; + else if (tokenStr == "MaxGeometryVaryingComponents") + resources->maxGeometryVaryingComponents = value; + else if (tokenStr == "MaxTessControlInputComponents") + resources->maxTessControlInputComponents = value; + else if (tokenStr == "MaxTessControlOutputComponents") + resources->maxTessControlOutputComponents = value; + else if (tokenStr == "MaxTessControlTextureImageUnits") + resources->maxTessControlTextureImageUnits = value; + else if (tokenStr == "MaxTessControlUniformComponents") + resources->maxTessControlUniformComponents = value; + else if (tokenStr == "MaxTessControlTotalOutputComponents") + resources->maxTessControlTotalOutputComponents = value; + else if (tokenStr == "MaxTessEvaluationInputComponents") + resources->maxTessEvaluationInputComponents = value; + else if (tokenStr == "MaxTessEvaluationOutputComponents") + resources->maxTessEvaluationOutputComponents = value; + else if (tokenStr == "MaxTessEvaluationTextureImageUnits") + resources->maxTessEvaluationTextureImageUnits = value; + else if (tokenStr == "MaxTessEvaluationUniformComponents") + resources->maxTessEvaluationUniformComponents = value; + else if (tokenStr == "MaxTessPatchComponents") + resources->maxTessPatchComponents = value; + else if (tokenStr == "MaxPatchVertices") + resources->maxPatchVertices = value; + else if (tokenStr == 
"MaxTessGenLevel") + resources->maxTessGenLevel = value; + else if (tokenStr == "MaxViewports") + resources->maxViewports = value; + else if (tokenStr == "MaxVertexAtomicCounters") + resources->maxVertexAtomicCounters = value; + else if (tokenStr == "MaxTessControlAtomicCounters") + resources->maxTessControlAtomicCounters = value; + else if (tokenStr == "MaxTessEvaluationAtomicCounters") + resources->maxTessEvaluationAtomicCounters = value; + else if (tokenStr == "MaxGeometryAtomicCounters") + resources->maxGeometryAtomicCounters = value; + else if (tokenStr == "MaxFragmentAtomicCounters") + resources->maxFragmentAtomicCounters = value; + else if (tokenStr == "MaxCombinedAtomicCounters") + resources->maxCombinedAtomicCounters = value; + else if (tokenStr == "MaxAtomicCounterBindings") + resources->maxAtomicCounterBindings = value; + else if (tokenStr == "MaxVertexAtomicCounterBuffers") + resources->maxVertexAtomicCounterBuffers = value; + else if (tokenStr == "MaxTessControlAtomicCounterBuffers") + resources->maxTessControlAtomicCounterBuffers = value; + else if (tokenStr == "MaxTessEvaluationAtomicCounterBuffers") + resources->maxTessEvaluationAtomicCounterBuffers = value; + else if (tokenStr == "MaxGeometryAtomicCounterBuffers") + resources->maxGeometryAtomicCounterBuffers = value; + else if (tokenStr == "MaxFragmentAtomicCounterBuffers") + resources->maxFragmentAtomicCounterBuffers = value; + else if (tokenStr == "MaxCombinedAtomicCounterBuffers") + resources->maxCombinedAtomicCounterBuffers = value; + else if (tokenStr == "MaxAtomicCounterBufferSize") + resources->maxAtomicCounterBufferSize = value; + else if (tokenStr == "MaxTransformFeedbackBuffers") + resources->maxTransformFeedbackBuffers = value; + else if (tokenStr == "MaxTransformFeedbackInterleavedComponents") + resources->maxTransformFeedbackInterleavedComponents = value; + else if (tokenStr == "MaxCullDistances") + resources->maxCullDistances = value; + else if (tokenStr == "MaxCombinedClipAndCullDistances") + resources->maxCombinedClipAndCullDistances = value; + else if (tokenStr == "MaxSamples") + resources->maxSamples = value; + else if (tokenStr == "MaxMeshOutputVerticesNV") + resources->maxMeshOutputVerticesNV = value; + else if (tokenStr == "MaxMeshOutputPrimitivesNV") + resources->maxMeshOutputPrimitivesNV = value; + else if (tokenStr == "MaxMeshWorkGroupSizeX_NV") + resources->maxMeshWorkGroupSizeX_NV = value; + else if (tokenStr == "MaxMeshWorkGroupSizeY_NV") + resources->maxMeshWorkGroupSizeY_NV = value; + else if (tokenStr == "MaxMeshWorkGroupSizeZ_NV") + resources->maxMeshWorkGroupSizeZ_NV = value; + else if (tokenStr == "MaxTaskWorkGroupSizeX_NV") + resources->maxTaskWorkGroupSizeX_NV = value; + else if (tokenStr == "MaxTaskWorkGroupSizeY_NV") + resources->maxTaskWorkGroupSizeY_NV = value; + else if (tokenStr == "MaxTaskWorkGroupSizeZ_NV") + resources->maxTaskWorkGroupSizeZ_NV = value; + else if (tokenStr == "MaxMeshViewCountNV") + resources->maxMeshViewCountNV = value; + else if (tokenStr == "nonInductiveForLoops") + resources->limits.nonInductiveForLoops = (value != 0); + else if (tokenStr == "whileLoops") + resources->limits.whileLoops = (value != 0); + else if (tokenStr == "doWhileLoops") + resources->limits.doWhileLoops = (value != 0); + else if (tokenStr == "generalUniformIndexing") + resources->limits.generalUniformIndexing = (value != 0); + else if (tokenStr == "generalAttributeMatrixVectorIndexing") + resources->limits.generalAttributeMatrixVectorIndexing = (value != 0); + else if (tokenStr == 
"generalVaryingIndexing") + resources->limits.generalVaryingIndexing = (value != 0); + else if (tokenStr == "generalSamplerIndexing") + resources->limits.generalSamplerIndexing = (value != 0); + else if (tokenStr == "generalVariableIndexing") + resources->limits.generalVariableIndexing = (value != 0); + else if (tokenStr == "generalConstantMatrixVectorIndexing") + resources->limits.generalConstantMatrixVectorIndexing = (value != 0); + else + printf("Warning: unrecognized limit (%s) in configuration file.\n", tokenStr.c_str()); + + } +} + +} // end namespace glslang diff --git a/src/third_party/glslang/StandAlone/ResourceLimits.h b/src/third_party/glslang/StandAlone/ResourceLimits.h new file mode 100644 index 0000000..736248e --- /dev/null +++ b/src/third_party/glslang/StandAlone/ResourceLimits.h @@ -0,0 +1,57 @@ +// +// Copyright (C) 2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_ +#define _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_ + +#include + +#include "../glslang/Include/ResourceLimits.h" + +namespace glslang { + +// These are the default resources for TBuiltInResources, used for both +// - parsing this string for the case where the user didn't supply one, +// - dumping out a template for user construction of a config file. +extern const TBuiltInResource DefaultTBuiltInResource; + +// Returns the DefaultTBuiltInResource as a human-readable string. +std::string GetDefaultTBuiltInResourceString(); + +// Decodes the resource limits from |config| to |resources|. 
+void DecodeResourceLimits(TBuiltInResource* resources, char* config); + +} // end namespace glslang + +#endif // _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_ diff --git a/src/third_party/glslang/glslang/CInterface/glslang_c_interface.cpp b/src/third_party/glslang/glslang/CInterface/glslang_c_interface.cpp new file mode 100644 index 0000000..551c027 --- /dev/null +++ b/src/third_party/glslang/glslang/CInterface/glslang_c_interface.cpp @@ -0,0 +1,428 @@ +/** + This code is based on the glslang_c_interface implementation by Viktor Latypov +**/ + +/** +BSD 2-Clause License + +Copyright (c) 2019, Viktor Latypov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+**/
+
+#include "../Include/glslang_c_interface.h"
+
+#include "../../StandAlone/DirStackFileIncluder.h"
+#include "../../StandAlone/ResourceLimits.h"
+#include "../Include/ShHandle.h"
+
+#include "../Include/ResourceLimits.h"
+#include "../MachineIndependent/Versions.h"
+
+static_assert(int(GLSLANG_STAGE_COUNT) == EShLangCount, "");
+static_assert(int(GLSLANG_STAGE_MASK_COUNT) == EShLanguageMaskCount, "");
+static_assert(int(GLSLANG_SOURCE_COUNT) == glslang::EShSourceCount, "");
+static_assert(int(GLSLANG_CLIENT_COUNT) == glslang::EShClientCount, "");
+static_assert(int(GLSLANG_TARGET_COUNT) == glslang::EShTargetCount, "");
+static_assert(int(GLSLANG_TARGET_CLIENT_VERSION_COUNT) == glslang::EShTargetClientVersionCount, "");
+static_assert(int(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT) == glslang::EShTargetLanguageVersionCount, "");
+static_assert(int(GLSLANG_OPT_LEVEL_COUNT) == EshOptLevelCount, "");
+static_assert(int(GLSLANG_TEX_SAMP_TRANS_COUNT) == EShTexSampTransCount, "");
+static_assert(int(GLSLANG_MSG_COUNT) == EShMsgCount, "");
+static_assert(int(GLSLANG_REFLECTION_COUNT) == EShReflectionCount, "");
+static_assert(int(GLSLANG_PROFILE_COUNT) == EProfileCount, "");
+static_assert(sizeof(glslang_limits_t) == sizeof(TLimits), "");
+static_assert(sizeof(glslang_resource_t) == sizeof(TBuiltInResource), "");
+
+typedef struct glslang_shader_s {
+    glslang::TShader* shader;
+    std::string preprocessedGLSL;
+} glslang_shader_t;
+
+typedef struct glslang_program_s {
+    glslang::TProgram* program;
+    std::vector<unsigned int> spirv;
+    std::string loggerMessages;
+} glslang_program_t;
+
+/* Wrapper/Adapter for C glsl_include_callbacks_t functions
+
+   This class contains a 'glsl_include_callbacks_t' structure
+   with C include_local/include_system callback pointers.
+
+   This class implements the TShader::Includer interface
+   by redirecting C++ virtual methods to C callbacks.
+ + The 'IncludeResult' instances produced by this Includer + contain a reference to glsl_include_result_t C structure + to allow its lifetime management by another C callback + (CallbackIncluder::callbacks::free_include_result) +*/ +class CallbackIncluder : public glslang::TShader::Includer { +public: + /* Wrapper of IncludeResult which stores a glsl_include_result object internally */ + class CallbackIncludeResult : public glslang::TShader::Includer::IncludeResult { + public: + CallbackIncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, + void* userData, glsl_include_result_t* includeResult) + : glslang::TShader::Includer::IncludeResult(headerName, headerData, headerLength, userData), + includeResult(includeResult) + { + } + + virtual ~CallbackIncludeResult() {} + + protected: + friend class CallbackIncluder; + + glsl_include_result_t* includeResult; + }; + +public: + CallbackIncluder(glsl_include_callbacks_t _callbacks, void* _context) : callbacks(_callbacks), context(_context) {} + + virtual ~CallbackIncluder() {} + + virtual IncludeResult* includeSystem(const char* headerName, const char* includerName, + size_t inclusionDepth) override + { + if (this->callbacks.include_system) { + glsl_include_result_t* result = + this->callbacks.include_system(this->context, headerName, includerName, inclusionDepth); + + return new CallbackIncludeResult(std::string(headerName), result->header_data, result->header_length, + nullptr, result); + } + + return glslang::TShader::Includer::includeSystem(headerName, includerName, inclusionDepth); + } + + virtual IncludeResult* includeLocal(const char* headerName, const char* includerName, + size_t inclusionDepth) override + { + if (this->callbacks.include_local) { + glsl_include_result_t* result = + this->callbacks.include_local(this->context, headerName, includerName, inclusionDepth); + + return new CallbackIncludeResult(std::string(headerName), result->header_data, result->header_length, + nullptr, result); + } + + return glslang::TShader::Includer::includeLocal(headerName, includerName, inclusionDepth); + } + + /* This function only calls free_include_result callback + when the IncludeResult instance is allocated by a C function */ + virtual void releaseInclude(IncludeResult* result) override + { + if (result == nullptr) + return; + + if (this->callbacks.free_include_result && (result->userData == nullptr)) { + CallbackIncludeResult* innerResult = static_cast(result); + /* use internal free() function */ + this->callbacks.free_include_result(this->context, innerResult->includeResult); + /* ignore internal fields of TShader::Includer::IncludeResult */ + delete result; + return; + } + + delete[] static_cast(result->userData); + delete result; + } + +private: + CallbackIncluder() {} + + /* C callback pointers */ + glsl_include_callbacks_t callbacks; + /* User-defined context */ + void* context; +}; + +GLSLANG_EXPORT int glslang_initialize_process() { return static_cast(glslang::InitializeProcess()); } + +GLSLANG_EXPORT void glslang_finalize_process() { glslang::FinalizeProcess(); } + +static EShLanguage c_shader_stage(glslang_stage_t stage) +{ + switch (stage) { + case GLSLANG_STAGE_VERTEX: + return EShLangVertex; + case GLSLANG_STAGE_TESSCONTROL: + return EShLangTessControl; + case GLSLANG_STAGE_TESSEVALUATION: + return EShLangTessEvaluation; + case GLSLANG_STAGE_GEOMETRY: + return EShLangGeometry; + case GLSLANG_STAGE_FRAGMENT: + return EShLangFragment; + case GLSLANG_STAGE_COMPUTE: + return EShLangCompute; 
+ case GLSLANG_STAGE_RAYGEN_NV: + return EShLangRayGen; + case GLSLANG_STAGE_INTERSECT_NV: + return EShLangIntersect; + case GLSLANG_STAGE_ANYHIT_NV: + return EShLangAnyHit; + case GLSLANG_STAGE_CLOSESTHIT_NV: + return EShLangClosestHit; + case GLSLANG_STAGE_MISS_NV: + return EShLangMiss; + case GLSLANG_STAGE_CALLABLE_NV: + return EShLangCallable; + case GLSLANG_STAGE_TASK_NV: + return EShLangTaskNV; + case GLSLANG_STAGE_MESH_NV: + return EShLangMeshNV; + default: + break; + } + return EShLangCount; +} + +static int c_shader_messages(glslang_messages_t messages) +{ +#define CONVERT_MSG(in, out) \ + if ((messages & in) == in) \ + res |= out; + + int res = 0; + + CONVERT_MSG(GLSLANG_MSG_RELAXED_ERRORS_BIT, EShMsgRelaxedErrors); + CONVERT_MSG(GLSLANG_MSG_SUPPRESS_WARNINGS_BIT, EShMsgSuppressWarnings); + CONVERT_MSG(GLSLANG_MSG_AST_BIT, EShMsgAST); + CONVERT_MSG(GLSLANG_MSG_SPV_RULES_BIT, EShMsgSpvRules); + CONVERT_MSG(GLSLANG_MSG_VULKAN_RULES_BIT, EShMsgVulkanRules); + CONVERT_MSG(GLSLANG_MSG_ONLY_PREPROCESSOR_BIT, EShMsgOnlyPreprocessor); + CONVERT_MSG(GLSLANG_MSG_READ_HLSL_BIT, EShMsgReadHlsl); + CONVERT_MSG(GLSLANG_MSG_CASCADING_ERRORS_BIT, EShMsgCascadingErrors); + CONVERT_MSG(GLSLANG_MSG_KEEP_UNCALLED_BIT, EShMsgKeepUncalled); + CONVERT_MSG(GLSLANG_MSG_HLSL_OFFSETS_BIT, EShMsgHlslOffsets); + CONVERT_MSG(GLSLANG_MSG_DEBUG_INFO_BIT, EShMsgDebugInfo); + CONVERT_MSG(GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT, EShMsgHlslEnable16BitTypes); + CONVERT_MSG(GLSLANG_MSG_HLSL_LEGALIZATION_BIT, EShMsgHlslLegalization); + CONVERT_MSG(GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT, EShMsgHlslDX9Compatible); + CONVERT_MSG(GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT, EShMsgBuiltinSymbolTable); + return res; +#undef CONVERT_MSG +} + +static glslang::EShTargetLanguageVersion +c_shader_target_language_version(glslang_target_language_version_t target_language_version) +{ + switch (target_language_version) { + case GLSLANG_TARGET_SPV_1_0: + return glslang::EShTargetSpv_1_0; + case GLSLANG_TARGET_SPV_1_1: + return glslang::EShTargetSpv_1_1; + case GLSLANG_TARGET_SPV_1_2: + return glslang::EShTargetSpv_1_2; + case GLSLANG_TARGET_SPV_1_3: + return glslang::EShTargetSpv_1_3; + case GLSLANG_TARGET_SPV_1_4: + return glslang::EShTargetSpv_1_4; + case GLSLANG_TARGET_SPV_1_5: + return glslang::EShTargetSpv_1_5; + default: + break; + } + return glslang::EShTargetSpv_1_0; +} + +static glslang::EShClient c_shader_client(glslang_client_t client) +{ + switch (client) { + case GLSLANG_CLIENT_VULKAN: + return glslang::EShClientVulkan; + case GLSLANG_CLIENT_OPENGL: + return glslang::EShClientOpenGL; + default: + break; + } + + return glslang::EShClientNone; +} + +static glslang::EShTargetClientVersion c_shader_client_version(glslang_target_client_version_t client_version) +{ + switch (client_version) { + case GLSLANG_TARGET_VULKAN_1_1: + return glslang::EShTargetVulkan_1_1; + case GLSLANG_TARGET_OPENGL_450: + return glslang::EShTargetOpenGL_450; + default: + break; + } + + return glslang::EShTargetVulkan_1_0; +} + +static glslang::EShTargetLanguage c_shader_target_language(glslang_target_language_t target_language) +{ + if (target_language == GLSLANG_TARGET_NONE) + return glslang::EShTargetNone; + + return glslang::EShTargetSpv; +} + +static glslang::EShSource c_shader_source(glslang_source_t source) +{ + switch (source) { + case GLSLANG_SOURCE_GLSL: + return glslang::EShSourceGlsl; + case GLSLANG_SOURCE_HLSL: + return glslang::EShSourceHlsl; + default: + break; + } + + return glslang::EShSourceNone; +} + +static EProfile 
c_shader_profile(glslang_profile_t profile) +{ + switch (profile) { + case GLSLANG_BAD_PROFILE: + return EBadProfile; + case GLSLANG_NO_PROFILE: + return ENoProfile; + case GLSLANG_CORE_PROFILE: + return ECoreProfile; + case GLSLANG_COMPATIBILITY_PROFILE: + return ECompatibilityProfile; + case GLSLANG_ES_PROFILE: + return EEsProfile; + case GLSLANG_PROFILE_COUNT: // Should not use this + break; + } + + return EProfile(); +} + +GLSLANG_EXPORT glslang_shader_t* glslang_shader_create(const glslang_input_t* input) +{ + if (!input || !input->code) { + printf("Error creating shader: null input(%p)/input->code\n", input); + + if (input) + printf("input->code = %p\n", input->code); + + return nullptr; + } + + glslang_shader_t* shader = new glslang_shader_t(); + + shader->shader = new glslang::TShader(c_shader_stage(input->stage)); + shader->shader->setStrings(&input->code, 1); + shader->shader->setEnvInput(c_shader_source(input->language), c_shader_stage(input->stage), + c_shader_client(input->client), input->default_version); + shader->shader->setEnvClient(c_shader_client(input->client), c_shader_client_version(input->client_version)); + shader->shader->setEnvTarget(c_shader_target_language(input->target_language), + c_shader_target_language_version(input->target_language_version)); + + return shader; +} + +GLSLANG_EXPORT const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader) +{ + return shader->preprocessedGLSL.c_str(); +} + +GLSLANG_EXPORT int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input) +{ + DirStackFileIncluder Includer; + /* TODO: use custom callbacks if they are available in 'i->callbacks' */ + return shader->shader->preprocess( + reinterpret_cast(input->resource), + input->default_version, + c_shader_profile(input->default_profile), + input->force_default_version_and_profile != 0, + input->forward_compatible != 0, + (EShMessages)c_shader_messages(input->messages), + &shader->preprocessedGLSL, + Includer + ); +} + +GLSLANG_EXPORT int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input) +{ + const char* preprocessedCStr = shader->preprocessedGLSL.c_str(); + shader->shader->setStrings(&preprocessedCStr, 1); + + return shader->shader->parse( + reinterpret_cast(input->resource), + input->default_version, + input->forward_compatible != 0, + (EShMessages)c_shader_messages(input->messages) + ); +} + +GLSLANG_EXPORT const char* glslang_shader_get_info_log(glslang_shader_t* shader) { return shader->shader->getInfoLog(); } + +GLSLANG_EXPORT const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader) { return shader->shader->getInfoDebugLog(); } + +GLSLANG_EXPORT void glslang_shader_delete(glslang_shader_t* shader) +{ + if (!shader) + return; + + delete (shader->shader); + delete (shader); +} + +GLSLANG_EXPORT glslang_program_t* glslang_program_create() +{ + glslang_program_t* p = new glslang_program_t(); + p->program = new glslang::TProgram(); + return p; +} + +GLSLANG_EXPORT void glslang_program_delete(glslang_program_t* program) +{ + if (!program) + return; + + delete (program->program); + delete (program); +} + +GLSLANG_EXPORT void glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader) +{ + program->program->addShader(shader->shader); +} + +GLSLANG_EXPORT int glslang_program_link(glslang_program_t* program, int messages) +{ + return (int)program->program->link((EShMessages)messages); +} + +GLSLANG_EXPORT const char* glslang_program_get_info_log(glslang_program_t* program) +{ + 
return program->program->getInfoLog(); +} + +GLSLANG_EXPORT const char* glslang_program_get_info_debug_log(glslang_program_t* program) +{ + return program->program->getInfoDebugLog(); +} diff --git a/src/third_party/glslang/glslang/GenericCodeGen/CodeGen.cpp b/src/third_party/glslang/glslang/GenericCodeGen/CodeGen.cpp new file mode 100644 index 0000000..b3c7226 --- /dev/null +++ b/src/third_party/glslang/glslang/GenericCodeGen/CodeGen.cpp @@ -0,0 +1,76 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/Common.h" +#include "../Include/ShHandle.h" +#include "../MachineIndependent/Versions.h" + +// +// Here is where real machine specific high-level data would be defined. +// +class TGenericCompiler : public TCompiler { +public: + TGenericCompiler(EShLanguage l, int dOptions) : TCompiler(l, infoSink), debugOptions(dOptions) { } + virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile); + TInfoSink infoSink; + int debugOptions; +}; + +// +// This function must be provided to create the actual +// compile object used by higher level code. It returns +// a subclass of TCompiler. +// +TCompiler* ConstructCompiler(EShLanguage language, int debugOptions) +{ + return new TGenericCompiler(language, debugOptions); +} + +// +// Delete the compiler made by ConstructCompiler +// +void DeleteCompiler(TCompiler* compiler) +{ + delete compiler; +} + +// +// Generate code from the given parse tree +// +bool TGenericCompiler::compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/) +{ + haveValidObjectCode = true; + + return haveValidObjectCode; +} diff --git a/src/third_party/glslang/glslang/GenericCodeGen/Link.cpp b/src/third_party/glslang/glslang/GenericCodeGen/Link.cpp new file mode 100644 index 0000000..c38db0f --- /dev/null +++ b/src/third_party/glslang/glslang/GenericCodeGen/Link.cpp @@ -0,0 +1,91 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// The top level algorithms for linking multiple +// shaders together. +// +#include "../Include/Common.h" +#include "../Include/ShHandle.h" + +// +// Actual link object, derived from the shader handle base classes. +// +class TGenericLinker : public TLinker { +public: + TGenericLinker(EShExecutable e, int dOptions) : TLinker(e, infoSink), debugOptions(dOptions) { } + bool link(TCompilerList&, TUniformMap*) { return true; } + void getAttributeBindings(ShBindingTable const **) const { } + TInfoSink infoSink; + int debugOptions; +}; + +// +// The internal view of a uniform/float object exchanged with the driver. +// +class TUniformLinkedMap : public TUniformMap { +public: + TUniformLinkedMap() { } + virtual int getLocation(const char*) { return 0; } +}; + +TShHandleBase* ConstructLinker(EShExecutable executable, int debugOptions) +{ + return new TGenericLinker(executable, debugOptions); +} + +void DeleteLinker(TShHandleBase* linker) +{ + delete linker; +} + +TUniformMap* ConstructUniformMap() +{ + return new TUniformLinkedMap(); +} + +void DeleteUniformMap(TUniformMap* map) +{ + delete map; +} + +TShHandleBase* ConstructBindings() +{ + return 0; +} + +void DeleteBindingList(TShHandleBase* bindingList) +{ + delete bindingList; +} diff --git a/src/third_party/glslang/glslang/Include/BaseTypes.h b/src/third_party/glslang/glslang/Include/BaseTypes.h new file mode 100644 index 0000000..b69eaeb --- /dev/null +++ b/src/third_party/glslang/glslang/Include/BaseTypes.h @@ -0,0 +1,571 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _BASICTYPES_INCLUDED_ +#define _BASICTYPES_INCLUDED_ + +namespace glslang { + +// +// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this. +// +enum TBasicType { + EbtVoid, + EbtFloat, + EbtDouble, + EbtFloat16, + EbtInt8, + EbtUint8, + EbtInt16, + EbtUint16, + EbtInt, + EbtUint, + EbtInt64, + EbtUint64, + EbtBool, + EbtAtomicUint, + EbtSampler, + EbtStruct, + EbtBlock, + EbtAccStruct, + EbtReference, + EbtRayQuery, + + // HLSL types that live only temporarily. + EbtString, + + EbtNumTypes +}; + +// +// Storage qualifiers. Should align with different kinds of storage or +// resource or GLSL storage qualifier. Expansion is deprecated. +// +// N.B.: You probably DON'T want to add anything here, but rather just add it +// to the built-in variables. See the comment above TBuiltInVariable. +// +// A new built-in variable will normally be an existing qualifier, like 'in', 'out', etc. +// DO NOT follow the design pattern of, say EvqInstanceId, etc. 
+// +enum TStorageQualifier { + EvqTemporary, // For temporaries (within a function), read/write + EvqGlobal, // For globals read/write + EvqConst, // User-defined constant values, will be semantically constant and constant folded + EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable) + EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable) + EvqUniform, // read only, shared with app + EvqBuffer, // read/write, shared with app + EvqShared, // compute shader's read/write 'shared' qualifier + + EvqPayload, + EvqPayloadIn, + EvqHitAttr, + EvqCallableData, + EvqCallableDataIn, + + // parameters + EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter + EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter + EvqInOut, + EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics + + // built-ins read by vertex shader + EvqVertexId, + EvqInstanceId, + + // built-ins written by vertex shader + EvqPosition, + EvqPointSize, + EvqClipVertex, + + // built-ins read by fragment shader + EvqFace, + EvqFragCoord, + EvqPointCoord, + + // built-ins written by fragment shader + EvqFragColor, + EvqFragDepth, + + // end of list + EvqLast +}; + +// +// Subcategories of the TStorageQualifier, simply to give a direct mapping +// between built-in variable names and an numerical value (the enum). +// +// For backward compatibility, there is some redundancy between the +// TStorageQualifier and these. Existing members should both be maintained accurately. +// However, any new built-in variable (and any existing non-redundant one) +// must follow the pattern that the specific built-in is here, and only its +// general qualifier is in TStorageQualifier. +// +// Something like gl_Position, which is sometimes 'in' and sometimes 'out' +// shows up as two different built-in variables in a single stage, but +// only has a single enum in TBuiltInVariable, so both the +// TStorageQualifier and the TBuitinVariable are needed to distinguish +// between them. 
+// +enum TBuiltInVariable { + EbvNone, + EbvNumWorkGroups, + EbvWorkGroupSize, + EbvWorkGroupId, + EbvLocalInvocationId, + EbvGlobalInvocationId, + EbvLocalInvocationIndex, + EbvNumSubgroups, + EbvSubgroupID, + EbvSubGroupSize, + EbvSubGroupInvocation, + EbvSubGroupEqMask, + EbvSubGroupGeMask, + EbvSubGroupGtMask, + EbvSubGroupLeMask, + EbvSubGroupLtMask, + EbvSubgroupSize2, + EbvSubgroupInvocation2, + EbvSubgroupEqMask2, + EbvSubgroupGeMask2, + EbvSubgroupGtMask2, + EbvSubgroupLeMask2, + EbvSubgroupLtMask2, + EbvVertexId, + EbvInstanceId, + EbvVertexIndex, + EbvInstanceIndex, + EbvBaseVertex, + EbvBaseInstance, + EbvDrawId, + EbvPosition, + EbvPointSize, + EbvClipVertex, + EbvClipDistance, + EbvCullDistance, + EbvNormal, + EbvVertex, + EbvMultiTexCoord0, + EbvMultiTexCoord1, + EbvMultiTexCoord2, + EbvMultiTexCoord3, + EbvMultiTexCoord4, + EbvMultiTexCoord5, + EbvMultiTexCoord6, + EbvMultiTexCoord7, + EbvFrontColor, + EbvBackColor, + EbvFrontSecondaryColor, + EbvBackSecondaryColor, + EbvTexCoord, + EbvFogFragCoord, + EbvInvocationId, + EbvPrimitiveId, + EbvLayer, + EbvViewportIndex, + EbvPatchVertices, + EbvTessLevelOuter, + EbvTessLevelInner, + EbvBoundingBox, + EbvTessCoord, + EbvColor, + EbvSecondaryColor, + EbvFace, + EbvFragCoord, + EbvPointCoord, + EbvFragColor, + EbvFragData, + EbvFragDepth, + EbvFragStencilRef, + EbvSampleId, + EbvSamplePosition, + EbvSampleMask, + EbvHelperInvocation, + + EbvBaryCoordNoPersp, + EbvBaryCoordNoPerspCentroid, + EbvBaryCoordNoPerspSample, + EbvBaryCoordSmooth, + EbvBaryCoordSmoothCentroid, + EbvBaryCoordSmoothSample, + EbvBaryCoordPullModel, + + EbvViewIndex, + EbvDeviceIndex, + + EbvFragSizeEXT, + EbvFragInvocationCountEXT, + + EbvSecondaryFragDataEXT, + EbvSecondaryFragColorEXT, + + EbvViewportMaskNV, + EbvSecondaryPositionNV, + EbvSecondaryViewportMaskNV, + EbvPositionPerViewNV, + EbvViewportMaskPerViewNV, + EbvFragFullyCoveredNV, + EbvFragmentSizeNV, + EbvInvocationsPerPixelNV, + // ray tracing + EbvLaunchId, + EbvLaunchSize, + EbvInstanceCustomIndex, + EbvGeometryIndex, + EbvWorldRayOrigin, + EbvWorldRayDirection, + EbvObjectRayOrigin, + EbvObjectRayDirection, + EbvRayTmin, + EbvRayTmax, + EbvHitT, + EbvHitKind, + EbvObjectToWorld, + EbvObjectToWorld3x4, + EbvWorldToObject, + EbvWorldToObject3x4, + EbvIncomingRayFlags, + // barycentrics + EbvBaryCoordNV, + EbvBaryCoordNoPerspNV, + // mesh shaders + EbvTaskCountNV, + EbvPrimitiveCountNV, + EbvPrimitiveIndicesNV, + EbvClipDistancePerViewNV, + EbvCullDistancePerViewNV, + EbvLayerPerViewNV, + EbvMeshViewCountNV, + EbvMeshViewIndicesNV, + + // sm builtins + EbvWarpsPerSM, + EbvSMCount, + EbvWarpID, + EbvSMID, + + // HLSL built-ins that live only temporarily, until they get remapped + // to one of the above. + EbvFragDepthGreater, + EbvFragDepthLesser, + EbvGsOutputStream, + EbvOutputPatch, + EbvInputPatch, + + // structbuffer types + EbvAppendConsume, // no need to differentiate append and consume + EbvRWStructuredBuffer, + EbvStructuredBuffer, + EbvByteAddressBuffer, + EbvRWByteAddressBuffer, + + EbvLast +}; + +// In this enum, order matters; users can assume higher precision is a bigger value +// and EpqNone is 0. 
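Before the precision qualifiers below, a small hypothetical sketch (not glslang code) of how the two enums above are meant to pair up: a built-in variable is identified by a general storage qualifier plus a specific TBuiltInVariable value.

    // Hypothetical illustration only: the struct and table are not part of glslang.
    struct BuiltInBinding {
        const char*       glslName;
        TStorageQualifier storage;  // general category (in/out/legacy qualifier)
        TBuiltInVariable  builtIn;  // specific built-in identity
    };

    static const BuiltInBinding kBindingExamples[] = {
        { "gl_Position",    EvqPosition,  EbvPosition    }, // legacy: has its own qualifier
        { "gl_VertexIndex", EvqVaryingIn, EbvVertexIndex }, // new style: generic 'in' plus Ebv value
        { "gl_FragCoord",   EvqFragCoord, EbvFragCoord   },
    };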
+enum TPrecisionQualifier { + EpqNone = 0, + EpqLow, + EpqMedium, + EpqHigh +}; + +#ifdef GLSLANG_WEB +__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; } +__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; } +#else +// These will show up in error messages +__inline const char* GetStorageQualifierString(TStorageQualifier q) +{ + switch (q) { + case EvqTemporary: return "temp"; break; + case EvqGlobal: return "global"; break; + case EvqConst: return "const"; break; + case EvqConstReadOnly: return "const (read only)"; break; + case EvqVaryingIn: return "in"; break; + case EvqVaryingOut: return "out"; break; + case EvqUniform: return "uniform"; break; + case EvqBuffer: return "buffer"; break; + case EvqShared: return "shared"; break; + case EvqIn: return "in"; break; + case EvqOut: return "out"; break; + case EvqInOut: return "inout"; break; + case EvqVertexId: return "gl_VertexId"; break; + case EvqInstanceId: return "gl_InstanceId"; break; + case EvqPosition: return "gl_Position"; break; + case EvqPointSize: return "gl_PointSize"; break; + case EvqClipVertex: return "gl_ClipVertex"; break; + case EvqFace: return "gl_FrontFacing"; break; + case EvqFragCoord: return "gl_FragCoord"; break; + case EvqPointCoord: return "gl_PointCoord"; break; + case EvqFragColor: return "fragColor"; break; + case EvqFragDepth: return "gl_FragDepth"; break; + case EvqPayload: return "rayPayloadNV"; break; + case EvqPayloadIn: return "rayPayloadInNV"; break; + case EvqHitAttr: return "hitAttributeNV"; break; + case EvqCallableData: return "callableDataNV"; break; + case EvqCallableDataIn: return "callableDataInNV"; break; + default: return "unknown qualifier"; + } +} + +__inline const char* GetBuiltInVariableString(TBuiltInVariable v) +{ + switch (v) { + case EbvNone: return ""; + case EbvNumWorkGroups: return "NumWorkGroups"; + case EbvWorkGroupSize: return "WorkGroupSize"; + case EbvWorkGroupId: return "WorkGroupID"; + case EbvLocalInvocationId: return "LocalInvocationID"; + case EbvGlobalInvocationId: return "GlobalInvocationID"; + case EbvLocalInvocationIndex: return "LocalInvocationIndex"; + case EbvNumSubgroups: return "NumSubgroups"; + case EbvSubgroupID: return "SubgroupID"; + case EbvSubGroupSize: return "SubGroupSize"; + case EbvSubGroupInvocation: return "SubGroupInvocation"; + case EbvSubGroupEqMask: return "SubGroupEqMask"; + case EbvSubGroupGeMask: return "SubGroupGeMask"; + case EbvSubGroupGtMask: return "SubGroupGtMask"; + case EbvSubGroupLeMask: return "SubGroupLeMask"; + case EbvSubGroupLtMask: return "SubGroupLtMask"; + case EbvSubgroupSize2: return "SubgroupSize"; + case EbvSubgroupInvocation2: return "SubgroupInvocationID"; + case EbvSubgroupEqMask2: return "SubgroupEqMask"; + case EbvSubgroupGeMask2: return "SubgroupGeMask"; + case EbvSubgroupGtMask2: return "SubgroupGtMask"; + case EbvSubgroupLeMask2: return "SubgroupLeMask"; + case EbvSubgroupLtMask2: return "SubgroupLtMask"; + case EbvVertexId: return "VertexId"; + case EbvInstanceId: return "InstanceId"; + case EbvVertexIndex: return "VertexIndex"; + case EbvInstanceIndex: return "InstanceIndex"; + case EbvBaseVertex: return "BaseVertex"; + case EbvBaseInstance: return "BaseInstance"; + case EbvDrawId: return "DrawId"; + case EbvPosition: return "Position"; + case EbvPointSize: return "PointSize"; + case EbvClipVertex: return "ClipVertex"; + case EbvClipDistance: return "ClipDistance"; + case EbvCullDistance: return "CullDistance"; + case EbvNormal: return "Normal"; + case 
EbvVertex: return "Vertex"; + case EbvMultiTexCoord0: return "MultiTexCoord0"; + case EbvMultiTexCoord1: return "MultiTexCoord1"; + case EbvMultiTexCoord2: return "MultiTexCoord2"; + case EbvMultiTexCoord3: return "MultiTexCoord3"; + case EbvMultiTexCoord4: return "MultiTexCoord4"; + case EbvMultiTexCoord5: return "MultiTexCoord5"; + case EbvMultiTexCoord6: return "MultiTexCoord6"; + case EbvMultiTexCoord7: return "MultiTexCoord7"; + case EbvFrontColor: return "FrontColor"; + case EbvBackColor: return "BackColor"; + case EbvFrontSecondaryColor: return "FrontSecondaryColor"; + case EbvBackSecondaryColor: return "BackSecondaryColor"; + case EbvTexCoord: return "TexCoord"; + case EbvFogFragCoord: return "FogFragCoord"; + case EbvInvocationId: return "InvocationID"; + case EbvPrimitiveId: return "PrimitiveID"; + case EbvLayer: return "Layer"; + case EbvViewportIndex: return "ViewportIndex"; + case EbvPatchVertices: return "PatchVertices"; + case EbvTessLevelOuter: return "TessLevelOuter"; + case EbvTessLevelInner: return "TessLevelInner"; + case EbvBoundingBox: return "BoundingBox"; + case EbvTessCoord: return "TessCoord"; + case EbvColor: return "Color"; + case EbvSecondaryColor: return "SecondaryColor"; + case EbvFace: return "Face"; + case EbvFragCoord: return "FragCoord"; + case EbvPointCoord: return "PointCoord"; + case EbvFragColor: return "FragColor"; + case EbvFragData: return "FragData"; + case EbvFragDepth: return "FragDepth"; + case EbvFragStencilRef: return "FragStencilRef"; + case EbvSampleId: return "SampleId"; + case EbvSamplePosition: return "SamplePosition"; + case EbvSampleMask: return "SampleMaskIn"; + case EbvHelperInvocation: return "HelperInvocation"; + + case EbvBaryCoordNoPersp: return "BaryCoordNoPersp"; + case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid"; + case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample"; + case EbvBaryCoordSmooth: return "BaryCoordSmooth"; + case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid"; + case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample"; + case EbvBaryCoordPullModel: return "BaryCoordPullModel"; + + case EbvViewIndex: return "ViewIndex"; + case EbvDeviceIndex: return "DeviceIndex"; + + case EbvFragSizeEXT: return "FragSizeEXT"; + case EbvFragInvocationCountEXT: return "FragInvocationCountEXT"; + + case EbvSecondaryFragDataEXT: return "SecondaryFragDataEXT"; + case EbvSecondaryFragColorEXT: return "SecondaryFragColorEXT"; + + case EbvViewportMaskNV: return "ViewportMaskNV"; + case EbvSecondaryPositionNV: return "SecondaryPositionNV"; + case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV"; + case EbvPositionPerViewNV: return "PositionPerViewNV"; + case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV"; + case EbvFragFullyCoveredNV: return "FragFullyCoveredNV"; + case EbvFragmentSizeNV: return "FragmentSizeNV"; + case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV"; + case EbvLaunchId: return "LaunchIdNV"; + case EbvLaunchSize: return "LaunchSizeNV"; + case EbvInstanceCustomIndex: return "InstanceCustomIndexNV"; + case EbvGeometryIndex: return "GeometryIndexEXT"; + case EbvWorldRayOrigin: return "WorldRayOriginNV"; + case EbvWorldRayDirection: return "WorldRayDirectionNV"; + case EbvObjectRayOrigin: return "ObjectRayOriginNV"; + case EbvObjectRayDirection: return "ObjectRayDirectionNV"; + case EbvRayTmin: return "ObjectRayTminNV"; + case EbvRayTmax: return "ObjectRayTmaxNV"; + case EbvHitT: return "HitTNV"; + case EbvHitKind: return "HitKindNV"; + case 
EbvIncomingRayFlags: return "IncomingRayFlagsNV"; + case EbvObjectToWorld: return "ObjectToWorldNV"; + case EbvWorldToObject: return "WorldToObjectNV"; + + case EbvBaryCoordNV: return "BaryCoordNV"; + case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV"; + + case EbvTaskCountNV: return "TaskCountNV"; + case EbvPrimitiveCountNV: return "PrimitiveCountNV"; + case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV"; + case EbvClipDistancePerViewNV: return "ClipDistancePerViewNV"; + case EbvCullDistancePerViewNV: return "CullDistancePerViewNV"; + case EbvLayerPerViewNV: return "LayerPerViewNV"; + case EbvMeshViewCountNV: return "MeshViewCountNV"; + case EbvMeshViewIndicesNV: return "MeshViewIndicesNV"; + + case EbvWarpsPerSM: return "WarpsPerSMNV"; + case EbvSMCount: return "SMCountNV"; + case EbvWarpID: return "WarpIDNV"; + case EbvSMID: return "SMIDNV"; + + default: return "unknown built-in variable"; + } +} + +__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) +{ + switch (p) { + case EpqNone: return ""; break; + case EpqLow: return "lowp"; break; + case EpqMedium: return "mediump"; break; + case EpqHigh: return "highp"; break; + default: return "unknown precision qualifier"; + } +} +#endif + +__inline bool isTypeSignedInt(TBasicType type) +{ + switch (type) { + case EbtInt8: + case EbtInt16: + case EbtInt: + case EbtInt64: + return true; + default: + return false; + } +} + +__inline bool isTypeUnsignedInt(TBasicType type) +{ + switch (type) { + case EbtUint8: + case EbtUint16: + case EbtUint: + case EbtUint64: + return true; + default: + return false; + } +} + +__inline bool isTypeInt(TBasicType type) +{ + return isTypeSignedInt(type) || isTypeUnsignedInt(type); +} + +__inline bool isTypeFloat(TBasicType type) +{ + switch (type) { + case EbtFloat: + case EbtDouble: + case EbtFloat16: + return true; + default: + return false; + } +} + +__inline int getTypeRank(TBasicType type) +{ + int res = -1; + switch(type) { + case EbtInt8: + case EbtUint8: + res = 0; + break; + case EbtInt16: + case EbtUint16: + res = 1; + break; + case EbtInt: + case EbtUint: + res = 2; + break; + case EbtInt64: + case EbtUint64: + res = 3; + break; + default: + assert(false); + break; + } + return res; +} + +} // end namespace glslang + +#endif // _BASICTYPES_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/Common.h b/src/third_party/glslang/glslang/Include/Common.h new file mode 100644 index 0000000..b628cdc --- /dev/null +++ b/src/third_party/glslang/glslang/Include/Common.h @@ -0,0 +1,291 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
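The classification helpers defined above (isTypeInt, isTypeFloat, getTypeRank) are what the front end leans on when comparing integer widths. A hypothetical sketch of the kind of check they enable (illustrative only; the function name and include path are assumptions):

    #include "glslang/Include/BaseTypes.h"  // assumed vendored path

    // Hypothetical helper: an integer type may widen to another integer type of
    // equal or higher rank (int8 -> int16 -> int/uint -> int64).
    bool widensToInt(glslang::TBasicType from, glslang::TBasicType to)
    {
        return glslang::isTypeInt(from) && glslang::isTypeInt(to) &&
               glslang::getTypeRank(from) <= glslang::getTypeRank(to);
    }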
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+#ifndef _COMMON_INCLUDED_
+#define _COMMON_INCLUDED_
+
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
+#include <sstream>
+namespace std {
+template <typename T>
+std::string to_string(const T& val) {
+  std::ostringstream os;
+  os << val;
+  return os.str();
+}
+}
+#endif
+
+#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
+    #include <basetsd.h>
+    #ifndef snprintf
+    #define snprintf sprintf_s
+    #endif
+    #define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
+#elif defined (solaris)
+    #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+    #include <sys/int_types.h>
+    #define UINT_PTR uintptr_t
+#else
+    #define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
+    #include <stdint.h>
+    #define UINT_PTR uintptr_t
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+    #include <stdlib.h>
+    inline long long int strtoll (const char* str, char** endptr, int base)
+    {
+        return _strtoi64(str, endptr, base);
+    }
+    inline unsigned long long int strtoull (const char* str, char** endptr, int base)
+    {
+        return _strtoui64(str, endptr, base);
+    }
+    inline long long int atoll (const char* str)
+    {
+        return strtoll(str, NULL, 10);
+    }
+#endif
+
+#if defined(_MSC_VER)
+#define strdup _strdup
+#endif
+
+/* windows only pragma */
+#ifdef _MSC_VER
+    #pragma warning(disable : 4786) // Don't warn about too long identifiers
+    #pragma warning(disable : 4514) // unused inline method
+    #pragma warning(disable : 4201) // nameless union
+#endif
+
+#include "PoolAlloc.h"
+
+//
+// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
+//
+#define POOL_ALLOCATOR_NEW_DELETE(A)                                  \
+    void* operator new(size_t s) { return (A).allocate(s); }         \
+    void* operator new(size_t, void *_Where) { return (_Where); }    \
+    void operator delete(void*) { }                                   \
+    void operator delete(void *, void *) { }                          \
+    void* operator new[](size_t s) { return (A).allocate(s); }       \
+    void* operator new[](size_t, void *_Where) { return (_Where); }  \
+    void operator delete[](void*) { }                                 \
+    void operator delete[](void *, void *) { }
+
+namespace glslang {
+
+    //
+    // Pool version of string.
+    //
+    typedef pool_allocator<char> TStringAllocator;
+    typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
+
+} // end namespace glslang
+
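POOL_ALLOCATOR_NEW_DELETE and the pooled TString typedef above are the building blocks the front end uses for transient allocations. A minimal sketch of how a type opts in (illustrative only; it assumes a TPoolAllocator has already been made current for the thread, as PoolAlloc.h requires, and an assumed vendored include path):

    #include "glslang/Include/Common.h"  // assumed vendored path

    // Illustrative sketch: both the node and its string storage come from the
    // thread's pool allocator and are reclaimed when that pool is popped,
    // never through individual 'delete' calls.
    class TExampleNode {
    public:
        POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator())
        glslang::TString name;
    };

    void pooledAllocationExample()
    {
        TExampleNode* node = new TExampleNode;
        node->name = "main";
    }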
+// Repackage the std::hash for use by unordered map/set with a TString key.
+namespace std {
+
+    template<> struct hash<glslang::TString> {
+        std::size_t operator()(const glslang::TString& s) const
+        {
+            const unsigned _FNV_offset_basis = 2166136261U;
+            const unsigned _FNV_prime = 16777619U;
+            unsigned _Val = _FNV_offset_basis;
+            size_t _Count = s.size();
+            const char* _First = s.c_str();
+            for (size_t _Next = 0; _Next < _Count; ++_Next)
+            {
+                _Val ^= (unsigned)_First[_Next];
+                _Val *= _FNV_prime;
+            }
+
+            return _Val;
+        }
+    };
+}
+
+namespace glslang {
+
+inline TString* NewPoolTString(const char* s)
+{
+    void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
+    return new(memory) TString(s);
+}
+
+template<class T> inline T* NewPoolObject(T*)
+{
+    return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
+}
+
+template<class T> inline T* NewPoolObject(T, int instances)
+{
+    return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
+}
+
+//
+// Pool allocator versions of vectors, lists, and maps
+//
+template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+
+    typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
+    TVector() : std::vector<T, pool_allocator<T> >() {}
+    TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
+    TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
+    TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
+};
+
+template <class T> class TList : public std::list<T, pool_allocator<T> > {
+};
+
+template <class K, class D, class CMP = std::less<K> >
+class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
+};
+
+template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
+class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
+};
+
+//
+// Persistent string memory. Should only be used for strings that survive
+// across compiles/links.
+//
+typedef std::basic_string<char> TPersistString;
+
+//
+// templatized min and max functions.
+//
+template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
+template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
+
+//
+// Create a TString object from an integer.
+//
+#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+inline const TString String(const int i, const int base = 10)
+{
+    char text[16];     // 32 bit ints are at most 10 digits in base 10
+    _itoa_s(i, text, sizeof(text), base);
+    return text;
+}
+#else
+inline const TString String(const int i, const int /*base*/ = 10)
+{
+    char text[16];     // 32 bit ints are at most 10 digits in base 10
+
+    // we assume base 10 for all cases
+    snprintf(text, sizeof(text), "%d", i);
+
+    return text;
+}
+#endif
+
+struct TSourceLoc {
+    void init()
+    {
+        name = nullptr; string = 0; line = 0; column = 0;
+    }
+    void init(int stringNum) { init(); string = stringNum; }
+    // Returns the name if it exists. Otherwise, returns the string number.
+    std::string getStringNameOrNum(bool quoteStringName = true) const
+    {
+        if (name != nullptr) {
+            TString qstr = quoteStringName ? ("\"" + *name + "\"") : *name;
+            std::string ret_str(qstr.c_str());
+            return ret_str;
+        }
+        return std::to_string((long long)string);
+    }
+    const char* getFilename() const
+    {
+        if (name == nullptr)
+            return nullptr;
+        return name->c_str();
+    }
+    const char* getFilenameStr() const { return name == nullptr ? "" : name->c_str(); }
+    TString* name; // descriptive name for this string, when a textual name is available, otherwise nullptr
+    int string;
+    int line;
+    int column;
+};
+
+class TPragmaTable : public TMap<TString, TString> {
+public:
+    POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
+};
+
+const int MaxTokenLength = 1024;
+
+template <class T> bool IsPow2(T powerOf2)
+{
+    if (powerOf2 <= 0)
+        return false;
+
+    return (powerOf2 & (powerOf2 - 1)) == 0;
+}
+
+// Round number up to a multiple of the given powerOf2, which is not
+// a power, just a number that must be a power of 2.
+template <class T> void RoundToPow2(T& number, int powerOf2)
+{
+    assert(IsPow2(powerOf2));
+    number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
+}
+
+template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
+{
+    assert(IsPow2(powerOf2));
+    return ! (number & (powerOf2 - 1));
+}
+
+} // end namespace glslang
+
+#endif // _COMMON_INCLUDED_
diff --git a/src/third_party/glslang/glslang/Include/ConstantUnion.h b/src/third_party/glslang/glslang/Include/ConstantUnion.h
new file mode 100644
index 0000000..c4ffb85
--- /dev/null
+++ b/src/third_party/glslang/glslang/Include/ConstantUnion.h
@@ -0,0 +1,974 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2013 LunarG, Inc.
+// Copyright (C) 2017 ARM Limited.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+// + +#ifndef _CONSTANT_UNION_INCLUDED_ +#define _CONSTANT_UNION_INCLUDED_ + +#include "../Include/Common.h" +#include "../Include/BaseTypes.h" + +namespace glslang { + +class TConstUnion { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + TConstUnion() : iConst(0), type(EbtInt) { } + + void setI8Const(signed char i) + { + i8Const = i; + type = EbtInt8; + } + + void setU8Const(unsigned char u) + { + u8Const = u; + type = EbtUint8; + } + + void setI16Const(signed short i) + { + i16Const = i; + type = EbtInt16; + } + + void setU16Const(unsigned short u) + { + u16Const = u; + type = EbtUint16; + } + + void setIConst(int i) + { + iConst = i; + type = EbtInt; + } + + void setUConst(unsigned int u) + { + uConst = u; + type = EbtUint; + } + + void setI64Const(long long i64) + { + i64Const = i64; + type = EbtInt64; + } + + void setU64Const(unsigned long long u64) + { + u64Const = u64; + type = EbtUint64; + } + + void setDConst(double d) + { + dConst = d; + type = EbtDouble; + } + + void setBConst(bool b) + { + bConst = b; + type = EbtBool; + } + + void setSConst(const TString* s) + { + sConst = s; + type = EbtString; + } + + signed char getI8Const() const { return i8Const; } + unsigned char getU8Const() const { return u8Const; } + signed short getI16Const() const { return i16Const; } + unsigned short getU16Const() const { return u16Const; } + int getIConst() const { return iConst; } + unsigned int getUConst() const { return uConst; } + long long getI64Const() const { return i64Const; } + unsigned long long getU64Const() const { return u64Const; } + double getDConst() const { return dConst; } + bool getBConst() const { return bConst; } + const TString* getSConst() const { return sConst; } + + bool operator==(const signed char i) const + { + if (i == i8Const) + return true; + + return false; + } + + bool operator==(const unsigned char u) const + { + if (u == u8Const) + return true; + + return false; + } + + bool operator==(const signed short i) const + { + if (i == i16Const) + return true; + + return false; + } + + bool operator==(const unsigned short u) const + { + if (u == u16Const) + return true; + + return false; + } + + bool operator==(const int i) const + { + if (i == iConst) + return true; + + return false; + } + + bool operator==(const unsigned int u) const + { + if (u == uConst) + return true; + + return false; + } + + bool operator==(const long long i64) const + { + if (i64 == i64Const) + return true; + + return false; + } + + bool operator==(const unsigned long long u64) const + { + if (u64 == u64Const) + return true; + + return false; + } + + bool operator==(const double d) const + { + if (d == dConst) + return true; + + return false; + } + + bool operator==(const bool b) const + { + if (b == bConst) + return true; + + return false; + } + + bool operator==(const TConstUnion& constant) const + { + if (constant.type != type) + return false; + + switch (type) { + case EbtInt: + if (constant.iConst == iConst) + return true; + + break; + case EbtUint: + if (constant.uConst == uConst) + return true; + + break; + case EbtBool: + if (constant.bConst == bConst) + return true; + + break; + case EbtDouble: + if (constant.dConst == dConst) + return true; + + break; + +#ifndef GLSLANG_WEB + case EbtInt16: + if (constant.i16Const == i16Const) + return true; + + break; + case EbtUint16: + if (constant.u16Const == u16Const) + return true; + + break; + case EbtInt8: + if (constant.i8Const == i8Const) + return true; + + break; + case EbtUint8: + if (constant.u8Const == u8Const) + return 
true; + + break; + case EbtInt64: + if (constant.i64Const == i64Const) + return true; + + break; + case EbtUint64: + if (constant.u64Const == u64Const) + return true; + + break; +#endif + default: + assert(false && "Default missing"); + } + + return false; + } + + bool operator!=(const signed char i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned char u) const + { + return !operator==(u); + } + + bool operator!=(const signed short i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned short u) const + { + return !operator==(u); + } + + bool operator!=(const int i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned int u) const + { + return !operator==(u); + } + + bool operator!=(const long long i) const + { + return !operator==(i); + } + + bool operator!=(const unsigned long long u) const + { + return !operator==(u); + } + + bool operator!=(const float f) const + { + return !operator==(f); + } + + bool operator!=(const bool b) const + { + return !operator==(b); + } + + bool operator!=(const TConstUnion& constant) const + { + return !operator==(constant); + } + + bool operator>(const TConstUnion& constant) const + { + assert(type == constant.type); + switch (type) { + case EbtInt: + if (iConst > constant.iConst) + return true; + + return false; + case EbtUint: + if (uConst > constant.uConst) + return true; + + return false; + case EbtDouble: + if (dConst > constant.dConst) + return true; + + return false; +#ifndef GLSLANG_WEB + case EbtInt8: + if (i8Const > constant.i8Const) + return true; + + return false; + case EbtUint8: + if (u8Const > constant.u8Const) + return true; + + return false; + case EbtInt16: + if (i16Const > constant.i16Const) + return true; + + return false; + case EbtUint16: + if (u16Const > constant.u16Const) + return true; + + return false; + case EbtInt64: + if (i64Const > constant.i64Const) + return true; + + return false; + case EbtUint64: + if (u64Const > constant.u64Const) + return true; + + return false; +#endif + default: + assert(false && "Default missing"); + return false; + } + } + + bool operator<(const TConstUnion& constant) const + { + assert(type == constant.type); + switch (type) { +#ifndef GLSLANG_WEB + case EbtInt8: + if (i8Const < constant.i8Const) + return true; + + return false; + case EbtUint8: + if (u8Const < constant.u8Const) + return true; + + return false; + case EbtInt16: + if (i16Const < constant.i16Const) + return true; + + return false; + case EbtUint16: + if (u16Const < constant.u16Const) + return true; + return false; + case EbtInt64: + if (i64Const < constant.i64Const) + return true; + + return false; + case EbtUint64: + if (u64Const < constant.u64Const) + return true; + + return false; +#endif + case EbtDouble: + if (dConst < constant.dConst) + return true; + + return false; + case EbtInt: + if (iConst < constant.iConst) + return true; + + return false; + case EbtUint: + if (uConst < constant.uConst) + return true; + + return false; + default: + assert(false && "Default missing"); + return false; + } + } + + TConstUnion operator+(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst + constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst + constant.uConst); break; + case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break; + case EbtInt16: 
returnValue.setI16Const(i16Const + constant.i16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break; + case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break; + case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break; + case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator-(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst - constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst - constant.uConst); break; + case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break; + case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break; + case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break; + case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator*(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst * constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst * constant.uConst); break; + case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break; + case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break; + case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break; + case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator%(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst % constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst % constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break; + case EbtInt16: returnValue.setI8Const(i8Const % constant.i16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break; + case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break; + case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break; + case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator>>(const TConstUnion& constant) const + { + TConstUnion returnValue; + switch (type) { +#ifndef GLSLANG_WEB + case EbtInt8: + switch (constant.type) { + case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break; + case EbtUint8: returnValue.setI8Const(i8Const >> constant.u8Const); break; + case EbtInt16: 
returnValue.setI8Const(i8Const >> constant.i16Const); break; + case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break; + case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break; + case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break; + case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break; + case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint8: + switch (constant.type) { + case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break; + case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break; + case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break; + case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break; + case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break; + case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break; + case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtInt16: + switch (constant.type) { + case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break; + case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break; + case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break; + case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break; + case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break; + case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break; + case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint16: + switch (constant.type) { + case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break; + case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break; + case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break; + case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break; + case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break; + case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break; + case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; +#endif + case EbtInt: + switch (constant.type) { + case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break; + case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break; + case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break; + case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break; + case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break; + case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break; + case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; + case EbtUint: + switch (constant.type) { + case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break; + 
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break; + case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break; + case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break; + case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break; + case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break; + case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; +#ifndef GLSLANG_WEB + case EbtInt64: + switch (constant.type) { + case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break; + case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break; + case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break; + case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break; + case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break; + case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break; + case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break; + case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint64: + switch (constant.type) { + case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break; + case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break; + case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break; + case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break; + case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break; + case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break; + case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator<<(const TConstUnion& constant) const + { + TConstUnion returnValue; + switch (type) { +#ifndef GLSLANG_WEB + case EbtInt8: + switch (constant.type) { + case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break; + case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break; + case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break; + case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break; + case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break; + case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break; + case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break; + case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint8: + switch (constant.type) { + case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break; + case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break; + case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break; + case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break; + case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break; + 
case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break; + case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtInt16: + switch (constant.type) { + case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break; + case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break; + case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break; + case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break; + case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break; + case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break; + case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint16: + switch (constant.type) { + case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break; + case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break; + case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break; + case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break; + case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break; + case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break; + case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtInt64: + switch (constant.type) { + case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break; + case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break; + case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break; + case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break; + case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break; + case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break; + case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break; + case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; + case EbtUint64: + switch (constant.type) { + case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break; + case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break; + case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break; + case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break; + case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break; + case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break; + case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break; + default: assert(false && "Default missing"); + } + break; +#endif + case EbtInt: + switch (constant.type) { + case EbtInt: returnValue.setIConst(iConst << constant.iConst); break; + case EbtUint: returnValue.setIConst(iConst << constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break; + case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break; + case EbtInt16: 
returnValue.setIConst(iConst << constant.i16Const); break; + case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break; + case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break; + case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; + case EbtUint: + switch (constant.type) { + case EbtInt: returnValue.setUConst(uConst << constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst << constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break; + case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break; + case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break; + case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break; + case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break; + case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + break; + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator&(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst & constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst & constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator|(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst | constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst | constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break; + case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator^(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break; + case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break; + case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break; + case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break; + case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break; + case EbtInt64: 
returnValue.setI64Const(i64Const ^ constant.i64Const); break; + case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator~() const + { + TConstUnion returnValue; + switch (type) { + case EbtInt: returnValue.setIConst(~iConst); break; + case EbtUint: returnValue.setUConst(~uConst); break; +#ifndef GLSLANG_WEB + case EbtInt8: returnValue.setI8Const(~i8Const); break; + case EbtUint8: returnValue.setU8Const(~u8Const); break; + case EbtInt16: returnValue.setI16Const(~i16Const); break; + case EbtUint16: returnValue.setU16Const(~u16Const); break; + case EbtInt64: returnValue.setI64Const(~i64Const); break; + case EbtUint64: returnValue.setU64Const(~u64Const); break; +#endif + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator&&(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtBool: returnValue.setBConst(bConst && constant.bConst); break; + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TConstUnion operator||(const TConstUnion& constant) const + { + TConstUnion returnValue; + assert(type == constant.type); + switch (type) { + case EbtBool: returnValue.setBConst(bConst || constant.bConst); break; + default: assert(false && "Default missing"); + } + + return returnValue; + } + + TBasicType getType() const { return type; } + +private: + union { + signed char i8Const; // used for i8vec, scalar int8s + unsigned char u8Const; // used for u8vec, scalar uint8s + signed short i16Const; // used for i16vec, scalar int16s + unsigned short u16Const; // used for u16vec, scalar uint16s + int iConst; // used for ivec, scalar ints + unsigned int uConst; // used for uvec, scalar uints + long long i64Const; // used for i64vec, scalar int64s + unsigned long long u64Const; // used for u64vec, scalar uint64s + bool bConst; // used for bvec, scalar bools + double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles + const TString* sConst; // string constant + }; + + TBasicType type; +}; + +// Encapsulate having a pointer to an array of TConstUnion, +// which only needs to be allocated if its size is going to be +// bigger than 0. +// +// One convenience is being able to use [] to go inside the array, instead +// of C++ assuming it as an array of pointers to vectors. +// +// General usage is that the size is known up front, and it is +// created once with the proper size. +// +class TConstUnionArray { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + TConstUnionArray() : unionArray(nullptr) { } + virtual ~TConstUnionArray() { } + + explicit TConstUnionArray(int size) + { + if (size == 0) + unionArray = nullptr; + else + unionArray = new TConstUnionVector(size); + } + TConstUnionArray(const TConstUnionArray& a) = default; + TConstUnionArray(const TConstUnionArray& a, int start, int size) + { + unionArray = new TConstUnionVector(size); + for (int i = 0; i < size; ++i) + (*unionArray)[i] = a[start + i]; + } + + // Use this constructor for a smear operation + TConstUnionArray(int size, const TConstUnion& val) + { + unionArray = new TConstUnionVector(size, val); + } + + int size() const { return unionArray ? 
(int)unionArray->size() : 0; } + TConstUnion& operator[](size_t index) { return (*unionArray)[index]; } + const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; } + bool operator==(const TConstUnionArray& rhs) const + { + // this includes the case that both are unallocated + if (unionArray == rhs.unionArray) + return true; + + if (! unionArray || ! rhs.unionArray) + return false; + + return *unionArray == *rhs.unionArray; + } + bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); } + + double dot(const TConstUnionArray& rhs) + { + assert(rhs.unionArray->size() == unionArray->size()); + double sum = 0.0; + + for (size_t comp = 0; comp < unionArray->size(); ++comp) + sum += (*this)[comp].getDConst() * rhs[comp].getDConst(); + + return sum; + } + + bool empty() const { return unionArray == nullptr; } + +protected: + typedef TVector TConstUnionVector; + TConstUnionVector* unionArray; +}; + +} // end namespace glslang + +#endif // _CONSTANT_UNION_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/InfoSink.h b/src/third_party/glslang/glslang/Include/InfoSink.h new file mode 100644 index 0000000..dceb603 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/InfoSink.h @@ -0,0 +1,144 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _INFOSINK_INCLUDED_ +#define _INFOSINK_INCLUDED_ + +#include "../Include/Common.h" +#include + +namespace glslang { + +// +// TPrefixType is used to centralize how info log messages start. +// See below. +// +enum TPrefixType { + EPrefixNone, + EPrefixWarning, + EPrefixError, + EPrefixInternalError, + EPrefixUnimplemented, + EPrefixNote +}; + +enum TOutputStream { + ENull = 0, + EDebugger = 0x01, + EStdOut = 0x02, + EString = 0x04, +}; +// +// Encapsulate info logs for all objects that have them. +// +// The methods are a general set of tools for getting a variety of +// messages and types inserted into the log. 
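As a rough usage sketch of the logging sink described above and defined just below (illustrative only; the include path is an assumption):

    #include "glslang/Include/InfoSink.h"  // assumed vendored path

    // Illustrative only: accumulate a location-prefixed line, then a warning;
    // the accumulated text is read back later via sink.info.c_str().
    void reportExample(TInfoSink& sink, const glslang::TSourceLoc& loc)
    {
        sink.info << "compiling " << loc.getStringNameOrNum().c_str() << "\n";
        sink.info.message(glslang::EPrefixWarning, "unused variable 'x'", loc);
    }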
+// +class TInfoSinkBase { +public: + TInfoSinkBase() : outputStream(4) {} + void erase() { sink.erase(); } + TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; } + TInfoSinkBase& operator<<(char c) { append(1, c); return *this; } + TInfoSinkBase& operator<<(const char* s) { append(s); return *this; } + TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; } + TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; } + TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size]; + snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n); + append(buf); + return *this; } + TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; } + TInfoSinkBase& operator+(const TString& t) { append(t); return *this; } + TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; } + TInfoSinkBase& operator+(const char* s) { append(s); return *this; } + const char* c_str() const { return sink.c_str(); } + void prefix(TPrefixType message) { + switch(message) { + case EPrefixNone: break; + case EPrefixWarning: append("WARNING: "); break; + case EPrefixError: append("ERROR: "); break; + case EPrefixInternalError: append("INTERNAL ERROR: "); break; + case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break; + case EPrefixNote: append("NOTE: "); break; + default: append("UNKNOWN ERROR: "); break; + } + } + void location(const TSourceLoc& loc) { + const int maxSize = 24; + char locText[maxSize]; + snprintf(locText, maxSize, ":%d", loc.line); + append(loc.getStringNameOrNum(false).c_str()); + append(locText); + append(": "); + } + void message(TPrefixType message, const char* s) { + prefix(message); + append(s); + append("\n"); + } + void message(TPrefixType message, const char* s, const TSourceLoc& loc) { + prefix(message); + location(loc); + append(s); + append("\n"); + } + + void setOutputStream(int output = 4) + { + outputStream = output; + } + +protected: + void append(const char* s); + + void append(int count, char c); + void append(const TPersistString& t); + void append(const TString& t); + + void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2) + sink.reserve(sink.capacity() + sink.capacity() / 2); } + void appendToStream(const char* s); + TPersistString sink; + int outputStream; +}; + +} // end namespace glslang + +class TInfoSink { +public: + glslang::TInfoSinkBase info; + glslang::TInfoSinkBase debug; +}; + +#endif // _INFOSINK_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/InitializeGlobals.h b/src/third_party/glslang/glslang/Include/InitializeGlobals.h new file mode 100644 index 0000000..95d0a40 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/InitializeGlobals.h @@ -0,0 +1,44 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef __INITIALIZE_GLOBALS_INCLUDED_ +#define __INITIALIZE_GLOBALS_INCLUDED_ + +namespace glslang { + +bool InitializePoolIndex(); + +} // end namespace glslang + +#endif // __INITIALIZE_GLOBALS_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/PoolAlloc.h b/src/third_party/glslang/glslang/Include/PoolAlloc.h new file mode 100644 index 0000000..b8eccb8 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/PoolAlloc.h @@ -0,0 +1,316 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _POOLALLOC_INCLUDED_ +#define _POOLALLOC_INCLUDED_ + +#ifdef _DEBUG +# define GUARD_BLOCKS // define to enable guard block sanity checking +#endif + +// +// This header defines an allocator that can be used to efficiently +// allocate a large number of small requests for heap memory, with the +// intention that they are not individually deallocated, but rather +// collectively deallocated at one time. 
+// +// This simultaneously +// +// * Makes each individual allocation much more efficient; the +// typical allocation is trivial. +// * Completely avoids the cost of doing individual deallocation. +// * Saves the trouble of tracking down and plugging a large class of leaks. +// +// Individual classes can use this allocator by supplying their own +// new and delete methods. +// +// STL containers can use this allocator by using the pool_allocator +// class as the allocator (second) template argument. +// + +#include +#include +#include + +namespace glslang { + +// If we are using guard blocks, we must track each individual +// allocation. If we aren't using guard blocks, these +// never get instantiated, so won't have any impact. +// + +class TAllocation { +public: + TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) : + size(size), mem(mem), prevAlloc(prev) { + // Allocations are bracketed: + // [allocationHeader][initialGuardBlock][userData][finalGuardBlock] + // This would be cleaner with if (guardBlockSize)..., but that + // makes the compiler print warnings about 0 length memsets, + // even with the if() protecting them. +# ifdef GUARD_BLOCKS + memset(preGuard(), guardBlockBeginVal, guardBlockSize); + memset(data(), userDataFill, size); + memset(postGuard(), guardBlockEndVal, guardBlockSize); +# endif + } + + void check() const { + checkGuardBlock(preGuard(), guardBlockBeginVal, "before"); + checkGuardBlock(postGuard(), guardBlockEndVal, "after"); + } + + void checkAllocList() const; + + // Return total size needed to accommodate user buffer of 'size', + // plus our tracking data. + inline static size_t allocationSize(size_t size) { + return size + 2 * guardBlockSize + headerSize(); + } + + // Offset from surrounding buffer to get to user data buffer. + inline static unsigned char* offsetAllocation(unsigned char* m) { + return m + guardBlockSize + headerSize(); + } + +private: + void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const; + + // Find offsets to pre and post guard blocks, and user data buffer + unsigned char* preGuard() const { return mem + headerSize(); } + unsigned char* data() const { return preGuard() + guardBlockSize; } + unsigned char* postGuard() const { return data() + size; } + + size_t size; // size of the user data area + unsigned char* mem; // beginning of our allocation (pts to header) + TAllocation* prevAlloc; // prior allocation in the chain + + const static unsigned char guardBlockBeginVal; + const static unsigned char guardBlockEndVal; + const static unsigned char userDataFill; + + const static size_t guardBlockSize; +# ifdef GUARD_BLOCKS + inline static size_t headerSize() { return sizeof(TAllocation); } +# else + inline static size_t headerSize() { return 0; } +# endif +}; + +// +// There are several stacks. One is to track the pushing and popping +// of the user, and not yet implemented. The others are simply a +// repositories of free pages or used pages. +// +// Page stacks are linked together with a simple header at the beginning +// of each allocation obtained from the underlying OS. Multi-page allocations +// are returned to the OS. Individual page allocations are kept for future +// re-use. +// +// The "page size" used is not, nor must it match, the underlying OS +// page size. But, having it be about that size or equal to a set of +// pages is likely most optimal. 
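A minimal sketch of the push/allocate/pop model this comment describes, assuming the thread-pool helpers declared further down in this header; the variable names and the 64-byte request are illustrative only.

    glslang::TPoolAllocator pool;             // default growth increment and alignment (see the constructor below)
    glslang::SetThreadPoolAllocator(&pool);   // make it the current thread's allocator

    pool.push();                              // open an allocation scope
    void* scratch = pool.allocate(64);        // trivially cheap; returns 0 if memory is exhausted
    // ... build transient objects in scratch; nothing is freed individually ...
    pool.pop();                               // everything allocated since push() is released at once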
+// +class TPoolAllocator { +public: + TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16); + + // + // Don't call the destructor just to free up the memory, call pop() + // + ~TPoolAllocator(); + + // + // Call push() to establish a new place to pop memory too. Does not + // have to be called to get things started. + // + void push(); + + // + // Call pop() to free all memory allocated since the last call to push(), + // or if no last call to push, frees all memory since first allocation. + // + void pop(); + + // + // Call popAll() to free all memory allocated. + // + void popAll(); + + // + // Call allocate() to actually acquire memory. Returns 0 if no memory + // available, otherwise a properly aligned pointer to 'numBytes' of memory. + // + void* allocate(size_t numBytes); + + // + // There is no deallocate. The point of this class is that + // deallocation can be skipped by the user of it, as the model + // of use is to simultaneously deallocate everything at once + // by calling pop(), and to not have to solve memory leak problems. + // + +protected: + friend struct tHeader; + + struct tHeader { + tHeader(tHeader* nextPage, size_t pageCount) : +#ifdef GUARD_BLOCKS + lastAllocation(0), +#endif + nextPage(nextPage), pageCount(pageCount) { } + + ~tHeader() { +#ifdef GUARD_BLOCKS + if (lastAllocation) + lastAllocation->checkAllocList(); +#endif + } + +#ifdef GUARD_BLOCKS + TAllocation* lastAllocation; +#endif + tHeader* nextPage; + size_t pageCount; + }; + + struct tAllocState { + size_t offset; + tHeader* page; + }; + typedef std::vector tAllocStack; + + // Track allocations if and only if we're using guard blocks +#ifndef GUARD_BLOCKS + void* initializeAllocation(tHeader*, unsigned char* memory, size_t) { +#else + void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) { + new(memory) TAllocation(numBytes, memory, block->lastAllocation); + block->lastAllocation = reinterpret_cast(memory); +#endif + + // This is optimized entirely away if GUARD_BLOCKS is not defined. + return TAllocation::offsetAllocation(memory); + } + + size_t pageSize; // granularity of allocation from the OS + size_t alignment; // all returned allocations will be aligned at + // this granularity, which will be a power of 2 + size_t alignmentMask; + size_t headerSkip; // amount of memory to skip to make room for the + // header (basically, size of header, rounded + // up to make it aligned + size_t currentPageOffset; // next offset in top of inUseList to allocate from + tHeader* freeList; // list of popped memory + tHeader* inUseList; // list of all memory currently being used + tAllocStack stack; // stack of where to allocate from, to partition pool + + int numCalls; // just an interesting statistic + size_t totalBytes; // just an interesting statistic +private: + TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator + TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor +}; + +// +// There could potentially be many pools with pops happening at +// different times. But a simple use is to have a global pop +// with everyone using the same global allocator. +// +extern TPoolAllocator& GetThreadPoolAllocator(); +void SetThreadPoolAllocator(TPoolAllocator* poolAllocator); + +// +// This STL compatible allocator is intended to be used as the allocator +// parameter to templatized STL containers, like vector and map. 
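A one-line illustration of the usage just described, assuming a thread pool allocator has already been installed as sketched above; the container and element type are arbitrary.

    std::vector<int, glslang::pool_allocator<int>> indices;  // elements live in the current thread's pool
    indices.push_back(42);                                    // no matching per-element deallocation is ever issued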
+// +// It will use the pools for allocation, and not +// do any deallocation, but will still do destruction. +// +template +class pool_allocator { +public: + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T *pointer; + typedef const T *const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + template + struct rebind { + typedef pool_allocator other; + }; + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { return &x; } + + pool_allocator() : allocator(GetThreadPoolAllocator()) { } + pool_allocator(TPoolAllocator& a) : allocator(a) { } + pool_allocator(const pool_allocator& p) : allocator(p.allocator) { } + + template + pool_allocator(const pool_allocator& p) : allocator(p.getAllocator()) { } + + pointer allocate(size_type n) { + return reinterpret_cast(getAllocator().allocate(n * sizeof(T))); } + pointer allocate(size_type n, const void*) { + return reinterpret_cast(getAllocator().allocate(n * sizeof(T))); } + + void deallocate(void*, size_type) { } + void deallocate(pointer, size_type) { } + + pointer _Charalloc(size_t n) { + return reinterpret_cast(getAllocator().allocate(n)); } + + void construct(pointer p, const T& val) { new ((void *)p) T(val); } + void destroy(pointer p) { p->T::~T(); } + + bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); } + bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); } + + size_type max_size() const { return static_cast(-1) / sizeof(T); } + size_type max_size(int size) const { return static_cast(-1) / size; } + + TPoolAllocator& getAllocator() const { return allocator; } + +protected: + pool_allocator& operator=(const pool_allocator&) { return *this; } + TPoolAllocator& allocator; +}; + +} // end namespace glslang + +#endif // _POOLALLOC_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/ResourceLimits.h b/src/third_party/glslang/glslang/Include/ResourceLimits.h new file mode 100644 index 0000000..b670cf1 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/ResourceLimits.h @@ -0,0 +1,150 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _RESOURCE_LIMITS_INCLUDED_ +#define _RESOURCE_LIMITS_INCLUDED_ + +struct TLimits { + bool nonInductiveForLoops; + bool whileLoops; + bool doWhileLoops; + bool generalUniformIndexing; + bool generalAttributeMatrixVectorIndexing; + bool generalVaryingIndexing; + bool generalSamplerIndexing; + bool generalVariableIndexing; + bool generalConstantMatrixVectorIndexing; +}; + +struct TBuiltInResource { + int maxLights; + int maxClipPlanes; + int maxTextureUnits; + int maxTextureCoords; + int maxVertexAttribs; + int maxVertexUniformComponents; + int maxVaryingFloats; + int maxVertexTextureImageUnits; + int maxCombinedTextureImageUnits; + int maxTextureImageUnits; + int maxFragmentUniformComponents; + int maxDrawBuffers; + int maxVertexUniformVectors; + int maxVaryingVectors; + int maxFragmentUniformVectors; + int maxVertexOutputVectors; + int maxFragmentInputVectors; + int minProgramTexelOffset; + int maxProgramTexelOffset; + int maxClipDistances; + int maxComputeWorkGroupCountX; + int maxComputeWorkGroupCountY; + int maxComputeWorkGroupCountZ; + int maxComputeWorkGroupSizeX; + int maxComputeWorkGroupSizeY; + int maxComputeWorkGroupSizeZ; + int maxComputeUniformComponents; + int maxComputeTextureImageUnits; + int maxComputeImageUniforms; + int maxComputeAtomicCounters; + int maxComputeAtomicCounterBuffers; + int maxVaryingComponents; + int maxVertexOutputComponents; + int maxGeometryInputComponents; + int maxGeometryOutputComponents; + int maxFragmentInputComponents; + int maxImageUnits; + int maxCombinedImageUnitsAndFragmentOutputs; + int maxCombinedShaderOutputResources; + int maxImageSamples; + int maxVertexImageUniforms; + int maxTessControlImageUniforms; + int maxTessEvaluationImageUniforms; + int maxGeometryImageUniforms; + int maxFragmentImageUniforms; + int maxCombinedImageUniforms; + int maxGeometryTextureImageUnits; + int maxGeometryOutputVertices; + int maxGeometryTotalOutputComponents; + int maxGeometryUniformComponents; + int maxGeometryVaryingComponents; + int maxTessControlInputComponents; + int maxTessControlOutputComponents; + int maxTessControlTextureImageUnits; + int maxTessControlUniformComponents; + int maxTessControlTotalOutputComponents; + int maxTessEvaluationInputComponents; + int maxTessEvaluationOutputComponents; + int maxTessEvaluationTextureImageUnits; + int maxTessEvaluationUniformComponents; + int maxTessPatchComponents; + int maxPatchVertices; + int maxTessGenLevel; + int maxViewports; + int maxVertexAtomicCounters; + int maxTessControlAtomicCounters; + int maxTessEvaluationAtomicCounters; + int maxGeometryAtomicCounters; + int maxFragmentAtomicCounters; + int maxCombinedAtomicCounters; + int maxAtomicCounterBindings; + int maxVertexAtomicCounterBuffers; + int maxTessControlAtomicCounterBuffers; + int maxTessEvaluationAtomicCounterBuffers; + int maxGeometryAtomicCounterBuffers; + int maxFragmentAtomicCounterBuffers; + int maxCombinedAtomicCounterBuffers; + int maxAtomicCounterBufferSize; + int 
maxTransformFeedbackBuffers; + int maxTransformFeedbackInterleavedComponents; + int maxCullDistances; + int maxCombinedClipAndCullDistances; + int maxSamples; + int maxMeshOutputVerticesNV; + int maxMeshOutputPrimitivesNV; + int maxMeshWorkGroupSizeX_NV; + int maxMeshWorkGroupSizeY_NV; + int maxMeshWorkGroupSizeZ_NV; + int maxTaskWorkGroupSizeX_NV; + int maxTaskWorkGroupSizeY_NV; + int maxTaskWorkGroupSizeZ_NV; + int maxMeshViewCountNV; + int maxDualSourceDrawBuffersEXT; + + TLimits limits; +}; + +#endif // _RESOURCE_LIMITS_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/ShHandle.h b/src/third_party/glslang/glslang/Include/ShHandle.h new file mode 100644 index 0000000..df07bd8 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/ShHandle.h @@ -0,0 +1,176 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _SHHANDLE_INCLUDED_ +#define _SHHANDLE_INCLUDED_ + +// +// Machine independent part of the compiler private objects +// sent as ShHandle to the driver. +// +// This should not be included by driver code. +// + +#define SH_EXPORTING +#include "../Public/ShaderLang.h" +#include "../MachineIndependent/Versions.h" +#include "InfoSink.h" + +class TCompiler; +class TLinker; +class TUniformMap; + +// +// The base class used to back handles returned to the driver. +// +class TShHandleBase { +public: + TShHandleBase() { pool = new glslang::TPoolAllocator; } + virtual ~TShHandleBase() { delete pool; } + virtual TCompiler* getAsCompiler() { return 0; } + virtual TLinker* getAsLinker() { return 0; } + virtual TUniformMap* getAsUniformMap() { return 0; } + virtual glslang::TPoolAllocator* getPool() const { return pool; } +private: + glslang::TPoolAllocator* pool; +}; + +// +// The base class for the machine dependent linker to derive from +// for managing where uniforms live. 
+// +class TUniformMap : public TShHandleBase { +public: + TUniformMap() { } + virtual ~TUniformMap() { } + virtual TUniformMap* getAsUniformMap() { return this; } + virtual int getLocation(const char* name) = 0; + virtual TInfoSink& getInfoSink() { return infoSink; } + TInfoSink infoSink; +}; + +class TIntermNode; + +// +// The base class for the machine dependent compiler to derive from +// for managing object code from the compile. +// +class TCompiler : public TShHandleBase { +public: + TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { } + virtual ~TCompiler() { } + EShLanguage getLanguage() { return language; } + virtual TInfoSink& getInfoSink() { return infoSink; } + + virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0; + + virtual TCompiler* getAsCompiler() { return this; } + virtual bool linkable() { return haveValidObjectCode; } + + TInfoSink& infoSink; +protected: + TCompiler& operator=(TCompiler&); + + EShLanguage language; + bool haveValidObjectCode; +}; + +// +// Link operations are based on a list of compile results... +// +typedef glslang::TVector TCompilerList; +typedef glslang::TVector THandleList; + +// +// The base class for the machine dependent linker to derive from +// to manage the resulting executable. +// + +class TLinker : public TShHandleBase { +public: + TLinker(EShExecutable e, TInfoSink& iSink) : + infoSink(iSink), + executable(e), + haveReturnableObjectCode(false), + appAttributeBindings(0), + fixedAttributeBindings(0), + excludedAttributes(0), + excludedCount(0), + uniformBindings(0) { } + virtual TLinker* getAsLinker() { return this; } + virtual ~TLinker() { } + virtual bool link(TCompilerList&, TUniformMap*) = 0; + virtual bool link(THandleList&) { return false; } + virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; } + virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; } + virtual void getAttributeBindings(ShBindingTable const **t) const = 0; + virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; } + virtual ShBindingTable* getUniformBindings() const { return uniformBindings; } + virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here + virtual TInfoSink& getInfoSink() { return infoSink; } + TInfoSink& infoSink; +protected: + TLinker& operator=(TLinker&); + EShExecutable executable; + bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver + + const ShBindingTable* appAttributeBindings; + const ShBindingTable* fixedAttributeBindings; + const int* excludedAttributes; + int excludedCount; + ShBindingTable* uniformBindings; // created by the linker +}; + +// +// This is the interface between the machine independent code +// and the machine dependent code. +// +// The machine dependent code should derive from the classes +// above. Then Construct*() and Delete*() will create and +// destroy the machine dependent objects, which contain the +// above machine independent information. 
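For orientation, a hedged sketch of the machine-dependent side these comments refer to. The actual implementation added by this patch lives in glslang/GenericCodeGen/CodeGen.cpp; the class name, members, and behaviour below only indicate the shape, and are not a copy of that file.

    class TGenericCompiler : public TCompiler {
    public:
        TGenericCompiler(EShLanguage l, int options) : TCompiler(l, infoSink), debugOptions(options) { }
        bool compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/) override
        {
            haveValidObjectCode = true;   // a real back end would translate the tree to object code here
            return haveValidObjectCode;
        }
        TInfoSink infoSink;               // owned here; the TCompiler base stores a reference to it
        int debugOptions;
    };

    TCompiler* ConstructCompiler(EShLanguage language, int debugOptions)
    {
        return new TGenericCompiler(language, debugOptions);
    }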
+// +TCompiler* ConstructCompiler(EShLanguage, int); + +TShHandleBase* ConstructLinker(EShExecutable, int); +TShHandleBase* ConstructBindings(); +void DeleteLinker(TShHandleBase*); +void DeleteBindingList(TShHandleBase* bindingList); + +TUniformMap* ConstructUniformMap(); +void DeleteCompiler(TCompiler*); + +void DeleteUniformMap(TUniformMap*); + +#endif // _SHHANDLE_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/Types.h b/src/third_party/glslang/glslang/Include/Types.h new file mode 100644 index 0000000..235ea3f --- /dev/null +++ b/src/third_party/glslang/glslang/Include/Types.h @@ -0,0 +1,2487 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2015-2016 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#ifndef _TYPES_INCLUDED +#define _TYPES_INCLUDED + +#include "../Include/Common.h" +#include "../Include/BaseTypes.h" +#include "../Public/ShaderLang.h" +#include "arrays.h" + +#include + +namespace glslang { + +const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded + +const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed +inline bool IsAnonymous(const TString& name) +{ + return name.compare(0, 5, AnonymousPrefix) == 0; +} + +// +// Details within a sampler type +// +enum TSamplerDim { + EsdNone, + Esd1D, + Esd2D, + Esd3D, + EsdCube, + EsdRect, + EsdBuffer, + EsdSubpass, // goes only with non-sampled image (image is true) + EsdNumDims +}; + +struct TSampler { // misnomer now; includes images, textures without sampler, and textures with sampler + TBasicType type : 8; // type returned by sampler + TSamplerDim dim : 8; + bool arrayed : 1; + bool shadow : 1; + bool ms : 1; + bool image : 1; // image, combined should be false + bool combined : 1; // true means texture is combined with a sampler, false means texture with no sampler + bool sampler : 1; // true means a pure sampler, other fields should be clear() + +#ifdef GLSLANG_WEB + bool is1D() const { return false; } + bool isBuffer() const { return false; } + bool isRect() const { return false; } + bool isSubpass() const { return false; } + bool isCombined() const { return true; } + bool isImage() const { return false; } + bool isImageClass() const { return false; } + bool isMultiSample() const { return false; } + bool isExternal() const { return false; } + void setExternal(bool e) { } + bool isYuv() const { return false; } +#else + unsigned int vectorSize : 3; // vector return type size. + // Some languages support structures as sample results. Storing the whole structure in the + // TSampler is too large, so there is an index to a separate table. + static const unsigned structReturnIndexBits = 4; // number of index bits to use. + static const unsigned structReturnSlots = (1< TTypeList; + +typedef TVector TIdentifierList; + +// +// Following are a series of helper enums for managing layouts and qualifiers, +// used for TPublicType, TType, others. +// + +enum TLayoutPacking { + ElpNone, + ElpShared, // default, but different than saying nothing + ElpStd140, + ElpStd430, + ElpPacked, + ElpScalar, + ElpCount // If expanding, see bitfield width below +}; + +enum TLayoutMatrix { + ElmNone, + ElmRowMajor, + ElmColumnMajor, // default, but different than saying nothing + ElmCount // If expanding, see bitfield width below +}; + +// Union of geometry shader and tessellation shader geometry types. +// They don't go into TType, but rather have current state per shader or +// active parser type (TPublicType). +enum TLayoutGeometry { + ElgNone, + ElgPoints, + ElgLines, + ElgLinesAdjacency, + ElgLineStrip, + ElgTriangles, + ElgTrianglesAdjacency, + ElgTriangleStrip, + ElgQuads, + ElgIsolines, +}; + +enum TVertexSpacing { + EvsNone, + EvsEqual, + EvsFractionalEven, + EvsFractionalOdd +}; + +enum TVertexOrder { + EvoNone, + EvoCw, + EvoCcw +}; + +// Note: order matters, as type of format is done by comparison. 
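A short sketch of what the preceding "order matters" note means in practice: a format's class can be tested by comparing against the guard enumerators of the enum that follows, instead of switching on every format. The helper names are illustrative, and the ES-only guard values fall inside these ranges (real checks account for them separately).

    bool isFloatImageFormat(TLayoutFormat f) { return f > ElfNone && f < ElfFloatGuard; }
    bool isIntImageFormat(TLayoutFormat f)   { return f > ElfFloatGuard && f < ElfIntGuard; }
    bool isUintImageFormat(TLayoutFormat f)  { return f > ElfIntGuard && f < ElfCount; }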
+enum TLayoutFormat { + ElfNone, + + // Float image + ElfRgba32f, + ElfRgba16f, + ElfR32f, + ElfRgba8, + ElfRgba8Snorm, + + ElfEsFloatGuard, // to help with comparisons + + ElfRg32f, + ElfRg16f, + ElfR11fG11fB10f, + ElfR16f, + ElfRgba16, + ElfRgb10A2, + ElfRg16, + ElfRg8, + ElfR16, + ElfR8, + ElfRgba16Snorm, + ElfRg16Snorm, + ElfRg8Snorm, + ElfR16Snorm, + ElfR8Snorm, + + ElfFloatGuard, // to help with comparisons + + // Int image + ElfRgba32i, + ElfRgba16i, + ElfRgba8i, + ElfR32i, + + ElfEsIntGuard, // to help with comparisons + + ElfRg32i, + ElfRg16i, + ElfRg8i, + ElfR16i, + ElfR8i, + + ElfIntGuard, // to help with comparisons + + // Uint image + ElfRgba32ui, + ElfRgba16ui, + ElfRgba8ui, + ElfR32ui, + + ElfEsUintGuard, // to help with comparisons + + ElfRg32ui, + ElfRg16ui, + ElfRgb10a2ui, + ElfRg8ui, + ElfR16ui, + ElfR8ui, + + ElfCount +}; + +enum TLayoutDepth { + EldNone, + EldAny, + EldGreater, + EldLess, + EldUnchanged, + + EldCount +}; + +enum TBlendEquationShift { + // No 'EBlendNone': + // These are used as bit-shift amounts. A mask of such shifts will have type 'int', + // and in that space, 0 means no bits set, or none. In this enum, 0 means (1 << 0), a bit is set. + EBlendMultiply, + EBlendScreen, + EBlendOverlay, + EBlendDarken, + EBlendLighten, + EBlendColordodge, + EBlendColorburn, + EBlendHardlight, + EBlendSoftlight, + EBlendDifference, + EBlendExclusion, + EBlendHslHue, + EBlendHslSaturation, + EBlendHslColor, + EBlendHslLuminosity, + EBlendAllEquations, + + EBlendCount +}; + +enum TInterlockOrdering { + EioNone, + EioPixelInterlockOrdered, + EioPixelInterlockUnordered, + EioSampleInterlockOrdered, + EioSampleInterlockUnordered, + EioShadingRateInterlockOrdered, + EioShadingRateInterlockUnordered, + + EioCount, +}; + +enum TShaderInterface +{ + // Includes both uniform blocks and buffer blocks + EsiUniform = 0, + EsiInput, + EsiOutput, + EsiNone, + + EsiCount +}; + + +class TQualifier { +public: + static const int layoutNotSet = -1; + + void clear() + { + precision = EpqNone; + invariant = false; + makeTemporary(); + declaredBuiltIn = EbvNone; +#ifndef GLSLANG_WEB + noContraction = false; +#endif + } + + // drop qualifiers that don't belong in a temporary variable + void makeTemporary() + { + semanticName = nullptr; + storage = EvqTemporary; + builtIn = EbvNone; + clearInterstage(); + clearMemory(); + specConstant = false; + nonUniform = false; + clearLayout(); + } + + void clearInterstage() + { + clearInterpolation(); +#ifndef GLSLANG_WEB + patch = false; + sample = false; +#endif + } + + void clearInterpolation() + { + centroid = false; + smooth = false; + flat = false; +#ifndef GLSLANG_WEB + nopersp = false; + explicitInterp = false; + pervertexNV = false; + perPrimitiveNV = false; + perViewNV = false; + perTaskNV = false; +#endif + } + + void clearMemory() + { +#ifndef GLSLANG_WEB + coherent = false; + devicecoherent = false; + queuefamilycoherent = false; + workgroupcoherent = false; + subgroupcoherent = false; + shadercallcoherent = false; + nonprivate = false; + volatil = false; + restrict = false; + readonly = false; + writeonly = false; +#endif + } + + const char* semanticName; + TStorageQualifier storage : 6; + TBuiltInVariable builtIn : 9; + TBuiltInVariable declaredBuiltIn : 9; + static_assert(EbvLast < 256, "need to increase size of TBuiltInVariable bitfields!"); + TPrecisionQualifier precision : 3; + bool invariant : 1; // require canonical treatment for cross-shader invariance + bool centroid : 1; + bool smooth : 1; + bool flat : 1; + // having a constant_id 
is not sufficient: expressions have no id, but are still specConstant + bool specConstant : 1; + bool nonUniform : 1; + bool explicitOffset : 1; + +#ifdef GLSLANG_WEB + bool isWriteOnly() const { return false; } + bool isReadOnly() const { return false; } + bool isRestrict() const { return false; } + bool isCoherent() const { return false; } + bool isVolatile() const { return false; } + bool isSample() const { return false; } + bool isMemory() const { return false; } + bool isMemoryQualifierImageAndSSBOOnly() const { return false; } + bool bufferReferenceNeedsVulkanMemoryModel() const { return false; } + bool isInterpolation() const { return flat || smooth; } + bool isExplicitInterpolation() const { return false; } + bool isAuxiliary() const { return centroid; } + bool isPatch() const { return false; } + bool isNoContraction() const { return false; } + void setNoContraction() { } + bool isPervertexNV() const { return false; } +#else + bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects + bool nopersp : 1; + bool explicitInterp : 1; + bool pervertexNV : 1; + bool perPrimitiveNV : 1; + bool perViewNV : 1; + bool perTaskNV : 1; + bool patch : 1; + bool sample : 1; + bool restrict : 1; + bool readonly : 1; + bool writeonly : 1; + bool coherent : 1; + bool volatil : 1; + bool devicecoherent : 1; + bool queuefamilycoherent : 1; + bool workgroupcoherent : 1; + bool subgroupcoherent : 1; + bool shadercallcoherent : 1; + bool nonprivate : 1; + bool isWriteOnly() const { return writeonly; } + bool isReadOnly() const { return readonly; } + bool isRestrict() const { return restrict; } + bool isCoherent() const { return coherent; } + bool isVolatile() const { return volatil; } + bool isSample() const { return sample; } + bool isMemory() const + { + return shadercallcoherent || subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate; + } + bool isMemoryQualifierImageAndSSBOOnly() const + { + return shadercallcoherent || subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly; + } + bool bufferReferenceNeedsVulkanMemoryModel() const + { + // include qualifiers that map to load/store availability/visibility/nonprivate memory access operands + return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || nonprivate; + } + bool isInterpolation() const + { + return flat || smooth || nopersp || explicitInterp; + } + bool isExplicitInterpolation() const + { + return explicitInterp; + } + bool isAuxiliary() const + { + return centroid || patch || sample || pervertexNV; + } + bool isPatch() const { return patch; } + bool isNoContraction() const { return noContraction; } + void setNoContraction() { noContraction = true; } + bool isPervertexNV() const { return pervertexNV; } +#endif + + bool isPipeInput() const + { + switch (storage) { + case EvqVaryingIn: + case EvqFragCoord: + case EvqPointCoord: + case EvqFace: + case EvqVertexId: + case EvqInstanceId: + return true; + default: + return false; + } + } + + bool isPipeOutput() const + { + switch (storage) { + case EvqPosition: + case EvqPointSize: + case EvqClipVertex: + case EvqVaryingOut: + case EvqFragColor: + case EvqFragDepth: + return true; + default: + return false; + } + } + + bool isParamInput() const + { + switch (storage) { + case EvqIn: + case EvqInOut: + case 
EvqConstReadOnly: + return true; + default: + return false; + } + } + + bool isParamOutput() const + { + switch (storage) { + case EvqOut: + case EvqInOut: + return true; + default: + return false; + } + } + + bool isUniformOrBuffer() const + { + switch (storage) { + case EvqUniform: + case EvqBuffer: + return true; + default: + return false; + } + } + + bool isIo() const + { + switch (storage) { + case EvqUniform: + case EvqBuffer: + case EvqVaryingIn: + case EvqFragCoord: + case EvqPointCoord: + case EvqFace: + case EvqVertexId: + case EvqInstanceId: + case EvqPosition: + case EvqPointSize: + case EvqClipVertex: + case EvqVaryingOut: + case EvqFragColor: + case EvqFragDepth: + return true; + default: + return false; + } + } + + // non-built-in symbols that might link between compilation units + bool isLinkable() const + { + switch (storage) { + case EvqGlobal: + case EvqVaryingIn: + case EvqVaryingOut: + case EvqUniform: + case EvqBuffer: + case EvqShared: + return true; + default: + return false; + } + } + +#ifdef GLSLANG_WEB + bool isPerView() const { return false; } + bool isTaskMemory() const { return false; } + bool isArrayedIo(EShLanguage language) const { return false; } +#else + bool isPerPrimitive() const { return perPrimitiveNV; } + bool isPerView() const { return perViewNV; } + bool isTaskMemory() const { return perTaskNV; } + + // True if this type of IO is supposed to be arrayed with extra level for per-vertex data + bool isArrayedIo(EShLanguage language) const + { + switch (language) { + case EShLangGeometry: + return isPipeInput(); + case EShLangTessControl: + return ! patch && (isPipeInput() || isPipeOutput()); + case EShLangTessEvaluation: + return ! patch && isPipeInput(); + case EShLangFragment: + return pervertexNV && isPipeInput(); + case EShLangMeshNV: + return ! 
perTaskNV && isPipeOutput(); + + default: + return false; + } + } +#endif + + // Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield + void clearLayout() // all layout + { + clearUniformLayout(); + +#ifndef GLSLANG_WEB + layoutPushConstant = false; + layoutBufferReference = false; + layoutPassthrough = false; + layoutViewportRelative = false; + // -2048 as the default value indicating layoutSecondaryViewportRelative is not set + layoutSecondaryViewportRelativeOffset = -2048; + layoutShaderRecord = false; + layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd; + layoutFormat = ElfNone; +#endif + + clearInterstageLayout(); + + layoutSpecConstantId = layoutSpecConstantIdEnd; + } + void clearInterstageLayout() + { + layoutLocation = layoutLocationEnd; + layoutComponent = layoutComponentEnd; +#ifndef GLSLANG_WEB + layoutIndex = layoutIndexEnd; + clearStreamLayout(); + clearXfbLayout(); +#endif + } + +#ifndef GLSLANG_WEB + void clearStreamLayout() + { + layoutStream = layoutStreamEnd; + } + void clearXfbLayout() + { + layoutXfbBuffer = layoutXfbBufferEnd; + layoutXfbStride = layoutXfbStrideEnd; + layoutXfbOffset = layoutXfbOffsetEnd; + } +#endif + + bool hasNonXfbLayout() const + { + return hasUniformLayout() || + hasAnyLocation() || + hasStream() || + hasFormat() || + isShaderRecord() || + isPushConstant() || + hasBufferReference(); + } + bool hasLayout() const + { + return hasNonXfbLayout() || + hasXfb(); + } + TLayoutMatrix layoutMatrix : 3; + TLayoutPacking layoutPacking : 4; + int layoutOffset; + int layoutAlign; + + unsigned int layoutLocation : 12; + static const unsigned int layoutLocationEnd = 0xFFF; + + unsigned int layoutComponent : 3; + static const unsigned int layoutComponentEnd = 4; + + unsigned int layoutSet : 7; + static const unsigned int layoutSetEnd = 0x3F; + + unsigned int layoutBinding : 16; + static const unsigned int layoutBindingEnd = 0xFFFF; + + unsigned int layoutIndex : 8; + static const unsigned int layoutIndexEnd = 0xFF; + + unsigned int layoutStream : 8; + static const unsigned int layoutStreamEnd = 0xFF; + + unsigned int layoutXfbBuffer : 4; + static const unsigned int layoutXfbBufferEnd = 0xF; + + unsigned int layoutXfbStride : 14; + static const unsigned int layoutXfbStrideEnd = 0x3FFF; + + unsigned int layoutXfbOffset : 13; + static const unsigned int layoutXfbOffsetEnd = 0x1FFF; + + unsigned int layoutAttachment : 8; // for input_attachment_index + static const unsigned int layoutAttachmentEnd = 0XFF; + + unsigned int layoutSpecConstantId : 11; + static const unsigned int layoutSpecConstantIdEnd = 0x7FF; + +#ifndef GLSLANG_WEB + // stored as log2 of the actual alignment value + unsigned int layoutBufferReferenceAlign : 6; + static const unsigned int layoutBufferReferenceAlignEnd = 0x3F; + + TLayoutFormat layoutFormat : 8; + + bool layoutPushConstant; + bool layoutBufferReference; + bool layoutPassthrough; + bool layoutViewportRelative; + int layoutSecondaryViewportRelativeOffset; + bool layoutShaderRecord; +#endif + + bool hasUniformLayout() const + { + return hasMatrix() || + hasPacking() || + hasOffset() || + hasBinding() || + hasSet() || + hasAlign(); + } + void clearUniformLayout() // only uniform specific + { + layoutMatrix = ElmNone; + layoutPacking = ElpNone; + layoutOffset = layoutNotSet; + layoutAlign = layoutNotSet; + + layoutSet = layoutSetEnd; + layoutBinding = layoutBindingEnd; +#ifndef GLSLANG_WEB + layoutAttachment = layoutAttachmentEnd; +#endif + } + + bool hasMatrix() const + { + return 
layoutMatrix != ElmNone; + } + bool hasPacking() const + { + return layoutPacking != ElpNone; + } + bool hasAlign() const + { + return layoutAlign != layoutNotSet; + } + bool hasAnyLocation() const + { + return hasLocation() || + hasComponent() || + hasIndex(); + } + bool hasLocation() const + { + return layoutLocation != layoutLocationEnd; + } + bool hasSet() const + { + return layoutSet != layoutSetEnd; + } + bool hasBinding() const + { + return layoutBinding != layoutBindingEnd; + } +#ifdef GLSLANG_WEB + bool hasOffset() const { return false; } + bool isNonPerspective() const { return false; } + bool hasIndex() const { return false; } + unsigned getIndex() const { return 0; } + bool hasComponent() const { return false; } + bool hasStream() const { return false; } + bool hasFormat() const { return false; } + bool hasXfb() const { return false; } + bool hasXfbBuffer() const { return false; } + bool hasXfbStride() const { return false; } + bool hasXfbOffset() const { return false; } + bool hasAttachment() const { return false; } + TLayoutFormat getFormat() const { return ElfNone; } + bool isPushConstant() const { return false; } + bool isShaderRecord() const { return false; } + bool hasBufferReference() const { return false; } + bool hasBufferReferenceAlign() const { return false; } + bool isNonUniform() const { return false; } +#else + bool hasOffset() const + { + return layoutOffset != layoutNotSet; + } + bool isNonPerspective() const { return nopersp; } + bool hasIndex() const + { + return layoutIndex != layoutIndexEnd; + } + unsigned getIndex() const { return layoutIndex; } + bool hasComponent() const + { + return layoutComponent != layoutComponentEnd; + } + bool hasStream() const + { + return layoutStream != layoutStreamEnd; + } + bool hasFormat() const + { + return layoutFormat != ElfNone; + } + bool hasXfb() const + { + return hasXfbBuffer() || + hasXfbStride() || + hasXfbOffset(); + } + bool hasXfbBuffer() const + { + return layoutXfbBuffer != layoutXfbBufferEnd; + } + bool hasXfbStride() const + { + return layoutXfbStride != layoutXfbStrideEnd; + } + bool hasXfbOffset() const + { + return layoutXfbOffset != layoutXfbOffsetEnd; + } + bool hasAttachment() const + { + return layoutAttachment != layoutAttachmentEnd; + } + TLayoutFormat getFormat() const { return layoutFormat; } + bool isPushConstant() const { return layoutPushConstant; } + bool isShaderRecord() const { return layoutShaderRecord; } + bool hasBufferReference() const { return layoutBufferReference; } + bool hasBufferReferenceAlign() const + { + return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd; + } + bool isNonUniform() const + { + return nonUniform; + } +#endif + bool hasSpecConstantId() const + { + // Not the same thing as being a specialization constant, this + // is just whether or not it was declared with an ID. + return layoutSpecConstantId != layoutSpecConstantIdEnd; + } + bool isSpecConstant() const + { + // True if type is a specialization constant, whether or not it + // had a specialization-constant ID, and false if it is not a + // true front-end constant. + return specConstant; + } + bool isFrontEndConstant() const + { + // True if the front-end knows the final constant value. + // This allows front-end constant folding. + return storage == EvqConst && ! specConstant; + } + bool isConstant() const + { + // True if is either kind of constant; specialization or regular. 
+ return isFrontEndConstant() || isSpecConstant(); + } + void makeSpecConstant() + { + storage = EvqConst; + specConstant = true; + } + static const char* getLayoutPackingString(TLayoutPacking packing) + { + switch (packing) { + case ElpStd140: return "std140"; +#ifndef GLSLANG_WEB + case ElpPacked: return "packed"; + case ElpShared: return "shared"; + case ElpStd430: return "std430"; + case ElpScalar: return "scalar"; +#endif + default: return "none"; + } + } + static const char* getLayoutMatrixString(TLayoutMatrix m) + { + switch (m) { + case ElmColumnMajor: return "column_major"; + case ElmRowMajor: return "row_major"; + default: return "none"; + } + } +#ifdef GLSLANG_WEB + static const char* getLayoutFormatString(TLayoutFormat f) { return "none"; } +#else + static const char* getLayoutFormatString(TLayoutFormat f) + { + switch (f) { + case ElfRgba32f: return "rgba32f"; + case ElfRgba16f: return "rgba16f"; + case ElfRg32f: return "rg32f"; + case ElfRg16f: return "rg16f"; + case ElfR11fG11fB10f: return "r11f_g11f_b10f"; + case ElfR32f: return "r32f"; + case ElfR16f: return "r16f"; + case ElfRgba16: return "rgba16"; + case ElfRgb10A2: return "rgb10_a2"; + case ElfRgba8: return "rgba8"; + case ElfRg16: return "rg16"; + case ElfRg8: return "rg8"; + case ElfR16: return "r16"; + case ElfR8: return "r8"; + case ElfRgba16Snorm: return "rgba16_snorm"; + case ElfRgba8Snorm: return "rgba8_snorm"; + case ElfRg16Snorm: return "rg16_snorm"; + case ElfRg8Snorm: return "rg8_snorm"; + case ElfR16Snorm: return "r16_snorm"; + case ElfR8Snorm: return "r8_snorm"; + + case ElfRgba32i: return "rgba32i"; + case ElfRgba16i: return "rgba16i"; + case ElfRgba8i: return "rgba8i"; + case ElfRg32i: return "rg32i"; + case ElfRg16i: return "rg16i"; + case ElfRg8i: return "rg8i"; + case ElfR32i: return "r32i"; + case ElfR16i: return "r16i"; + case ElfR8i: return "r8i"; + + case ElfRgba32ui: return "rgba32ui"; + case ElfRgba16ui: return "rgba16ui"; + case ElfRgba8ui: return "rgba8ui"; + case ElfRg32ui: return "rg32ui"; + case ElfRg16ui: return "rg16ui"; + case ElfRgb10a2ui: return "rgb10_a2ui"; + case ElfRg8ui: return "rg8ui"; + case ElfR32ui: return "r32ui"; + case ElfR16ui: return "r16ui"; + case ElfR8ui: return "r8ui"; + default: return "none"; + } + } + static const char* getLayoutDepthString(TLayoutDepth d) + { + switch (d) { + case EldAny: return "depth_any"; + case EldGreater: return "depth_greater"; + case EldLess: return "depth_less"; + case EldUnchanged: return "depth_unchanged"; + default: return "none"; + } + } + static const char* getBlendEquationString(TBlendEquationShift e) + { + switch (e) { + case EBlendMultiply: return "blend_support_multiply"; + case EBlendScreen: return "blend_support_screen"; + case EBlendOverlay: return "blend_support_overlay"; + case EBlendDarken: return "blend_support_darken"; + case EBlendLighten: return "blend_support_lighten"; + case EBlendColordodge: return "blend_support_colordodge"; + case EBlendColorburn: return "blend_support_colorburn"; + case EBlendHardlight: return "blend_support_hardlight"; + case EBlendSoftlight: return "blend_support_softlight"; + case EBlendDifference: return "blend_support_difference"; + case EBlendExclusion: return "blend_support_exclusion"; + case EBlendHslHue: return "blend_support_hsl_hue"; + case EBlendHslSaturation: return "blend_support_hsl_saturation"; + case EBlendHslColor: return "blend_support_hsl_color"; + case EBlendHslLuminosity: return "blend_support_hsl_luminosity"; + case EBlendAllEquations: return "blend_support_all_equations"; + 
default: return "unknown"; + } + } + static const char* getGeometryString(TLayoutGeometry geometry) + { + switch (geometry) { + case ElgPoints: return "points"; + case ElgLines: return "lines"; + case ElgLinesAdjacency: return "lines_adjacency"; + case ElgLineStrip: return "line_strip"; + case ElgTriangles: return "triangles"; + case ElgTrianglesAdjacency: return "triangles_adjacency"; + case ElgTriangleStrip: return "triangle_strip"; + case ElgQuads: return "quads"; + case ElgIsolines: return "isolines"; + default: return "none"; + } + } + static const char* getVertexSpacingString(TVertexSpacing spacing) + { + switch (spacing) { + case EvsEqual: return "equal_spacing"; + case EvsFractionalEven: return "fractional_even_spacing"; + case EvsFractionalOdd: return "fractional_odd_spacing"; + default: return "none"; + } + } + static const char* getVertexOrderString(TVertexOrder order) + { + switch (order) { + case EvoCw: return "cw"; + case EvoCcw: return "ccw"; + default: return "none"; + } + } + static int mapGeometryToSize(TLayoutGeometry geometry) + { + switch (geometry) { + case ElgPoints: return 1; + case ElgLines: return 2; + case ElgLinesAdjacency: return 4; + case ElgTriangles: return 3; + case ElgTrianglesAdjacency: return 6; + default: return 0; + } + } + static const char* getInterlockOrderingString(TInterlockOrdering order) + { + switch (order) { + case EioPixelInterlockOrdered: return "pixel_interlock_ordered"; + case EioPixelInterlockUnordered: return "pixel_interlock_unordered"; + case EioSampleInterlockOrdered: return "sample_interlock_ordered"; + case EioSampleInterlockUnordered: return "sample_interlock_unordered"; + case EioShadingRateInterlockOrdered: return "shading_rate_interlock_ordered"; + case EioShadingRateInterlockUnordered: return "shading_rate_interlock_unordered"; + default: return "none"; + } + } +#endif +}; + +// Qualifiers that don't need to be keep per object. They have shader scope, not object scope. +// So, they will not be part of TType, TQualifier, etc. 
+struct TShaderQualifiers { + TLayoutGeometry geometry; // geometry/tessellation shader in/out primitives + bool pixelCenterInteger; // fragment shader + bool originUpperLeft; // fragment shader + int invocations; + int vertices; // for tessellation "vertices", geometry & mesh "max_vertices" + TVertexSpacing spacing; + TVertexOrder order; + bool pointMode; + int localSize[3]; // compute shader + bool localSizeNotDefault[3]; // compute shader + int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize +#ifndef GLSLANG_WEB + bool earlyFragmentTests; // fragment input + bool postDepthCoverage; // fragment input + TLayoutDepth layoutDepth; + bool blendEquation; // true if any blend equation was specified + int numViews; // multiview extenstions + TInterlockOrdering interlockOrdering; + bool layoutOverrideCoverage; // true if layout override_coverage set + bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set + bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set + int primitives; // mesh shader "max_primitives"DerivativeGroupLinear; // true if layout derivative_group_linearNV set + bool layoutPrimitiveCulling; // true if layout primitive_culling set + TLayoutDepth getDepth() const { return layoutDepth; } +#else + TLayoutDepth getDepth() const { return EldNone; } +#endif + + void init() + { + geometry = ElgNone; + originUpperLeft = false; + pixelCenterInteger = false; + invocations = TQualifier::layoutNotSet; + vertices = TQualifier::layoutNotSet; + spacing = EvsNone; + order = EvoNone; + pointMode = false; + localSize[0] = 1; + localSize[1] = 1; + localSize[2] = 1; + localSizeNotDefault[0] = false; + localSizeNotDefault[1] = false; + localSizeNotDefault[2] = false; + localSizeSpecId[0] = TQualifier::layoutNotSet; + localSizeSpecId[1] = TQualifier::layoutNotSet; + localSizeSpecId[2] = TQualifier::layoutNotSet; +#ifndef GLSLANG_WEB + earlyFragmentTests = false; + postDepthCoverage = false; + layoutDepth = EldNone; + blendEquation = false; + numViews = TQualifier::layoutNotSet; + layoutOverrideCoverage = false; + layoutDerivativeGroupQuads = false; + layoutDerivativeGroupLinear = false; + layoutPrimitiveCulling = false; + primitives = TQualifier::layoutNotSet; + interlockOrdering = EioNone; +#endif + } + +#ifdef GLSLANG_WEB + bool hasBlendEquation() const { return false; } +#else + bool hasBlendEquation() const { return blendEquation; } +#endif + + // Merge in characteristics from the 'src' qualifier. They can override when + // set, but never erase when not set. 
+ void merge(const TShaderQualifiers& src) + { + if (src.geometry != ElgNone) + geometry = src.geometry; + if (src.pixelCenterInteger) + pixelCenterInteger = src.pixelCenterInteger; + if (src.originUpperLeft) + originUpperLeft = src.originUpperLeft; + if (src.invocations != TQualifier::layoutNotSet) + invocations = src.invocations; + if (src.vertices != TQualifier::layoutNotSet) + vertices = src.vertices; + if (src.spacing != EvsNone) + spacing = src.spacing; + if (src.order != EvoNone) + order = src.order; + if (src.pointMode) + pointMode = true; + for (int i = 0; i < 3; ++i) { + if (src.localSize[i] > 1) + localSize[i] = src.localSize[i]; + } + for (int i = 0; i < 3; ++i) { + localSizeNotDefault[i] = src.localSizeNotDefault[i] || localSizeNotDefault[i]; + } + for (int i = 0; i < 3; ++i) { + if (src.localSizeSpecId[i] != TQualifier::layoutNotSet) + localSizeSpecId[i] = src.localSizeSpecId[i]; + } +#ifndef GLSLANG_WEB + if (src.earlyFragmentTests) + earlyFragmentTests = true; + if (src.postDepthCoverage) + postDepthCoverage = true; + if (src.layoutDepth) + layoutDepth = src.layoutDepth; + if (src.blendEquation) + blendEquation = src.blendEquation; + if (src.numViews != TQualifier::layoutNotSet) + numViews = src.numViews; + if (src.layoutOverrideCoverage) + layoutOverrideCoverage = src.layoutOverrideCoverage; + if (src.layoutDerivativeGroupQuads) + layoutDerivativeGroupQuads = src.layoutDerivativeGroupQuads; + if (src.layoutDerivativeGroupLinear) + layoutDerivativeGroupLinear = src.layoutDerivativeGroupLinear; + if (src.primitives != TQualifier::layoutNotSet) + primitives = src.primitives; + if (src.interlockOrdering != EioNone) + interlockOrdering = src.interlockOrdering; + if (src.layoutPrimitiveCulling) + layoutPrimitiveCulling = src.layoutPrimitiveCulling; +#endif + } +}; + +// +// TPublicType is just temporarily used while parsing and not quite the same +// information kept per node in TType. Due to the bison stack, it can't have +// types that it thinks have non-trivial constructors. It should +// just be used while recognizing the grammar, not anything else. +// Once enough is known about the situation, the proper information +// moved into a TType, or the parse context, etc. 
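+// For example: while parsing 'highp vec3 position;', the grammar fills in a
+// TPublicType with basicType = EbtFloat, vectorSize = 3 and
+// qualifier.precision = EpqHigh; only once the declaration is understood is a
+// real TType constructed from it via TType(const TPublicType&).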
+// +class TPublicType { +public: + TBasicType basicType; + TSampler sampler; + TQualifier qualifier; + TShaderQualifiers shaderQualifiers; + int vectorSize : 4; + int matrixCols : 4; + int matrixRows : 4; + bool coopmat : 1; + TArraySizes* arraySizes; + const TType* userDef; + TSourceLoc loc; + TArraySizes* typeParameters; + +#ifdef GLSLANG_WEB + bool isCoopmat() const { return false; } +#else + bool isCoopmat() const { return coopmat; } +#endif + + void initType(const TSourceLoc& l) + { + basicType = EbtVoid; + vectorSize = 1; + matrixRows = 0; + matrixCols = 0; + arraySizes = nullptr; + userDef = nullptr; + loc = l; + typeParameters = nullptr; + coopmat = false; + } + + void initQualifiers(bool global = false) + { + qualifier.clear(); + if (global) + qualifier.storage = EvqGlobal; + } + + void init(const TSourceLoc& l, bool global = false) + { + initType(l); + sampler.clear(); + initQualifiers(global); + shaderQualifiers.init(); + } + + void setVector(int s) + { + matrixRows = 0; + matrixCols = 0; + vectorSize = s; + } + + void setMatrix(int c, int r) + { + matrixRows = r; + matrixCols = c; + vectorSize = 0; + } + + bool isScalar() const + { + return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr; + } + + // "Image" is a superset of "Subpass" + bool isImage() const { return basicType == EbtSampler && sampler.isImage(); } + bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); } +}; + +// +// Base class for things that have a type. +// +class TType { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + // for "empty" type (no args) or simple scalar/vector/matrix + explicit TType(TBasicType t = EbtVoid, TStorageQualifier q = EvqTemporary, int vs = 1, int mc = 0, int mr = 0, + bool isVector = false) : + basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false), + arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + qualifier.clear(); + qualifier.storage = q; + assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices + } + // for explicit precision qualifier + TType(TBasicType t, TStorageQualifier q, TPrecisionQualifier p, int vs = 1, int mc = 0, int mr = 0, + bool isVector = false) : + basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false), + arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + qualifier.clear(); + qualifier.storage = q; + qualifier.precision = p; + assert(p >= EpqNone && p <= EpqHigh); + assert(!(isMatrix() && vectorSize != 0)); // prevent vectorSize != 0 on matrices + } + // for turning a TPublicType into a TType, using a shallow copy + explicit TType(const TPublicType& p) : + basicType(p.basicType), + vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false), coopmat(p.coopmat), + arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(p.typeParameters) + { + if (basicType == EbtSampler) + sampler = p.sampler; + else + sampler.clear(); + qualifier = p.qualifier; + if (p.userDef) { + if (p.userDef->basicType == EbtReference) { + basicType = EbtReference; + referentType = p.userDef->referentType; + } else { + structure = p.userDef->getWritableStruct(); // public type is short-lived; there are no sharing issues + } + typeName = 
NewPoolTString(p.userDef->getTypeName().c_str()); + } + if (p.isCoopmat() && p.typeParameters && p.typeParameters->getNumDims() > 0) { + int numBits = p.typeParameters->getDimSize(0); + if (p.basicType == EbtFloat && numBits == 16) { + basicType = EbtFloat16; + qualifier.precision = EpqNone; + } else if (p.basicType == EbtUint && numBits == 8) { + basicType = EbtUint8; + qualifier.precision = EpqNone; + } else if (p.basicType == EbtInt && numBits == 8) { + basicType = EbtInt8; + qualifier.precision = EpqNone; + } + } + } + // for construction of sampler types + TType(const TSampler& sampler, TStorageQualifier q = EvqUniform, TArraySizes* as = nullptr) : + basicType(EbtSampler), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false), + arraySizes(as), structure(nullptr), fieldName(nullptr), typeName(nullptr), + sampler(sampler), typeParameters(nullptr) + { + qualifier.clear(); + qualifier.storage = q; + } + // to efficiently make a dereferenced type + // without ever duplicating the outer structure that will be thrown away + // and using only shallow copy + TType(const TType& type, int derefIndex, bool rowMajor = false) + { + if (type.isArray()) { + shallowCopy(type); + if (type.getArraySizes()->getNumDims() == 1) { + arraySizes = nullptr; + } else { + // want our own copy of the array, so we can edit it + arraySizes = new TArraySizes; + arraySizes->copyDereferenced(*type.arraySizes); + } + } else if (type.basicType == EbtStruct || type.basicType == EbtBlock) { + // do a structure dereference + const TTypeList& memberList = *type.getStruct(); + shallowCopy(*memberList[derefIndex].type); + return; + } else { + // do a vector/matrix dereference + shallowCopy(type); + if (matrixCols > 0) { + // dereference from matrix to vector + if (rowMajor) + vectorSize = matrixCols; + else + vectorSize = matrixRows; + matrixCols = 0; + matrixRows = 0; + if (vectorSize == 1) + vector1 = true; + } else if (isVector()) { + // dereference from vector to scalar + vectorSize = 1; + vector1 = false; + } else if (isCoopMat()) { + coopmat = false; + typeParameters = nullptr; + } + } + } + // for making structures, ... + TType(TTypeList* userDef, const TString& n) : + basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false), + arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + qualifier.clear(); + typeName = NewPoolTString(n.c_str()); + } + // For interface blocks + TType(TTypeList* userDef, const TString& n, const TQualifier& q) : + basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false), + qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr) + { + sampler.clear(); + typeName = NewPoolTString(n.c_str()); + } + // for block reference (first parameter must be EbtReference) + explicit TType(TBasicType t, const TType &p, const TString& n) : + basicType(t), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), + arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr) + { + assert(t == EbtReference); + typeName = NewPoolTString(n.c_str()); + qualifier.clear(); + qualifier.storage = p.qualifier.storage; + referentType = p.clone(); + } + virtual ~TType() {} + + // Not for use across pool pops; it will cause multiple instances of TType to point to the same information. 
+ // This only works if that information (like a structure's list of types) does not change and + // the instances are sharing the same pool. + void shallowCopy(const TType& copyOf) + { + basicType = copyOf.basicType; + sampler = copyOf.sampler; + qualifier = copyOf.qualifier; + vectorSize = copyOf.vectorSize; + matrixCols = copyOf.matrixCols; + matrixRows = copyOf.matrixRows; + vector1 = copyOf.vector1; + arraySizes = copyOf.arraySizes; // copying the pointer only, not the contents + fieldName = copyOf.fieldName; + typeName = copyOf.typeName; + if (isStruct()) { + structure = copyOf.structure; + } else { + referentType = copyOf.referentType; + } + typeParameters = copyOf.typeParameters; + coopmat = copyOf.isCoopMat(); + } + + // Make complete copy of the whole type graph rooted at 'copyOf'. + void deepCopy(const TType& copyOf) + { + TMap copied; // to enable copying a type graph as a graph, not a tree + deepCopy(copyOf, copied); + } + + // Recursively make temporary + void makeTemporary() + { + getQualifier().makeTemporary(); + + if (isStruct()) + for (unsigned int i = 0; i < structure->size(); ++i) + (*structure)[i].type->makeTemporary(); + } + + TType* clone() const + { + TType *newType = new TType(); + newType->deepCopy(*this); + + return newType; + } + + void makeVector() { vector1 = true; } + + virtual void hideMember() { basicType = EbtVoid; vectorSize = 1; } + virtual bool hiddenMember() const { return basicType == EbtVoid; } + + virtual void setFieldName(const TString& n) { fieldName = NewPoolTString(n.c_str()); } + virtual const TString& getTypeName() const + { + assert(typeName); + return *typeName; + } + + virtual const TString& getFieldName() const + { + assert(fieldName); + return *fieldName; + } + TShaderInterface getShaderInterface() const + { + if (basicType != EbtBlock) + return EsiNone; + + switch (qualifier.storage) { + default: + return EsiNone; + case EvqVaryingIn: + return EsiInput; + case EvqVaryingOut: + return EsiOutput; + case EvqUniform: + case EvqBuffer: + return EsiUniform; + } + } + + virtual TBasicType getBasicType() const { return basicType; } + virtual const TSampler& getSampler() const { return sampler; } + virtual TSampler& getSampler() { return sampler; } + + virtual TQualifier& getQualifier() { return qualifier; } + virtual const TQualifier& getQualifier() const { return qualifier; } + + virtual int getVectorSize() const { return vectorSize; } // returns 1 for either scalar or vector of size 1, valid for both + virtual int getMatrixCols() const { return matrixCols; } + virtual int getMatrixRows() const { return matrixRows; } + virtual int getOuterArraySize() const { return arraySizes->getOuterSize(); } + virtual TIntermTyped* getOuterArrayNode() const { return arraySizes->getOuterNode(); } + virtual int getCumulativeArraySize() const { return arraySizes->getCumulativeSize(); } +#ifdef GLSLANG_WEB + bool isArrayOfArrays() const { return false; } +#else + bool isArrayOfArrays() const { return arraySizes != nullptr && arraySizes->getNumDims() > 1; } +#endif + virtual int getImplicitArraySize() const { return arraySizes->getImplicitSize(); } + virtual const TArraySizes* getArraySizes() const { return arraySizes; } + virtual TArraySizes* getArraySizes() { return arraySizes; } + virtual TType* getReferentType() const { return referentType; } + virtual const TArraySizes* getTypeParameters() const { return typeParameters; } + virtual TArraySizes* getTypeParameters() { return typeParameters; } + + virtual bool isScalar() const { return ! isVector() && ! 
isMatrix() && ! isStruct() && ! isArray(); } + virtual bool isScalarOrVec1() const { return isScalar() || vector1; } + virtual bool isVector() const { return vectorSize > 1 || vector1; } + virtual bool isMatrix() const { return matrixCols ? true : false; } + virtual bool isArray() const { return arraySizes != nullptr; } + virtual bool isSizedArray() const { return isArray() && arraySizes->isSized(); } + virtual bool isUnsizedArray() const { return isArray() && !arraySizes->isSized(); } + virtual bool isArrayVariablyIndexed() const { assert(isArray()); return arraySizes->isVariablyIndexed(); } + virtual void setArrayVariablyIndexed() { assert(isArray()); arraySizes->setVariablyIndexed(); } + virtual void updateImplicitArraySize(int size) { assert(isArray()); arraySizes->updateImplicitSize(size); } + virtual bool isStruct() const { return basicType == EbtStruct || basicType == EbtBlock; } + virtual bool isFloatingDomain() const { return basicType == EbtFloat || basicType == EbtDouble || basicType == EbtFloat16; } + virtual bool isIntegerDomain() const + { + switch (basicType) { + case EbtInt8: + case EbtUint8: + case EbtInt16: + case EbtUint16: + case EbtInt: + case EbtUint: + case EbtInt64: + case EbtUint64: + case EbtAtomicUint: + return true; + default: + break; + } + return false; + } + virtual bool isOpaque() const { return basicType == EbtSampler +#ifndef GLSLANG_WEB + || basicType == EbtAtomicUint || basicType == EbtAccStruct || basicType == EbtRayQuery +#endif + ; } + virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; } + + // "Image" is a superset of "Subpass" + virtual bool isImage() const { return basicType == EbtSampler && getSampler().isImage(); } + virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); } + virtual bool isTexture() const { return basicType == EbtSampler && getSampler().isTexture(); } + // Check the block-name convention of creating a block without populating it's members: + virtual bool isUnusableName() const { return isStruct() && structure == nullptr; } + virtual bool isParameterized() const { return typeParameters != nullptr; } +#ifdef GLSLANG_WEB + bool isAtomic() const { return false; } + bool isCoopMat() const { return false; } + bool isReference() const { return false; } +#else + bool isAtomic() const { return basicType == EbtAtomicUint; } + bool isCoopMat() const { return coopmat; } + bool isReference() const { return getBasicType() == EbtReference; } +#endif + + // return true if this type contains any subtype which satisfies the given predicate. 
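+    // For example, these two queries are equivalent:
+    //     type.contains([](const TType* t) { return t->isArray(); });
+    //     type.containsArray();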
+ template + bool contains(P predicate) const + { + if (predicate(this)) + return true; + + const auto hasa = [predicate](const TTypeLoc& tl) { return tl.type->contains(predicate); }; + + return isStruct() && std::any_of(structure->begin(), structure->end(), hasa); + } + + // Recursively checks if the type contains the given basic type + virtual bool containsBasicType(TBasicType checkType) const + { + return contains([checkType](const TType* t) { return t->basicType == checkType; } ); + } + + // Recursively check the structure for any arrays, needed for some error checks + virtual bool containsArray() const + { + return contains([](const TType* t) { return t->isArray(); } ); + } + + // Check the structure for any structures, needed for some error checks + virtual bool containsStructure() const + { + return contains([this](const TType* t) { return t != this && t->isStruct(); } ); + } + + // Recursively check the structure for any unsized arrays, needed for triggering a copyUp(). + virtual bool containsUnsizedArray() const + { + return contains([](const TType* t) { return t->isUnsizedArray(); } ); + } + + virtual bool containsOpaque() const + { + return contains([](const TType* t) { return t->isOpaque(); } ); + } + + // Recursively checks if the type contains a built-in variable + virtual bool containsBuiltIn() const + { + return contains([](const TType* t) { return t->isBuiltIn(); } ); + } + + virtual bool containsNonOpaque() const + { + const auto nonOpaque = [](const TType* t) { + switch (t->basicType) { + case EbtVoid: + case EbtFloat: + case EbtDouble: + case EbtFloat16: + case EbtInt8: + case EbtUint8: + case EbtInt16: + case EbtUint16: + case EbtInt: + case EbtUint: + case EbtInt64: + case EbtUint64: + case EbtBool: + case EbtReference: + return true; + default: + return false; + } + }; + + return contains(nonOpaque); + } + + virtual bool containsSpecializationSize() const + { + return contains([](const TType* t) { return t->isArray() && t->arraySizes->isOuterSpecialization(); } ); + } + +#ifdef GLSLANG_WEB + bool containsDouble() const { return false; } + bool contains16BitFloat() const { return false; } + bool contains64BitInt() const { return false; } + bool contains16BitInt() const { return false; } + bool contains8BitInt() const { return false; } + bool containsCoopMat() const { return false; } + bool containsReference() const { return false; } +#else + bool containsDouble() const + { + return containsBasicType(EbtDouble); + } + bool contains16BitFloat() const + { + return containsBasicType(EbtFloat16); + } + bool contains64BitInt() const + { + return containsBasicType(EbtInt64) || containsBasicType(EbtUint64); + } + bool contains16BitInt() const + { + return containsBasicType(EbtInt16) || containsBasicType(EbtUint16); + } + bool contains8BitInt() const + { + return containsBasicType(EbtInt8) || containsBasicType(EbtUint8); + } + bool containsCoopMat() const + { + return contains([](const TType* t) { return t->coopmat; } ); + } + bool containsReference() const + { + return containsBasicType(EbtReference); + } +#endif + + // Array editing methods. Array descriptors can be shared across + // type instances. This allows all uses of the same array + // to be updated at once. E.g., all nodes can be explicitly sized + // by tracking and correcting one implicit size. Or, all nodes + // can get the explicit size on a redeclaration that gives size. + // + // N.B.: Don't share with the shared symbol tables (symbols are + // marked as isReadOnly(). 
Such symbols with arrays that will be + // edited need to copyUp() on first use, so that + // A) the edits don't effect the shared symbol table, and + // B) the edits are shared across all users. + void updateArraySizes(const TType& type) + { + // For when we may already be sharing existing array descriptors, + // keeping the pointers the same, just updating the contents. + assert(arraySizes != nullptr); + assert(type.arraySizes != nullptr); + *arraySizes = *type.arraySizes; + } + void copyArraySizes(const TArraySizes& s) + { + // For setting a fresh new set of array sizes, not yet worrying about sharing. + arraySizes = new TArraySizes; + *arraySizes = s; + } + void transferArraySizes(TArraySizes* s) + { + // For setting an already allocated set of sizes that this type can use + // (no copy made). + arraySizes = s; + } + void clearArraySizes() + { + arraySizes = nullptr; + } + + // Add inner array sizes, to any existing sizes, via copy; the + // sizes passed in can still be reused for other purposes. + void copyArrayInnerSizes(const TArraySizes* s) + { + if (s != nullptr) { + if (arraySizes == nullptr) + copyArraySizes(*s); + else + arraySizes->addInnerSizes(*s); + } + } + void changeOuterArraySize(int s) { arraySizes->changeOuterSize(s); } + + // Recursively make the implicit array size the explicit array size. + // Expicit arrays are compile-time or link-time sized, never run-time sized. + // Sometimes, policy calls for an array to be run-time sized even if it was + // never variably indexed: Don't turn a 'skipNonvariablyIndexed' array into + // an explicit array. + void adoptImplicitArraySizes(bool skipNonvariablyIndexed) + { + if (isUnsizedArray() && !(skipNonvariablyIndexed || isArrayVariablyIndexed())) + changeOuterArraySize(getImplicitArraySize()); + // For multi-dim per-view arrays, set unsized inner dimension size to 1 + if (qualifier.isPerView() && arraySizes && arraySizes->isInnerUnsized()) + arraySizes->clearInnerUnsized(); + if (isStruct() && structure->size() > 0) { + int lastMember = (int)structure->size() - 1; + for (int i = 0; i < lastMember; ++i) + (*structure)[i].type->adoptImplicitArraySizes(false); + // implement the "last member of an SSBO" policy + (*structure)[lastMember].type->adoptImplicitArraySizes(getQualifier().storage == EvqBuffer); + } + } + + + void updateTypeParameters(const TType& type) + { + // For when we may already be sharing existing array descriptors, + // keeping the pointers the same, just updating the contents. + assert(typeParameters != nullptr); + assert(type.typeParameters != nullptr); + *typeParameters = *type.typeParameters; + } + void copyTypeParameters(const TArraySizes& s) + { + // For setting a fresh new set of type parameters, not yet worrying about sharing. + typeParameters = new TArraySizes; + *typeParameters = s; + } + void transferTypeParameters(TArraySizes* s) + { + // For setting an already allocated set of sizes that this type can use + // (no copy made). + typeParameters = s; + } + void clearTypeParameters() + { + typeParameters = nullptr; + } + + // Add inner array sizes, to any existing sizes, via copy; the + // sizes passed in can still be reused for other purposes. 
+ void copyTypeParametersInnerSizes(const TArraySizes* s) + { + if (s != nullptr) { + if (typeParameters == nullptr) + copyTypeParameters(*s); + else + typeParameters->addInnerSizes(*s); + } + } + + + + const char* getBasicString() const + { + return TType::getBasicString(basicType); + } + + static const char* getBasicString(TBasicType t) + { + switch (t) { + case EbtFloat: return "float"; + case EbtInt: return "int"; + case EbtUint: return "uint"; + case EbtSampler: return "sampler/image"; +#ifndef GLSLANG_WEB + case EbtVoid: return "void"; + case EbtDouble: return "double"; + case EbtFloat16: return "float16_t"; + case EbtInt8: return "int8_t"; + case EbtUint8: return "uint8_t"; + case EbtInt16: return "int16_t"; + case EbtUint16: return "uint16_t"; + case EbtInt64: return "int64_t"; + case EbtUint64: return "uint64_t"; + case EbtBool: return "bool"; + case EbtAtomicUint: return "atomic_uint"; + case EbtStruct: return "structure"; + case EbtBlock: return "block"; + case EbtAccStruct: return "accelerationStructureNV"; + case EbtRayQuery: return "rayQueryEXT"; + case EbtReference: return "reference"; +#endif + default: return "unknown type"; + } + } + +#ifdef GLSLANG_WEB + TString getCompleteString() const { return ""; } + const char* getStorageQualifierString() const { return ""; } + const char* getBuiltInVariableString() const { return ""; } + const char* getPrecisionQualifierString() const { return ""; } + TString getBasicTypeString() const { return ""; } +#else + TString getCompleteString() const + { + TString typeString; + + const auto appendStr = [&](const char* s) { typeString.append(s); }; + const auto appendUint = [&](unsigned int u) { typeString.append(std::to_string(u).c_str()); }; + const auto appendInt = [&](int i) { typeString.append(std::to_string(i).c_str()); }; + + if (qualifier.hasLayout()) { + // To reduce noise, skip this if the only layout is an xfb_buffer + // with no triggering xfb_offset. 
+ TQualifier noXfbBuffer = qualifier; + noXfbBuffer.layoutXfbBuffer = TQualifier::layoutXfbBufferEnd; + if (noXfbBuffer.hasLayout()) { + appendStr("layout("); + if (qualifier.hasAnyLocation()) { + appendStr(" location="); + appendUint(qualifier.layoutLocation); + if (qualifier.hasComponent()) { + appendStr(" component="); + appendUint(qualifier.layoutComponent); + } + if (qualifier.hasIndex()) { + appendStr(" index="); + appendUint(qualifier.layoutIndex); + } + } + if (qualifier.hasSet()) { + appendStr(" set="); + appendUint(qualifier.layoutSet); + } + if (qualifier.hasBinding()) { + appendStr(" binding="); + appendUint(qualifier.layoutBinding); + } + if (qualifier.hasStream()) { + appendStr(" stream="); + appendUint(qualifier.layoutStream); + } + if (qualifier.hasMatrix()) { + appendStr(" "); + appendStr(TQualifier::getLayoutMatrixString(qualifier.layoutMatrix)); + } + if (qualifier.hasPacking()) { + appendStr(" "); + appendStr(TQualifier::getLayoutPackingString(qualifier.layoutPacking)); + } + if (qualifier.hasOffset()) { + appendStr(" offset="); + appendInt(qualifier.layoutOffset); + } + if (qualifier.hasAlign()) { + appendStr(" align="); + appendInt(qualifier.layoutAlign); + } + if (qualifier.hasFormat()) { + appendStr(" "); + appendStr(TQualifier::getLayoutFormatString(qualifier.layoutFormat)); + } + if (qualifier.hasXfbBuffer() && qualifier.hasXfbOffset()) { + appendStr(" xfb_buffer="); + appendUint(qualifier.layoutXfbBuffer); + } + if (qualifier.hasXfbOffset()) { + appendStr(" xfb_offset="); + appendUint(qualifier.layoutXfbOffset); + } + if (qualifier.hasXfbStride()) { + appendStr(" xfb_stride="); + appendUint(qualifier.layoutXfbStride); + } + if (qualifier.hasAttachment()) { + appendStr(" input_attachment_index="); + appendUint(qualifier.layoutAttachment); + } + if (qualifier.hasSpecConstantId()) { + appendStr(" constant_id="); + appendUint(qualifier.layoutSpecConstantId); + } + if (qualifier.layoutPushConstant) + appendStr(" push_constant"); + if (qualifier.layoutBufferReference) + appendStr(" buffer_reference"); + if (qualifier.hasBufferReferenceAlign()) { + appendStr(" buffer_reference_align="); + appendUint(1u << qualifier.layoutBufferReferenceAlign); + } + + if (qualifier.layoutPassthrough) + appendStr(" passthrough"); + if (qualifier.layoutViewportRelative) + appendStr(" layoutViewportRelative"); + if (qualifier.layoutSecondaryViewportRelativeOffset != -2048) { + appendStr(" layoutSecondaryViewportRelativeOffset="); + appendInt(qualifier.layoutSecondaryViewportRelativeOffset); + } + if (qualifier.layoutShaderRecord) + appendStr(" shaderRecordNV"); + + appendStr(")"); + } + } + + if (qualifier.invariant) + appendStr(" invariant"); + if (qualifier.noContraction) + appendStr(" noContraction"); + if (qualifier.centroid) + appendStr(" centroid"); + if (qualifier.smooth) + appendStr(" smooth"); + if (qualifier.flat) + appendStr(" flat"); + if (qualifier.nopersp) + appendStr(" noperspective"); + if (qualifier.explicitInterp) + appendStr(" __explicitInterpAMD"); + if (qualifier.pervertexNV) + appendStr(" pervertexNV"); + if (qualifier.perPrimitiveNV) + appendStr(" perprimitiveNV"); + if (qualifier.perViewNV) + appendStr(" perviewNV"); + if (qualifier.perTaskNV) + appendStr(" taskNV"); + if (qualifier.patch) + appendStr(" patch"); + if (qualifier.sample) + appendStr(" sample"); + if (qualifier.coherent) + appendStr(" coherent"); + if (qualifier.devicecoherent) + appendStr(" devicecoherent"); + if (qualifier.queuefamilycoherent) + appendStr(" queuefamilycoherent"); + if 
(qualifier.workgroupcoherent) + appendStr(" workgroupcoherent"); + if (qualifier.subgroupcoherent) + appendStr(" subgroupcoherent"); + if (qualifier.shadercallcoherent) + appendStr(" shadercallcoherent"); + if (qualifier.nonprivate) + appendStr(" nonprivate"); + if (qualifier.volatil) + appendStr(" volatile"); + if (qualifier.restrict) + appendStr(" restrict"); + if (qualifier.readonly) + appendStr(" readonly"); + if (qualifier.writeonly) + appendStr(" writeonly"); + if (qualifier.specConstant) + appendStr(" specialization-constant"); + if (qualifier.nonUniform) + appendStr(" nonuniform"); + appendStr(" "); + appendStr(getStorageQualifierString()); + if (isArray()) { + for(int i = 0; i < (int)arraySizes->getNumDims(); ++i) { + int size = arraySizes->getDimSize(i); + if (size == UnsizedArraySize && i == 0 && arraySizes->isVariablyIndexed()) + appendStr(" runtime-sized array of"); + else { + if (size == UnsizedArraySize) { + appendStr(" unsized"); + if (i == 0) { + appendStr(" "); + appendInt(arraySizes->getImplicitSize()); + } + } else { + appendStr(" "); + appendInt(arraySizes->getDimSize(i)); + } + appendStr("-element array of"); + } + } + } + if (isParameterized()) { + appendStr("<"); + for(int i = 0; i < (int)typeParameters->getNumDims(); ++i) { + appendInt(typeParameters->getDimSize(i)); + if (i != (int)typeParameters->getNumDims() - 1) + appendStr(", "); + } + appendStr(">"); + } + if (qualifier.precision != EpqNone) { + appendStr(" "); + appendStr(getPrecisionQualifierString()); + } + if (isMatrix()) { + appendStr(" "); + appendInt(matrixCols); + appendStr("X"); + appendInt(matrixRows); + appendStr(" matrix of"); + } else if (isVector()) { + appendStr(" "); + appendInt(vectorSize); + appendStr("-component vector of"); + } + + appendStr(" "); + typeString.append(getBasicTypeString()); + + if (qualifier.builtIn != EbvNone) { + appendStr(" "); + appendStr(getBuiltInVariableString()); + } + + // Add struct/block members + if (isStruct() && structure) { + appendStr("{"); + bool hasHiddenMember = true; + for (size_t i = 0; i < structure->size(); ++i) { + if (! 
(*structure)[i].type->hiddenMember()) { + if (!hasHiddenMember) + appendStr(", "); + typeString.append((*structure)[i].type->getCompleteString()); + typeString.append(" "); + typeString.append((*structure)[i].type->getFieldName()); + hasHiddenMember = false; + } + } + appendStr("}"); + } + + return typeString; + } + + TString getBasicTypeString() const + { + if (basicType == EbtSampler) + return sampler.getString(); + else + return getBasicString(); + } + + const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); } + const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); } + const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); } +#endif + + const TTypeList* getStruct() const { assert(isStruct()); return structure; } + void setStruct(TTypeList* s) { assert(isStruct()); structure = s; } + TTypeList* getWritableStruct() const { assert(isStruct()); return structure; } // This should only be used when known to not be sharing with other threads + void setBasicType(const TBasicType& t) { basicType = t; } + + int computeNumComponents() const + { + int components = 0; + + if (getBasicType() == EbtStruct || getBasicType() == EbtBlock) { + for (TTypeList::const_iterator tl = getStruct()->begin(); tl != getStruct()->end(); tl++) + components += ((*tl).type)->computeNumComponents(); + } else if (matrixCols) + components = matrixCols * matrixRows; + else + components = vectorSize; + + if (arraySizes != nullptr) { + components *= arraySizes->getCumulativeSize(); + } + + return components; + } + + // append this type's mangled name to the passed in 'name' + void appendMangledName(TString& name) const + { + buildMangledName(name); + name += ';' ; + } + + // Do two structure types match? They could be declared independently, + // in different places, but still might satisfy the definition of matching. + // From the spec: + // + // "Structures must have the same name, sequence of type names, and + // type definitions, and member names to be considered the same type. + // This rule applies recursively for nested or embedded types." 
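+    // For example: two shaders that each independently declare
+    //     struct Light { vec3 color; float intensity; };
+    // produce distinct TTypeList pointers, but the types still compare equal here
+    // because the type name, member names, and member types all match.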
+ // + bool sameStructType(const TType& right) const + { + // Most commonly, they are both nullptr, or the same pointer to the same actual structure + if ((!isStruct() && !right.isStruct()) || + (isStruct() && right.isStruct() && structure == right.structure)) + return true; + + // Both being nullptr was caught above, now they both have to be structures of the same number of elements + if (!isStruct() || !right.isStruct() || + structure->size() != right.structure->size()) + return false; + + // Structure names have to match + if (*typeName != *right.typeName) + return false; + + // Compare the names and types of all the members, which have to match + for (unsigned int i = 0; i < structure->size(); ++i) { + if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName()) + return false; + + if (*(*structure)[i].type != *(*right.structure)[i].type) + return false; + } + + return true; + } + + bool sameReferenceType(const TType& right) const + { + if (isReference() != right.isReference()) + return false; + + if (!isReference() && !right.isReference()) + return true; + + assert(referentType != nullptr); + assert(right.referentType != nullptr); + + if (referentType == right.referentType) + return true; + + return *referentType == *right.referentType; + } + + // See if two types match, in all aspects except arrayness + bool sameElementType(const TType& right) const + { + return basicType == right.basicType && sameElementShape(right); + } + + // See if two type's arrayness match + bool sameArrayness(const TType& right) const + { + return ((arraySizes == nullptr && right.arraySizes == nullptr) || + (arraySizes != nullptr && right.arraySizes != nullptr && *arraySizes == *right.arraySizes)); + } + + // See if two type's arrayness match in everything except their outer dimension + bool sameInnerArrayness(const TType& right) const + { + assert(arraySizes != nullptr && right.arraySizes != nullptr); + return arraySizes->sameInnerArrayness(*right.arraySizes); + } + + // See if two type's parameters match + bool sameTypeParameters(const TType& right) const + { + return ((typeParameters == nullptr && right.typeParameters == nullptr) || + (typeParameters != nullptr && right.typeParameters != nullptr && *typeParameters == *right.typeParameters)); + } + + // See if two type's elements match in all ways except basic type + bool sameElementShape(const TType& right) const + { + return sampler == right.sampler && + vectorSize == right.vectorSize && + matrixCols == right.matrixCols && + matrixRows == right.matrixRows && + vector1 == right.vector1 && + isCoopMat() == right.isCoopMat() && + sameStructType(right) && + sameReferenceType(right); + } + + // See if a cooperative matrix type parameter with unspecified parameters is + // an OK function parameter + bool coopMatParameterOK(const TType& right) const + { + return isCoopMat() && right.isCoopMat() && (getBasicType() == right.getBasicType()) && + typeParameters == nullptr && right.typeParameters != nullptr; + } + + bool sameCoopMatBaseType(const TType &right) const { + bool rv = coopmat && right.coopmat; + if (getBasicType() == EbtFloat || getBasicType() == EbtFloat16) + rv = right.getBasicType() == EbtFloat || right.getBasicType() == EbtFloat16; + else if (getBasicType() == EbtUint || getBasicType() == EbtUint8) + rv = right.getBasicType() == EbtUint || right.getBasicType() == EbtUint8; + else if (getBasicType() == EbtInt || getBasicType() == EbtInt8) + rv = right.getBasicType() == EbtInt || right.getBasicType() == EbtInt8; + else + rv = 
false; + return rv; + } + + + // See if two types match in all ways (just the actual type, not qualification) + bool operator==(const TType& right) const + { + return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right); + } + + bool operator!=(const TType& right) const + { + return ! operator==(right); + } + + unsigned int getBufferReferenceAlignment() const + { +#ifndef GLSLANG_WEB + if (getBasicType() == glslang::EbtReference) { + return getReferentType()->getQualifier().hasBufferReferenceAlign() ? + (1u << getReferentType()->getQualifier().layoutBufferReferenceAlign) : 16u; + } +#endif + return 0; + } + +protected: + // Require consumer to pick between deep copy and shallow copy. + TType(const TType& type); + TType& operator=(const TType& type); + + // Recursively copy a type graph, while preserving the graph-like + // quality. That is, don't make more than one copy of a structure that + // gets reused multiple times in the type graph. + void deepCopy(const TType& copyOf, TMap& copiedMap) + { + shallowCopy(copyOf); + + if (copyOf.arraySizes) { + arraySizes = new TArraySizes; + *arraySizes = *copyOf.arraySizes; + } + + if (copyOf.typeParameters) { + typeParameters = new TArraySizes; + *typeParameters = *copyOf.typeParameters; + } + + if (copyOf.isStruct() && copyOf.structure) { + auto prevCopy = copiedMap.find(copyOf.structure); + if (prevCopy != copiedMap.end()) + structure = prevCopy->second; + else { + structure = new TTypeList; + copiedMap[copyOf.structure] = structure; + for (unsigned int i = 0; i < copyOf.structure->size(); ++i) { + TTypeLoc typeLoc; + typeLoc.loc = (*copyOf.structure)[i].loc; + typeLoc.type = new TType(); + typeLoc.type->deepCopy(*(*copyOf.structure)[i].type, copiedMap); + structure->push_back(typeLoc); + } + } + } + + if (copyOf.fieldName) + fieldName = NewPoolTString(copyOf.fieldName->c_str()); + if (copyOf.typeName) + typeName = NewPoolTString(copyOf.typeName->c_str()); + } + + + void buildMangledName(TString&) const; + + TBasicType basicType : 8; + int vectorSize : 4; // 1 means either scalar or 1-component vector; see vector1 to disambiguate. + int matrixCols : 4; + int matrixRows : 4; + bool vector1 : 1; // Backward-compatible tracking of a 1-component vector distinguished from a scalar. + // GLSL 4.5 never has a 1-component vector; so this will always be false until such + // functionality is added. + // HLSL does have a 1-component vectors, so this will be true to disambiguate + // from a scalar. + bool coopmat : 1; + TQualifier qualifier; + + TArraySizes* arraySizes; // nullptr unless an array; can be shared across types + // A type can't be both a structure (EbtStruct/EbtBlock) and a reference (EbtReference), so + // conserve space by making these a union + union { + TTypeList* structure; // invalid unless this is a struct; can be shared across types + TType *referentType; // invalid unless this is an EbtReference + }; + TString *fieldName; // for structure field names + TString *typeName; // for structure type name + TSampler sampler; + TArraySizes* typeParameters;// nullptr unless a parameterized type; can be shared across types +}; + +} // end namespace glslang + +#endif // _TYPES_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/arrays.h b/src/third_party/glslang/glslang/Include/arrays.h new file mode 100644 index 0000000..7f047d9 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/arrays.h @@ -0,0 +1,341 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. 
+// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Implement types for tracking GLSL arrays, arrays of arrays, etc. +// + +#ifndef _ARRAYS_INCLUDED +#define _ARRAYS_INCLUDED + +#include + +namespace glslang { + +// This is used to mean there is no size yet (unsized), it is waiting to get a size from somewhere else. +const int UnsizedArraySize = 0; + +class TIntermTyped; +extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*); + +// Specialization constants need both a nominal size and a node that defines +// the specialization constant being used. Array types are the same when their +// size and specialization constant nodes are the same. +struct TArraySize { + unsigned int size; + TIntermTyped* node; // nullptr means no specialization constant node + bool operator==(const TArraySize& rhs) const + { + if (size != rhs.size) + return false; + if (node == nullptr || rhs.node == nullptr) + return node == rhs.node; + + return SameSpecializationConstants(node, rhs.node); + } +}; + +// +// TSmallArrayVector is used as the container for the set of sizes in TArraySizes. +// It has generic-container semantics, while TArraySizes has array-of-array semantics. +// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy. +// +struct TSmallArrayVector { + // + // TODO: memory: TSmallArrayVector is intended to be smaller. + // Almost all arrays could be handled by two sizes each fitting + // in 16 bits, needing a real vector only in the cases where there + // are more than 3 sizes or a size needing more than 16 bits. + // + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + TSmallArrayVector() : sizes(nullptr) { } + virtual ~TSmallArrayVector() { dealloc(); } + + // For breaking into two non-shared copies, independently modifiable. 
+ TSmallArrayVector& operator=(const TSmallArrayVector& from) + { + if (from.sizes == nullptr) + sizes = nullptr; + else { + alloc(); + *sizes = *from.sizes; + } + + return *this; + } + + int size() const + { + if (sizes == nullptr) + return 0; + return (int)sizes->size(); + } + + unsigned int frontSize() const + { + assert(sizes != nullptr && sizes->size() > 0); + return sizes->front().size; + } + + TIntermTyped* frontNode() const + { + assert(sizes != nullptr && sizes->size() > 0); + return sizes->front().node; + } + + void changeFront(unsigned int s) + { + assert(sizes != nullptr); + // this should only happen for implicitly sized arrays, not specialization constants + assert(sizes->front().node == nullptr); + sizes->front().size = s; + } + + void push_back(unsigned int e, TIntermTyped* n) + { + alloc(); + TArraySize pair = { e, n }; + sizes->push_back(pair); + } + + void push_back(const TSmallArrayVector& newDims) + { + alloc(); + sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end()); + } + + void pop_front() + { + assert(sizes != nullptr && sizes->size() > 0); + if (sizes->size() == 1) + dealloc(); + else + sizes->erase(sizes->begin()); + } + + // 'this' should currently not be holding anything, and copyNonFront + // will make it hold a copy of all but the first element of rhs. + // (This would be useful for making a type that is dereferenced by + // one dimension.) + void copyNonFront(const TSmallArrayVector& rhs) + { + assert(sizes == nullptr); + if (rhs.size() > 1) { + alloc(); + sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end()); + } + } + + unsigned int getDimSize(int i) const + { + assert(sizes != nullptr && (int)sizes->size() > i); + return (*sizes)[i].size; + } + + void setDimSize(int i, unsigned int size) const + { + assert(sizes != nullptr && (int)sizes->size() > i); + assert((*sizes)[i].node == nullptr); + (*sizes)[i].size = size; + } + + TIntermTyped* getDimNode(int i) const + { + assert(sizes != nullptr && (int)sizes->size() > i); + return (*sizes)[i].node; + } + + bool operator==(const TSmallArrayVector& rhs) const + { + if (sizes == nullptr && rhs.sizes == nullptr) + return true; + if (sizes == nullptr || rhs.sizes == nullptr) + return false; + return *sizes == *rhs.sizes; + } + bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); } + +protected: + TSmallArrayVector(const TSmallArrayVector&); + + void alloc() + { + if (sizes == nullptr) + sizes = new TVector; + } + void dealloc() + { + delete sizes; + sizes = nullptr; + } + + TVector* sizes; // will either hold such a pointer, or in the future, hold the two array sizes +}; + +// +// Represent an array, or array of arrays, to arbitrary depth. This is not +// done through a hierarchy of types in a type tree, rather all contiguous arrayness +// in the type hierarchy is localized into this single cumulative object. +// +// The arrayness in TTtype is a pointer, so that it can be non-allocated and zero +// for the vast majority of types that are non-array types. +// +// Order Policy: these are all identical: +// - left to right order within a contiguous set of ...[..][..][..]... in the source language +// - index order 0, 1, 2, ... within the 'sizes' member below +// - outer-most to inner-most +// +struct TArraySizes { + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + + TArraySizes() : implicitArraySize(1), variablyIndexed(false) { } + + // For breaking into two non-shared copies, independently modifiable. 
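+    // For example:
+    //     TArraySizes copy;
+    //     copy = original;   // 'copy' now holds its own, independently editable size list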
+ TArraySizes& operator=(const TArraySizes& from) + { + implicitArraySize = from.implicitArraySize; + variablyIndexed = from.variablyIndexed; + sizes = from.sizes; + + return *this; + } + + // translate from array-of-array semantics to container semantics + int getNumDims() const { return sizes.size(); } + int getDimSize(int dim) const { return sizes.getDimSize(dim); } + TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); } + void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); } + int getOuterSize() const { return sizes.frontSize(); } + TIntermTyped* getOuterNode() const { return sizes.frontNode(); } + int getCumulativeSize() const + { + int size = 1; + for (int d = 0; d < sizes.size(); ++d) { + // this only makes sense in paths that have a known array size + assert(sizes.getDimSize(d) != UnsizedArraySize); + size *= sizes.getDimSize(d); + } + return size; + } + void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); } + void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); } + void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); } + void addInnerSize(TArraySize pair) { + sizes.push_back(pair.size, pair.node); + } + void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); } + void changeOuterSize(int s) { sizes.changeFront((unsigned)s); } + int getImplicitSize() const { return implicitArraySize; } + void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); } + bool isInnerUnsized() const + { + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize) + return true; + } + + return false; + } + bool clearInnerUnsized() + { + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize) + setDimSize(d, 1); + } + + return false; + } + bool isInnerSpecialization() const + { + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimNode(d) != nullptr) + return true; + } + + return false; + } + bool isOuterSpecialization() + { + return sizes.getDimNode(0) != nullptr; + } + + bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); } + bool isSized() const { return getOuterSize() != UnsizedArraySize; } + void dereference() { sizes.pop_front(); } + void copyDereferenced(const TArraySizes& rhs) + { + assert(sizes.size() == 0); + if (rhs.sizes.size() > 1) + sizes.copyNonFront(rhs.sizes); + } + + bool sameInnerArrayness(const TArraySizes& rhs) const + { + if (sizes.size() != rhs.sizes.size()) + return false; + + for (int d = 1; d < sizes.size(); ++d) { + if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) || + sizes.getDimNode(d) != rhs.sizes.getDimNode(d)) + return false; + } + + return true; + } + + void setVariablyIndexed() { variablyIndexed = true; } + bool isVariablyIndexed() const { return variablyIndexed; } + + bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; } + bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; } + +protected: + TSmallArrayVector sizes; + + TArraySizes(const TArraySizes&); + + // For tracking maximum referenced compile-time constant index. + // Applies only to the outer-most dimension. Potentially becomes + // the implicit size of the array, if not variably indexed and + // otherwise legal. 
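+    // For example: for 'float a[]; ... a[3] = 1.0;' the front end can record an
+    // implicit size of 4 via updateImplicitSize(), and TType::adoptImplicitArraySizes()
+    // may later promote that to the explicit outer size if the array is never
+    // variably indexed.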
+ int implicitArraySize; + bool variablyIndexed; // true if array is indexed with a non compile-time constant +}; + +} // end namespace glslang + +#endif // _ARRAYS_INCLUDED_ diff --git a/src/third_party/glslang/glslang/Include/glslang_c_interface.h b/src/third_party/glslang/glslang/Include/glslang_c_interface.h new file mode 100644 index 0000000..4b32e2b --- /dev/null +++ b/src/third_party/glslang/glslang/Include/glslang_c_interface.h @@ -0,0 +1,249 @@ +/** + This code is based on the glslang_c_interface implementation by Viktor Latypov +**/ + +/** +BSD 2-Clause License + +Copyright (c) 2019, Viktor Latypov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+**/ + +#ifndef GLSLANG_C_IFACE_H_INCLUDED +#define GLSLANG_C_IFACE_H_INCLUDED + +#include +#include + +#include "glslang_c_shader_types.h" + +typedef struct glslang_shader_s glslang_shader_t; +typedef struct glslang_program_s glslang_program_t; + +/* TLimits counterpart */ +typedef struct glslang_limits_s { + bool non_inductive_for_loops; + bool while_loops; + bool do_while_loops; + bool general_uniform_indexing; + bool general_attribute_matrix_vector_indexing; + bool general_varying_indexing; + bool general_sampler_indexing; + bool general_variable_indexing; + bool general_constant_matrix_vector_indexing; +} glslang_limits_t; + +/* TBuiltInResource counterpart */ +typedef struct glslang_resource_s { + int max_lights; + int max_clip_planes; + int max_texture_units; + int max_texture_coords; + int max_vertex_attribs; + int max_vertex_uniform_components; + int max_varying_floats; + int max_vertex_texture_image_units; + int max_combined_texture_image_units; + int max_texture_image_units; + int max_fragment_uniform_components; + int max_draw_buffers; + int max_vertex_uniform_vectors; + int max_varying_vectors; + int max_fragment_uniform_vectors; + int max_vertex_output_vectors; + int max_fragment_input_vectors; + int min_program_texel_offset; + int max_program_texel_offset; + int max_clip_distances; + int max_compute_work_group_count_x; + int max_compute_work_group_count_y; + int max_compute_work_group_count_z; + int max_compute_work_group_size_x; + int max_compute_work_group_size_y; + int max_compute_work_group_size_z; + int max_compute_uniform_components; + int max_compute_texture_image_units; + int max_compute_image_uniforms; + int max_compute_atomic_counters; + int max_compute_atomic_counter_buffers; + int max_varying_components; + int max_vertex_output_components; + int max_geometry_input_components; + int max_geometry_output_components; + int max_fragment_input_components; + int max_image_units; + int max_combined_image_units_and_fragment_outputs; + int max_combined_shader_output_resources; + int max_image_samples; + int max_vertex_image_uniforms; + int max_tess_control_image_uniforms; + int max_tess_evaluation_image_uniforms; + int max_geometry_image_uniforms; + int max_fragment_image_uniforms; + int max_combined_image_uniforms; + int max_geometry_texture_image_units; + int max_geometry_output_vertices; + int max_geometry_total_output_components; + int max_geometry_uniform_components; + int max_geometry_varying_components; + int max_tess_control_input_components; + int max_tess_control_output_components; + int max_tess_control_texture_image_units; + int max_tess_control_uniform_components; + int max_tess_control_total_output_components; + int max_tess_evaluation_input_components; + int max_tess_evaluation_output_components; + int max_tess_evaluation_texture_image_units; + int max_tess_evaluation_uniform_components; + int max_tess_patch_components; + int max_patch_vertices; + int max_tess_gen_level; + int max_viewports; + int max_vertex_atomic_counters; + int max_tess_control_atomic_counters; + int max_tess_evaluation_atomic_counters; + int max_geometry_atomic_counters; + int max_fragment_atomic_counters; + int max_combined_atomic_counters; + int max_atomic_counter_bindings; + int max_vertex_atomic_counter_buffers; + int max_tess_control_atomic_counter_buffers; + int max_tess_evaluation_atomic_counter_buffers; + int max_geometry_atomic_counter_buffers; + int max_fragment_atomic_counter_buffers; + int max_combined_atomic_counter_buffers; + int max_atomic_counter_buffer_size; + int 
max_transform_feedback_buffers; + int max_transform_feedback_interleaved_components; + int max_cull_distances; + int max_combined_clip_and_cull_distances; + int max_samples; + int max_mesh_output_vertices_nv; + int max_mesh_output_primitives_nv; + int max_mesh_work_group_size_x_nv; + int max_mesh_work_group_size_y_nv; + int max_mesh_work_group_size_z_nv; + int max_task_work_group_size_x_nv; + int max_task_work_group_size_y_nv; + int max_task_work_group_size_z_nv; + int max_mesh_view_count_nv; + int maxDualSourceDrawBuffersEXT; + + glslang_limits_t limits; +} glslang_resource_t; + +typedef struct glslang_input_s { + glslang_source_t language; + glslang_stage_t stage; + glslang_client_t client; + glslang_target_client_version_t client_version; + glslang_target_language_t target_language; + glslang_target_language_version_t target_language_version; + /** Shader source code */ + const char* code; + int default_version; + glslang_profile_t default_profile; + int force_default_version_and_profile; + int forward_compatible; + glslang_messages_t messages; + const glslang_resource_t* resource; +} glslang_input_t; + +/* Inclusion result structure allocated by C include_local/include_system callbacks */ +typedef struct glsl_include_result_s { + /* Header file name or NULL if inclusion failed */ + const char* header_name; + + /* Header contents or NULL */ + const char* header_data; + size_t header_length; + +} glsl_include_result_t; + +/* Callback for local file inclusion */ +typedef glsl_include_result_t* (*glsl_include_local_func)(void* ctx, const char* header_name, const char* includer_name, + size_t include_depth); + +/* Callback for system file inclusion */ +typedef glsl_include_result_t* (*glsl_include_system_func)(void* ctx, const char* header_name, + const char* includer_name, size_t include_depth); + +/* Callback for include result destruction */ +typedef int (*glsl_free_include_result_func)(void* ctx, glsl_include_result_t* result); + +/* Collection of callbacks for GLSL preprocessor */ +typedef struct glsl_include_callbacks_s { + glsl_include_system_func include_system; + glsl_include_local_func include_local; + glsl_free_include_result_func free_include_result; +} glsl_include_callbacks_t; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef GLSLANG_IS_SHARED_LIBRARY + #ifdef _WIN32 + #ifdef GLSLANG_EXPORTING + #define GLSLANG_EXPORT __declspec(dllexport) + #else + #define GLSLANG_EXPORT __declspec(dllimport) + #endif + #elif __GNUC__ >= 4 + #define GLSLANG_EXPORT __attribute__((visibility("default"))) + #endif +#endif // GLSLANG_IS_SHARED_LIBRARY + +#ifndef GLSLANG_EXPORT +#define GLSLANG_EXPORT +#endif + +GLSLANG_EXPORT int glslang_initialize_process(); +GLSLANG_EXPORT void glslang_finalize_process(); + +GLSLANG_EXPORT glslang_shader_t* glslang_shader_create(const glslang_input_t* input); +GLSLANG_EXPORT void glslang_shader_delete(glslang_shader_t* shader); +GLSLANG_EXPORT int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input); +GLSLANG_EXPORT int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input); +GLSLANG_EXPORT const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader); +GLSLANG_EXPORT const char* glslang_shader_get_info_log(glslang_shader_t* shader); +GLSLANG_EXPORT const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader); + +GLSLANG_EXPORT glslang_program_t* glslang_program_create(); +GLSLANG_EXPORT void glslang_program_delete(glslang_program_t* program); +GLSLANG_EXPORT void 
glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader); +GLSLANG_EXPORT int glslang_program_link(glslang_program_t* program, int messages); // glslang_messages_t +GLSLANG_EXPORT void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage); +GLSLANG_EXPORT size_t glslang_program_SPIRV_get_size(glslang_program_t* program); +GLSLANG_EXPORT void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int*); +GLSLANG_EXPORT unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program); +GLSLANG_EXPORT const char* glslang_program_SPIRV_get_messages(glslang_program_t* program); +GLSLANG_EXPORT const char* glslang_program_get_info_log(glslang_program_t* program); +GLSLANG_EXPORT const char* glslang_program_get_info_debug_log(glslang_program_t* program); + +#ifdef __cplusplus +} +#endif + +#endif /* #ifdef GLSLANG_C_IFACE_INCLUDED */ diff --git a/src/third_party/glslang/glslang/Include/glslang_c_shader_types.h b/src/third_party/glslang/glslang/Include/glslang_c_shader_types.h new file mode 100644 index 0000000..d01a115 --- /dev/null +++ b/src/third_party/glslang/glslang/Include/glslang_c_shader_types.h @@ -0,0 +1,185 @@ +/** + This code is based on the glslang_c_interface implementation by Viktor Latypov +**/ + +/** +BSD 2-Clause License + +Copyright (c) 2019, Viktor Latypov +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+**/ + +#ifndef C_SHADER_TYPES_H_INCLUDED +#define C_SHADER_TYPES_H_INCLUDED + +#define LAST_ELEMENT_MARKER(x) x + +/* EShLanguage counterpart */ +typedef enum { + GLSLANG_STAGE_VERTEX, + GLSLANG_STAGE_TESSCONTROL, + GLSLANG_STAGE_TESSEVALUATION, + GLSLANG_STAGE_GEOMETRY, + GLSLANG_STAGE_FRAGMENT, + GLSLANG_STAGE_COMPUTE, + GLSLANG_STAGE_RAYGEN_NV, + GLSLANG_STAGE_INTERSECT_NV, + GLSLANG_STAGE_ANYHIT_NV, + GLSLANG_STAGE_CLOSESTHIT_NV, + GLSLANG_STAGE_MISS_NV, + GLSLANG_STAGE_CALLABLE_NV, + GLSLANG_STAGE_TASK_NV, + GLSLANG_STAGE_MESH_NV, + LAST_ELEMENT_MARKER(GLSLANG_STAGE_COUNT), +} glslang_stage_t; // would be better as stage, but this is ancient now + +/* EShLanguageMask counterpart */ +typedef enum { + GLSLANG_STAGE_VERTEX_MASK = (1 << GLSLANG_STAGE_VERTEX), + GLSLANG_STAGE_TESSCONTROL_MASK = (1 << GLSLANG_STAGE_TESSCONTROL), + GLSLANG_STAGE_TESSEVALUATION_MASK = (1 << GLSLANG_STAGE_TESSEVALUATION), + GLSLANG_STAGE_GEOMETRY_MASK = (1 << GLSLANG_STAGE_GEOMETRY), + GLSLANG_STAGE_FRAGMENT_MASK = (1 << GLSLANG_STAGE_FRAGMENT), + GLSLANG_STAGE_COMPUTE_MASK = (1 << GLSLANG_STAGE_COMPUTE), + GLSLANG_STAGE_RAYGEN_NV_MASK = (1 << GLSLANG_STAGE_RAYGEN_NV), + GLSLANG_STAGE_INTERSECT_NV_MASK = (1 << GLSLANG_STAGE_INTERSECT_NV), + GLSLANG_STAGE_ANYHIT_NV_MASK = (1 << GLSLANG_STAGE_ANYHIT_NV), + GLSLANG_STAGE_CLOSESTHIT_NV_MASK = (1 << GLSLANG_STAGE_CLOSESTHIT_NV), + GLSLANG_STAGE_MISS_NV_MASK = (1 << GLSLANG_STAGE_MISS_NV), + GLSLANG_STAGE_CALLABLE_NV_MASK = (1 << GLSLANG_STAGE_CALLABLE_NV), + GLSLANG_STAGE_TASK_NV_MASK = (1 << GLSLANG_STAGE_TASK_NV), + GLSLANG_STAGE_MESH_NV_MASK = (1 << GLSLANG_STAGE_MESH_NV), + LAST_ELEMENT_MARKER(GLSLANG_STAGE_MASK_COUNT), +} glslang_stage_mask_t; + +/* EShSource counterpart */ +typedef enum { + GLSLANG_SOURCE_NONE, + GLSLANG_SOURCE_GLSL, + GLSLANG_SOURCE_HLSL, + LAST_ELEMENT_MARKER(GLSLANG_SOURCE_COUNT), +} glslang_source_t; + +/* EShClient counterpart */ +typedef enum { + GLSLANG_CLIENT_NONE, + GLSLANG_CLIENT_VULKAN, + GLSLANG_CLIENT_OPENGL, + LAST_ELEMENT_MARKER(GLSLANG_CLIENT_COUNT), +} glslang_client_t; + +/* EShTargetLanguage counterpart */ +typedef enum { + GLSLANG_TARGET_NONE, + GLSLANG_TARGET_SPV, + LAST_ELEMENT_MARKER(GLSLANG_TARGET_COUNT), +} glslang_target_language_t; + +/* SH_TARGET_ClientVersion counterpart */ +typedef enum { + GLSLANG_TARGET_VULKAN_1_0 = (1 << 22), + GLSLANG_TARGET_VULKAN_1_1 = (1 << 22) | (1 << 12), + GLSLANG_TARGET_OPENGL_450 = 450, + LAST_ELEMENT_MARKER(GLSLANG_TARGET_CLIENT_VERSION_COUNT), +} glslang_target_client_version_t; + +/* SH_TARGET_LanguageVersion counterpart */ +typedef enum { + GLSLANG_TARGET_SPV_1_0 = (1 << 16), + GLSLANG_TARGET_SPV_1_1 = (1 << 16) | (1 << 8), + GLSLANG_TARGET_SPV_1_2 = (1 << 16) | (2 << 8), + GLSLANG_TARGET_SPV_1_3 = (1 << 16) | (3 << 8), + GLSLANG_TARGET_SPV_1_4 = (1 << 16) | (4 << 8), + GLSLANG_TARGET_SPV_1_5 = (1 << 16) | (5 << 8), + LAST_ELEMENT_MARKER(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT), +} glslang_target_language_version_t; + +/* EShExecutable counterpart */ +typedef enum { GLSLANG_EX_VERTEX_FRAGMENT, GLSLANG_EX_FRAGMENT } glslang_executable_t; + +/* EShOptimizationLevel counterpart */ +typedef enum { + GLSLANG_OPT_NO_GENERATION, + GLSLANG_OPT_NONE, + GLSLANG_OPT_SIMPLE, + GLSLANG_OPT_FULL, + LAST_ELEMENT_MARKER(GLSLANG_OPT_LEVEL_COUNT), +} glslang_optimization_level_t; + +/* EShTextureSamplerTransformMode counterpart */ +typedef enum { + GLSLANG_TEX_SAMP_TRANS_KEEP, + GLSLANG_TEX_SAMP_TRANS_UPGRADE_TEXTURE_REMOVE_SAMPLER, + LAST_ELEMENT_MARKER(GLSLANG_TEX_SAMP_TRANS_COUNT), +} 
glslang_texture_sampler_transform_mode_t; + +/* EShMessages counterpart */ +typedef enum { + GLSLANG_MSG_DEFAULT_BIT = 0, + GLSLANG_MSG_RELAXED_ERRORS_BIT = (1 << 0), + GLSLANG_MSG_SUPPRESS_WARNINGS_BIT = (1 << 1), + GLSLANG_MSG_AST_BIT = (1 << 2), + GLSLANG_MSG_SPV_RULES_BIT = (1 << 3), + GLSLANG_MSG_VULKAN_RULES_BIT = (1 << 4), + GLSLANG_MSG_ONLY_PREPROCESSOR_BIT = (1 << 5), + GLSLANG_MSG_READ_HLSL_BIT = (1 << 6), + GLSLANG_MSG_CASCADING_ERRORS_BIT = (1 << 7), + GLSLANG_MSG_KEEP_UNCALLED_BIT = (1 << 8), + GLSLANG_MSG_HLSL_OFFSETS_BIT = (1 << 9), + GLSLANG_MSG_DEBUG_INFO_BIT = (1 << 10), + GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT = (1 << 11), + GLSLANG_MSG_HLSL_LEGALIZATION_BIT = (1 << 12), + GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT = (1 << 13), + GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT = (1 << 14), + LAST_ELEMENT_MARKER(GLSLANG_MSG_COUNT), +} glslang_messages_t; + +/* EShReflectionOptions counterpart */ +typedef enum { + GLSLANG_REFLECTION_DEFAULT_BIT = 0, + GLSLANG_REFLECTION_STRICT_ARRAY_SUFFIX_BIT = (1 << 0), + GLSLANG_REFLECTION_BASIC_ARRAY_SUFFIX_BIT = (1 << 1), + GLSLANG_REFLECTION_INTERMEDIATE_IOO_BIT = (1 << 2), + GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3), + GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4), + GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5), + GLSLANG_REFLECTION_ALL_IO_VARIABLES_BIT = (1 << 6), + GLSLANG_REFLECTION_SHARED_STD140_SSBO_BIT = (1 << 7), + GLSLANG_REFLECTION_SHARED_STD140_UBO_BIT = (1 << 8), + LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT), +} glslang_reflection_options_t; + +/* EProfile counterpart (from Versions.h) */ +typedef enum { + GLSLANG_BAD_PROFILE = 0, + GLSLANG_NO_PROFILE = (1 << 0), + GLSLANG_CORE_PROFILE = (1 << 1), + GLSLANG_COMPATIBILITY_PROFILE = (1 << 2), + GLSLANG_ES_PROFILE = (1 << 3), + LAST_ELEMENT_MARKER(GLSLANG_PROFILE_COUNT), +} glslang_profile_t; + +#undef LAST_ELEMENT_MARKER + +#endif diff --git a/src/third_party/glslang/glslang/Include/intermediate.h b/src/third_party/glslang/glslang/Include/intermediate.h new file mode 100644 index 0000000..30cb6fb --- /dev/null +++ b/src/third_party/glslang/glslang/Include/intermediate.h @@ -0,0 +1,1806 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Definition of the in-memory high-level intermediate representation +// of shaders. This is a tree that parser creates. +// +// Nodes in the tree are defined as a hierarchy of classes derived from +// TIntermNode. Each is a node in a tree. There is no preset branching factor; +// each node can have it's own type of list of children. +// + +#ifndef __INTERMEDIATE_H +#define __INTERMEDIATE_H + +#if defined(_MSC_VER) && _MSC_VER >= 1900 + #pragma warning(disable : 4464) // relative include path contains '..' + #pragma warning(disable : 5026) // 'glslang::TIntermUnary': move constructor was implicitly defined as deleted +#endif + +#include "../Include/Common.h" +#include "../Include/Types.h" +#include "../Include/ConstantUnion.h" + +namespace glslang { + +class TIntermediate; + +// +// Operators used by the high-level (parse tree) representation. +// +enum TOperator { + EOpNull, // if in a node, should only mean a node is still being built + EOpSequence, // denotes a list of statements, or parameters, etc. + EOpLinkerObjects, // for aggregate node of objects the linker may need, if not reference by the rest of the AST + EOpFunctionCall, + EOpFunction, // For function definition + EOpParameters, // an aggregate listing the parameters to a function + + // + // Unary operators + // + + EOpNegative, + EOpLogicalNot, + EOpVectorLogicalNot, + EOpBitwiseNot, + + EOpPostIncrement, + EOpPostDecrement, + EOpPreIncrement, + EOpPreDecrement, + + EOpCopyObject, + + // (u)int* -> bool + EOpConvInt8ToBool, + EOpConvUint8ToBool, + EOpConvInt16ToBool, + EOpConvUint16ToBool, + EOpConvIntToBool, + EOpConvUintToBool, + EOpConvInt64ToBool, + EOpConvUint64ToBool, + + // float* -> bool + EOpConvFloat16ToBool, + EOpConvFloatToBool, + EOpConvDoubleToBool, + + // bool -> (u)int* + EOpConvBoolToInt8, + EOpConvBoolToUint8, + EOpConvBoolToInt16, + EOpConvBoolToUint16, + EOpConvBoolToInt, + EOpConvBoolToUint, + EOpConvBoolToInt64, + EOpConvBoolToUint64, + + // bool -> float* + EOpConvBoolToFloat16, + EOpConvBoolToFloat, + EOpConvBoolToDouble, + + // int8_t -> (u)int* + EOpConvInt8ToInt16, + EOpConvInt8ToInt, + EOpConvInt8ToInt64, + EOpConvInt8ToUint8, + EOpConvInt8ToUint16, + EOpConvInt8ToUint, + EOpConvInt8ToUint64, + + // uint8_t -> (u)int* + EOpConvUint8ToInt8, + EOpConvUint8ToInt16, + EOpConvUint8ToInt, + EOpConvUint8ToInt64, + EOpConvUint8ToUint16, + EOpConvUint8ToUint, + EOpConvUint8ToUint64, + + // int8_t -> float* + EOpConvInt8ToFloat16, + EOpConvInt8ToFloat, + EOpConvInt8ToDouble, + + // uint8_t -> float* + EOpConvUint8ToFloat16, + EOpConvUint8ToFloat, + EOpConvUint8ToDouble, + + // int16_t -> (u)int* + EOpConvInt16ToInt8, + EOpConvInt16ToInt, + EOpConvInt16ToInt64, + EOpConvInt16ToUint8, + EOpConvInt16ToUint16, + EOpConvInt16ToUint, + EOpConvInt16ToUint64, + + // uint16_t -> (u)int* + EOpConvUint16ToInt8, + EOpConvUint16ToInt16, + EOpConvUint16ToInt, + EOpConvUint16ToInt64, + EOpConvUint16ToUint8, + EOpConvUint16ToUint, + EOpConvUint16ToUint64, + + // int16_t -> 
float* + EOpConvInt16ToFloat16, + EOpConvInt16ToFloat, + EOpConvInt16ToDouble, + + // uint16_t -> float* + EOpConvUint16ToFloat16, + EOpConvUint16ToFloat, + EOpConvUint16ToDouble, + + // int32_t -> (u)int* + EOpConvIntToInt8, + EOpConvIntToInt16, + EOpConvIntToInt64, + EOpConvIntToUint8, + EOpConvIntToUint16, + EOpConvIntToUint, + EOpConvIntToUint64, + + // uint32_t -> (u)int* + EOpConvUintToInt8, + EOpConvUintToInt16, + EOpConvUintToInt, + EOpConvUintToInt64, + EOpConvUintToUint8, + EOpConvUintToUint16, + EOpConvUintToUint64, + + // int32_t -> float* + EOpConvIntToFloat16, + EOpConvIntToFloat, + EOpConvIntToDouble, + + // uint32_t -> float* + EOpConvUintToFloat16, + EOpConvUintToFloat, + EOpConvUintToDouble, + + // int64_t -> (u)int* + EOpConvInt64ToInt8, + EOpConvInt64ToInt16, + EOpConvInt64ToInt, + EOpConvInt64ToUint8, + EOpConvInt64ToUint16, + EOpConvInt64ToUint, + EOpConvInt64ToUint64, + + // uint64_t -> (u)int* + EOpConvUint64ToInt8, + EOpConvUint64ToInt16, + EOpConvUint64ToInt, + EOpConvUint64ToInt64, + EOpConvUint64ToUint8, + EOpConvUint64ToUint16, + EOpConvUint64ToUint, + + // int64_t -> float* + EOpConvInt64ToFloat16, + EOpConvInt64ToFloat, + EOpConvInt64ToDouble, + + // uint64_t -> float* + EOpConvUint64ToFloat16, + EOpConvUint64ToFloat, + EOpConvUint64ToDouble, + + // float16_t -> (u)int* + EOpConvFloat16ToInt8, + EOpConvFloat16ToInt16, + EOpConvFloat16ToInt, + EOpConvFloat16ToInt64, + EOpConvFloat16ToUint8, + EOpConvFloat16ToUint16, + EOpConvFloat16ToUint, + EOpConvFloat16ToUint64, + + // float16_t -> float* + EOpConvFloat16ToFloat, + EOpConvFloat16ToDouble, + + // float -> (u)int* + EOpConvFloatToInt8, + EOpConvFloatToInt16, + EOpConvFloatToInt, + EOpConvFloatToInt64, + EOpConvFloatToUint8, + EOpConvFloatToUint16, + EOpConvFloatToUint, + EOpConvFloatToUint64, + + // float -> float* + EOpConvFloatToFloat16, + EOpConvFloatToDouble, + + // float64 _t-> (u)int* + EOpConvDoubleToInt8, + EOpConvDoubleToInt16, + EOpConvDoubleToInt, + EOpConvDoubleToInt64, + EOpConvDoubleToUint8, + EOpConvDoubleToUint16, + EOpConvDoubleToUint, + EOpConvDoubleToUint64, + + // float64_t -> float* + EOpConvDoubleToFloat16, + EOpConvDoubleToFloat, + + // uint64_t <-> pointer + EOpConvUint64ToPtr, + EOpConvPtrToUint64, + + // uvec2 <-> pointer + EOpConvUvec2ToPtr, + EOpConvPtrToUvec2, + + // + // binary operations + // + + EOpAdd, + EOpSub, + EOpMul, + EOpDiv, + EOpMod, + EOpRightShift, + EOpLeftShift, + EOpAnd, + EOpInclusiveOr, + EOpExclusiveOr, + EOpEqual, + EOpNotEqual, + EOpVectorEqual, + EOpVectorNotEqual, + EOpLessThan, + EOpGreaterThan, + EOpLessThanEqual, + EOpGreaterThanEqual, + EOpComma, + + EOpVectorTimesScalar, + EOpVectorTimesMatrix, + EOpMatrixTimesVector, + EOpMatrixTimesScalar, + + EOpLogicalOr, + EOpLogicalXor, + EOpLogicalAnd, + + EOpIndexDirect, + EOpIndexIndirect, + EOpIndexDirectStruct, + + EOpVectorSwizzle, + + EOpMethod, + EOpScoping, + + // + // Built-in functions mapped to operators + // + + EOpRadians, + EOpDegrees, + EOpSin, + EOpCos, + EOpTan, + EOpAsin, + EOpAcos, + EOpAtan, + EOpSinh, + EOpCosh, + EOpTanh, + EOpAsinh, + EOpAcosh, + EOpAtanh, + + EOpPow, + EOpExp, + EOpLog, + EOpExp2, + EOpLog2, + EOpSqrt, + EOpInverseSqrt, + + EOpAbs, + EOpSign, + EOpFloor, + EOpTrunc, + EOpRound, + EOpRoundEven, + EOpCeil, + EOpFract, + EOpModf, + EOpMin, + EOpMax, + EOpClamp, + EOpMix, + EOpStep, + EOpSmoothStep, + + EOpIsNan, + EOpIsInf, + + EOpFma, + + EOpFrexp, + EOpLdexp, + + EOpFloatBitsToInt, + EOpFloatBitsToUint, + EOpIntBitsToFloat, + EOpUintBitsToFloat, + EOpDoubleBitsToInt64, + 
EOpDoubleBitsToUint64, + EOpInt64BitsToDouble, + EOpUint64BitsToDouble, + EOpFloat16BitsToInt16, + EOpFloat16BitsToUint16, + EOpInt16BitsToFloat16, + EOpUint16BitsToFloat16, + EOpPackSnorm2x16, + EOpUnpackSnorm2x16, + EOpPackUnorm2x16, + EOpUnpackUnorm2x16, + EOpPackSnorm4x8, + EOpUnpackSnorm4x8, + EOpPackUnorm4x8, + EOpUnpackUnorm4x8, + EOpPackHalf2x16, + EOpUnpackHalf2x16, + EOpPackDouble2x32, + EOpUnpackDouble2x32, + EOpPackInt2x32, + EOpUnpackInt2x32, + EOpPackUint2x32, + EOpUnpackUint2x32, + EOpPackFloat2x16, + EOpUnpackFloat2x16, + EOpPackInt2x16, + EOpUnpackInt2x16, + EOpPackUint2x16, + EOpUnpackUint2x16, + EOpPackInt4x16, + EOpUnpackInt4x16, + EOpPackUint4x16, + EOpUnpackUint4x16, + EOpPack16, + EOpPack32, + EOpPack64, + EOpUnpack32, + EOpUnpack16, + EOpUnpack8, + + EOpLength, + EOpDistance, + EOpDot, + EOpCross, + EOpNormalize, + EOpFaceForward, + EOpReflect, + EOpRefract, + + EOpMin3, + EOpMax3, + EOpMid3, + + EOpDPdx, // Fragment only + EOpDPdy, // Fragment only + EOpFwidth, // Fragment only + EOpDPdxFine, // Fragment only + EOpDPdyFine, // Fragment only + EOpFwidthFine, // Fragment only + EOpDPdxCoarse, // Fragment only + EOpDPdyCoarse, // Fragment only + EOpFwidthCoarse, // Fragment only + + EOpInterpolateAtCentroid, // Fragment only + EOpInterpolateAtSample, // Fragment only + EOpInterpolateAtOffset, // Fragment only + EOpInterpolateAtVertex, + + EOpMatrixTimesMatrix, + EOpOuterProduct, + EOpDeterminant, + EOpMatrixInverse, + EOpTranspose, + + EOpFtransform, + + EOpNoise, + + EOpEmitVertex, // geometry only + EOpEndPrimitive, // geometry only + EOpEmitStreamVertex, // geometry only + EOpEndStreamPrimitive, // geometry only + + EOpBarrier, + EOpMemoryBarrier, + EOpMemoryBarrierAtomicCounter, + EOpMemoryBarrierBuffer, + EOpMemoryBarrierImage, + EOpMemoryBarrierShared, // compute only + EOpGroupMemoryBarrier, // compute only + + EOpBallot, + EOpReadInvocation, + EOpReadFirstInvocation, + + EOpAnyInvocation, + EOpAllInvocations, + EOpAllInvocationsEqual, + + EOpSubgroupGuardStart, + EOpSubgroupBarrier, + EOpSubgroupMemoryBarrier, + EOpSubgroupMemoryBarrierBuffer, + EOpSubgroupMemoryBarrierImage, + EOpSubgroupMemoryBarrierShared, // compute only + EOpSubgroupElect, + EOpSubgroupAll, + EOpSubgroupAny, + EOpSubgroupAllEqual, + EOpSubgroupBroadcast, + EOpSubgroupBroadcastFirst, + EOpSubgroupBallot, + EOpSubgroupInverseBallot, + EOpSubgroupBallotBitExtract, + EOpSubgroupBallotBitCount, + EOpSubgroupBallotInclusiveBitCount, + EOpSubgroupBallotExclusiveBitCount, + EOpSubgroupBallotFindLSB, + EOpSubgroupBallotFindMSB, + EOpSubgroupShuffle, + EOpSubgroupShuffleXor, + EOpSubgroupShuffleUp, + EOpSubgroupShuffleDown, + EOpSubgroupAdd, + EOpSubgroupMul, + EOpSubgroupMin, + EOpSubgroupMax, + EOpSubgroupAnd, + EOpSubgroupOr, + EOpSubgroupXor, + EOpSubgroupInclusiveAdd, + EOpSubgroupInclusiveMul, + EOpSubgroupInclusiveMin, + EOpSubgroupInclusiveMax, + EOpSubgroupInclusiveAnd, + EOpSubgroupInclusiveOr, + EOpSubgroupInclusiveXor, + EOpSubgroupExclusiveAdd, + EOpSubgroupExclusiveMul, + EOpSubgroupExclusiveMin, + EOpSubgroupExclusiveMax, + EOpSubgroupExclusiveAnd, + EOpSubgroupExclusiveOr, + EOpSubgroupExclusiveXor, + EOpSubgroupClusteredAdd, + EOpSubgroupClusteredMul, + EOpSubgroupClusteredMin, + EOpSubgroupClusteredMax, + EOpSubgroupClusteredAnd, + EOpSubgroupClusteredOr, + EOpSubgroupClusteredXor, + EOpSubgroupQuadBroadcast, + EOpSubgroupQuadSwapHorizontal, + EOpSubgroupQuadSwapVertical, + EOpSubgroupQuadSwapDiagonal, + + EOpSubgroupPartition, + EOpSubgroupPartitionedAdd, + 
EOpSubgroupPartitionedMul, + EOpSubgroupPartitionedMin, + EOpSubgroupPartitionedMax, + EOpSubgroupPartitionedAnd, + EOpSubgroupPartitionedOr, + EOpSubgroupPartitionedXor, + EOpSubgroupPartitionedInclusiveAdd, + EOpSubgroupPartitionedInclusiveMul, + EOpSubgroupPartitionedInclusiveMin, + EOpSubgroupPartitionedInclusiveMax, + EOpSubgroupPartitionedInclusiveAnd, + EOpSubgroupPartitionedInclusiveOr, + EOpSubgroupPartitionedInclusiveXor, + EOpSubgroupPartitionedExclusiveAdd, + EOpSubgroupPartitionedExclusiveMul, + EOpSubgroupPartitionedExclusiveMin, + EOpSubgroupPartitionedExclusiveMax, + EOpSubgroupPartitionedExclusiveAnd, + EOpSubgroupPartitionedExclusiveOr, + EOpSubgroupPartitionedExclusiveXor, + + EOpSubgroupGuardStop, + + EOpMinInvocations, + EOpMaxInvocations, + EOpAddInvocations, + EOpMinInvocationsNonUniform, + EOpMaxInvocationsNonUniform, + EOpAddInvocationsNonUniform, + EOpMinInvocationsInclusiveScan, + EOpMaxInvocationsInclusiveScan, + EOpAddInvocationsInclusiveScan, + EOpMinInvocationsInclusiveScanNonUniform, + EOpMaxInvocationsInclusiveScanNonUniform, + EOpAddInvocationsInclusiveScanNonUniform, + EOpMinInvocationsExclusiveScan, + EOpMaxInvocationsExclusiveScan, + EOpAddInvocationsExclusiveScan, + EOpMinInvocationsExclusiveScanNonUniform, + EOpMaxInvocationsExclusiveScanNonUniform, + EOpAddInvocationsExclusiveScanNonUniform, + EOpSwizzleInvocations, + EOpSwizzleInvocationsMasked, + EOpWriteInvocation, + EOpMbcnt, + + EOpCubeFaceIndex, + EOpCubeFaceCoord, + EOpTime, + + EOpAtomicAdd, + EOpAtomicMin, + EOpAtomicMax, + EOpAtomicAnd, + EOpAtomicOr, + EOpAtomicXor, + EOpAtomicExchange, + EOpAtomicCompSwap, + EOpAtomicLoad, + EOpAtomicStore, + + EOpAtomicCounterIncrement, // results in pre-increment value + EOpAtomicCounterDecrement, // results in post-decrement value + EOpAtomicCounter, + EOpAtomicCounterAdd, + EOpAtomicCounterSubtract, + EOpAtomicCounterMin, + EOpAtomicCounterMax, + EOpAtomicCounterAnd, + EOpAtomicCounterOr, + EOpAtomicCounterXor, + EOpAtomicCounterExchange, + EOpAtomicCounterCompSwap, + + EOpAny, + EOpAll, + + EOpCooperativeMatrixLoad, + EOpCooperativeMatrixStore, + EOpCooperativeMatrixMulAdd, + + EOpBeginInvocationInterlock, // Fragment only + EOpEndInvocationInterlock, // Fragment only + + EOpIsHelperInvocation, + + EOpDebugPrintf, + + // + // Branch + // + + EOpKill, // Fragment only + EOpReturn, + EOpBreak, + EOpContinue, + EOpCase, + EOpDefault, + EOpDemote, // Fragment only + + // + // Constructors + // + + EOpConstructGuardStart, + EOpConstructInt, // these first scalar forms also identify what implicit conversion is needed + EOpConstructUint, + EOpConstructInt8, + EOpConstructUint8, + EOpConstructInt16, + EOpConstructUint16, + EOpConstructInt64, + EOpConstructUint64, + EOpConstructBool, + EOpConstructFloat, + EOpConstructDouble, + // Keep vector and matrix constructors in a consistent relative order for + // TParseContext::constructBuiltIn, which converts between 8/16/32 bit + // vector constructors + EOpConstructVec2, + EOpConstructVec3, + EOpConstructVec4, + EOpConstructMat2x2, + EOpConstructMat2x3, + EOpConstructMat2x4, + EOpConstructMat3x2, + EOpConstructMat3x3, + EOpConstructMat3x4, + EOpConstructMat4x2, + EOpConstructMat4x3, + EOpConstructMat4x4, + EOpConstructDVec2, + EOpConstructDVec3, + EOpConstructDVec4, + EOpConstructBVec2, + EOpConstructBVec3, + EOpConstructBVec4, + EOpConstructI8Vec2, + EOpConstructI8Vec3, + EOpConstructI8Vec4, + EOpConstructU8Vec2, + EOpConstructU8Vec3, + EOpConstructU8Vec4, + EOpConstructI16Vec2, + EOpConstructI16Vec3, + 
EOpConstructI16Vec4, + EOpConstructU16Vec2, + EOpConstructU16Vec3, + EOpConstructU16Vec4, + EOpConstructIVec2, + EOpConstructIVec3, + EOpConstructIVec4, + EOpConstructUVec2, + EOpConstructUVec3, + EOpConstructUVec4, + EOpConstructI64Vec2, + EOpConstructI64Vec3, + EOpConstructI64Vec4, + EOpConstructU64Vec2, + EOpConstructU64Vec3, + EOpConstructU64Vec4, + EOpConstructDMat2x2, + EOpConstructDMat2x3, + EOpConstructDMat2x4, + EOpConstructDMat3x2, + EOpConstructDMat3x3, + EOpConstructDMat3x4, + EOpConstructDMat4x2, + EOpConstructDMat4x3, + EOpConstructDMat4x4, + EOpConstructIMat2x2, + EOpConstructIMat2x3, + EOpConstructIMat2x4, + EOpConstructIMat3x2, + EOpConstructIMat3x3, + EOpConstructIMat3x4, + EOpConstructIMat4x2, + EOpConstructIMat4x3, + EOpConstructIMat4x4, + EOpConstructUMat2x2, + EOpConstructUMat2x3, + EOpConstructUMat2x4, + EOpConstructUMat3x2, + EOpConstructUMat3x3, + EOpConstructUMat3x4, + EOpConstructUMat4x2, + EOpConstructUMat4x3, + EOpConstructUMat4x4, + EOpConstructBMat2x2, + EOpConstructBMat2x3, + EOpConstructBMat2x4, + EOpConstructBMat3x2, + EOpConstructBMat3x3, + EOpConstructBMat3x4, + EOpConstructBMat4x2, + EOpConstructBMat4x3, + EOpConstructBMat4x4, + EOpConstructFloat16, + EOpConstructF16Vec2, + EOpConstructF16Vec3, + EOpConstructF16Vec4, + EOpConstructF16Mat2x2, + EOpConstructF16Mat2x3, + EOpConstructF16Mat2x4, + EOpConstructF16Mat3x2, + EOpConstructF16Mat3x3, + EOpConstructF16Mat3x4, + EOpConstructF16Mat4x2, + EOpConstructF16Mat4x3, + EOpConstructF16Mat4x4, + EOpConstructStruct, + EOpConstructTextureSampler, + EOpConstructNonuniform, // expected to be transformed away, not present in final AST + EOpConstructReference, + EOpConstructCooperativeMatrix, + EOpConstructGuardEnd, + + // + // moves + // + + EOpAssign, + EOpAddAssign, + EOpSubAssign, + EOpMulAssign, + EOpVectorTimesMatrixAssign, + EOpVectorTimesScalarAssign, + EOpMatrixTimesScalarAssign, + EOpMatrixTimesMatrixAssign, + EOpDivAssign, + EOpModAssign, + EOpAndAssign, + EOpInclusiveOrAssign, + EOpExclusiveOrAssign, + EOpLeftShiftAssign, + EOpRightShiftAssign, + + // + // Array operators + // + + // Can apply to arrays, vectors, or matrices. + // Can be decomposed to a constant at compile time, but this does not always happen, + // due to link-time effects. So, consumer can expect either a link-time sized or + // run-time sized array. 
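    // For example, 'v.length()' on a 'vec4 v' can fold to the constant 4 at compile
    // time, while '.length()' on the run-time sized last member of an SSBO block
    // cannot, so in that case the EOpArrayLength node survives into the generated code.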
+ EOpArrayLength, + + // + // Image operations + // + + EOpImageGuardBegin, + + EOpImageQuerySize, + EOpImageQuerySamples, + EOpImageLoad, + EOpImageStore, + EOpImageLoadLod, + EOpImageStoreLod, + EOpImageAtomicAdd, + EOpImageAtomicMin, + EOpImageAtomicMax, + EOpImageAtomicAnd, + EOpImageAtomicOr, + EOpImageAtomicXor, + EOpImageAtomicExchange, + EOpImageAtomicCompSwap, + EOpImageAtomicLoad, + EOpImageAtomicStore, + + EOpSubpassLoad, + EOpSubpassLoadMS, + EOpSparseImageLoad, + EOpSparseImageLoadLod, + + EOpImageGuardEnd, + + // + // Texture operations + // + + EOpTextureGuardBegin, + + EOpTextureQuerySize, + EOpTextureQueryLod, + EOpTextureQueryLevels, + EOpTextureQuerySamples, + + EOpSamplingGuardBegin, + + EOpTexture, + EOpTextureProj, + EOpTextureLod, + EOpTextureOffset, + EOpTextureFetch, + EOpTextureFetchOffset, + EOpTextureProjOffset, + EOpTextureLodOffset, + EOpTextureProjLod, + EOpTextureProjLodOffset, + EOpTextureGrad, + EOpTextureGradOffset, + EOpTextureProjGrad, + EOpTextureProjGradOffset, + EOpTextureGather, + EOpTextureGatherOffset, + EOpTextureGatherOffsets, + EOpTextureClamp, + EOpTextureOffsetClamp, + EOpTextureGradClamp, + EOpTextureGradOffsetClamp, + EOpTextureGatherLod, + EOpTextureGatherLodOffset, + EOpTextureGatherLodOffsets, + EOpFragmentMaskFetch, + EOpFragmentFetch, + + EOpSparseTextureGuardBegin, + + EOpSparseTexture, + EOpSparseTextureLod, + EOpSparseTextureOffset, + EOpSparseTextureFetch, + EOpSparseTextureFetchOffset, + EOpSparseTextureLodOffset, + EOpSparseTextureGrad, + EOpSparseTextureGradOffset, + EOpSparseTextureGather, + EOpSparseTextureGatherOffset, + EOpSparseTextureGatherOffsets, + EOpSparseTexelsResident, + EOpSparseTextureClamp, + EOpSparseTextureOffsetClamp, + EOpSparseTextureGradClamp, + EOpSparseTextureGradOffsetClamp, + EOpSparseTextureGatherLod, + EOpSparseTextureGatherLodOffset, + EOpSparseTextureGatherLodOffsets, + + EOpSparseTextureGuardEnd, + + EOpImageFootprintGuardBegin, + EOpImageSampleFootprintNV, + EOpImageSampleFootprintClampNV, + EOpImageSampleFootprintLodNV, + EOpImageSampleFootprintGradNV, + EOpImageSampleFootprintGradClampNV, + EOpImageFootprintGuardEnd, + EOpSamplingGuardEnd, + EOpTextureGuardEnd, + + // + // Integer operations + // + + EOpAddCarry, + EOpSubBorrow, + EOpUMulExtended, + EOpIMulExtended, + EOpBitfieldExtract, + EOpBitfieldInsert, + EOpBitFieldReverse, + EOpBitCount, + EOpFindLSB, + EOpFindMSB, + + EOpCountLeadingZeros, + EOpCountTrailingZeros, + EOpAbsDifference, + EOpAddSaturate, + EOpSubSaturate, + EOpAverage, + EOpAverageRounded, + EOpMul32x16, + + EOpTrace, + EOpReportIntersection, + EOpIgnoreIntersection, + EOpTerminateRay, + EOpExecuteCallable, + EOpWritePackedPrimitiveIndices4x8NV, + + // + // GL_EXT_ray_query operations + // + + EOpRayQueryInitialize, + EOpRayQueryTerminate, + EOpRayQueryGenerateIntersection, + EOpRayQueryConfirmIntersection, + EOpRayQueryProceed, + EOpRayQueryGetIntersectionType, + EOpRayQueryGetRayTMin, + EOpRayQueryGetRayFlags, + EOpRayQueryGetIntersectionT, + EOpRayQueryGetIntersectionInstanceCustomIndex, + EOpRayQueryGetIntersectionInstanceId, + EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset, + EOpRayQueryGetIntersectionGeometryIndex, + EOpRayQueryGetIntersectionPrimitiveIndex, + EOpRayQueryGetIntersectionBarycentrics, + EOpRayQueryGetIntersectionFrontFace, + EOpRayQueryGetIntersectionCandidateAABBOpaque, + EOpRayQueryGetIntersectionObjectRayDirection, + EOpRayQueryGetIntersectionObjectRayOrigin, + EOpRayQueryGetWorldRayDirection, + EOpRayQueryGetWorldRayOrigin, + 
EOpRayQueryGetIntersectionObjectToWorld, + EOpRayQueryGetIntersectionWorldToObject, + + // + // HLSL operations + // + + EOpClip, // discard if input value < 0 + EOpIsFinite, + EOpLog10, // base 10 log + EOpRcp, // 1/x + EOpSaturate, // clamp from 0 to 1 + EOpSinCos, // sin and cos in out parameters + EOpGenMul, // mul(x,y) on any of mat/vec/scalars + EOpDst, // x = 1, y=src0.y * src1.y, z=src0.z, w=src1.w + EOpInterlockedAdd, // atomic ops, but uses [optional] out arg instead of return + EOpInterlockedAnd, // ... + EOpInterlockedCompareExchange, // ... + EOpInterlockedCompareStore, // ... + EOpInterlockedExchange, // ... + EOpInterlockedMax, // ... + EOpInterlockedMin, // ... + EOpInterlockedOr, // ... + EOpInterlockedXor, // ... + EOpAllMemoryBarrierWithGroupSync, // memory barriers without non-hlsl AST equivalents + EOpDeviceMemoryBarrier, // ... + EOpDeviceMemoryBarrierWithGroupSync, // ... + EOpWorkgroupMemoryBarrier, // ... + EOpWorkgroupMemoryBarrierWithGroupSync, // ... + EOpEvaluateAttributeSnapped, // InterpolateAtOffset with int position on 16x16 grid + EOpF32tof16, // HLSL conversion: half of a PackHalf2x16 + EOpF16tof32, // HLSL conversion: half of an UnpackHalf2x16 + EOpLit, // HLSL lighting coefficient vector + EOpTextureBias, // HLSL texture bias: will be lowered to EOpTexture + EOpAsDouble, // slightly different from EOpUint64BitsToDouble + EOpD3DCOLORtoUBYTE4, // convert and swizzle 4-component color to UBYTE4 range + + EOpMethodSample, // Texture object methods. These are translated to existing + EOpMethodSampleBias, // AST methods, and exist to represent HLSL semantics until that + EOpMethodSampleCmp, // translation is performed. See HlslParseContext::decomposeSampleMethods(). + EOpMethodSampleCmpLevelZero, // ... + EOpMethodSampleGrad, // ... + EOpMethodSampleLevel, // ... + EOpMethodLoad, // ... + EOpMethodGetDimensions, // ... + EOpMethodGetSamplePosition, // ... + EOpMethodGather, // ... + EOpMethodCalculateLevelOfDetail, // ... + EOpMethodCalculateLevelOfDetailUnclamped, // ... + + // Load already defined above for textures + EOpMethodLoad2, // Structure buffer object methods. These are translated to existing + EOpMethodLoad3, // AST methods, and exist to represent HLSL semantics until that + EOpMethodLoad4, // translation is performed. See HlslParseContext::decomposeSampleMethods(). + EOpMethodStore, // ... + EOpMethodStore2, // ... + EOpMethodStore3, // ... + EOpMethodStore4, // ... + EOpMethodIncrementCounter, // ... + EOpMethodDecrementCounter, // ... + // EOpMethodAppend is defined for geo shaders below + EOpMethodConsume, + + // SM5 texture methods + EOpMethodGatherRed, // These are covered under the above EOpMethodSample comment about + EOpMethodGatherGreen, // translation to existing AST opcodes. They exist temporarily + EOpMethodGatherBlue, // because HLSL arguments are slightly different. + EOpMethodGatherAlpha, // ... + EOpMethodGatherCmp, // ... + EOpMethodGatherCmpRed, // ... + EOpMethodGatherCmpGreen, // ... + EOpMethodGatherCmpBlue, // ... + EOpMethodGatherCmpAlpha, // ... + + // geometry methods + EOpMethodAppend, // Geometry shader methods + EOpMethodRestartStrip, // ... + + // matrix + EOpMatrixSwizzle, // select multiple matrix components (non-column) + + // SM6 wave ops + EOpWaveGetLaneCount, // Will decompose to gl_SubgroupSize. + EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID. + EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()). 
+ EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()). + + // Shader Clock Ops + EOpReadClockSubgroupKHR, + EOpReadClockDeviceKHR, +}; + +class TIntermTraverser; +class TIntermOperator; +class TIntermAggregate; +class TIntermUnary; +class TIntermBinary; +class TIntermConstantUnion; +class TIntermSelection; +class TIntermSwitch; +class TIntermBranch; +class TIntermTyped; +class TIntermMethod; +class TIntermSymbol; +class TIntermLoop; + +} // end namespace glslang + +// +// Base class for the tree nodes +// +// (Put outside the glslang namespace, as it's used as part of the external interface.) +// +class TIntermNode { +public: + POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator()) + + TIntermNode() { loc.init(); } + virtual const glslang::TSourceLoc& getLoc() const { return loc; } + virtual void setLoc(const glslang::TSourceLoc& l) { loc = l; } + virtual void traverse(glslang::TIntermTraverser*) = 0; + virtual glslang::TIntermTyped* getAsTyped() { return 0; } + virtual glslang::TIntermOperator* getAsOperator() { return 0; } + virtual glslang::TIntermConstantUnion* getAsConstantUnion() { return 0; } + virtual glslang::TIntermAggregate* getAsAggregate() { return 0; } + virtual glslang::TIntermUnary* getAsUnaryNode() { return 0; } + virtual glslang::TIntermBinary* getAsBinaryNode() { return 0; } + virtual glslang::TIntermSelection* getAsSelectionNode() { return 0; } + virtual glslang::TIntermSwitch* getAsSwitchNode() { return 0; } + virtual glslang::TIntermMethod* getAsMethodNode() { return 0; } + virtual glslang::TIntermSymbol* getAsSymbolNode() { return 0; } + virtual glslang::TIntermBranch* getAsBranchNode() { return 0; } + virtual glslang::TIntermLoop* getAsLoopNode() { return 0; } + + virtual const glslang::TIntermTyped* getAsTyped() const { return 0; } + virtual const glslang::TIntermOperator* getAsOperator() const { return 0; } + virtual const glslang::TIntermConstantUnion* getAsConstantUnion() const { return 0; } + virtual const glslang::TIntermAggregate* getAsAggregate() const { return 0; } + virtual const glslang::TIntermUnary* getAsUnaryNode() const { return 0; } + virtual const glslang::TIntermBinary* getAsBinaryNode() const { return 0; } + virtual const glslang::TIntermSelection* getAsSelectionNode() const { return 0; } + virtual const glslang::TIntermSwitch* getAsSwitchNode() const { return 0; } + virtual const glslang::TIntermMethod* getAsMethodNode() const { return 0; } + virtual const glslang::TIntermSymbol* getAsSymbolNode() const { return 0; } + virtual const glslang::TIntermBranch* getAsBranchNode() const { return 0; } + virtual const glslang::TIntermLoop* getAsLoopNode() const { return 0; } + virtual ~TIntermNode() { } + +protected: + TIntermNode(const TIntermNode&); + TIntermNode& operator=(const TIntermNode&); + glslang::TSourceLoc loc; +}; + +namespace glslang { + +// +// This is just to help yacc. +// +struct TIntermNodePair { + TIntermNode* node1; + TIntermNode* node2; +}; + +// +// Intermediate class for nodes that have a type. 
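// A note on the TIntermNode interface above: the getAs*() virtuals are checked
// downcasts. Each concrete subclass overrides its own getAs*() to return 'this',
// while the TIntermNode defaults return 0, so consumers can branch on node kind
// without RTTI, e.g.  if (glslang::TIntermBinary* bin = node->getAsBinaryNode()) { ... }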
+// +class TIntermTyped : public TIntermNode { +public: + TIntermTyped(const TType& t) { type.shallowCopy(t); } + TIntermTyped(TBasicType basicType) { TType bt(basicType); type.shallowCopy(bt); } + virtual TIntermTyped* getAsTyped() { return this; } + virtual const TIntermTyped* getAsTyped() const { return this; } + virtual void setType(const TType& t) { type.shallowCopy(t); } + virtual const TType& getType() const { return type; } + virtual TType& getWritableType() { return type; } + + virtual TBasicType getBasicType() const { return type.getBasicType(); } + virtual TQualifier& getQualifier() { return type.getQualifier(); } + virtual const TQualifier& getQualifier() const { return type.getQualifier(); } + virtual void propagatePrecision(TPrecisionQualifier); + virtual int getVectorSize() const { return type.getVectorSize(); } + virtual int getMatrixCols() const { return type.getMatrixCols(); } + virtual int getMatrixRows() const { return type.getMatrixRows(); } + virtual bool isMatrix() const { return type.isMatrix(); } + virtual bool isArray() const { return type.isArray(); } + virtual bool isVector() const { return type.isVector(); } + virtual bool isScalar() const { return type.isScalar(); } + virtual bool isStruct() const { return type.isStruct(); } + virtual bool isFloatingDomain() const { return type.isFloatingDomain(); } + virtual bool isIntegerDomain() const { return type.isIntegerDomain(); } + bool isAtomic() const { return type.isAtomic(); } + bool isReference() const { return type.isReference(); } + TString getCompleteString() const { return type.getCompleteString(); } + +protected: + TIntermTyped& operator=(const TIntermTyped&); + TType type; +}; + +// +// Handle for, do-while, and while loops. +// +class TIntermLoop : public TIntermNode { +public: + TIntermLoop(TIntermNode* aBody, TIntermTyped* aTest, TIntermTyped* aTerminal, bool testFirst) : + body(aBody), + test(aTest), + terminal(aTerminal), + first(testFirst), + unroll(false), + dontUnroll(false), + dependency(0), + minIterations(0), + maxIterations(iterationsInfinite), + iterationMultiple(1), + peelCount(0), + partialCount(0) + { } + + virtual TIntermLoop* getAsLoopNode() { return this; } + virtual const TIntermLoop* getAsLoopNode() const { return this; } + virtual void traverse(TIntermTraverser*); + TIntermNode* getBody() const { return body; } + TIntermTyped* getTest() const { return test; } + TIntermTyped* getTerminal() const { return terminal; } + bool testFirst() const { return first; } + + void setUnroll() { unroll = true; } + void setDontUnroll() { + dontUnroll = true; + peelCount = 0; + partialCount = 0; + } + bool getUnroll() const { return unroll; } + bool getDontUnroll() const { return dontUnroll; } + + static const unsigned int dependencyInfinite = 0xFFFFFFFF; + static const unsigned int iterationsInfinite = 0xFFFFFFFF; + void setLoopDependency(int d) { dependency = d; } + int getLoopDependency() const { return dependency; } + + void setMinIterations(unsigned int v) { minIterations = v; } + unsigned int getMinIterations() const { return minIterations; } + void setMaxIterations(unsigned int v) { maxIterations = v; } + unsigned int getMaxIterations() const { return maxIterations; } + void setIterationMultiple(unsigned int v) { iterationMultiple = v; } + unsigned int getIterationMultiple() const { return iterationMultiple; } + void setPeelCount(unsigned int v) { + peelCount = v; + dontUnroll = false; + } + unsigned int getPeelCount() const { return peelCount; } + void setPartialCount(unsigned int v) { + 
partialCount = v; + dontUnroll = false; + } + unsigned int getPartialCount() const { return partialCount; } + +protected: + TIntermNode* body; // code to loop over + TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops + TIntermTyped* terminal; // exists for for-loops + bool first; // true for while and for, not for do-while + bool unroll; // true if unroll requested + bool dontUnroll; // true if request to not unroll + unsigned int dependency; // loop dependency hint; 0 means not set or unknown + unsigned int minIterations; // as per the SPIR-V specification + unsigned int maxIterations; // as per the SPIR-V specification + unsigned int iterationMultiple; // as per the SPIR-V specification + unsigned int peelCount; // as per the SPIR-V specification + unsigned int partialCount; // as per the SPIR-V specification +}; + +// +// Handle case, break, continue, return, and kill. +// +class TIntermBranch : public TIntermNode { +public: + TIntermBranch(TOperator op, TIntermTyped* e) : + flowOp(op), + expression(e) { } + virtual TIntermBranch* getAsBranchNode() { return this; } + virtual const TIntermBranch* getAsBranchNode() const { return this; } + virtual void traverse(TIntermTraverser*); + TOperator getFlowOp() const { return flowOp; } + TIntermTyped* getExpression() const { return expression; } + void setExpression(TIntermTyped* pExpression) { expression = pExpression; } + void updatePrecision(TPrecisionQualifier parentPrecision); +protected: + TOperator flowOp; + TIntermTyped* expression; +}; + +// +// Represent method names before seeing their calling signature +// or resolving them to operations. Just an expression as the base object +// and a textural name. +// +class TIntermMethod : public TIntermTyped { +public: + TIntermMethod(TIntermTyped* o, const TType& t, const TString& m) : TIntermTyped(t), object(o), method(m) { } + virtual TIntermMethod* getAsMethodNode() { return this; } + virtual const TIntermMethod* getAsMethodNode() const { return this; } + virtual const TString& getMethodName() const { return method; } + virtual TIntermTyped* getObject() const { return object; } + virtual void traverse(TIntermTraverser*); +protected: + TIntermTyped* object; + TString method; +}; + +// +// Nodes that correspond to symbols or constants in the source code. +// +class TIntermSymbol : public TIntermTyped { +public: + // if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. 
If sym comes from + // per process threadPoolAllocator, then it causes increased memory usage per compile + // it is essential to use "symbol = sym" to assign to symbol + TIntermSymbol(int i, const TString& n, const TType& t) + : TIntermTyped(t), id(i), +#ifndef GLSLANG_WEB + flattenSubset(-1), +#endif + constSubtree(nullptr) + { name = n; } + virtual int getId() const { return id; } + virtual void changeId(int i) { id = i; } + virtual const TString& getName() const { return name; } + virtual void traverse(TIntermTraverser*); + virtual TIntermSymbol* getAsSymbolNode() { return this; } + virtual const TIntermSymbol* getAsSymbolNode() const { return this; } + void setConstArray(const TConstUnionArray& c) { constArray = c; } + const TConstUnionArray& getConstArray() const { return constArray; } + void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; } + TIntermTyped* getConstSubtree() const { return constSubtree; } +#ifndef GLSLANG_WEB + void setFlattenSubset(int subset) { flattenSubset = subset; } + int getFlattenSubset() const { return flattenSubset; } // -1 means full object +#endif + + // This is meant for cases where a node has already been constructed, and + // later on, it becomes necessary to switch to a different symbol. + virtual void switchId(int newId) { id = newId; } + +protected: + int id; // the unique id of the symbol this node represents +#ifndef GLSLANG_WEB + int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced +#endif + TString name; // the name of the symbol this node represents + TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value + TIntermTyped* constSubtree; +}; + +class TIntermConstantUnion : public TIntermTyped { +public: + TIntermConstantUnion(const TConstUnionArray& ua, const TType& t) : TIntermTyped(t), constArray(ua), literal(false) { } + const TConstUnionArray& getConstArray() const { return constArray; } + virtual TIntermConstantUnion* getAsConstantUnion() { return this; } + virtual const TIntermConstantUnion* getAsConstantUnion() const { return this; } + virtual void traverse(TIntermTraverser*); + virtual TIntermTyped* fold(TOperator, const TIntermTyped*) const; + virtual TIntermTyped* fold(TOperator, const TType&) const; + void setLiteral() { literal = true; } + void setExpression() { literal = false; } + bool isLiteral() const { return literal; } + +protected: + TIntermConstantUnion& operator=(const TIntermConstantUnion&); + + const TConstUnionArray constArray; + bool literal; // true if node represents a literal in the source code +}; + +// Represent the independent aspects of a texturing TOperator +struct TCrackedTextureOp { + bool query; + bool proj; + bool lod; + bool fetch; + bool offset; + bool offsets; + bool gather; + bool grad; + bool subpass; + bool lodClamp; + bool fragMask; +}; + +// +// Intermediate class for node types that hold operators. 
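// TIntermOperator::crackTexture() below fills in a TCrackedTextureOp from a texturing
// TOperator; for example EOpTextureProjLodOffset cracks to proj, lod and offset all
// true, so each independent aspect of the operation can be inspected on its own.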
+// +class TIntermOperator : public TIntermTyped { +public: + virtual TIntermOperator* getAsOperator() { return this; } + virtual const TIntermOperator* getAsOperator() const { return this; } + TOperator getOp() const { return op; } + void setOp(TOperator newOp) { op = newOp; } + bool modifiesState() const; + bool isConstructor() const; + bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; } + bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; } +#ifdef GLSLANG_WEB + bool isImage() const { return false; } + bool isSparseTexture() const { return false; } + bool isImageFootprint() const { return false; } + bool isSparseImage() const { return false; } + bool isSubgroup() const { return false; } +#else + bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; } + bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; } + bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; } + bool isSparseImage() const { return op == EOpSparseImageLoad; } + bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; } +#endif + + void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; } + TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ? + operationPrecision : + type.getQualifier().precision; } + TString getCompleteString() const + { + TString cs = type.getCompleteString(); + if (getOperationPrecision() != type.getQualifier().precision) { + cs += ", operation at "; + cs += GetPrecisionQualifierString(getOperationPrecision()); + } + + return cs; + } + + // Crack the op into the individual dimensions of texturing operation. + void crackTexture(TSampler sampler, TCrackedTextureOp& cracked) const + { + cracked.query = false; + cracked.proj = false; + cracked.lod = false; + cracked.fetch = false; + cracked.offset = false; + cracked.offsets = false; + cracked.gather = false; + cracked.grad = false; + cracked.subpass = false; + cracked.lodClamp = false; + cracked.fragMask = false; + + switch (op) { + case EOpImageQuerySize: + case EOpImageQuerySamples: + case EOpTextureQuerySize: + case EOpTextureQueryLod: + case EOpTextureQueryLevels: + case EOpTextureQuerySamples: + case EOpSparseTexelsResident: + cracked.query = true; + break; + case EOpTexture: + case EOpSparseTexture: + break; + case EOpTextureProj: + cracked.proj = true; + break; + case EOpTextureLod: + case EOpSparseTextureLod: + cracked.lod = true; + break; + case EOpTextureOffset: + case EOpSparseTextureOffset: + cracked.offset = true; + break; + case EOpTextureFetch: + case EOpSparseTextureFetch: + cracked.fetch = true; + if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D) + cracked.lod = true; + break; + case EOpTextureFetchOffset: + case EOpSparseTextureFetchOffset: + cracked.fetch = true; + cracked.offset = true; + if (sampler.is1D() || (sampler.dim == Esd2D && ! 
sampler.isMultiSample()) || sampler.dim == Esd3D) + cracked.lod = true; + break; + case EOpTextureProjOffset: + cracked.offset = true; + cracked.proj = true; + break; + case EOpTextureLodOffset: + case EOpSparseTextureLodOffset: + cracked.offset = true; + cracked.lod = true; + break; + case EOpTextureProjLod: + cracked.lod = true; + cracked.proj = true; + break; + case EOpTextureProjLodOffset: + cracked.offset = true; + cracked.lod = true; + cracked.proj = true; + break; + case EOpTextureGrad: + case EOpSparseTextureGrad: + cracked.grad = true; + break; + case EOpTextureGradOffset: + case EOpSparseTextureGradOffset: + cracked.grad = true; + cracked.offset = true; + break; + case EOpTextureProjGrad: + cracked.grad = true; + cracked.proj = true; + break; + case EOpTextureProjGradOffset: + cracked.grad = true; + cracked.offset = true; + cracked.proj = true; + break; +#ifndef GLSLANG_WEB + case EOpTextureClamp: + case EOpSparseTextureClamp: + cracked.lodClamp = true; + break; + case EOpTextureOffsetClamp: + case EOpSparseTextureOffsetClamp: + cracked.offset = true; + cracked.lodClamp = true; + break; + case EOpTextureGradClamp: + case EOpSparseTextureGradClamp: + cracked.grad = true; + cracked.lodClamp = true; + break; + case EOpTextureGradOffsetClamp: + case EOpSparseTextureGradOffsetClamp: + cracked.grad = true; + cracked.offset = true; + cracked.lodClamp = true; + break; + case EOpTextureGather: + case EOpSparseTextureGather: + cracked.gather = true; + break; + case EOpTextureGatherOffset: + case EOpSparseTextureGatherOffset: + cracked.gather = true; + cracked.offset = true; + break; + case EOpTextureGatherOffsets: + case EOpSparseTextureGatherOffsets: + cracked.gather = true; + cracked.offsets = true; + break; + case EOpTextureGatherLod: + case EOpSparseTextureGatherLod: + cracked.gather = true; + cracked.lod = true; + break; + case EOpTextureGatherLodOffset: + case EOpSparseTextureGatherLodOffset: + cracked.gather = true; + cracked.offset = true; + cracked.lod = true; + break; + case EOpTextureGatherLodOffsets: + case EOpSparseTextureGatherLodOffsets: + cracked.gather = true; + cracked.offsets = true; + cracked.lod = true; + break; + case EOpImageLoadLod: + case EOpImageStoreLod: + case EOpSparseImageLoadLod: + cracked.lod = true; + break; + case EOpFragmentMaskFetch: + cracked.subpass = sampler.dim == EsdSubpass; + cracked.fragMask = true; + break; + case EOpFragmentFetch: + cracked.subpass = sampler.dim == EsdSubpass; + cracked.fragMask = true; + break; + case EOpImageSampleFootprintNV: + break; + case EOpImageSampleFootprintClampNV: + cracked.lodClamp = true; + break; + case EOpImageSampleFootprintLodNV: + cracked.lod = true; + break; + case EOpImageSampleFootprintGradNV: + cracked.grad = true; + break; + case EOpImageSampleFootprintGradClampNV: + cracked.lodClamp = true; + cracked.grad = true; + break; + case EOpSubpassLoad: + case EOpSubpassLoadMS: + cracked.subpass = true; + break; +#endif + default: + break; + } + } + +protected: + TIntermOperator(TOperator o) : TIntermTyped(EbtFloat), op(o), operationPrecision(EpqNone) {} + TIntermOperator(TOperator o, TType& t) : TIntermTyped(t), op(o), operationPrecision(EpqNone) {} + TOperator op; + // The result precision is in the inherited TType, and is usually meant to be both + // the operation precision and the result precision. 
However, some more complex things, + // like built-in function calls, distinguish between the two, in which case non-EqpNone + // 'operationPrecision' overrides the result precision as far as operation precision + // is concerned. + TPrecisionQualifier operationPrecision; +}; + +// +// Nodes for all the basic binary math operators. +// +class TIntermBinary : public TIntermOperator { +public: + TIntermBinary(TOperator o) : TIntermOperator(o) {} + virtual void traverse(TIntermTraverser*); + virtual void setLeft(TIntermTyped* n) { left = n; } + virtual void setRight(TIntermTyped* n) { right = n; } + virtual TIntermTyped* getLeft() const { return left; } + virtual TIntermTyped* getRight() const { return right; } + virtual TIntermBinary* getAsBinaryNode() { return this; } + virtual const TIntermBinary* getAsBinaryNode() const { return this; } + virtual void updatePrecision(); +protected: + TIntermTyped* left; + TIntermTyped* right; +}; + +// +// Nodes for unary math operators. +// +class TIntermUnary : public TIntermOperator { +public: + TIntermUnary(TOperator o, TType& t) : TIntermOperator(o, t), operand(0) {} + TIntermUnary(TOperator o) : TIntermOperator(o), operand(0) {} + virtual void traverse(TIntermTraverser*); + virtual void setOperand(TIntermTyped* o) { operand = o; } + virtual TIntermTyped* getOperand() { return operand; } + virtual const TIntermTyped* getOperand() const { return operand; } + virtual TIntermUnary* getAsUnaryNode() { return this; } + virtual const TIntermUnary* getAsUnaryNode() const { return this; } + virtual void updatePrecision(); +protected: + TIntermTyped* operand; +}; + +typedef TVector TIntermSequence; +typedef TVector TQualifierList; +// +// Nodes that operate on an arbitrary sized set of children. +// +class TIntermAggregate : public TIntermOperator { +public: + TIntermAggregate() : TIntermOperator(EOpNull), userDefined(false), pragmaTable(nullptr) { } + TIntermAggregate(TOperator o) : TIntermOperator(o), pragmaTable(nullptr) { } + ~TIntermAggregate() { delete pragmaTable; } + virtual TIntermAggregate* getAsAggregate() { return this; } + virtual const TIntermAggregate* getAsAggregate() const { return this; } + virtual void setOperator(TOperator o) { op = o; } + virtual TIntermSequence& getSequence() { return sequence; } + virtual const TIntermSequence& getSequence() const { return sequence; } + virtual void setName(const TString& n) { name = n; } + virtual const TString& getName() const { return name; } + virtual void traverse(TIntermTraverser*); + virtual void setUserDefined() { userDefined = true; } + virtual bool isUserDefined() { return userDefined; } + virtual TQualifierList& getQualifierList() { return qualifier; } + virtual const TQualifierList& getQualifierList() const { return qualifier; } + void setOptimize(bool o) { optimize = o; } + void setDebug(bool d) { debug = d; } + bool getOptimize() const { return optimize; } + bool getDebug() const { return debug; } + void setPragmaTable(const TPragmaTable& pTable); + const TPragmaTable& getPragmaTable() const { return *pragmaTable; } +protected: + TIntermAggregate(const TIntermAggregate&); // disallow copy constructor + TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator + TIntermSequence sequence; + TQualifierList qualifier; + TString name; + bool userDefined; // used for user defined function names + bool optimize; + bool debug; + TPragmaTable* pragmaTable; +}; + +// +// For if tests. 
+// +class TIntermSelection : public TIntermTyped { +public: + TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB) : + TIntermTyped(EbtVoid), condition(cond), trueBlock(trueB), falseBlock(falseB), + shortCircuit(true), + flatten(false), dontFlatten(false) {} + TIntermSelection(TIntermTyped* cond, TIntermNode* trueB, TIntermNode* falseB, const TType& type) : + TIntermTyped(type), condition(cond), trueBlock(trueB), falseBlock(falseB), + shortCircuit(true), + flatten(false), dontFlatten(false) {} + virtual void traverse(TIntermTraverser*); + virtual TIntermTyped* getCondition() const { return condition; } + virtual TIntermNode* getTrueBlock() const { return trueBlock; } + virtual TIntermNode* getFalseBlock() const { return falseBlock; } + virtual TIntermSelection* getAsSelectionNode() { return this; } + virtual const TIntermSelection* getAsSelectionNode() const { return this; } + + void setNoShortCircuit() { shortCircuit = false; } + bool getShortCircuit() const { return shortCircuit; } + + void setFlatten() { flatten = true; } + void setDontFlatten() { dontFlatten = true; } + bool getFlatten() const { return flatten; } + bool getDontFlatten() const { return dontFlatten; } + +protected: + TIntermTyped* condition; + TIntermNode* trueBlock; + TIntermNode* falseBlock; + bool shortCircuit; // normally all if-then-else and all GLSL ?: short-circuit, but HLSL ?: does not + bool flatten; // true if flatten requested + bool dontFlatten; // true if requested to not flatten +}; + +// +// For switch statements. Designed use is that a switch will have sequence of nodes +// that are either case/default nodes or a *single* node that represents all the code +// in between (if any) consecutive case/defaults. So, a traversal need only deal with +// 0 or 1 nodes per case/default statement. +// +class TIntermSwitch : public TIntermNode { +public: + TIntermSwitch(TIntermTyped* cond, TIntermAggregate* b) : condition(cond), body(b), + flatten(false), dontFlatten(false) {} + virtual void traverse(TIntermTraverser*); + virtual TIntermNode* getCondition() const { return condition; } + virtual TIntermAggregate* getBody() const { return body; } + virtual TIntermSwitch* getAsSwitchNode() { return this; } + virtual const TIntermSwitch* getAsSwitchNode() const { return this; } + + void setFlatten() { flatten = true; } + void setDontFlatten() { dontFlatten = true; } + bool getFlatten() const { return flatten; } + bool getDontFlatten() const { return dontFlatten; } + +protected: + TIntermTyped* condition; + TIntermAggregate* body; + bool flatten; // true if flatten requested + bool dontFlatten; // true if requested to not flatten +}; + +enum TVisit +{ + EvPreVisit, + EvInVisit, + EvPostVisit +}; + +// +// For traversing the tree. User should derive from this, +// put their traversal specific data in it, and then pass +// it to a Traverse method. +// +// When using this, just fill in the methods for nodes you want visited. +// Return false from a pre-visit to skip visiting that node's subtree. +// +// Explicitly set postVisit to true if you want post visiting, otherwise, +// filled in methods will only be called at pre-visit time (before processing +// the subtree). Similarly for inVisit for in-order visiting of nodes with +// multiple children. +// +// If you only want post-visits, explicitly turn off preVisit (and inVisit) +// and turn on postVisit. +// +// In general, for the visit*() methods, return true from interior nodes +// to have the traversal continue on to children. 
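// For example, a minimal sketch built only on the TIntermTraverser declaration just
// below (the class name and the symbol-counting purpose are purely illustrative):
//
//     class TSymbolCounter : public TIntermTraverser {
//     public:
//         TSymbolCounter() : TIntermTraverser(/* preVisit = */ true), count(0) { }
//         virtual void visitSymbol(TIntermSymbol*) { ++count; }          // leaf callback
//         virtual bool visitLoop(TVisit, TIntermLoop*) { return true; }  // keep descending
//         int count;
//     };
//
//     // usage, given the root of a parsed shader:
//     //     TSymbolCounter counter;
//     //     root->traverse(&counter);
//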
+// +// If you process children yourself, or don't want them processed, return false. +// +class TIntermTraverser { +public: + POOL_ALLOCATOR_NEW_DELETE(glslang::GetThreadPoolAllocator()) + TIntermTraverser(bool preVisit = true, bool inVisit = false, bool postVisit = false, bool rightToLeft = false) : + preVisit(preVisit), + inVisit(inVisit), + postVisit(postVisit), + rightToLeft(rightToLeft), + depth(0), + maxDepth(0) { } + virtual ~TIntermTraverser() { } + + virtual void visitSymbol(TIntermSymbol*) { } + virtual void visitConstantUnion(TIntermConstantUnion*) { } + virtual bool visitBinary(TVisit, TIntermBinary*) { return true; } + virtual bool visitUnary(TVisit, TIntermUnary*) { return true; } + virtual bool visitSelection(TVisit, TIntermSelection*) { return true; } + virtual bool visitAggregate(TVisit, TIntermAggregate*) { return true; } + virtual bool visitLoop(TVisit, TIntermLoop*) { return true; } + virtual bool visitBranch(TVisit, TIntermBranch*) { return true; } + virtual bool visitSwitch(TVisit, TIntermSwitch*) { return true; } + + int getMaxDepth() const { return maxDepth; } + + void incrementDepth(TIntermNode *current) + { + depth++; + maxDepth = (std::max)(maxDepth, depth); + path.push_back(current); + } + + void decrementDepth() + { + depth--; + path.pop_back(); + } + + TIntermNode *getParentNode() + { + return path.size() == 0 ? NULL : path.back(); + } + + const bool preVisit; + const bool inVisit; + const bool postVisit; + const bool rightToLeft; + +protected: + TIntermTraverser& operator=(TIntermTraverser&); + + int depth; + int maxDepth; + + // All the nodes from root to the current node's parent during traversing. + TVector path; +}; + +// KHR_vulkan_glsl says "Two arrays sized with specialization constants are the same type only if +// sized with the same symbol, involving no operations" +inline bool SameSpecializationConstants(TIntermTyped* node1, TIntermTyped* node2) +{ + return node1->getAsSymbolNode() && node2->getAsSymbolNode() && + node1->getAsSymbolNode()->getId() == node2->getAsSymbolNode()->getId(); +} + +} // end namespace glslang + +#endif // __INTERMEDIATE_H diff --git a/src/third_party/glslang/glslang/MachineIndependent/Constant.cpp b/src/third_party/glslang/glslang/MachineIndependent/Constant.cpp new file mode 100644 index 0000000..e21cf42 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Constant.cpp @@ -0,0 +1,1428 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2018-2020 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "localintermediate.h" +#include +#include +#include +#include + +namespace { + +using namespace glslang; + +typedef union { + double d; + int i[2]; +} DoubleIntUnion; + +// Some helper functions + +bool isNan(double x) +{ + DoubleIntUnion u; + // tough to find a platform independent library function, do it directly + u.d = x; + int bitPatternL = u.i[0]; + int bitPatternH = u.i[1]; + return (bitPatternH & 0x7ff80000) == 0x7ff80000 && + ((bitPatternH & 0xFFFFF) != 0 || bitPatternL != 0); +} + +bool isInf(double x) +{ + DoubleIntUnion u; + // tough to find a platform independent library function, do it directly + u.d = x; + int bitPatternL = u.i[0]; + int bitPatternH = u.i[1]; + return (bitPatternH & 0x7ff00000) == 0x7ff00000 && + (bitPatternH & 0xFFFFF) == 0 && bitPatternL == 0; +} + +const double pi = 3.1415926535897932384626433832795; + +} // end anonymous namespace + + +namespace glslang { + +// +// The fold functions see if an operation on a constant can be done in place, +// without generating run-time code. +// +// Returns the node to keep using, which may or may not be the node passed in. +// +// Note: As of version 1.2, all constant operations must be folded. It is +// not opportunistic, but rather a semantic requirement. +// + +// +// Do folding between a pair of nodes. +// 'this' is the left-hand operand and 'rightConstantNode' is the right-hand operand. +// +// Returns a new node representing the result. +// +TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* rightConstantNode) const +{ + // For most cases, the return type matches the argument type, so set that + // up and just code to exceptions below. 
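
For illustration only, here is a minimal standalone sketch of the scalar-smear step that the binary fold below applies when one operand is a scalar and the other a vector (the source's own example is vec4(2,3,4,5) + 1.2). It uses plain std::vector<double> and hypothetical smear()/foldAdd() helpers rather than glslang's TConstUnionArray, so it is a sketch of the idea, not the library's code.

    #include <cassert>
    #include <iostream>
    #include <vector>

    // Replicate a scalar so both operands have the same component count,
    // mirroring the "smearedArray" step in the binary fold.
    static std::vector<double> smear(double scalar, size_t comps) {
        return std::vector<double>(comps, scalar);
    }

    // Fold a component-wise binary add between two constant operands,
    // smearing whichever side is scalar.
    static std::vector<double> foldAdd(std::vector<double> lhs, std::vector<double> rhs) {
        if (lhs.size() == 1 && rhs.size() > 1)
            lhs = smear(lhs[0], rhs.size());
        else if (rhs.size() == 1 && lhs.size() > 1)
            rhs = smear(rhs[0], lhs.size());
        assert(lhs.size() == rhs.size());

        std::vector<double> result(lhs.size());
        for (size_t i = 0; i < result.size(); ++i)
            result[i] = lhs[i] + rhs[i];
        return result;
    }

    int main() {
        // vec4(2,3,4,5) + 1.2  ->  3.2 4.2 5.2 6.2
        for (double d : foldAdd({2, 3, 4, 5}, {1.2}))
            std::cout << d << ' ';
        std::cout << '\n';
    }
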
+ TType returnType; + returnType.shallowCopy(getType()); + + // + // A pair of nodes is to be folded together + // + + const TIntermConstantUnion *rightNode = rightConstantNode->getAsConstantUnion(); + TConstUnionArray leftUnionArray = getConstArray(); + TConstUnionArray rightUnionArray = rightNode->getConstArray(); + + // Figure out the size of the result + int newComps; + int constComps; + switch(op) { + case EOpMatrixTimesMatrix: + newComps = rightNode->getMatrixCols() * getMatrixRows(); + break; + case EOpMatrixTimesVector: + newComps = getMatrixRows(); + break; + case EOpVectorTimesMatrix: + newComps = rightNode->getMatrixCols(); + break; + default: + newComps = getType().computeNumComponents(); + constComps = rightConstantNode->getType().computeNumComponents(); + if (constComps == 1 && newComps > 1) { + // for a case like vec4 f = vec4(2,3,4,5) + 1.2; + TConstUnionArray smearedArray(newComps, rightNode->getConstArray()[0]); + rightUnionArray = smearedArray; + } else if (constComps > 1 && newComps == 1) { + // for a case like vec4 f = 1.2 + vec4(2,3,4,5); + newComps = constComps; + rightUnionArray = rightNode->getConstArray(); + TConstUnionArray smearedArray(newComps, getConstArray()[0]); + leftUnionArray = smearedArray; + returnType.shallowCopy(rightNode->getType()); + } + break; + } + + TConstUnionArray newConstArray(newComps); + TType constBool(EbtBool, EvqConst); + + switch(op) { + case EOpAdd: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] + rightUnionArray[i]; + break; + case EOpSub: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] - rightUnionArray[i]; + break; + + case EOpMul: + case EOpVectorTimesScalar: + case EOpMatrixTimesScalar: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] * rightUnionArray[i]; + break; + case EOpMatrixTimesMatrix: + for (int row = 0; row < getMatrixRows(); row++) { + for (int column = 0; column < rightNode->getMatrixCols(); column++) { + double sum = 0.0f; + for (int i = 0; i < rightNode->getMatrixRows(); i++) + sum += leftUnionArray[i * getMatrixRows() + row].getDConst() * rightUnionArray[column * rightNode->getMatrixRows() + i].getDConst(); + newConstArray[column * getMatrixRows() + row].setDConst(sum); + } + } + returnType.shallowCopy(TType(getType().getBasicType(), EvqConst, 0, rightNode->getMatrixCols(), getMatrixRows())); + break; + case EOpDiv: + for (int i = 0; i < newComps; i++) { + switch (getType().getBasicType()) { + case EbtDouble: + case EbtFloat: + case EbtFloat16: + if (rightUnionArray[i].getDConst() != 0.0) + newConstArray[i].setDConst(leftUnionArray[i].getDConst() / rightUnionArray[i].getDConst()); + else if (leftUnionArray[i].getDConst() > 0.0) + newConstArray[i].setDConst((double)INFINITY); + else if (leftUnionArray[i].getDConst() < 0.0) + newConstArray[i].setDConst(-(double)INFINITY); + else + newConstArray[i].setDConst((double)NAN); + break; + + case EbtInt: + if (rightUnionArray[i] == 0) + newConstArray[i].setIConst(0x7FFFFFFF); + else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll) + newConstArray[i].setIConst((int)-0x80000000ll); + else + newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst()); + break; + + case EbtUint: + if (rightUnionArray[i] == 0u) + newConstArray[i].setUConst(0xFFFFFFFFu); + else + newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst()); + break; + +#ifndef GLSLANG_WEB + case EbtInt8: + if 
(rightUnionArray[i] == (signed char)0) + newConstArray[i].setI8Const((signed char)0x7F); + else if (rightUnionArray[i].getI8Const() == (signed char)-1 && leftUnionArray[i].getI8Const() == (signed char)-0x80) + newConstArray[i].setI8Const((signed char)-0x80); + else + newConstArray[i].setI8Const(leftUnionArray[i].getI8Const() / rightUnionArray[i].getI8Const()); + break; + + case EbtUint8: + if (rightUnionArray[i] == (unsigned char)0u) + newConstArray[i].setU8Const((unsigned char)0xFFu); + else + newConstArray[i].setU8Const(leftUnionArray[i].getU8Const() / rightUnionArray[i].getU8Const()); + break; + + case EbtInt16: + if (rightUnionArray[i] == (signed short)0) + newConstArray[i].setI16Const((signed short)0x7FFF); + else if (rightUnionArray[i].getI16Const() == (signed short)-1 && leftUnionArray[i].getI16Const() == (signed short)-0x8000) + newConstArray[i].setI16Const((signed short)-0x8000); + else + newConstArray[i].setI16Const(leftUnionArray[i].getI16Const() / rightUnionArray[i].getI16Const()); + break; + + case EbtUint16: + if (rightUnionArray[i] == (unsigned short)0u) + newConstArray[i].setU16Const((unsigned short)0xFFFFu); + else + newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const()); + break; + + case EbtInt64: + if (rightUnionArray[i] == 0ll) + newConstArray[i].setI64Const(0x7FFFFFFFFFFFFFFFll); + else if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == (long long)-0x8000000000000000ll) + newConstArray[i].setI64Const((long long)-0x8000000000000000ll); + else + newConstArray[i].setI64Const(leftUnionArray[i].getI64Const() / rightUnionArray[i].getI64Const()); + break; + + case EbtUint64: + if (rightUnionArray[i] == 0ull) + newConstArray[i].setU64Const(0xFFFFFFFFFFFFFFFFull); + else + newConstArray[i].setU64Const(leftUnionArray[i].getU64Const() / rightUnionArray[i].getU64Const()); + break; + default: + return 0; +#endif + } + } + break; + + case EOpMatrixTimesVector: + for (int i = 0; i < getMatrixRows(); i++) { + double sum = 0.0f; + for (int j = 0; j < rightNode->getVectorSize(); j++) { + sum += leftUnionArray[j*getMatrixRows() + i].getDConst() * rightUnionArray[j].getDConst(); + } + newConstArray[i].setDConst(sum); + } + + returnType.shallowCopy(TType(getBasicType(), EvqConst, getMatrixRows())); + break; + + case EOpVectorTimesMatrix: + for (int i = 0; i < rightNode->getMatrixCols(); i++) { + double sum = 0.0f; + for (int j = 0; j < getVectorSize(); j++) + sum += leftUnionArray[j].getDConst() * rightUnionArray[i*rightNode->getMatrixRows() + j].getDConst(); + newConstArray[i].setDConst(sum); + } + + returnType.shallowCopy(TType(getBasicType(), EvqConst, rightNode->getMatrixCols())); + break; + + case EOpMod: + for (int i = 0; i < newComps; i++) { + if (rightUnionArray[i] == 0) + newConstArray[i] = leftUnionArray[i]; + else { + switch (getType().getBasicType()) { + case EbtInt: + if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == INT_MIN) { + newConstArray[i].setIConst(0); + break; + } else goto modulo_default; +#ifndef GLSLANG_WEB + case EbtInt64: + if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) { + newConstArray[i].setI64Const(0); + break; + } else goto modulo_default; + case EbtInt16: + if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) { + newConstArray[i].setIConst(0); + break; + } else goto modulo_default; +#endif + default: + modulo_default: + newConstArray[i] = leftUnionArray[i] % rightUnionArray[i]; + } 
+ } + } + break; + + case EOpRightShift: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] >> rightUnionArray[i]; + break; + + case EOpLeftShift: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] << rightUnionArray[i]; + break; + + case EOpAnd: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] & rightUnionArray[i]; + break; + case EOpInclusiveOr: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] | rightUnionArray[i]; + break; + case EOpExclusiveOr: + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] ^ rightUnionArray[i]; + break; + + case EOpLogicalAnd: // this code is written for possible future use, will not get executed currently + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] && rightUnionArray[i]; + break; + + case EOpLogicalOr: // this code is written for possible future use, will not get executed currently + for (int i = 0; i < newComps; i++) + newConstArray[i] = leftUnionArray[i] || rightUnionArray[i]; + break; + + case EOpLogicalXor: + for (int i = 0; i < newComps; i++) { + switch (getType().getBasicType()) { + case EbtBool: newConstArray[i].setBConst((leftUnionArray[i] == rightUnionArray[i]) ? false : true); break; + default: assert(false && "Default missing"); + } + } + break; + + case EOpLessThan: + newConstArray[0].setBConst(leftUnionArray[0] < rightUnionArray[0]); + returnType.shallowCopy(constBool); + break; + case EOpGreaterThan: + newConstArray[0].setBConst(leftUnionArray[0] > rightUnionArray[0]); + returnType.shallowCopy(constBool); + break; + case EOpLessThanEqual: + newConstArray[0].setBConst(! (leftUnionArray[0] > rightUnionArray[0])); + returnType.shallowCopy(constBool); + break; + case EOpGreaterThanEqual: + newConstArray[0].setBConst(! (leftUnionArray[0] < rightUnionArray[0])); + returnType.shallowCopy(constBool); + break; + case EOpEqual: + newConstArray[0].setBConst(rightNode->getConstArray() == leftUnionArray); + returnType.shallowCopy(constBool); + break; + case EOpNotEqual: + newConstArray[0].setBConst(rightNode->getConstArray() != leftUnionArray); + returnType.shallowCopy(constBool); + break; + + default: + return 0; + } + + TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType); + newNode->setLoc(getLoc()); + + return newNode; +} + +// +// Do single unary node folding +// +// Returns a new node representing the result. +// +TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType) const +{ + // First, size the result, which is mostly the same as the argument's size, + // but not always, and classify what is componentwise. + // Also, eliminate cases that can't be compile-time constant. 
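
As a rough illustration of the sizing and classification described above, the sketch below folds one unary operator over a flat constant array: a reduction such as length() collapses to a single component, while negate and radians stay component-wise. The UnaryOp enum and foldUnary() helper are hypothetical stand-ins, not glslang API.

    #include <cmath>
    #include <iostream>
    #include <vector>

    enum class UnaryOp { Negate, Radians, Length };

    // Fold one unary operator over a flat constant array. Component-wise ops
    // keep the operand's size; a reduction like length() produces one component.
    static std::vector<double> foldUnary(UnaryOp op, const std::vector<double>& operand) {
        const double pi = 3.1415926535897932384626433832795;
        switch (op) {
        case UnaryOp::Length: {                       // not component-wise
            double sum = 0.0;
            for (double c : operand) sum += c * c;
            return { std::sqrt(sum) };
        }
        case UnaryOp::Negate: {                       // component-wise
            std::vector<double> out;
            for (double c : operand) out.push_back(-c);
            return out;
        }
        case UnaryOp::Radians: {                      // component-wise
            std::vector<double> out;
            for (double c : operand) out.push_back(c * pi / 180.0);
            return out;
        }
        }
        return {};
    }

    int main() {
        for (double d : foldUnary(UnaryOp::Length, {3.0, 4.0})) std::cout << d << '\n';  // 5
        for (double d : foldUnary(UnaryOp::Radians, {180.0}))   std::cout << d << '\n';  // ~3.14159
    }
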
+ int resultSize; + bool componentWise = true; + + int objectSize = getType().computeNumComponents(); + switch (op) { + case EOpDeterminant: + case EOpAny: + case EOpAll: + case EOpLength: + componentWise = false; + resultSize = 1; + break; + + case EOpEmitStreamVertex: + case EOpEndStreamPrimitive: + // These don't fold + return nullptr; + + case EOpPackSnorm2x16: + case EOpPackUnorm2x16: + case EOpPackHalf2x16: + componentWise = false; + resultSize = 1; + break; + + case EOpUnpackSnorm2x16: + case EOpUnpackUnorm2x16: + case EOpUnpackHalf2x16: + componentWise = false; + resultSize = 2; + break; + + case EOpPack16: + case EOpPack32: + case EOpPack64: + case EOpUnpack32: + case EOpUnpack16: + case EOpUnpack8: + case EOpNormalize: + componentWise = false; + resultSize = objectSize; + break; + + default: + resultSize = objectSize; + break; + } + + // Set up for processing + TConstUnionArray newConstArray(resultSize); + const TConstUnionArray& unionArray = getConstArray(); + + // Process non-component-wise operations + switch (op) { + case EOpLength: + case EOpNormalize: + { + double sum = 0; + for (int i = 0; i < objectSize; i++) + sum += unionArray[i].getDConst() * unionArray[i].getDConst(); + double length = sqrt(sum); + if (op == EOpLength) + newConstArray[0].setDConst(length); + else { + for (int i = 0; i < objectSize; i++) + newConstArray[i].setDConst(unionArray[i].getDConst() / length); + } + break; + } + + case EOpAny: + { + bool result = false; + for (int i = 0; i < objectSize; i++) { + if (unionArray[i].getBConst()) + result = true; + } + newConstArray[0].setBConst(result); + break; + } + case EOpAll: + { + bool result = true; + for (int i = 0; i < objectSize; i++) { + if (! unionArray[i].getBConst()) + result = false; + } + newConstArray[0].setBConst(result); + break; + } + + case EOpPackSnorm2x16: + case EOpPackUnorm2x16: + case EOpPackHalf2x16: + case EOpPack16: + case EOpPack32: + case EOpPack64: + case EOpUnpack32: + case EOpUnpack16: + case EOpUnpack8: + + case EOpUnpackSnorm2x16: + case EOpUnpackUnorm2x16: + case EOpUnpackHalf2x16: + + case EOpDeterminant: + case EOpMatrixInverse: + case EOpTranspose: + return nullptr; + + default: + assert(componentWise); + break; + } + + // Turn off the componentwise loop + if (! 
componentWise) + objectSize = 0; + + // Process component-wise operations + for (int i = 0; i < objectSize; i++) { + switch (op) { + case EOpNegative: + switch (getType().getBasicType()) { + case EbtDouble: + case EbtFloat16: + case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break; + case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break; + case EbtUint: newConstArray[i].setUConst(static_cast(-static_cast(unionArray[i].getUConst()))); break; +#ifndef GLSLANG_WEB + case EbtInt8: newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break; + case EbtUint8: newConstArray[i].setU8Const(static_cast(-static_cast(unionArray[i].getU8Const()))); break; + case EbtInt16: newConstArray[i].setI16Const(-unionArray[i].getI16Const()); break; + case EbtUint16:newConstArray[i].setU16Const(static_cast(-static_cast(unionArray[i].getU16Const()))); break; + case EbtInt64: newConstArray[i].setI64Const(-unionArray[i].getI64Const()); break; + case EbtUint64: newConstArray[i].setU64Const(static_cast(-static_cast(unionArray[i].getU64Const()))); break; +#endif + default: + return nullptr; + } + break; + case EOpLogicalNot: + case EOpVectorLogicalNot: + switch (getType().getBasicType()) { + case EbtBool: newConstArray[i].setBConst(!unionArray[i].getBConst()); break; + default: + return nullptr; + } + break; + case EOpBitwiseNot: + newConstArray[i] = ~unionArray[i]; + break; + case EOpRadians: + newConstArray[i].setDConst(unionArray[i].getDConst() * pi / 180.0); + break; + case EOpDegrees: + newConstArray[i].setDConst(unionArray[i].getDConst() * 180.0 / pi); + break; + case EOpSin: + newConstArray[i].setDConst(sin(unionArray[i].getDConst())); + break; + case EOpCos: + newConstArray[i].setDConst(cos(unionArray[i].getDConst())); + break; + case EOpTan: + newConstArray[i].setDConst(tan(unionArray[i].getDConst())); + break; + case EOpAsin: + newConstArray[i].setDConst(asin(unionArray[i].getDConst())); + break; + case EOpAcos: + newConstArray[i].setDConst(acos(unionArray[i].getDConst())); + break; + case EOpAtan: + newConstArray[i].setDConst(atan(unionArray[i].getDConst())); + break; + + case EOpDPdx: + case EOpDPdy: + case EOpFwidth: + case EOpDPdxFine: + case EOpDPdyFine: + case EOpFwidthFine: + case EOpDPdxCoarse: + case EOpDPdyCoarse: + case EOpFwidthCoarse: + // The derivatives are all mandated to create a constant 0. + newConstArray[i].setDConst(0.0); + break; + + case EOpExp: + newConstArray[i].setDConst(exp(unionArray[i].getDConst())); + break; + case EOpLog: + newConstArray[i].setDConst(log(unionArray[i].getDConst())); + break; + case EOpExp2: + { + const double inv_log2_e = 0.69314718055994530941723212145818; + newConstArray[i].setDConst(exp(unionArray[i].getDConst() * inv_log2_e)); + break; + } + case EOpLog2: + { + const double log2_e = 1.4426950408889634073599246810019; + newConstArray[i].setDConst(log2_e * log(unionArray[i].getDConst())); + break; + } + case EOpSqrt: + newConstArray[i].setDConst(sqrt(unionArray[i].getDConst())); + break; + case EOpInverseSqrt: + newConstArray[i].setDConst(1.0 / sqrt(unionArray[i].getDConst())); + break; + + case EOpAbs: + if (unionArray[i].getType() == EbtDouble) + newConstArray[i].setDConst(fabs(unionArray[i].getDConst())); + else if (unionArray[i].getType() == EbtInt) + newConstArray[i].setIConst(abs(unionArray[i].getIConst())); + else + newConstArray[i] = unionArray[i]; + break; + case EOpSign: + #define SIGN(X) (X == 0 ? 0 : (X < 0 ? 
-1 : 1)) + if (unionArray[i].getType() == EbtDouble) + newConstArray[i].setDConst(SIGN(unionArray[i].getDConst())); + else + newConstArray[i].setIConst(SIGN(unionArray[i].getIConst())); + break; + case EOpFloor: + newConstArray[i].setDConst(floor(unionArray[i].getDConst())); + break; + case EOpTrunc: + if (unionArray[i].getDConst() > 0) + newConstArray[i].setDConst(floor(unionArray[i].getDConst())); + else + newConstArray[i].setDConst(ceil(unionArray[i].getDConst())); + break; + case EOpRound: + newConstArray[i].setDConst(floor(0.5 + unionArray[i].getDConst())); + break; + case EOpRoundEven: + { + double flr = floor(unionArray[i].getDConst()); + bool even = flr / 2.0 == floor(flr / 2.0); + double rounded = even ? ceil(unionArray[i].getDConst() - 0.5) : floor(unionArray[i].getDConst() + 0.5); + newConstArray[i].setDConst(rounded); + break; + } + case EOpCeil: + newConstArray[i].setDConst(ceil(unionArray[i].getDConst())); + break; + case EOpFract: + { + double x = unionArray[i].getDConst(); + newConstArray[i].setDConst(x - floor(x)); + break; + } + + case EOpIsNan: + { + newConstArray[i].setBConst(isNan(unionArray[i].getDConst())); + break; + } + case EOpIsInf: + { + newConstArray[i].setBConst(isInf(unionArray[i].getDConst())); + break; + } + + case EOpConvIntToBool: + newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break; + case EOpConvUintToBool: + newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break; + case EOpConvBoolToInt: + newConstArray[i].setIConst(unionArray[i].getBConst()); break; + case EOpConvBoolToUint: + newConstArray[i].setUConst(unionArray[i].getBConst()); break; + case EOpConvIntToUint: + newConstArray[i].setUConst(unionArray[i].getIConst()); break; + case EOpConvUintToInt: + newConstArray[i].setIConst(unionArray[i].getUConst()); break; + + case EOpConvFloatToBool: + case EOpConvDoubleToBool: + newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break; + + case EOpConvBoolToFloat: + case EOpConvBoolToDouble: + newConstArray[i].setDConst(unionArray[i].getBConst()); break; + + case EOpConvIntToFloat: + case EOpConvIntToDouble: + newConstArray[i].setDConst(unionArray[i].getIConst()); break; + + case EOpConvUintToFloat: + case EOpConvUintToDouble: + newConstArray[i].setDConst(unionArray[i].getUConst()); break; + + case EOpConvDoubleToFloat: + case EOpConvFloatToDouble: + newConstArray[i].setDConst(unionArray[i].getDConst()); break; + + case EOpConvFloatToUint: + case EOpConvDoubleToUint: + newConstArray[i].setUConst(static_cast(unionArray[i].getDConst())); break; + + case EOpConvFloatToInt: + case EOpConvDoubleToInt: + newConstArray[i].setIConst(static_cast(unionArray[i].getDConst())); break; + +#ifndef GLSLANG_WEB + case EOpConvInt8ToBool: + newConstArray[i].setBConst(unionArray[i].getI8Const() != 0); break; + case EOpConvUint8ToBool: + newConstArray[i].setBConst(unionArray[i].getU8Const() != 0); break; + case EOpConvInt16ToBool: + newConstArray[i].setBConst(unionArray[i].getI16Const() != 0); break; + case EOpConvUint16ToBool: + newConstArray[i].setBConst(unionArray[i].getU16Const() != 0); break; + case EOpConvInt64ToBool: + newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break; + case EOpConvUint64ToBool: + newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break; + case EOpConvFloat16ToBool: + newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break; + + case EOpConvBoolToInt8: + newConstArray[i].setI8Const(unionArray[i].getBConst()); break; + case EOpConvBoolToUint8: + 
newConstArray[i].setU8Const(unionArray[i].getBConst()); break; + case EOpConvBoolToInt16: + newConstArray[i].setI16Const(unionArray[i].getBConst()); break; + case EOpConvBoolToUint16: + newConstArray[i].setU16Const(unionArray[i].getBConst()); break; + case EOpConvBoolToInt64: + newConstArray[i].setI64Const(unionArray[i].getBConst()); break; + case EOpConvBoolToUint64: + newConstArray[i].setU64Const(unionArray[i].getBConst()); break; + case EOpConvBoolToFloat16: + newConstArray[i].setDConst(unionArray[i].getBConst()); break; + + case EOpConvInt8ToInt16: + newConstArray[i].setI16Const(unionArray[i].getI8Const()); break; + case EOpConvInt8ToInt: + newConstArray[i].setIConst(unionArray[i].getI8Const()); break; + case EOpConvInt8ToInt64: + newConstArray[i].setI64Const(unionArray[i].getI8Const()); break; + case EOpConvInt8ToUint8: + newConstArray[i].setU8Const(unionArray[i].getI8Const()); break; + case EOpConvInt8ToUint16: + newConstArray[i].setU16Const(unionArray[i].getI8Const()); break; + case EOpConvInt8ToUint: + newConstArray[i].setUConst(unionArray[i].getI8Const()); break; + case EOpConvInt8ToUint64: + newConstArray[i].setU64Const(unionArray[i].getI8Const()); break; + case EOpConvUint8ToInt8: + newConstArray[i].setI8Const(unionArray[i].getU8Const()); break; + case EOpConvUint8ToInt16: + newConstArray[i].setI16Const(unionArray[i].getU8Const()); break; + case EOpConvUint8ToInt: + newConstArray[i].setIConst(unionArray[i].getU8Const()); break; + case EOpConvUint8ToInt64: + newConstArray[i].setI64Const(unionArray[i].getU8Const()); break; + case EOpConvUint8ToUint16: + newConstArray[i].setU16Const(unionArray[i].getU8Const()); break; + case EOpConvUint8ToUint: + newConstArray[i].setUConst(unionArray[i].getU8Const()); break; + case EOpConvUint8ToUint64: + newConstArray[i].setU64Const(unionArray[i].getU8Const()); break; + case EOpConvInt8ToFloat16: + newConstArray[i].setDConst(unionArray[i].getI8Const()); break; + case EOpConvInt8ToFloat: + newConstArray[i].setDConst(unionArray[i].getI8Const()); break; + case EOpConvInt8ToDouble: + newConstArray[i].setDConst(unionArray[i].getI8Const()); break; + case EOpConvUint8ToFloat16: + newConstArray[i].setDConst(unionArray[i].getU8Const()); break; + case EOpConvUint8ToFloat: + newConstArray[i].setDConst(unionArray[i].getU8Const()); break; + case EOpConvUint8ToDouble: + newConstArray[i].setDConst(unionArray[i].getU8Const()); break; + + case EOpConvInt16ToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getI16Const())); break; + case EOpConvInt16ToInt: + newConstArray[i].setIConst(unionArray[i].getI16Const()); break; + case EOpConvInt16ToInt64: + newConstArray[i].setI64Const(unionArray[i].getI16Const()); break; + case EOpConvInt16ToUint8: + newConstArray[i].setU8Const(static_cast(unionArray[i].getI16Const())); break; + case EOpConvInt16ToUint16: + newConstArray[i].setU16Const(unionArray[i].getI16Const()); break; + case EOpConvInt16ToUint: + newConstArray[i].setUConst(unionArray[i].getI16Const()); break; + case EOpConvInt16ToUint64: + newConstArray[i].setU64Const(unionArray[i].getI16Const()); break; + case EOpConvUint16ToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getU16Const())); break; + case EOpConvUint16ToInt16: + newConstArray[i].setI16Const(unionArray[i].getU16Const()); break; + case EOpConvUint16ToInt: + newConstArray[i].setIConst(unionArray[i].getU16Const()); break; + case EOpConvUint16ToInt64: + newConstArray[i].setI64Const(unionArray[i].getU16Const()); break; + case EOpConvUint16ToUint8: + 
newConstArray[i].setU8Const(static_cast(unionArray[i].getU16Const())); break; + + case EOpConvUint16ToUint: + newConstArray[i].setUConst(unionArray[i].getU16Const()); break; + case EOpConvUint16ToUint64: + newConstArray[i].setU64Const(unionArray[i].getU16Const()); break; + case EOpConvInt16ToFloat16: + newConstArray[i].setDConst(unionArray[i].getI16Const()); break; + case EOpConvInt16ToFloat: + newConstArray[i].setDConst(unionArray[i].getI16Const()); break; + case EOpConvInt16ToDouble: + newConstArray[i].setDConst(unionArray[i].getI16Const()); break; + case EOpConvUint16ToFloat16: + newConstArray[i].setDConst(unionArray[i].getU16Const()); break; + case EOpConvUint16ToFloat: + newConstArray[i].setDConst(unionArray[i].getU16Const()); break; + case EOpConvUint16ToDouble: + newConstArray[i].setDConst(unionArray[i].getU16Const()); break; + + case EOpConvIntToInt8: + newConstArray[i].setI8Const((signed char)unionArray[i].getIConst()); break; + case EOpConvIntToInt16: + newConstArray[i].setI16Const((signed short)unionArray[i].getIConst()); break; + case EOpConvIntToInt64: + newConstArray[i].setI64Const(unionArray[i].getIConst()); break; + case EOpConvIntToUint8: + newConstArray[i].setU8Const((unsigned char)unionArray[i].getIConst()); break; + case EOpConvIntToUint16: + newConstArray[i].setU16Const((unsigned char)unionArray[i].getIConst()); break; + case EOpConvIntToUint64: + newConstArray[i].setU64Const(unionArray[i].getIConst()); break; + + case EOpConvUintToInt8: + newConstArray[i].setI8Const((signed char)unionArray[i].getUConst()); break; + case EOpConvUintToInt16: + newConstArray[i].setI16Const((signed short)unionArray[i].getUConst()); break; + case EOpConvUintToInt64: + newConstArray[i].setI64Const(unionArray[i].getUConst()); break; + case EOpConvUintToUint8: + newConstArray[i].setU8Const((unsigned char)unionArray[i].getUConst()); break; + case EOpConvUintToUint16: + newConstArray[i].setU16Const((unsigned short)unionArray[i].getUConst()); break; + case EOpConvUintToUint64: + newConstArray[i].setU64Const(unionArray[i].getUConst()); break; + case EOpConvIntToFloat16: + newConstArray[i].setDConst(unionArray[i].getIConst()); break; + case EOpConvUintToFloat16: + newConstArray[i].setDConst(unionArray[i].getUConst()); break; + case EOpConvInt64ToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToInt16: + newConstArray[i].setI16Const(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToInt: + newConstArray[i].setIConst(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToUint8: + newConstArray[i].setU8Const(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToUint16: + newConstArray[i].setU16Const(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToUint: + newConstArray[i].setUConst(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToUint64: + newConstArray[i].setU64Const(unionArray[i].getI64Const()); break; + case EOpConvUint64ToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getU64Const())); break; + case EOpConvUint64ToInt16: + newConstArray[i].setI16Const(static_cast(unionArray[i].getU64Const())); break; + case EOpConvUint64ToInt: + newConstArray[i].setIConst(static_cast(unionArray[i].getU64Const())); break; + case EOpConvUint64ToInt64: + newConstArray[i].setI64Const(unionArray[i].getU64Const()); break; + case EOpConvUint64ToUint8: + newConstArray[i].setU8Const(static_cast(unionArray[i].getU64Const())); break; + case 
EOpConvUint64ToUint16: + newConstArray[i].setU16Const(static_cast(unionArray[i].getU64Const())); break; + case EOpConvUint64ToUint: + newConstArray[i].setUConst(static_cast(unionArray[i].getU64Const())); break; + case EOpConvInt64ToFloat16: + newConstArray[i].setDConst(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToFloat: + newConstArray[i].setDConst(static_cast(unionArray[i].getI64Const())); break; + case EOpConvInt64ToDouble: + newConstArray[i].setDConst(static_cast(unionArray[i].getI64Const())); break; + case EOpConvUint64ToFloat16: + newConstArray[i].setDConst(static_cast(unionArray[i].getU64Const())); break; + case EOpConvUint64ToFloat: + newConstArray[i].setDConst(static_cast(unionArray[i].getU64Const())); break; + case EOpConvUint64ToDouble: + newConstArray[i].setDConst(static_cast(unionArray[i].getU64Const())); break; + case EOpConvFloat16ToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToInt16: + newConstArray[i].setI16Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToInt: + newConstArray[i].setIConst(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToInt64: + newConstArray[i].setI64Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToUint8: + newConstArray[i].setU8Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToUint16: + newConstArray[i].setU16Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToUint: + newConstArray[i].setUConst(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToUint64: + newConstArray[i].setU64Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloat16ToFloat: + newConstArray[i].setDConst(unionArray[i].getDConst()); break; + case EOpConvFloat16ToDouble: + newConstArray[i].setDConst(unionArray[i].getDConst()); break; + case EOpConvFloatToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloatToInt16: + newConstArray[i].setI16Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloatToInt64: + newConstArray[i].setI64Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloatToUint8: + newConstArray[i].setU8Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloatToUint16: + newConstArray[i].setU16Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloatToUint64: + newConstArray[i].setU64Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvFloatToFloat16: + newConstArray[i].setDConst(unionArray[i].getDConst()); break; + case EOpConvDoubleToInt8: + newConstArray[i].setI8Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvDoubleToInt16: + newConstArray[i].setI16Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvDoubleToInt64: + newConstArray[i].setI64Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvDoubleToUint8: + newConstArray[i].setU8Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvDoubleToUint16: + newConstArray[i].setU16Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvDoubleToUint64: + newConstArray[i].setU64Const(static_cast(unionArray[i].getDConst())); break; + case EOpConvDoubleToFloat16: + newConstArray[i].setDConst(unionArray[i].getDConst()); break; + case EOpConvPtrToUint64: + case EOpConvUint64ToPtr: + case EOpConstructReference: + newConstArray[i].setU64Const(unionArray[i].getU64Const()); break; +#endif + 
+ // TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out + + case EOpSinh: + case EOpCosh: + case EOpTanh: + case EOpAsinh: + case EOpAcosh: + case EOpAtanh: + + case EOpFloatBitsToInt: + case EOpFloatBitsToUint: + case EOpIntBitsToFloat: + case EOpUintBitsToFloat: + case EOpDoubleBitsToInt64: + case EOpDoubleBitsToUint64: + case EOpInt64BitsToDouble: + case EOpUint64BitsToDouble: + case EOpFloat16BitsToInt16: + case EOpFloat16BitsToUint16: + case EOpInt16BitsToFloat16: + case EOpUint16BitsToFloat16: + default: + return nullptr; + } + } + + TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, returnType); + newNode->getWritableType().getQualifier().storage = EvqConst; + newNode->setLoc(getLoc()); + + return newNode; +} + +// +// Do constant folding for an aggregate node that has all its children +// as constants and an operator that requires constant folding. +// +TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode) +{ + if (aggrNode == nullptr) + return aggrNode; + + if (! areAllChildConst(aggrNode)) + return aggrNode; + + if (aggrNode->isConstructor()) + return foldConstructor(aggrNode); + + TIntermSequence& children = aggrNode->getSequence(); + + // First, see if this is an operation to constant fold, kick out if not, + // see what size the result is if so. + + bool componentwise = false; // will also say componentwise if a scalar argument gets repeated to make per-component results + int objectSize; + switch (aggrNode->getOp()) { + case EOpAtan: + case EOpPow: + case EOpMin: + case EOpMax: + case EOpMix: + case EOpMod: + case EOpClamp: + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + case EOpVectorEqual: + case EOpVectorNotEqual: + componentwise = true; + objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents(); + break; + case EOpCross: + case EOpReflect: + case EOpRefract: + case EOpFaceForward: + objectSize = children[0]->getAsConstantUnion()->getType().computeNumComponents(); + break; + case EOpDistance: + case EOpDot: + objectSize = 1; + break; + case EOpOuterProduct: + objectSize = children[0]->getAsTyped()->getType().getVectorSize() * + children[1]->getAsTyped()->getType().getVectorSize(); + break; + case EOpStep: + componentwise = true; + objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(), + children[1]->getAsTyped()->getType().getVectorSize()); + break; + case EOpSmoothStep: + componentwise = true; + objectSize = std::max(children[0]->getAsTyped()->getType().getVectorSize(), + children[2]->getAsTyped()->getType().getVectorSize()); + break; + default: + return aggrNode; + } + TConstUnionArray newConstArray(objectSize); + + TVector childConstUnions; + for (unsigned int arg = 0; arg < children.size(); ++arg) + childConstUnions.push_back(children[arg]->getAsConstantUnion()->getConstArray()); + + if (componentwise) { + for (int comp = 0; comp < objectSize; comp++) { + + // some arguments are scalars instead of matching vectors; simulate a smear + int arg0comp = std::min(comp, children[0]->getAsTyped()->getType().getVectorSize() - 1); + int arg1comp = 0; + if (children.size() > 1) + arg1comp = std::min(comp, children[1]->getAsTyped()->getType().getVectorSize() - 1); + int arg2comp = 0; + if (children.size() > 2) + arg2comp = std::min(comp, children[2]->getAsTyped()->getType().getVectorSize() - 1); + + switch (aggrNode->getOp()) { + case EOpAtan: + 
newConstArray[comp].setDConst(atan2(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EOpPow: + newConstArray[comp].setDConst(pow(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EOpMod: + { + double arg0 = childConstUnions[0][arg0comp].getDConst(); + double arg1 = childConstUnions[1][arg1comp].getDConst(); + double result = arg0 - arg1 * floor(arg0 / arg1); + newConstArray[comp].setDConst(result); + break; + } + case EOpMin: + switch(children[0]->getAsTyped()->getBasicType()) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + newConstArray[comp].setDConst(std::min(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EbtInt: + newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst())); + break; + case EbtUint: + newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst())); + break; +#ifndef GLSLANG_WEB + case EbtInt8: + newConstArray[comp].setI8Const(std::min(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const())); + break; + case EbtUint8: + newConstArray[comp].setU8Const(std::min(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const())); + break; + case EbtInt16: + newConstArray[comp].setI16Const(std::min(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const())); + break; + case EbtUint16: + newConstArray[comp].setU16Const(std::min(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const())); + break; + case EbtInt64: + newConstArray[comp].setI64Const(std::min(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const())); + break; + case EbtUint64: + newConstArray[comp].setU64Const(std::min(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const())); + break; +#endif + default: assert(false && "Default missing"); + } + break; + case EOpMax: + switch(children[0]->getAsTyped()->getBasicType()) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + newConstArray[comp].setDConst(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst())); + break; + case EbtInt: + newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst())); + break; + case EbtUint: + newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst())); + break; +#ifndef GLSLANG_WEB + case EbtInt8: + newConstArray[comp].setI8Const(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const())); + break; + case EbtUint8: + newConstArray[comp].setU8Const(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const())); + break; + case EbtInt16: + newConstArray[comp].setI16Const(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const())); + break; + case EbtUint16: + newConstArray[comp].setU16Const(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const())); + break; + case EbtInt64: + newConstArray[comp].setI64Const(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const())); + break; + case EbtUint64: + 
newConstArray[comp].setU64Const(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const())); + break; +#endif + default: assert(false && "Default missing"); + } + break; + case EOpClamp: + switch(children[0]->getAsTyped()->getBasicType()) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + newConstArray[comp].setDConst(std::min(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()), + childConstUnions[2][arg2comp].getDConst())); + break; + case EbtUint: + newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()), + childConstUnions[2][arg2comp].getUConst())); + break; +#ifndef GLSLANG_WEB + case EbtInt8: + newConstArray[comp].setI8Const(std::min(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()), + childConstUnions[2][arg2comp].getI8Const())); + break; + case EbtUint8: + newConstArray[comp].setU8Const(std::min(std::max(childConstUnions[0][arg0comp].getU8Const(), childConstUnions[1][arg1comp].getU8Const()), + childConstUnions[2][arg2comp].getU8Const())); + break; + case EbtInt16: + newConstArray[comp].setI16Const(std::min(std::max(childConstUnions[0][arg0comp].getI16Const(), childConstUnions[1][arg1comp].getI16Const()), + childConstUnions[2][arg2comp].getI16Const())); + break; + case EbtUint16: + newConstArray[comp].setU16Const(std::min(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()), + childConstUnions[2][arg2comp].getU16Const())); + break; + case EbtInt: + newConstArray[comp].setIConst(std::min(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()), + childConstUnions[2][arg2comp].getIConst())); + break; + case EbtInt64: + newConstArray[comp].setI64Const(std::min(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()), + childConstUnions[2][arg2comp].getI64Const())); + break; + case EbtUint64: + newConstArray[comp].setU64Const(std::min(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()), + childConstUnions[2][arg2comp].getU64Const())); + break; +#endif + default: assert(false && "Default missing"); + } + break; + case EOpLessThan: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp]); + break; + case EOpGreaterThan: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp]); + break; + case EOpLessThanEqual: + newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] > childConstUnions[1][arg1comp])); + break; + case EOpGreaterThanEqual: + newConstArray[comp].setBConst(! (childConstUnions[0][arg0comp] < childConstUnions[1][arg1comp])); + break; + case EOpVectorEqual: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] == childConstUnions[1][arg1comp]); + break; + case EOpVectorNotEqual: + newConstArray[comp].setBConst(childConstUnions[0][arg0comp] != childConstUnions[1][arg1comp]); + break; + case EOpMix: + if (!children[0]->getAsTyped()->isFloatingDomain()) + return aggrNode; + if (children[2]->getAsTyped()->getBasicType() == EbtBool) { + newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst() + ? 
childConstUnions[1][arg1comp].getDConst() + : childConstUnions[0][arg0comp].getDConst()); + } else { + newConstArray[comp].setDConst( + childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) + + childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst()); + } + break; + case EOpStep: + newConstArray[comp].setDConst(childConstUnions[1][arg1comp].getDConst() < childConstUnions[0][arg0comp].getDConst() ? 0.0 : 1.0); + break; + case EOpSmoothStep: + { + double t = (childConstUnions[2][arg2comp].getDConst() - childConstUnions[0][arg0comp].getDConst()) / + (childConstUnions[1][arg1comp].getDConst() - childConstUnions[0][arg0comp].getDConst()); + if (t < 0.0) + t = 0.0; + if (t > 1.0) + t = 1.0; + newConstArray[comp].setDConst(t * t * (3.0 - 2.0 * t)); + break; + } + default: + return aggrNode; + } + } + } else { + // Non-componentwise... + + int numComps = children[0]->getAsConstantUnion()->getType().computeNumComponents(); + double dot; + + switch (aggrNode->getOp()) { + case EOpDistance: + { + double sum = 0.0; + for (int comp = 0; comp < numComps; ++comp) { + double diff = childConstUnions[1][comp].getDConst() - childConstUnions[0][comp].getDConst(); + sum += diff * diff; + } + newConstArray[0].setDConst(sqrt(sum)); + break; + } + case EOpDot: + newConstArray[0].setDConst(childConstUnions[0].dot(childConstUnions[1])); + break; + case EOpCross: + newConstArray[0] = childConstUnions[0][1] * childConstUnions[1][2] - childConstUnions[0][2] * childConstUnions[1][1]; + newConstArray[1] = childConstUnions[0][2] * childConstUnions[1][0] - childConstUnions[0][0] * childConstUnions[1][2]; + newConstArray[2] = childConstUnions[0][0] * childConstUnions[1][1] - childConstUnions[0][1] * childConstUnions[1][0]; + break; + case EOpFaceForward: + // If dot(Nref, I) < 0 return N, otherwise return -N: Arguments are (N, I, Nref). + dot = childConstUnions[1].dot(childConstUnions[2]); + for (int comp = 0; comp < numComps; ++comp) { + if (dot < 0.0) + newConstArray[comp] = childConstUnions[0][comp]; + else + newConstArray[comp].setDConst(-childConstUnions[0][comp].getDConst()); + } + break; + case EOpReflect: + // I - 2 * dot(N, I) * N: Arguments are (I, N). + dot = childConstUnions[0].dot(childConstUnions[1]); + dot *= 2.0; + for (int comp = 0; comp < numComps; ++comp) + newConstArray[comp].setDConst(childConstUnions[0][comp].getDConst() - dot * childConstUnions[1][comp].getDConst()); + break; + case EOpRefract: + { + // Arguments are (I, N, eta). 
+ // k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) + // if (k < 0.0) + // return dvec(0.0) + // else + // return eta * I - (eta * dot(N, I) + sqrt(k)) * N + dot = childConstUnions[0].dot(childConstUnions[1]); + double eta = childConstUnions[2][0].getDConst(); + double k = 1.0 - eta * eta * (1.0 - dot * dot); + if (k < 0.0) { + for (int comp = 0; comp < numComps; ++comp) + newConstArray[comp].setDConst(0.0); + } else { + for (int comp = 0; comp < numComps; ++comp) + newConstArray[comp].setDConst(eta * childConstUnions[0][comp].getDConst() - (eta * dot + sqrt(k)) * childConstUnions[1][comp].getDConst()); + } + break; + } + case EOpOuterProduct: + { + int numRows = numComps; + int numCols = children[1]->getAsConstantUnion()->getType().computeNumComponents(); + for (int row = 0; row < numRows; ++row) + for (int col = 0; col < numCols; ++col) + newConstArray[col * numRows + row] = childConstUnions[0][row] * childConstUnions[1][col]; + break; + } + default: + return aggrNode; + } + } + + TIntermConstantUnion *newNode = new TIntermConstantUnion(newConstArray, aggrNode->getType()); + newNode->getWritableType().getQualifier().storage = EvqConst; + newNode->setLoc(aggrNode->getLoc()); + + return newNode; +} + +bool TIntermediate::areAllChildConst(TIntermAggregate* aggrNode) +{ + bool allConstant = true; + + // check if all the child nodes are constants so that they can be inserted into + // the parent node + if (aggrNode) { + TIntermSequence& childSequenceVector = aggrNode->getSequence(); + for (TIntermSequence::iterator p = childSequenceVector.begin(); + p != childSequenceVector.end(); p++) { + if (!(*p)->getAsTyped()->getAsConstantUnion()) + return false; + } + } + + return allConstant; +} + +TIntermTyped* TIntermediate::foldConstructor(TIntermAggregate* aggrNode) +{ + bool error = false; + + TConstUnionArray unionArray(aggrNode->getType().computeNumComponents()); + if (aggrNode->getSequence().size() == 1) + error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType(), true); + else + error = parseConstTree(aggrNode, unionArray, aggrNode->getOp(), aggrNode->getType()); + + if (error) + return aggrNode; + + return addConstantUnion(unionArray, aggrNode->getType(), aggrNode->getLoc()); +} + +// +// Constant folding of a bracket (array-style) dereference or struct-like dot +// dereference. Can handle anything except a multi-character swizzle, though +// all swizzles may go to foldSwizzle(). +// +TIntermTyped* TIntermediate::foldDereference(TIntermTyped* node, int index, const TSourceLoc& loc) +{ + TType dereferencedType(node->getType(), index); + dereferencedType.getQualifier().storage = EvqConst; + TIntermTyped* result = 0; + int size = dereferencedType.computeNumComponents(); + + // arrays, vectors, matrices, all use simple multiplicative math + // while structures need to add up heterogeneous members + int start; + if (node->getType().isCoopMat()) + start = 0; + else if (node->isArray() || ! 
node->isStruct()) + start = size * index; + else { + // it is a structure + assert(node->isStruct()); + start = 0; + for (int i = 0; i < index; ++i) + start += (*node->getType().getStruct())[i].type->computeNumComponents(); + } + + result = addConstantUnion(TConstUnionArray(node->getAsConstantUnion()->getConstArray(), start, size), node->getType(), loc); + + if (result == 0) + result = node; + else + result->setType(dereferencedType); + + return result; +} + +// +// Make a constant vector node or constant scalar node, representing a given +// constant vector and constant swizzle into it. +// +TIntermTyped* TIntermediate::foldSwizzle(TIntermTyped* node, TSwizzleSelectors& selectors, const TSourceLoc& loc) +{ + const TConstUnionArray& unionArray = node->getAsConstantUnion()->getConstArray(); + TConstUnionArray constArray(selectors.size()); + + for (int i = 0; i < selectors.size(); i++) + constArray[i] = unionArray[selectors[i]]; + + TIntermTyped* result = addConstantUnion(constArray, node->getType(), loc); + + if (result == 0) + result = node; + else + result->setType(TType(node->getBasicType(), EvqConst, selectors.size())); + + return result; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/InfoSink.cpp b/src/third_party/glslang/glslang/MachineIndependent/InfoSink.cpp new file mode 100644 index 0000000..d00c422 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/InfoSink.cpp @@ -0,0 +1,113 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#include "../Include/InfoSink.h" + +#include + +namespace glslang { + +void TInfoSinkBase::append(const char* s) +{ + if (outputStream & EString) { + if (s == nullptr) + sink.append("(null)"); + else { + checkMem(strlen(s)); + sink.append(s); + } + } + +//#ifdef _WIN32 +// if (outputStream & EDebugger) +// OutputDebugString(s); +//#endif + + if (outputStream & EStdOut) + fprintf(stdout, "%s", s); +} + +void TInfoSinkBase::append(int count, char c) +{ + if (outputStream & EString) { + checkMem(count); + sink.append(count, c); + } + +//#ifdef _WIN32 +// if (outputStream & EDebugger) { +// char str[2]; +// str[0] = c; +// str[1] = '\0'; +// OutputDebugString(str); +// } +//#endif + + if (outputStream & EStdOut) + fprintf(stdout, "%c", c); +} + +void TInfoSinkBase::append(const TPersistString& t) +{ + if (outputStream & EString) { + checkMem(t.size()); + sink.append(t); + } + +//#ifdef _WIN32 +// if (outputStream & EDebugger) +// OutputDebugString(t.c_str()); +//#endif + + if (outputStream & EStdOut) + fprintf(stdout, "%s", t.c_str()); +} + +void TInfoSinkBase::append(const TString& t) +{ + if (outputStream & EString) { + checkMem(t.size()); + sink.append(t.c_str()); + } + +//#ifdef _WIN32 +// if (outputStream & EDebugger) +// OutputDebugString(t.c_str()); +//#endif + + if (outputStream & EStdOut) + fprintf(stdout, "%s", t.c_str()); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/Initialize.cpp b/src/third_party/glslang/glslang/MachineIndependent/Initialize.cpp new file mode 100644 index 0000000..8d5d04f --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Initialize.cpp @@ -0,0 +1,9290 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +// +// Create strings that declare built-in definitions, add built-ins programmatically +// that cannot be expressed in the strings, and establish mappings between +// built-in functions and operators. +// +// Where to put a built-in: +// TBuiltIns::initialize(version,profile) context-independent textual built-ins; add them to the right string +// TBuiltIns::initialize(resources,...) context-dependent textual built-ins; add them to the right string +// TBuiltIns::identifyBuiltIns(...,symbolTable) context-independent programmatic additions/mappings to the symbol table, +// including identifying what extensions are needed if a version does not allow a symbol +// TBuiltIns::identifyBuiltIns(...,symbolTable, resources) context-dependent programmatic additions/mappings to the symbol table, +// including identifying what extensions are needed if a version does not allow a symbol +// + +#include "../Include/intermediate.h" +#include "Initialize.h" + +namespace glslang { + +// TODO: ARB_Compatability: do full extension support +const bool ARBCompatibility = true; + +const bool ForwardCompatibility = false; + +// change this back to false if depending on textual spellings of texturing calls when consuming the AST +// Using PureOperatorBuiltins=false is deprecated. +bool PureOperatorBuiltins = true; + +namespace { + +// +// A set of definitions for tabling of the built-in functions. +// + +// Order matters here, as does correlation with the subsequent +// "const int ..." declarations and the ArgType enumerants. +const char* TypeString[] = { + "bool", "bvec2", "bvec3", "bvec4", + "float", "vec2", "vec3", "vec4", + "int", "ivec2", "ivec3", "ivec4", + "uint", "uvec2", "uvec3", "uvec4", +}; +const int TypeStringCount = sizeof(TypeString) / sizeof(char*); // number of entries in 'TypeString' +const int TypeStringRowShift = 2; // shift amount to go downe one row in 'TypeString' +const int TypeStringColumnMask = (1 << TypeStringRowShift) - 1; // reduce type to its column number in 'TypeString' +const int TypeStringScalarMask = ~TypeStringColumnMask; // take type to its scalar column in 'TypeString' + +enum ArgType { + // numbers hardcoded to correspond to 'TypeString'; order and value matter + TypeB = 1 << 0, // Boolean + TypeF = 1 << 1, // float 32 + TypeI = 1 << 2, // int 32 + TypeU = 1 << 3, // uint 32 + TypeF16 = 1 << 4, // float 16 + TypeF64 = 1 << 5, // float 64 + TypeI8 = 1 << 6, // int 8 + TypeI16 = 1 << 7, // int 16 + TypeI64 = 1 << 8, // int 64 + TypeU8 = 1 << 9, // uint 8 + TypeU16 = 1 << 10, // uint 16 + TypeU64 = 1 << 11, // uint 64 +}; +// Mixtures of the above, to help the function tables +const ArgType TypeFI = static_cast(TypeF | TypeI); +const ArgType TypeFIB = static_cast(TypeF | TypeI | TypeB); +const ArgType TypeIU = static_cast(TypeI | TypeU); + +// The relationships between arguments and return type, whether anything is +// output, or other unusual situations. 
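+// For example, ClassLS below means an entry such as mod(vec3, vec3) also gets a
+// scalar-last form mod(vec3, float), while ClassRS means length(vec3) keeps a
+// scalar "float" return type as its argument cycles through the vector widths.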
+enum ArgClass { + ClassRegular = 0, // nothing special, just all vector widths with matching return type; traditional arithmetic + ClassLS = 1 << 0, // the last argument is also held fixed as a (type-matched) scalar while the others cycle + ClassXLS = 1 << 1, // the last argument is exclusively a (type-matched) scalar while the others cycle + ClassLS2 = 1 << 2, // the last two arguments are held fixed as a (type-matched) scalar while the others cycle + ClassFS = 1 << 3, // the first argument is held fixed as a (type-matched) scalar while the others cycle + ClassFS2 = 1 << 4, // the first two arguments are held fixed as a (type-matched) scalar while the others cycle + ClassLO = 1 << 5, // the last argument is an output + ClassB = 1 << 6, // return type cycles through only bool/bvec, matching vector width of args + ClassLB = 1 << 7, // last argument cycles through only bool/bvec, matching vector width of args + ClassV1 = 1 << 8, // scalar only + ClassFIO = 1 << 9, // first argument is inout + ClassRS = 1 << 10, // the return is held scalar as the arguments cycle + ClassNS = 1 << 11, // no scalar prototype + ClassCV = 1 << 12, // first argument is 'coherent volatile' + ClassFO = 1 << 13, // first argument is output + ClassV3 = 1 << 14, // vec3 only +}; +// Mixtures of the above, to help the function tables +const ArgClass ClassV1FIOCV = (ArgClass)(ClassV1 | ClassFIO | ClassCV); +const ArgClass ClassBNS = (ArgClass)(ClassB | ClassNS); +const ArgClass ClassRSNS = (ArgClass)(ClassRS | ClassNS); + +// A descriptor, for a single profile, of when something is available. +// If the current profile does not match 'profile' mask below, the other fields +// do not apply (nor validate). +// profiles == EBadProfile is the end of an array of these +struct Versioning { + EProfile profiles; // the profile(s) (mask) that the following fields are valid for + int minExtendedVersion; // earliest version when extensions are enabled; ignored if numExtensions is 0 + int minCoreVersion; // earliest version function is in core; 0 means never + int numExtensions; // how many extensions are in the 'extensions' list + const char** extensions; // list of extension names enabling the function +}; + +EProfile EDesktopProfile = static_cast(ENoProfile | ECoreProfile | ECompatibilityProfile); + +// Declare pointers to put into the table for versioning. +#ifdef GLSLANG_WEB + const Versioning* Es300Desktop130 = nullptr; + const Versioning* Es310Desktop420 = nullptr; +#elif defined(GLSLANG_ANGLE) + const Versioning* Es300Desktop130 = nullptr; + const Versioning* Es310Desktop420 = nullptr; + const Versioning* Es310Desktop450 = nullptr; +#else + const Versioning Es300Desktop130Version[] = { { EEsProfile, 0, 300, 0, nullptr }, + { EDesktopProfile, 0, 130, 0, nullptr }, + { EBadProfile } }; + const Versioning* Es300Desktop130 = &Es300Desktop130Version[0]; + + const Versioning Es310Desktop420Version[] = { { EEsProfile, 0, 310, 0, nullptr }, + { EDesktopProfile, 0, 420, 0, nullptr }, + { EBadProfile } }; + const Versioning* Es310Desktop420 = &Es310Desktop420Version[0]; + + const Versioning Es310Desktop450Version[] = { { EEsProfile, 0, 310, 0, nullptr }, + { EDesktopProfile, 0, 450, 0, nullptr }, + { EBadProfile } }; + const Versioning* Es310Desktop450 = &Es310Desktop450Version[0]; +#endif + +// The main descriptor of what a set of function prototypes can look like, and +// a pointer to extra versioning information, when needed. 
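+// For example, the BaseFunctions entry { EOpRadians, "radians", 1, TypeF, ClassRegular, nullptr }
+// expands to float radians(float); vec2 radians(vec2); vec3 radians(vec3); vec4 radians(vec4);
+// and, because its versioning pointer is null, is declared for every version and profile.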
+struct BuiltInFunction { + TOperator op; // operator to map the name to + const char* name; // function name + int numArguments; // number of arguments (overloads with varying arguments need different entries) + ArgType types; // ArgType mask + ArgClass classes; // the ways this particular function entry manifests + const Versioning* versioning; // nullptr means always a valid version +}; + +// The tables can have the same built-in function name more than one time, +// but the exact same prototype must be indicated at most once. +// The prototypes that get declared are the union of all those indicated. +// This is important when different releases add new prototypes for the same name. +// It also also congnitively simpler tiling of the prototype space. +// In practice, most names can be fully represented with one entry. +// +// Table is terminated by an OpNull TOperator. + +const BuiltInFunction BaseFunctions[] = { +// TOperator, name, arg-count, ArgType, ArgClass, versioning +// --------- ---- --------- ------- -------- ---------- + { EOpRadians, "radians", 1, TypeF, ClassRegular, nullptr }, + { EOpDegrees, "degrees", 1, TypeF, ClassRegular, nullptr }, + { EOpSin, "sin", 1, TypeF, ClassRegular, nullptr }, + { EOpCos, "cos", 1, TypeF, ClassRegular, nullptr }, + { EOpTan, "tan", 1, TypeF, ClassRegular, nullptr }, + { EOpAsin, "asin", 1, TypeF, ClassRegular, nullptr }, + { EOpAcos, "acos", 1, TypeF, ClassRegular, nullptr }, + { EOpAtan, "atan", 2, TypeF, ClassRegular, nullptr }, + { EOpAtan, "atan", 1, TypeF, ClassRegular, nullptr }, + { EOpPow, "pow", 2, TypeF, ClassRegular, nullptr }, + { EOpExp, "exp", 1, TypeF, ClassRegular, nullptr }, + { EOpLog, "log", 1, TypeF, ClassRegular, nullptr }, + { EOpExp2, "exp2", 1, TypeF, ClassRegular, nullptr }, + { EOpLog2, "log2", 1, TypeF, ClassRegular, nullptr }, + { EOpSqrt, "sqrt", 1, TypeF, ClassRegular, nullptr }, + { EOpInverseSqrt, "inversesqrt", 1, TypeF, ClassRegular, nullptr }, + { EOpAbs, "abs", 1, TypeF, ClassRegular, nullptr }, + { EOpSign, "sign", 1, TypeF, ClassRegular, nullptr }, + { EOpFloor, "floor", 1, TypeF, ClassRegular, nullptr }, + { EOpCeil, "ceil", 1, TypeF, ClassRegular, nullptr }, + { EOpFract, "fract", 1, TypeF, ClassRegular, nullptr }, + { EOpMod, "mod", 2, TypeF, ClassLS, nullptr }, + { EOpMin, "min", 2, TypeF, ClassLS, nullptr }, + { EOpMax, "max", 2, TypeF, ClassLS, nullptr }, + { EOpClamp, "clamp", 3, TypeF, ClassLS2, nullptr }, + { EOpMix, "mix", 3, TypeF, ClassLS, nullptr }, + { EOpStep, "step", 2, TypeF, ClassFS, nullptr }, + { EOpSmoothStep, "smoothstep", 3, TypeF, ClassFS2, nullptr }, + { EOpNormalize, "normalize", 1, TypeF, ClassRegular, nullptr }, + { EOpFaceForward, "faceforward", 3, TypeF, ClassRegular, nullptr }, + { EOpReflect, "reflect", 2, TypeF, ClassRegular, nullptr }, + { EOpRefract, "refract", 3, TypeF, ClassXLS, nullptr }, + { EOpLength, "length", 1, TypeF, ClassRS, nullptr }, + { EOpDistance, "distance", 2, TypeF, ClassRS, nullptr }, + { EOpDot, "dot", 2, TypeF, ClassRS, nullptr }, + { EOpCross, "cross", 2, TypeF, ClassV3, nullptr }, + { EOpLessThan, "lessThan", 2, TypeFI, ClassBNS, nullptr }, + { EOpLessThanEqual, "lessThanEqual", 2, TypeFI, ClassBNS, nullptr }, + { EOpGreaterThan, "greaterThan", 2, TypeFI, ClassBNS, nullptr }, + { EOpGreaterThanEqual, "greaterThanEqual", 2, TypeFI, ClassBNS, nullptr }, + { EOpVectorEqual, "equal", 2, TypeFIB, ClassBNS, nullptr }, + { EOpVectorNotEqual, "notEqual", 2, TypeFIB, ClassBNS, nullptr }, + { EOpAny, "any", 1, TypeB, ClassRSNS, nullptr }, + { EOpAll, "all", 
1, TypeB, ClassRSNS, nullptr }, + { EOpVectorLogicalNot, "not", 1, TypeB, ClassNS, nullptr }, + { EOpSinh, "sinh", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpCosh, "cosh", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpTanh, "tanh", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpAsinh, "asinh", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpAcosh, "acosh", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpAtanh, "atanh", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpAbs, "abs", 1, TypeI, ClassRegular, Es300Desktop130 }, + { EOpSign, "sign", 1, TypeI, ClassRegular, Es300Desktop130 }, + { EOpTrunc, "trunc", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpRound, "round", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpRoundEven, "roundEven", 1, TypeF, ClassRegular, Es300Desktop130 }, + { EOpModf, "modf", 2, TypeF, ClassLO, Es300Desktop130 }, + { EOpMin, "min", 2, TypeIU, ClassLS, Es300Desktop130 }, + { EOpMax, "max", 2, TypeIU, ClassLS, Es300Desktop130 }, + { EOpClamp, "clamp", 3, TypeIU, ClassLS2, Es300Desktop130 }, + { EOpMix, "mix", 3, TypeF, ClassLB, Es300Desktop130 }, + { EOpIsInf, "isinf", 1, TypeF, ClassB, Es300Desktop130 }, + { EOpIsNan, "isnan", 1, TypeF, ClassB, Es300Desktop130 }, + { EOpLessThan, "lessThan", 2, TypeU, ClassBNS, Es300Desktop130 }, + { EOpLessThanEqual, "lessThanEqual", 2, TypeU, ClassBNS, Es300Desktop130 }, + { EOpGreaterThan, "greaterThan", 2, TypeU, ClassBNS, Es300Desktop130 }, + { EOpGreaterThanEqual, "greaterThanEqual", 2, TypeU, ClassBNS, Es300Desktop130 }, + { EOpVectorEqual, "equal", 2, TypeU, ClassBNS, Es300Desktop130 }, + { EOpVectorNotEqual, "notEqual", 2, TypeU, ClassBNS, Es300Desktop130 }, + { EOpAtomicAdd, "atomicAdd", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicMin, "atomicMin", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicMax, "atomicMax", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicAnd, "atomicAnd", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicOr, "atomicOr", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicXor, "atomicXor", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicExchange, "atomicExchange", 2, TypeIU, ClassV1FIOCV, Es310Desktop420 }, + { EOpAtomicCompSwap, "atomicCompSwap", 3, TypeIU, ClassV1FIOCV, Es310Desktop420 }, +#ifndef GLSLANG_WEB + { EOpMix, "mix", 3, TypeB, ClassRegular, Es310Desktop450 }, + { EOpMix, "mix", 3, TypeIU, ClassLB, Es310Desktop450 }, +#endif + + { EOpNull } +}; + +const BuiltInFunction DerivativeFunctions[] = { + { EOpDPdx, "dFdx", 1, TypeF, ClassRegular, nullptr }, + { EOpDPdy, "dFdy", 1, TypeF, ClassRegular, nullptr }, + { EOpFwidth, "fwidth", 1, TypeF, ClassRegular, nullptr }, + { EOpNull } +}; + +// For functions declared some other way, but still use the table to relate to operator. 
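+// For example, texture() and packHalf2x16() are declared through the text strings built up in
+// TBuiltIns::initialize() below, but the entries here still let relateTabledBuiltins() map those
+// names to EOpTexture and EOpPackHalf2x16.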
+struct CustomFunction { + TOperator op; // operator to map the name to + const char* name; // function name + const Versioning* versioning; // nullptr means always a valid version +}; + +const CustomFunction CustomFunctions[] = { + { EOpBarrier, "barrier", nullptr }, + { EOpMemoryBarrierShared, "memoryBarrierShared", nullptr }, + { EOpGroupMemoryBarrier, "groupMemoryBarrier", nullptr }, + { EOpMemoryBarrier, "memoryBarrier", nullptr }, + { EOpMemoryBarrierBuffer, "memoryBarrierBuffer", nullptr }, + + { EOpPackSnorm2x16, "packSnorm2x16", nullptr }, + { EOpUnpackSnorm2x16, "unpackSnorm2x16", nullptr }, + { EOpPackUnorm2x16, "packUnorm2x16", nullptr }, + { EOpUnpackUnorm2x16, "unpackUnorm2x16", nullptr }, + { EOpPackHalf2x16, "packHalf2x16", nullptr }, + { EOpUnpackHalf2x16, "unpackHalf2x16", nullptr }, + + { EOpMul, "matrixCompMult", nullptr }, + { EOpOuterProduct, "outerProduct", nullptr }, + { EOpTranspose, "transpose", nullptr }, + { EOpDeterminant, "determinant", nullptr }, + { EOpMatrixInverse, "inverse", nullptr }, + { EOpFloatBitsToInt, "floatBitsToInt", nullptr }, + { EOpFloatBitsToUint, "floatBitsToUint", nullptr }, + { EOpIntBitsToFloat, "intBitsToFloat", nullptr }, + { EOpUintBitsToFloat, "uintBitsToFloat", nullptr }, + + { EOpTextureQuerySize, "textureSize", nullptr }, + { EOpTextureQueryLod, "textureQueryLod", nullptr }, + { EOpTextureQueryLevels, "textureQueryLevels", nullptr }, + { EOpTextureQuerySamples, "textureSamples", nullptr }, + { EOpTexture, "texture", nullptr }, + { EOpTextureProj, "textureProj", nullptr }, + { EOpTextureLod, "textureLod", nullptr }, + { EOpTextureOffset, "textureOffset", nullptr }, + { EOpTextureFetch, "texelFetch", nullptr }, + { EOpTextureFetchOffset, "texelFetchOffset", nullptr }, + { EOpTextureProjOffset, "textureProjOffset", nullptr }, + { EOpTextureLodOffset, "textureLodOffset", nullptr }, + { EOpTextureProjLod, "textureProjLod", nullptr }, + { EOpTextureProjLodOffset, "textureProjLodOffset", nullptr }, + { EOpTextureGrad, "textureGrad", nullptr }, + { EOpTextureGradOffset, "textureGradOffset", nullptr }, + { EOpTextureProjGrad, "textureProjGrad", nullptr }, + { EOpTextureProjGradOffset, "textureProjGradOffset", nullptr }, + + { EOpNull } +}; + +// For the given table of functions, add all the indicated prototypes for each +// one, to be returned in the passed in decls. +void AddTabledBuiltin(TString& decls, const BuiltInFunction& function) +{ + const auto isScalarType = [](int type) { return (type & TypeStringColumnMask) == 0; }; + + // loop across these two: + // 0: the varying arg set, and + // 1: the fixed scalar args + const ArgClass ClassFixed = (ArgClass)(ClassLS | ClassXLS | ClassLS2 | ClassFS | ClassFS2); + for (int fixed = 0; fixed < ((function.classes & ClassFixed) > 0 ? 
2 : 1); ++fixed) { + + if (fixed == 0 && (function.classes & ClassXLS)) + continue; + + // walk the type strings in TypeString[] + for (int type = 0; type < TypeStringCount; ++type) { + // skip types not selected: go from type to row number to type bit + if ((function.types & (1 << (type >> TypeStringRowShift))) == 0) + continue; + + // if we aren't on a scalar, and should be, skip + if ((function.classes & ClassV1) && !isScalarType(type)) + continue; + + // if we aren't on a 3-vector, and should be, skip + if ((function.classes & ClassV3) && (type & TypeStringColumnMask) != 2) + continue; + + // skip replication of all arg scalars between the varying arg set and the fixed args + if (fixed == 1 && type == (type & TypeStringScalarMask) && (function.classes & ClassXLS) == 0) + continue; + + // skip scalars when we are told to + if ((function.classes & ClassNS) && isScalarType(type)) + continue; + + // return type + if (function.classes & ClassB) + decls.append(TypeString[type & TypeStringColumnMask]); + else if (function.classes & ClassRS) + decls.append(TypeString[type & TypeStringScalarMask]); + else + decls.append(TypeString[type]); + decls.append(" "); + decls.append(function.name); + decls.append("("); + + // arguments + for (int arg = 0; arg < function.numArguments; ++arg) { + if (arg == function.numArguments - 1 && (function.classes & ClassLO)) + decls.append("out "); + if (arg == 0) { +#ifndef GLSLANG_WEB + if (function.classes & ClassCV) + decls.append("coherent volatile "); +#endif + if (function.classes & ClassFIO) + decls.append("inout "); + if (function.classes & ClassFO) + decls.append("out "); + } + if ((function.classes & ClassLB) && arg == function.numArguments - 1) + decls.append(TypeString[type & TypeStringColumnMask]); + else if (fixed && ((arg == function.numArguments - 1 && (function.classes & (ClassLS | ClassXLS | + ClassLS2))) || + (arg == function.numArguments - 2 && (function.classes & ClassLS2)) || + (arg == 0 && (function.classes & (ClassFS | ClassFS2))) || + (arg == 1 && (function.classes & ClassFS2)))) + decls.append(TypeString[type & TypeStringScalarMask]); + else + decls.append(TypeString[type]); + if (arg < function.numArguments - 1) + decls.append(","); + } + decls.append(");\n"); + } + } +} + +// See if the tabled versioning information allows the current version. +bool ValidVersion(const BuiltInFunction& function, int version, EProfile profile, const SpvVersion& /* spVersion */) +{ +#if defined(GLSLANG_WEB) || defined(GLSLANG_ANGLE) + // all entries in table are valid + return true; +#endif + + // nullptr means always valid + if (function.versioning == nullptr) + return true; + + // check for what is said about our current profile + for (const Versioning* v = function.versioning; v->profiles != EBadProfile; ++v) { + if ((v->profiles & profile) != 0) { + if (v->minCoreVersion <= version || (v->numExtensions > 0 && v->minExtendedVersion <= version)) + return true; + } + } + + return false; +} + +// Relate a single table of built-ins to their AST operator. +// This can get called redundantly (especially for the common built-ins, when +// called once per stage). This is a performance issue only, not a correctness +// concern. It is done for quality arising from simplicity, as there are subtleties +// to get correct if instead trying to do it surgically. 
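+// Each call to symbolTable.relateToOperator(functions->name, functions->op) ties a tabled name to
+// its TOperator, so, for example, "radians" from BaseFunctions ends up associated with EOpRadians.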
+template +void RelateTabledBuiltins(const FunctionT* functions, TSymbolTable& symbolTable) +{ + while (functions->op != EOpNull) { + symbolTable.relateToOperator(functions->name, functions->op); + ++functions; + } +} + +} // end anonymous namespace + +// Add declarations for all tables of built-in functions. +void TBuiltIns::addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion) +{ + const auto forEachFunction = [&](TString& decls, const BuiltInFunction* function) { + while (function->op != EOpNull) { + if (ValidVersion(*function, version, profile, spvVersion)) + AddTabledBuiltin(decls, *function); + ++function; + } + }; + + forEachFunction(commonBuiltins, BaseFunctions); + forEachFunction(stageBuiltins[EShLangFragment], DerivativeFunctions); + + if ((profile == EEsProfile && version >= 320) || (profile != EEsProfile && version >= 450)) + forEachFunction(stageBuiltins[EShLangCompute], DerivativeFunctions); +} + +// Relate all tables of built-ins to the AST operators. +void TBuiltIns::relateTabledBuiltins(int /* version */, EProfile /* profile */, const SpvVersion& /* spvVersion */, EShLanguage /* stage */, TSymbolTable& symbolTable) +{ + RelateTabledBuiltins(BaseFunctions, symbolTable); + RelateTabledBuiltins(DerivativeFunctions, symbolTable); + RelateTabledBuiltins(CustomFunctions, symbolTable); +} + +inline bool IncludeLegacy(int version, EProfile profile, const SpvVersion& spvVersion) +{ + return profile != EEsProfile && (version <= 130 || (spvVersion.spv == 0 && ARBCompatibility) || profile == ECompatibilityProfile); +} + +// Construct TBuiltInParseables base class. This can be used for language-common constructs. +TBuiltInParseables::TBuiltInParseables() +{ +} + +// Destroy TBuiltInParseables. +TBuiltInParseables::~TBuiltInParseables() +{ +} + +TBuiltIns::TBuiltIns() +{ + // Set up textual representations for making all the permutations + // of texturing/imaging functions. + prefixes[EbtFloat] = ""; + prefixes[EbtInt] = "i"; + prefixes[EbtUint] = "u"; +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + prefixes[EbtFloat16] = "f16"; + prefixes[EbtInt8] = "i8"; + prefixes[EbtUint8] = "u8"; + prefixes[EbtInt16] = "i16"; + prefixes[EbtUint16] = "u16"; +#endif + + postfixes[2] = "2"; + postfixes[3] = "3"; + postfixes[4] = "4"; + + // Map from symbolic class of texturing dimension to numeric dimensions. + dimMap[Esd2D] = 2; + dimMap[Esd3D] = 3; + dimMap[EsdCube] = 3; +#ifndef GLSLANG_WEB +#ifndef GLSLANG_ANGLE + dimMap[Esd1D] = 1; +#endif + dimMap[EsdRect] = 2; + dimMap[EsdBuffer] = 1; + dimMap[EsdSubpass] = 2; // potentially unused for now +#endif +} + +TBuiltIns::~TBuiltIns() +{ +} + + +// +// Add all context-independent built-in functions and variables that are present +// for the given version and profile. Share common ones across stages, otherwise +// make stage-specific entries. +// +// Most built-ins variables can be added as simple text strings. Some need to +// be added programmatically, which is done later in IdentifyBuiltIns() below. 
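+// For example, the derivative-control prototypes in initialize() below are appended as literal
+// strings ("float dFdxFine(float p);" ...), while symbols that need extension bookkeeping or
+// per-stage handling are added programmatically through identifyBuiltIns() and the symbol table.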
+// +void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvVersion) +{ +#ifdef GLSLANG_WEB + version = 310; + profile = EEsProfile; +#elif defined(GLSLANG_ANGLE) + version = 450; + profile = ECoreProfile; +#endif + addTabledBuiltins(version, profile, spvVersion); + + //============================================================================ + // + // Prototypes for built-in functions used repeatly by different shaders + // + //============================================================================ + +#ifndef GLSLANG_WEB + // + // Derivatives Functions. + // + TString derivativeControls ( + "float dFdxFine(float p);" + "vec2 dFdxFine(vec2 p);" + "vec3 dFdxFine(vec3 p);" + "vec4 dFdxFine(vec4 p);" + + "float dFdyFine(float p);" + "vec2 dFdyFine(vec2 p);" + "vec3 dFdyFine(vec3 p);" + "vec4 dFdyFine(vec4 p);" + + "float fwidthFine(float p);" + "vec2 fwidthFine(vec2 p);" + "vec3 fwidthFine(vec3 p);" + "vec4 fwidthFine(vec4 p);" + + "float dFdxCoarse(float p);" + "vec2 dFdxCoarse(vec2 p);" + "vec3 dFdxCoarse(vec3 p);" + "vec4 dFdxCoarse(vec4 p);" + + "float dFdyCoarse(float p);" + "vec2 dFdyCoarse(vec2 p);" + "vec3 dFdyCoarse(vec3 p);" + "vec4 dFdyCoarse(vec4 p);" + + "float fwidthCoarse(float p);" + "vec2 fwidthCoarse(vec2 p);" + "vec3 fwidthCoarse(vec3 p);" + "vec4 fwidthCoarse(vec4 p);" + ); + +#ifndef GLSLANG_ANGLE + TString derivativesAndControl16bits ( + "float16_t dFdx(float16_t);" + "f16vec2 dFdx(f16vec2);" + "f16vec3 dFdx(f16vec3);" + "f16vec4 dFdx(f16vec4);" + + "float16_t dFdy(float16_t);" + "f16vec2 dFdy(f16vec2);" + "f16vec3 dFdy(f16vec3);" + "f16vec4 dFdy(f16vec4);" + + "float16_t dFdxFine(float16_t);" + "f16vec2 dFdxFine(f16vec2);" + "f16vec3 dFdxFine(f16vec3);" + "f16vec4 dFdxFine(f16vec4);" + + "float16_t dFdyFine(float16_t);" + "f16vec2 dFdyFine(f16vec2);" + "f16vec3 dFdyFine(f16vec3);" + "f16vec4 dFdyFine(f16vec4);" + + "float16_t dFdxCoarse(float16_t);" + "f16vec2 dFdxCoarse(f16vec2);" + "f16vec3 dFdxCoarse(f16vec3);" + "f16vec4 dFdxCoarse(f16vec4);" + + "float16_t dFdyCoarse(float16_t);" + "f16vec2 dFdyCoarse(f16vec2);" + "f16vec3 dFdyCoarse(f16vec3);" + "f16vec4 dFdyCoarse(f16vec4);" + + "float16_t fwidth(float16_t);" + "f16vec2 fwidth(f16vec2);" + "f16vec3 fwidth(f16vec3);" + "f16vec4 fwidth(f16vec4);" + + "float16_t fwidthFine(float16_t);" + "f16vec2 fwidthFine(f16vec2);" + "f16vec3 fwidthFine(f16vec3);" + "f16vec4 fwidthFine(f16vec4);" + + "float16_t fwidthCoarse(float16_t);" + "f16vec2 fwidthCoarse(f16vec2);" + "f16vec3 fwidthCoarse(f16vec3);" + "f16vec4 fwidthCoarse(f16vec4);" + ); + + TString derivativesAndControl64bits ( + "float64_t dFdx(float64_t);" + "f64vec2 dFdx(f64vec2);" + "f64vec3 dFdx(f64vec3);" + "f64vec4 dFdx(f64vec4);" + + "float64_t dFdy(float64_t);" + "f64vec2 dFdy(f64vec2);" + "f64vec3 dFdy(f64vec3);" + "f64vec4 dFdy(f64vec4);" + + "float64_t dFdxFine(float64_t);" + "f64vec2 dFdxFine(f64vec2);" + "f64vec3 dFdxFine(f64vec3);" + "f64vec4 dFdxFine(f64vec4);" + + "float64_t dFdyFine(float64_t);" + "f64vec2 dFdyFine(f64vec2);" + "f64vec3 dFdyFine(f64vec3);" + "f64vec4 dFdyFine(f64vec4);" + + "float64_t dFdxCoarse(float64_t);" + "f64vec2 dFdxCoarse(f64vec2);" + "f64vec3 dFdxCoarse(f64vec3);" + "f64vec4 dFdxCoarse(f64vec4);" + + "float64_t dFdyCoarse(float64_t);" + "f64vec2 dFdyCoarse(f64vec2);" + "f64vec3 dFdyCoarse(f64vec3);" + "f64vec4 dFdyCoarse(f64vec4);" + + "float64_t fwidth(float64_t);" + "f64vec2 fwidth(f64vec2);" + "f64vec3 fwidth(f64vec3);" + "f64vec4 fwidth(f64vec4);" + + "float64_t fwidthFine(float64_t);" + 
"f64vec2 fwidthFine(f64vec2);" + "f64vec3 fwidthFine(f64vec3);" + "f64vec4 fwidthFine(f64vec4);" + + "float64_t fwidthCoarse(float64_t);" + "f64vec2 fwidthCoarse(f64vec2);" + "f64vec3 fwidthCoarse(f64vec3);" + "f64vec4 fwidthCoarse(f64vec4);" + ); + + //============================================================================ + // + // Prototypes for built-in functions seen by both vertex and fragment shaders. + // + //============================================================================ + + // + // double functions added to desktop 4.00, but not fma, frexp, ldexp, or pack/unpack + // + if (profile != EEsProfile && version >= 150) { // ARB_gpu_shader_fp64 + commonBuiltins.append( + + "double sqrt(double);" + "dvec2 sqrt(dvec2);" + "dvec3 sqrt(dvec3);" + "dvec4 sqrt(dvec4);" + + "double inversesqrt(double);" + "dvec2 inversesqrt(dvec2);" + "dvec3 inversesqrt(dvec3);" + "dvec4 inversesqrt(dvec4);" + + "double abs(double);" + "dvec2 abs(dvec2);" + "dvec3 abs(dvec3);" + "dvec4 abs(dvec4);" + + "double sign(double);" + "dvec2 sign(dvec2);" + "dvec3 sign(dvec3);" + "dvec4 sign(dvec4);" + + "double floor(double);" + "dvec2 floor(dvec2);" + "dvec3 floor(dvec3);" + "dvec4 floor(dvec4);" + + "double trunc(double);" + "dvec2 trunc(dvec2);" + "dvec3 trunc(dvec3);" + "dvec4 trunc(dvec4);" + + "double round(double);" + "dvec2 round(dvec2);" + "dvec3 round(dvec3);" + "dvec4 round(dvec4);" + + "double roundEven(double);" + "dvec2 roundEven(dvec2);" + "dvec3 roundEven(dvec3);" + "dvec4 roundEven(dvec4);" + + "double ceil(double);" + "dvec2 ceil(dvec2);" + "dvec3 ceil(dvec3);" + "dvec4 ceil(dvec4);" + + "double fract(double);" + "dvec2 fract(dvec2);" + "dvec3 fract(dvec3);" + "dvec4 fract(dvec4);" + + "double mod(double, double);" + "dvec2 mod(dvec2 , double);" + "dvec3 mod(dvec3 , double);" + "dvec4 mod(dvec4 , double);" + "dvec2 mod(dvec2 , dvec2);" + "dvec3 mod(dvec3 , dvec3);" + "dvec4 mod(dvec4 , dvec4);" + + "double modf(double, out double);" + "dvec2 modf(dvec2, out dvec2);" + "dvec3 modf(dvec3, out dvec3);" + "dvec4 modf(dvec4, out dvec4);" + + "double min(double, double);" + "dvec2 min(dvec2, double);" + "dvec3 min(dvec3, double);" + "dvec4 min(dvec4, double);" + "dvec2 min(dvec2, dvec2);" + "dvec3 min(dvec3, dvec3);" + "dvec4 min(dvec4, dvec4);" + + "double max(double, double);" + "dvec2 max(dvec2 , double);" + "dvec3 max(dvec3 , double);" + "dvec4 max(dvec4 , double);" + "dvec2 max(dvec2 , dvec2);" + "dvec3 max(dvec3 , dvec3);" + "dvec4 max(dvec4 , dvec4);" + + "double clamp(double, double, double);" + "dvec2 clamp(dvec2 , double, double);" + "dvec3 clamp(dvec3 , double, double);" + "dvec4 clamp(dvec4 , double, double);" + "dvec2 clamp(dvec2 , dvec2 , dvec2);" + "dvec3 clamp(dvec3 , dvec3 , dvec3);" + "dvec4 clamp(dvec4 , dvec4 , dvec4);" + + "double mix(double, double, double);" + "dvec2 mix(dvec2, dvec2, double);" + "dvec3 mix(dvec3, dvec3, double);" + "dvec4 mix(dvec4, dvec4, double);" + "dvec2 mix(dvec2, dvec2, dvec2);" + "dvec3 mix(dvec3, dvec3, dvec3);" + "dvec4 mix(dvec4, dvec4, dvec4);" + "double mix(double, double, bool);" + "dvec2 mix(dvec2, dvec2, bvec2);" + "dvec3 mix(dvec3, dvec3, bvec3);" + "dvec4 mix(dvec4, dvec4, bvec4);" + + "double step(double, double);" + "dvec2 step(dvec2 , dvec2);" + "dvec3 step(dvec3 , dvec3);" + "dvec4 step(dvec4 , dvec4);" + "dvec2 step(double, dvec2);" + "dvec3 step(double, dvec3);" + "dvec4 step(double, dvec4);" + + "double smoothstep(double, double, double);" + "dvec2 smoothstep(dvec2 , dvec2 , dvec2);" + "dvec3 smoothstep(dvec3 , dvec3 , 
dvec3);" + "dvec4 smoothstep(dvec4 , dvec4 , dvec4);" + "dvec2 smoothstep(double, double, dvec2);" + "dvec3 smoothstep(double, double, dvec3);" + "dvec4 smoothstep(double, double, dvec4);" + + "bool isnan(double);" + "bvec2 isnan(dvec2);" + "bvec3 isnan(dvec3);" + "bvec4 isnan(dvec4);" + + "bool isinf(double);" + "bvec2 isinf(dvec2);" + "bvec3 isinf(dvec3);" + "bvec4 isinf(dvec4);" + + "double length(double);" + "double length(dvec2);" + "double length(dvec3);" + "double length(dvec4);" + + "double distance(double, double);" + "double distance(dvec2 , dvec2);" + "double distance(dvec3 , dvec3);" + "double distance(dvec4 , dvec4);" + + "double dot(double, double);" + "double dot(dvec2 , dvec2);" + "double dot(dvec3 , dvec3);" + "double dot(dvec4 , dvec4);" + + "dvec3 cross(dvec3, dvec3);" + + "double normalize(double);" + "dvec2 normalize(dvec2);" + "dvec3 normalize(dvec3);" + "dvec4 normalize(dvec4);" + + "double faceforward(double, double, double);" + "dvec2 faceforward(dvec2, dvec2, dvec2);" + "dvec3 faceforward(dvec3, dvec3, dvec3);" + "dvec4 faceforward(dvec4, dvec4, dvec4);" + + "double reflect(double, double);" + "dvec2 reflect(dvec2 , dvec2 );" + "dvec3 reflect(dvec3 , dvec3 );" + "dvec4 reflect(dvec4 , dvec4 );" + + "double refract(double, double, double);" + "dvec2 refract(dvec2 , dvec2 , double);" + "dvec3 refract(dvec3 , dvec3 , double);" + "dvec4 refract(dvec4 , dvec4 , double);" + + "dmat2 matrixCompMult(dmat2, dmat2);" + "dmat3 matrixCompMult(dmat3, dmat3);" + "dmat4 matrixCompMult(dmat4, dmat4);" + "dmat2x3 matrixCompMult(dmat2x3, dmat2x3);" + "dmat2x4 matrixCompMult(dmat2x4, dmat2x4);" + "dmat3x2 matrixCompMult(dmat3x2, dmat3x2);" + "dmat3x4 matrixCompMult(dmat3x4, dmat3x4);" + "dmat4x2 matrixCompMult(dmat4x2, dmat4x2);" + "dmat4x3 matrixCompMult(dmat4x3, dmat4x3);" + + "dmat2 outerProduct(dvec2, dvec2);" + "dmat3 outerProduct(dvec3, dvec3);" + "dmat4 outerProduct(dvec4, dvec4);" + "dmat2x3 outerProduct(dvec3, dvec2);" + "dmat3x2 outerProduct(dvec2, dvec3);" + "dmat2x4 outerProduct(dvec4, dvec2);" + "dmat4x2 outerProduct(dvec2, dvec4);" + "dmat3x4 outerProduct(dvec4, dvec3);" + "dmat4x3 outerProduct(dvec3, dvec4);" + + "dmat2 transpose(dmat2);" + "dmat3 transpose(dmat3);" + "dmat4 transpose(dmat4);" + "dmat2x3 transpose(dmat3x2);" + "dmat3x2 transpose(dmat2x3);" + "dmat2x4 transpose(dmat4x2);" + "dmat4x2 transpose(dmat2x4);" + "dmat3x4 transpose(dmat4x3);" + "dmat4x3 transpose(dmat3x4);" + + "double determinant(dmat2);" + "double determinant(dmat3);" + "double determinant(dmat4);" + + "dmat2 inverse(dmat2);" + "dmat3 inverse(dmat3);" + "dmat4 inverse(dmat4);" + + "bvec2 lessThan(dvec2, dvec2);" + "bvec3 lessThan(dvec3, dvec3);" + "bvec4 lessThan(dvec4, dvec4);" + + "bvec2 lessThanEqual(dvec2, dvec2);" + "bvec3 lessThanEqual(dvec3, dvec3);" + "bvec4 lessThanEqual(dvec4, dvec4);" + + "bvec2 greaterThan(dvec2, dvec2);" + "bvec3 greaterThan(dvec3, dvec3);" + "bvec4 greaterThan(dvec4, dvec4);" + + "bvec2 greaterThanEqual(dvec2, dvec2);" + "bvec3 greaterThanEqual(dvec3, dvec3);" + "bvec4 greaterThanEqual(dvec4, dvec4);" + + "bvec2 equal(dvec2, dvec2);" + "bvec3 equal(dvec3, dvec3);" + "bvec4 equal(dvec4, dvec4);" + + "bvec2 notEqual(dvec2, dvec2);" + "bvec3 notEqual(dvec3, dvec3);" + "bvec4 notEqual(dvec4, dvec4);" + + "\n"); + } + + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + + "int64_t abs(int64_t);" + "i64vec2 abs(i64vec2);" + "i64vec3 abs(i64vec3);" + "i64vec4 abs(i64vec4);" + + "int64_t sign(int64_t);" + "i64vec2 sign(i64vec2);" + "i64vec3 
sign(i64vec3);" + "i64vec4 sign(i64vec4);" + + "int64_t min(int64_t, int64_t);" + "i64vec2 min(i64vec2, int64_t);" + "i64vec3 min(i64vec3, int64_t);" + "i64vec4 min(i64vec4, int64_t);" + "i64vec2 min(i64vec2, i64vec2);" + "i64vec3 min(i64vec3, i64vec3);" + "i64vec4 min(i64vec4, i64vec4);" + "uint64_t min(uint64_t, uint64_t);" + "u64vec2 min(u64vec2, uint64_t);" + "u64vec3 min(u64vec3, uint64_t);" + "u64vec4 min(u64vec4, uint64_t);" + "u64vec2 min(u64vec2, u64vec2);" + "u64vec3 min(u64vec3, u64vec3);" + "u64vec4 min(u64vec4, u64vec4);" + + "int64_t max(int64_t, int64_t);" + "i64vec2 max(i64vec2, int64_t);" + "i64vec3 max(i64vec3, int64_t);" + "i64vec4 max(i64vec4, int64_t);" + "i64vec2 max(i64vec2, i64vec2);" + "i64vec3 max(i64vec3, i64vec3);" + "i64vec4 max(i64vec4, i64vec4);" + "uint64_t max(uint64_t, uint64_t);" + "u64vec2 max(u64vec2, uint64_t);" + "u64vec3 max(u64vec3, uint64_t);" + "u64vec4 max(u64vec4, uint64_t);" + "u64vec2 max(u64vec2, u64vec2);" + "u64vec3 max(u64vec3, u64vec3);" + "u64vec4 max(u64vec4, u64vec4);" + + "int64_t clamp(int64_t, int64_t, int64_t);" + "i64vec2 clamp(i64vec2, int64_t, int64_t);" + "i64vec3 clamp(i64vec3, int64_t, int64_t);" + "i64vec4 clamp(i64vec4, int64_t, int64_t);" + "i64vec2 clamp(i64vec2, i64vec2, i64vec2);" + "i64vec3 clamp(i64vec3, i64vec3, i64vec3);" + "i64vec4 clamp(i64vec4, i64vec4, i64vec4);" + "uint64_t clamp(uint64_t, uint64_t, uint64_t);" + "u64vec2 clamp(u64vec2, uint64_t, uint64_t);" + "u64vec3 clamp(u64vec3, uint64_t, uint64_t);" + "u64vec4 clamp(u64vec4, uint64_t, uint64_t);" + "u64vec2 clamp(u64vec2, u64vec2, u64vec2);" + "u64vec3 clamp(u64vec3, u64vec3, u64vec3);" + "u64vec4 clamp(u64vec4, u64vec4, u64vec4);" + + "int64_t mix(int64_t, int64_t, bool);" + "i64vec2 mix(i64vec2, i64vec2, bvec2);" + "i64vec3 mix(i64vec3, i64vec3, bvec3);" + "i64vec4 mix(i64vec4, i64vec4, bvec4);" + "uint64_t mix(uint64_t, uint64_t, bool);" + "u64vec2 mix(u64vec2, u64vec2, bvec2);" + "u64vec3 mix(u64vec3, u64vec3, bvec3);" + "u64vec4 mix(u64vec4, u64vec4, bvec4);" + + "int64_t doubleBitsToInt64(double);" + "i64vec2 doubleBitsToInt64(dvec2);" + "i64vec3 doubleBitsToInt64(dvec3);" + "i64vec4 doubleBitsToInt64(dvec4);" + + "uint64_t doubleBitsToUint64(double);" + "u64vec2 doubleBitsToUint64(dvec2);" + "u64vec3 doubleBitsToUint64(dvec3);" + "u64vec4 doubleBitsToUint64(dvec4);" + + "double int64BitsToDouble(int64_t);" + "dvec2 int64BitsToDouble(i64vec2);" + "dvec3 int64BitsToDouble(i64vec3);" + "dvec4 int64BitsToDouble(i64vec4);" + + "double uint64BitsToDouble(uint64_t);" + "dvec2 uint64BitsToDouble(u64vec2);" + "dvec3 uint64BitsToDouble(u64vec3);" + "dvec4 uint64BitsToDouble(u64vec4);" + + "int64_t packInt2x32(ivec2);" + "uint64_t packUint2x32(uvec2);" + "ivec2 unpackInt2x32(int64_t);" + "uvec2 unpackUint2x32(uint64_t);" + + "bvec2 lessThan(i64vec2, i64vec2);" + "bvec3 lessThan(i64vec3, i64vec3);" + "bvec4 lessThan(i64vec4, i64vec4);" + "bvec2 lessThan(u64vec2, u64vec2);" + "bvec3 lessThan(u64vec3, u64vec3);" + "bvec4 lessThan(u64vec4, u64vec4);" + + "bvec2 lessThanEqual(i64vec2, i64vec2);" + "bvec3 lessThanEqual(i64vec3, i64vec3);" + "bvec4 lessThanEqual(i64vec4, i64vec4);" + "bvec2 lessThanEqual(u64vec2, u64vec2);" + "bvec3 lessThanEqual(u64vec3, u64vec3);" + "bvec4 lessThanEqual(u64vec4, u64vec4);" + + "bvec2 greaterThan(i64vec2, i64vec2);" + "bvec3 greaterThan(i64vec3, i64vec3);" + "bvec4 greaterThan(i64vec4, i64vec4);" + "bvec2 greaterThan(u64vec2, u64vec2);" + "bvec3 greaterThan(u64vec3, u64vec3);" + "bvec4 greaterThan(u64vec4, u64vec4);" + + "bvec2 
greaterThanEqual(i64vec2, i64vec2);" + "bvec3 greaterThanEqual(i64vec3, i64vec3);" + "bvec4 greaterThanEqual(i64vec4, i64vec4);" + "bvec2 greaterThanEqual(u64vec2, u64vec2);" + "bvec3 greaterThanEqual(u64vec3, u64vec3);" + "bvec4 greaterThanEqual(u64vec4, u64vec4);" + + "bvec2 equal(i64vec2, i64vec2);" + "bvec3 equal(i64vec3, i64vec3);" + "bvec4 equal(i64vec4, i64vec4);" + "bvec2 equal(u64vec2, u64vec2);" + "bvec3 equal(u64vec3, u64vec3);" + "bvec4 equal(u64vec4, u64vec4);" + + "bvec2 notEqual(i64vec2, i64vec2);" + "bvec3 notEqual(i64vec3, i64vec3);" + "bvec4 notEqual(i64vec4, i64vec4);" + "bvec2 notEqual(u64vec2, u64vec2);" + "bvec3 notEqual(u64vec3, u64vec3);" + "bvec4 notEqual(u64vec4, u64vec4);" + + "int64_t findLSB(int64_t);" + "i64vec2 findLSB(i64vec2);" + "i64vec3 findLSB(i64vec3);" + "i64vec4 findLSB(i64vec4);" + + "int64_t findLSB(uint64_t);" + "i64vec2 findLSB(u64vec2);" + "i64vec3 findLSB(u64vec3);" + "i64vec4 findLSB(u64vec4);" + + "int64_t findMSB(int64_t);" + "i64vec2 findMSB(i64vec2);" + "i64vec3 findMSB(i64vec3);" + "i64vec4 findMSB(i64vec4);" + + "int64_t findMSB(uint64_t);" + "i64vec2 findMSB(u64vec2);" + "i64vec3 findMSB(u64vec3);" + "i64vec4 findMSB(u64vec4);" + + "\n" + ); + } + + // GL_AMD_shader_trinary_minmax + if (profile != EEsProfile && version >= 430) { + commonBuiltins.append( + "float min3(float, float, float);" + "vec2 min3(vec2, vec2, vec2);" + "vec3 min3(vec3, vec3, vec3);" + "vec4 min3(vec4, vec4, vec4);" + + "int min3(int, int, int);" + "ivec2 min3(ivec2, ivec2, ivec2);" + "ivec3 min3(ivec3, ivec3, ivec3);" + "ivec4 min3(ivec4, ivec4, ivec4);" + + "uint min3(uint, uint, uint);" + "uvec2 min3(uvec2, uvec2, uvec2);" + "uvec3 min3(uvec3, uvec3, uvec3);" + "uvec4 min3(uvec4, uvec4, uvec4);" + + "float max3(float, float, float);" + "vec2 max3(vec2, vec2, vec2);" + "vec3 max3(vec3, vec3, vec3);" + "vec4 max3(vec4, vec4, vec4);" + + "int max3(int, int, int);" + "ivec2 max3(ivec2, ivec2, ivec2);" + "ivec3 max3(ivec3, ivec3, ivec3);" + "ivec4 max3(ivec4, ivec4, ivec4);" + + "uint max3(uint, uint, uint);" + "uvec2 max3(uvec2, uvec2, uvec2);" + "uvec3 max3(uvec3, uvec3, uvec3);" + "uvec4 max3(uvec4, uvec4, uvec4);" + + "float mid3(float, float, float);" + "vec2 mid3(vec2, vec2, vec2);" + "vec3 mid3(vec3, vec3, vec3);" + "vec4 mid3(vec4, vec4, vec4);" + + "int mid3(int, int, int);" + "ivec2 mid3(ivec2, ivec2, ivec2);" + "ivec3 mid3(ivec3, ivec3, ivec3);" + "ivec4 mid3(ivec4, ivec4, ivec4);" + + "uint mid3(uint, uint, uint);" + "uvec2 mid3(uvec2, uvec2, uvec2);" + "uvec3 mid3(uvec3, uvec3, uvec3);" + "uvec4 mid3(uvec4, uvec4, uvec4);" + + "float16_t min3(float16_t, float16_t, float16_t);" + "f16vec2 min3(f16vec2, f16vec2, f16vec2);" + "f16vec3 min3(f16vec3, f16vec3, f16vec3);" + "f16vec4 min3(f16vec4, f16vec4, f16vec4);" + + "float16_t max3(float16_t, float16_t, float16_t);" + "f16vec2 max3(f16vec2, f16vec2, f16vec2);" + "f16vec3 max3(f16vec3, f16vec3, f16vec3);" + "f16vec4 max3(f16vec4, f16vec4, f16vec4);" + + "float16_t mid3(float16_t, float16_t, float16_t);" + "f16vec2 mid3(f16vec2, f16vec2, f16vec2);" + "f16vec3 mid3(f16vec3, f16vec3, f16vec3);" + "f16vec4 mid3(f16vec4, f16vec4, f16vec4);" + + "int16_t min3(int16_t, int16_t, int16_t);" + "i16vec2 min3(i16vec2, i16vec2, i16vec2);" + "i16vec3 min3(i16vec3, i16vec3, i16vec3);" + "i16vec4 min3(i16vec4, i16vec4, i16vec4);" + + "int16_t max3(int16_t, int16_t, int16_t);" + "i16vec2 max3(i16vec2, i16vec2, i16vec2);" + "i16vec3 max3(i16vec3, i16vec3, i16vec3);" + "i16vec4 max3(i16vec4, i16vec4, i16vec4);" + + "int16_t 
mid3(int16_t, int16_t, int16_t);" + "i16vec2 mid3(i16vec2, i16vec2, i16vec2);" + "i16vec3 mid3(i16vec3, i16vec3, i16vec3);" + "i16vec4 mid3(i16vec4, i16vec4, i16vec4);" + + "uint16_t min3(uint16_t, uint16_t, uint16_t);" + "u16vec2 min3(u16vec2, u16vec2, u16vec2);" + "u16vec3 min3(u16vec3, u16vec3, u16vec3);" + "u16vec4 min3(u16vec4, u16vec4, u16vec4);" + + "uint16_t max3(uint16_t, uint16_t, uint16_t);" + "u16vec2 max3(u16vec2, u16vec2, u16vec2);" + "u16vec3 max3(u16vec3, u16vec3, u16vec3);" + "u16vec4 max3(u16vec4, u16vec4, u16vec4);" + + "uint16_t mid3(uint16_t, uint16_t, uint16_t);" + "u16vec2 mid3(u16vec2, u16vec2, u16vec2);" + "u16vec3 mid3(u16vec3, u16vec3, u16vec3);" + "u16vec4 mid3(u16vec4, u16vec4, u16vec4);" + + "\n" + ); + } +#endif // !GLSLANG_ANGLE + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 430)) { + commonBuiltins.append( + "uint atomicAdd(coherent volatile inout uint, uint, int, int, int);" + " int atomicAdd(coherent volatile inout int, int, int, int, int);" + + "uint atomicMin(coherent volatile inout uint, uint, int, int, int);" + " int atomicMin(coherent volatile inout int, int, int, int, int);" + + "uint atomicMax(coherent volatile inout uint, uint, int, int, int);" + " int atomicMax(coherent volatile inout int, int, int, int, int);" + + "uint atomicAnd(coherent volatile inout uint, uint, int, int, int);" + " int atomicAnd(coherent volatile inout int, int, int, int, int);" + + "uint atomicOr (coherent volatile inout uint, uint, int, int, int);" + " int atomicOr (coherent volatile inout int, int, int, int, int);" + + "uint atomicXor(coherent volatile inout uint, uint, int, int, int);" + " int atomicXor(coherent volatile inout int, int, int, int, int);" + + "uint atomicExchange(coherent volatile inout uint, uint, int, int, int);" + " int atomicExchange(coherent volatile inout int, int, int, int, int);" + + "uint atomicCompSwap(coherent volatile inout uint, uint, uint, int, int, int, int, int);" + " int atomicCompSwap(coherent volatile inout int, int, int, int, int, int, int, int);" + + "uint atomicLoad(coherent volatile in uint, int, int, int);" + " int atomicLoad(coherent volatile in int, int, int, int);" + + "void atomicStore(coherent volatile out uint, uint, int, int, int);" + "void atomicStore(coherent volatile out int, int, int, int, int);" + + "\n"); + } + +#ifndef GLSLANG_ANGLE + if (profile != EEsProfile && version >= 440) { + commonBuiltins.append( + "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicMin(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicMin(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicMax(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicMax(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicAnd(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicAnd(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicOr (coherent volatile inout int64_t, int64_t);" + "uint64_t atomicOr (coherent volatile inout uint64_t, 
uint64_t, int, int, int);" + " int64_t atomicOr (coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicXor(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicXor(coherent volatile inout int64_t, int64_t, int, int, int);" + + "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicAdd(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicAdd(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicAdd(coherent volatile inout int64_t, int64_t, int, int, int);" + " float atomicAdd(coherent volatile inout float, float);" + " float atomicAdd(coherent volatile inout float, float, int, int, int);" + " double atomicAdd(coherent volatile inout double, double);" + " double atomicAdd(coherent volatile inout double, double, int, int, int);" + + "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t);" + " int64_t atomicExchange(coherent volatile inout int64_t, int64_t);" + "uint64_t atomicExchange(coherent volatile inout uint64_t, uint64_t, int, int, int);" + " int64_t atomicExchange(coherent volatile inout int64_t, int64_t, int, int, int);" + " float atomicExchange(coherent volatile inout float, float);" + " float atomicExchange(coherent volatile inout float, float, int, int, int);" + " double atomicExchange(coherent volatile inout double, double);" + " double atomicExchange(coherent volatile inout double, double, int, int, int);" + + "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t);" + " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);" + "uint64_t atomicCompSwap(coherent volatile inout uint64_t, uint64_t, uint64_t, int, int, int, int, int);" + " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t, int, int, int, int, int);" + + "uint64_t atomicLoad(coherent volatile in uint64_t, int, int, int);" + " int64_t atomicLoad(coherent volatile in int64_t, int, int, int);" + " float atomicLoad(coherent volatile in float, int, int, int);" + " double atomicLoad(coherent volatile in double, int, int, int);" + + "void atomicStore(coherent volatile out uint64_t, uint64_t, int, int, int);" + "void atomicStore(coherent volatile out int64_t, int64_t, int, int, int);" + "void atomicStore(coherent volatile out float, float, int, int, int);" + "void atomicStore(coherent volatile out double, double, int, int, int);" + "\n"); + } +#endif // !GLSLANG_ANGLE +#endif // !GLSLANG_WEB + + if ((profile == EEsProfile && version >= 300) || + (profile != EEsProfile && version >= 150)) { // GL_ARB_shader_bit_encoding + commonBuiltins.append( + "int floatBitsToInt(highp float value);" + "ivec2 floatBitsToInt(highp vec2 value);" + "ivec3 floatBitsToInt(highp vec3 value);" + "ivec4 floatBitsToInt(highp vec4 value);" + + "uint floatBitsToUint(highp float value);" + "uvec2 floatBitsToUint(highp vec2 value);" + "uvec3 floatBitsToUint(highp vec3 value);" + "uvec4 floatBitsToUint(highp vec4 value);" + + "float intBitsToFloat(highp int value);" + "vec2 intBitsToFloat(highp ivec2 value);" + "vec3 intBitsToFloat(highp ivec3 value);" + "vec4 intBitsToFloat(highp ivec4 value);" + + "float uintBitsToFloat(highp uint value);" + "vec2 uintBitsToFloat(highp uvec2 value);" + "vec3 uintBitsToFloat(highp uvec3 value);" + "vec4 uintBitsToFloat(highp uvec4 value);" + + "\n"); + } + +#ifndef GLSLANG_WEB + if ((profile != 
EEsProfile && version >= 400) || + (profile == EEsProfile && version >= 310)) { // GL_OES_gpu_shader5 + + commonBuiltins.append( + "float fma(float, float, float );" + "vec2 fma(vec2, vec2, vec2 );" + "vec3 fma(vec3, vec3, vec3 );" + "vec4 fma(vec4, vec4, vec4 );" + "\n"); + } + +#ifndef GLSLANG_ANGLE + if (profile != EEsProfile && version >= 150) { // ARB_gpu_shader_fp64 + commonBuiltins.append( + "double fma(double, double, double);" + "dvec2 fma(dvec2, dvec2, dvec2 );" + "dvec3 fma(dvec3, dvec3, dvec3 );" + "dvec4 fma(dvec4, dvec4, dvec4 );" + "\n"); + } +#endif + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + commonBuiltins.append( + "float frexp(highp float, out highp int);" + "vec2 frexp(highp vec2, out highp ivec2);" + "vec3 frexp(highp vec3, out highp ivec3);" + "vec4 frexp(highp vec4, out highp ivec4);" + + "float ldexp(highp float, highp int);" + "vec2 ldexp(highp vec2, highp ivec2);" + "vec3 ldexp(highp vec3, highp ivec3);" + "vec4 ldexp(highp vec4, highp ivec4);" + + "\n"); + } + +#ifndef GLSLANG_ANGLE + if (profile != EEsProfile && version >= 150) { // ARB_gpu_shader_fp64 + commonBuiltins.append( + "double frexp(double, out int);" + "dvec2 frexp( dvec2, out ivec2);" + "dvec3 frexp( dvec3, out ivec3);" + "dvec4 frexp( dvec4, out ivec4);" + + "double ldexp(double, int);" + "dvec2 ldexp( dvec2, ivec2);" + "dvec3 ldexp( dvec3, ivec3);" + "dvec4 ldexp( dvec4, ivec4);" + + "double packDouble2x32(uvec2);" + "uvec2 unpackDouble2x32(double);" + + "\n"); + } +#endif +#endif + + if ((profile == EEsProfile && version >= 300) || + (profile != EEsProfile && version >= 150)) { + commonBuiltins.append( + "highp uint packUnorm2x16(vec2);" + "vec2 unpackUnorm2x16(highp uint);" + "\n"); + } + + if ((profile == EEsProfile && version >= 300) || + (profile != EEsProfile && version >= 150)) { + commonBuiltins.append( + "highp uint packSnorm2x16(vec2);" + " vec2 unpackSnorm2x16(highp uint);" + "highp uint packHalf2x16(vec2);" + "\n"); + } + + if (profile == EEsProfile && version >= 300) { + commonBuiltins.append( + "mediump vec2 unpackHalf2x16(highp uint);" + "\n"); + } else if (profile != EEsProfile && version >= 150) { + commonBuiltins.append( + " vec2 unpackHalf2x16(highp uint);" + "\n"); + } + +#ifndef GLSLANG_WEB + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 150)) { + commonBuiltins.append( + "highp uint packSnorm4x8(vec4);" + "highp uint packUnorm4x8(vec4);" + "\n"); + } + + if (profile == EEsProfile && version >= 310) { + commonBuiltins.append( + "mediump vec4 unpackSnorm4x8(highp uint);" + "mediump vec4 unpackUnorm4x8(highp uint);" + "\n"); + } else if (profile != EEsProfile && version >= 150) { + commonBuiltins.append( + "vec4 unpackSnorm4x8(highp uint);" + "vec4 unpackUnorm4x8(highp uint);" + "\n"); + } +#endif + + // + // Matrix Functions. 
+ // + commonBuiltins.append( + "mat2 matrixCompMult(mat2 x, mat2 y);" + "mat3 matrixCompMult(mat3 x, mat3 y);" + "mat4 matrixCompMult(mat4 x, mat4 y);" + + "\n"); + + // 120 is correct for both ES and desktop + if (version >= 120) { + commonBuiltins.append( + "mat2 outerProduct(vec2 c, vec2 r);" + "mat3 outerProduct(vec3 c, vec3 r);" + "mat4 outerProduct(vec4 c, vec4 r);" + "mat2x3 outerProduct(vec3 c, vec2 r);" + "mat3x2 outerProduct(vec2 c, vec3 r);" + "mat2x4 outerProduct(vec4 c, vec2 r);" + "mat4x2 outerProduct(vec2 c, vec4 r);" + "mat3x4 outerProduct(vec4 c, vec3 r);" + "mat4x3 outerProduct(vec3 c, vec4 r);" + + "mat2 transpose(mat2 m);" + "mat3 transpose(mat3 m);" + "mat4 transpose(mat4 m);" + "mat2x3 transpose(mat3x2 m);" + "mat3x2 transpose(mat2x3 m);" + "mat2x4 transpose(mat4x2 m);" + "mat4x2 transpose(mat2x4 m);" + "mat3x4 transpose(mat4x3 m);" + "mat4x3 transpose(mat3x4 m);" + + "mat2x3 matrixCompMult(mat2x3, mat2x3);" + "mat2x4 matrixCompMult(mat2x4, mat2x4);" + "mat3x2 matrixCompMult(mat3x2, mat3x2);" + "mat3x4 matrixCompMult(mat3x4, mat3x4);" + "mat4x2 matrixCompMult(mat4x2, mat4x2);" + "mat4x3 matrixCompMult(mat4x3, mat4x3);" + + "\n"); + + // 150 is correct for both ES and desktop + if (version >= 150) { + commonBuiltins.append( + "float determinant(mat2 m);" + "float determinant(mat3 m);" + "float determinant(mat4 m);" + + "mat2 inverse(mat2 m);" + "mat3 inverse(mat3 m);" + "mat4 inverse(mat4 m);" + + "\n"); + } + } + +#ifndef GLSLANG_WEB +#ifndef GLSLANG_ANGLE + // + // Original-style texture functions existing in all stages. + // (Per-stage functions below.) + // + if ((profile == EEsProfile && version == 100) || + profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + commonBuiltins.append( + "vec4 texture2D(sampler2D, vec2);" + + "vec4 texture2DProj(sampler2D, vec3);" + "vec4 texture2DProj(sampler2D, vec4);" + + "vec4 texture3D(sampler3D, vec3);" // OES_texture_3D, but caught by keyword check + "vec4 texture3DProj(sampler3D, vec4);" // OES_texture_3D, but caught by keyword check + + "vec4 textureCube(samplerCube, vec3);" + + "\n"); + } + } + + if ( profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + commonBuiltins.append( + "vec4 texture1D(sampler1D, float);" + + "vec4 texture1DProj(sampler1D, vec2);" + "vec4 texture1DProj(sampler1D, vec4);" + + "vec4 shadow1D(sampler1DShadow, vec3);" + "vec4 shadow2D(sampler2DShadow, vec3);" + "vec4 shadow1DProj(sampler1DShadow, vec4);" + "vec4 shadow2DProj(sampler2DShadow, vec4);" + + "vec4 texture2DRect(sampler2DRect, vec2);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 texture2DRectProj(sampler2DRect, vec3);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 texture2DRectProj(sampler2DRect, vec4);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 shadow2DRect(sampler2DRectShadow, vec3);" // GL_ARB_texture_rectangle, caught by keyword check + "vec4 shadow2DRectProj(sampler2DRectShadow, vec4);" // GL_ARB_texture_rectangle, caught by keyword check + + "\n"); + } + } + + if (profile == EEsProfile) { + if (spvVersion.spv == 0) { + if (version < 300) { + commonBuiltins.append( + "vec4 texture2D(samplerExternalOES, vec2 coord);" // GL_OES_EGL_image_external + "vec4 texture2DProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external + "vec4 texture2DProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external + "\n"); + } 
else { + commonBuiltins.append( + "highp ivec2 textureSize(samplerExternalOES, int lod);" // GL_OES_EGL_image_external_essl3 + "vec4 texture(samplerExternalOES, vec2);" // GL_OES_EGL_image_external_essl3 + "vec4 texture(samplerExternalOES, vec2, float bias);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec3, float bias);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external_essl3 + "vec4 textureProj(samplerExternalOES, vec4, float bias);" // GL_OES_EGL_image_external_essl3 + "vec4 texelFetch(samplerExternalOES, ivec2, int lod);" // GL_OES_EGL_image_external_essl3 + "\n"); + } + commonBuiltins.append( + "highp ivec2 textureSize(__samplerExternal2DY2YEXT, int lod);" // GL_EXT_YUV_target + "vec4 texture(__samplerExternal2DY2YEXT, vec2);" // GL_EXT_YUV_target + "vec4 texture(__samplerExternal2DY2YEXT, vec2, float bias);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec3);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec3, float bias);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec4);" // GL_EXT_YUV_target + "vec4 textureProj(__samplerExternal2DY2YEXT, vec4, float bias);" // GL_EXT_YUV_target + "vec4 texelFetch(__samplerExternal2DY2YEXT sampler, ivec2, int lod);" // GL_EXT_YUV_target + "\n"); + commonBuiltins.append( + "vec4 texture2DGradEXT(sampler2D, vec2, vec2, vec2);" // GL_EXT_shader_texture_lod + "vec4 texture2DProjGradEXT(sampler2D, vec3, vec2, vec2);" // GL_EXT_shader_texture_lod + "vec4 texture2DProjGradEXT(sampler2D, vec4, vec2, vec2);" // GL_EXT_shader_texture_lod + "vec4 textureCubeGradEXT(samplerCube, vec3, vec3, vec3);" // GL_EXT_shader_texture_lod + + "float shadow2DEXT(sampler2DShadow, vec3);" // GL_EXT_shadow_samplers + "float shadow2DProjEXT(sampler2DShadow, vec4);" // GL_EXT_shadow_samplers + + "\n"); + } + } + + // + // Noise functions. + // + if (spvVersion.spv == 0 && profile != EEsProfile) { + commonBuiltins.append( + "float noise1(float x);" + "float noise1(vec2 x);" + "float noise1(vec3 x);" + "float noise1(vec4 x);" + + "vec2 noise2(float x);" + "vec2 noise2(vec2 x);" + "vec2 noise2(vec3 x);" + "vec2 noise2(vec4 x);" + + "vec3 noise3(float x);" + "vec3 noise3(vec2 x);" + "vec3 noise3(vec3 x);" + "vec3 noise3(vec4 x);" + + "vec4 noise4(float x);" + "vec4 noise4(vec2 x);" + "vec4 noise4(vec3 x);" + "vec4 noise4(vec4 x);" + + "\n"); + } + + if (spvVersion.vulkan == 0) { + // + // Atomic counter functions. 
+ // + if ((profile != EEsProfile && version >= 300) || + (profile == EEsProfile && version >= 310)) { + commonBuiltins.append( + "uint atomicCounterIncrement(atomic_uint);" + "uint atomicCounterDecrement(atomic_uint);" + "uint atomicCounter(atomic_uint);" + + "\n"); + } + if (profile != EEsProfile && version >= 460) { + commonBuiltins.append( + "uint atomicCounterAdd(atomic_uint, uint);" + "uint atomicCounterSubtract(atomic_uint, uint);" + "uint atomicCounterMin(atomic_uint, uint);" + "uint atomicCounterMax(atomic_uint, uint);" + "uint atomicCounterAnd(atomic_uint, uint);" + "uint atomicCounterOr(atomic_uint, uint);" + "uint atomicCounterXor(atomic_uint, uint);" + "uint atomicCounterExchange(atomic_uint, uint);" + "uint atomicCounterCompSwap(atomic_uint, uint, uint);" + + "\n"); + } + } +#endif // !GLSLANG_ANGLE + + // Bitfield + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + commonBuiltins.append( + " int bitfieldExtract( int, int, int);" + "ivec2 bitfieldExtract(ivec2, int, int);" + "ivec3 bitfieldExtract(ivec3, int, int);" + "ivec4 bitfieldExtract(ivec4, int, int);" + + " uint bitfieldExtract( uint, int, int);" + "uvec2 bitfieldExtract(uvec2, int, int);" + "uvec3 bitfieldExtract(uvec3, int, int);" + "uvec4 bitfieldExtract(uvec4, int, int);" + + " int bitfieldInsert( int base, int, int, int);" + "ivec2 bitfieldInsert(ivec2 base, ivec2, int, int);" + "ivec3 bitfieldInsert(ivec3 base, ivec3, int, int);" + "ivec4 bitfieldInsert(ivec4 base, ivec4, int, int);" + + " uint bitfieldInsert( uint base, uint, int, int);" + "uvec2 bitfieldInsert(uvec2 base, uvec2, int, int);" + "uvec3 bitfieldInsert(uvec3 base, uvec3, int, int);" + "uvec4 bitfieldInsert(uvec4 base, uvec4, int, int);" + + "\n"); + } + + if (profile != EEsProfile && version >= 400) { + commonBuiltins.append( + " int findLSB( int);" + "ivec2 findLSB(ivec2);" + "ivec3 findLSB(ivec3);" + "ivec4 findLSB(ivec4);" + + " int findLSB( uint);" + "ivec2 findLSB(uvec2);" + "ivec3 findLSB(uvec3);" + "ivec4 findLSB(uvec4);" + + "\n"); + } else if (profile == EEsProfile && version >= 310) { + commonBuiltins.append( + "lowp int findLSB( int);" + "lowp ivec2 findLSB(ivec2);" + "lowp ivec3 findLSB(ivec3);" + "lowp ivec4 findLSB(ivec4);" + + "lowp int findLSB( uint);" + "lowp ivec2 findLSB(uvec2);" + "lowp ivec3 findLSB(uvec3);" + "lowp ivec4 findLSB(uvec4);" + + "\n"); + } + + if (profile != EEsProfile && version >= 400) { + commonBuiltins.append( + " int bitCount( int);" + "ivec2 bitCount(ivec2);" + "ivec3 bitCount(ivec3);" + "ivec4 bitCount(ivec4);" + + " int bitCount( uint);" + "ivec2 bitCount(uvec2);" + "ivec3 bitCount(uvec3);" + "ivec4 bitCount(uvec4);" + + " int findMSB(highp int);" + "ivec2 findMSB(highp ivec2);" + "ivec3 findMSB(highp ivec3);" + "ivec4 findMSB(highp ivec4);" + + " int findMSB(highp uint);" + "ivec2 findMSB(highp uvec2);" + "ivec3 findMSB(highp uvec3);" + "ivec4 findMSB(highp uvec4);" + + "\n"); + } + + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + commonBuiltins.append( + " uint uaddCarry(highp uint, highp uint, out lowp uint carry);" + "uvec2 uaddCarry(highp uvec2, highp uvec2, out lowp uvec2 carry);" + "uvec3 uaddCarry(highp uvec3, highp uvec3, out lowp uvec3 carry);" + "uvec4 uaddCarry(highp uvec4, highp uvec4, out lowp uvec4 carry);" + + " uint usubBorrow(highp uint, highp uint, out lowp uint borrow);" + "uvec2 usubBorrow(highp uvec2, highp uvec2, out lowp uvec2 borrow);" + "uvec3 usubBorrow(highp uvec3, highp uvec3, out 
lowp uvec3 borrow);" + "uvec4 usubBorrow(highp uvec4, highp uvec4, out lowp uvec4 borrow);" + + "void umulExtended(highp uint, highp uint, out highp uint, out highp uint lsb);" + "void umulExtended(highp uvec2, highp uvec2, out highp uvec2, out highp uvec2 lsb);" + "void umulExtended(highp uvec3, highp uvec3, out highp uvec3, out highp uvec3 lsb);" + "void umulExtended(highp uvec4, highp uvec4, out highp uvec4, out highp uvec4 lsb);" + + "void imulExtended(highp int, highp int, out highp int, out highp int lsb);" + "void imulExtended(highp ivec2, highp ivec2, out highp ivec2, out highp ivec2 lsb);" + "void imulExtended(highp ivec3, highp ivec3, out highp ivec3, out highp ivec3 lsb);" + "void imulExtended(highp ivec4, highp ivec4, out highp ivec4, out highp ivec4 lsb);" + + " int bitfieldReverse(highp int);" + "ivec2 bitfieldReverse(highp ivec2);" + "ivec3 bitfieldReverse(highp ivec3);" + "ivec4 bitfieldReverse(highp ivec4);" + + " uint bitfieldReverse(highp uint);" + "uvec2 bitfieldReverse(highp uvec2);" + "uvec3 bitfieldReverse(highp uvec3);" + "uvec4 bitfieldReverse(highp uvec4);" + + "\n"); + } + + if (profile == EEsProfile && version >= 310) { + commonBuiltins.append( + "lowp int bitCount( int);" + "lowp ivec2 bitCount(ivec2);" + "lowp ivec3 bitCount(ivec3);" + "lowp ivec4 bitCount(ivec4);" + + "lowp int bitCount( uint);" + "lowp ivec2 bitCount(uvec2);" + "lowp ivec3 bitCount(uvec3);" + "lowp ivec4 bitCount(uvec4);" + + "lowp int findMSB(highp int);" + "lowp ivec2 findMSB(highp ivec2);" + "lowp ivec3 findMSB(highp ivec3);" + "lowp ivec4 findMSB(highp ivec4);" + + "lowp int findMSB(highp uint);" + "lowp ivec2 findMSB(highp uvec2);" + "lowp ivec3 findMSB(highp uvec3);" + "lowp ivec4 findMSB(highp uvec4);" + + "\n"); + } + +#ifndef GLSLANG_ANGLE + // GL_ARB_shader_ballot + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "uint64_t ballotARB(bool);" + + "float readInvocationARB(float, uint);" + "vec2 readInvocationARB(vec2, uint);" + "vec3 readInvocationARB(vec3, uint);" + "vec4 readInvocationARB(vec4, uint);" + + "int readInvocationARB(int, uint);" + "ivec2 readInvocationARB(ivec2, uint);" + "ivec3 readInvocationARB(ivec3, uint);" + "ivec4 readInvocationARB(ivec4, uint);" + + "uint readInvocationARB(uint, uint);" + "uvec2 readInvocationARB(uvec2, uint);" + "uvec3 readInvocationARB(uvec3, uint);" + "uvec4 readInvocationARB(uvec4, uint);" + + "float readFirstInvocationARB(float);" + "vec2 readFirstInvocationARB(vec2);" + "vec3 readFirstInvocationARB(vec3);" + "vec4 readFirstInvocationARB(vec4);" + + "int readFirstInvocationARB(int);" + "ivec2 readFirstInvocationARB(ivec2);" + "ivec3 readFirstInvocationARB(ivec3);" + "ivec4 readFirstInvocationARB(ivec4);" + + "uint readFirstInvocationARB(uint);" + "uvec2 readFirstInvocationARB(uvec2);" + "uvec3 readFirstInvocationARB(uvec3);" + "uvec4 readFirstInvocationARB(uvec4);" + + "\n"); + } + + // GL_ARB_shader_group_vote + if (profile != EEsProfile && version >= 430) { + commonBuiltins.append( + "bool anyInvocationARB(bool);" + "bool allInvocationsARB(bool);" + "bool allInvocationsEqualARB(bool);" + + "\n"); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + commonBuiltins.append( + "void subgroupBarrier();" + "void subgroupMemoryBarrier();" + "void subgroupMemoryBarrierBuffer();" + "void subgroupMemoryBarrierImage();" + "bool subgroupElect();" + + "bool subgroupAll(bool);\n" + "bool subgroupAny(bool);\n" + "uvec4 subgroupBallot(bool);\n" + 
"bool subgroupInverseBallot(uvec4);\n" + "bool subgroupBallotBitExtract(uvec4, uint);\n" + "uint subgroupBallotBitCount(uvec4);\n" + "uint subgroupBallotInclusiveBitCount(uvec4);\n" + "uint subgroupBallotExclusiveBitCount(uvec4);\n" + "uint subgroupBallotFindLSB(uvec4);\n" + "uint subgroupBallotFindMSB(uvec4);\n" + ); + + // Generate all flavors of subgroup ops. + static const char *subgroupOps[] = + { + "bool subgroupAllEqual(%s);\n", + "%s subgroupBroadcast(%s, uint);\n", + "%s subgroupBroadcastFirst(%s);\n", + "%s subgroupShuffle(%s, uint);\n", + "%s subgroupShuffleXor(%s, uint);\n", + "%s subgroupShuffleUp(%s, uint delta);\n", + "%s subgroupShuffleDown(%s, uint delta);\n", + "%s subgroupAdd(%s);\n", + "%s subgroupMul(%s);\n", + "%s subgroupMin(%s);\n", + "%s subgroupMax(%s);\n", + "%s subgroupAnd(%s);\n", + "%s subgroupOr(%s);\n", + "%s subgroupXor(%s);\n", + "%s subgroupInclusiveAdd(%s);\n", + "%s subgroupInclusiveMul(%s);\n", + "%s subgroupInclusiveMin(%s);\n", + "%s subgroupInclusiveMax(%s);\n", + "%s subgroupInclusiveAnd(%s);\n", + "%s subgroupInclusiveOr(%s);\n", + "%s subgroupInclusiveXor(%s);\n", + "%s subgroupExclusiveAdd(%s);\n", + "%s subgroupExclusiveMul(%s);\n", + "%s subgroupExclusiveMin(%s);\n", + "%s subgroupExclusiveMax(%s);\n", + "%s subgroupExclusiveAnd(%s);\n", + "%s subgroupExclusiveOr(%s);\n", + "%s subgroupExclusiveXor(%s);\n", + "%s subgroupClusteredAdd(%s, uint);\n", + "%s subgroupClusteredMul(%s, uint);\n", + "%s subgroupClusteredMin(%s, uint);\n", + "%s subgroupClusteredMax(%s, uint);\n", + "%s subgroupClusteredAnd(%s, uint);\n", + "%s subgroupClusteredOr(%s, uint);\n", + "%s subgroupClusteredXor(%s, uint);\n", + "%s subgroupQuadBroadcast(%s, uint);\n", + "%s subgroupQuadSwapHorizontal(%s);\n", + "%s subgroupQuadSwapVertical(%s);\n", + "%s subgroupQuadSwapDiagonal(%s);\n", + "uvec4 subgroupPartitionNV(%s);\n", + "%s subgroupPartitionedAddNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedMulNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedMinNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedMaxNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedAndNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedOrNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedXorNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveAddNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveMulNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveMinNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveMaxNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveAndNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveOrNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedInclusiveXorNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveAddNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveMulNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveMinNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveMaxNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveAndNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveOrNV(%s, uvec4 ballot);\n", + "%s subgroupPartitionedExclusiveXorNV(%s, uvec4 ballot);\n", + }; + + static const char *floatTypes[] = { + "float", "vec2", "vec3", "vec4", + "float16_t", "f16vec2", "f16vec3", "f16vec4", + }; + static const char *doubleTypes[] = { + "double", "dvec2", "dvec3", "dvec4", + }; + static const char *intTypes[] = { + "int8_t", "i8vec2", "i8vec3", "i8vec4", + "int16_t", "i16vec2", "i16vec3", "i16vec4", + "int", "ivec2", "ivec3", "ivec4", + "int64_t", "i64vec2", "i64vec3", "i64vec4", + 
"uint8_t", "u8vec2", "u8vec3", "u8vec4", + "uint16_t", "u16vec2", "u16vec3", "u16vec4", + "uint", "uvec2", "uvec3", "uvec4", + "uint64_t", "u64vec2", "u64vec3", "u64vec4", + }; + static const char *boolTypes[] = { + "bool", "bvec2", "bvec3", "bvec4", + }; + + for (size_t i = 0; i < sizeof(subgroupOps)/sizeof(subgroupOps[0]); ++i) { + const char *op = subgroupOps[i]; + + // Logical operations don't support float + bool logicalOp = strstr(op, "Or") || strstr(op, "And") || + (strstr(op, "Xor") && !strstr(op, "ShuffleXor")); + // Math operations don't support bool + bool mathOp = strstr(op, "Add") || strstr(op, "Mul") || strstr(op, "Min") || strstr(op, "Max"); + + const int bufSize = 256; + char buf[bufSize]; + + if (!logicalOp) { + for (size_t j = 0; j < sizeof(floatTypes)/sizeof(floatTypes[0]); ++j) { + snprintf(buf, bufSize, op, floatTypes[j], floatTypes[j]); + commonBuiltins.append(buf); + } + if (profile != EEsProfile && version >= 400) { + for (size_t j = 0; j < sizeof(doubleTypes)/sizeof(doubleTypes[0]); ++j) { + snprintf(buf, bufSize, op, doubleTypes[j], doubleTypes[j]); + commonBuiltins.append(buf); + } + } + } + if (!mathOp) { + for (size_t j = 0; j < sizeof(boolTypes)/sizeof(boolTypes[0]); ++j) { + snprintf(buf, bufSize, op, boolTypes[j], boolTypes[j]); + commonBuiltins.append(buf); + } + } + for (size_t j = 0; j < sizeof(intTypes)/sizeof(intTypes[0]); ++j) { + snprintf(buf, bufSize, op, intTypes[j], intTypes[j]); + commonBuiltins.append(buf); + } + } + + stageBuiltins[EShLangCompute].append( + "void subgroupMemoryBarrierShared();" + + "\n" + ); + stageBuiltins[EShLangMeshNV].append( + "void subgroupMemoryBarrierShared();" + "\n" + ); + stageBuiltins[EShLangTaskNV].append( + "void subgroupMemoryBarrierShared();" + "\n" + ); + } + + if (profile != EEsProfile && version >= 460) { + commonBuiltins.append( + "bool anyInvocation(bool);" + "bool allInvocations(bool);" + "bool allInvocationsEqual(bool);" + + "\n"); + } + + // GL_AMD_shader_ballot + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "float minInvocationsAMD(float);" + "vec2 minInvocationsAMD(vec2);" + "vec3 minInvocationsAMD(vec3);" + "vec4 minInvocationsAMD(vec4);" + + "int minInvocationsAMD(int);" + "ivec2 minInvocationsAMD(ivec2);" + "ivec3 minInvocationsAMD(ivec3);" + "ivec4 minInvocationsAMD(ivec4);" + + "uint minInvocationsAMD(uint);" + "uvec2 minInvocationsAMD(uvec2);" + "uvec3 minInvocationsAMD(uvec3);" + "uvec4 minInvocationsAMD(uvec4);" + + "double minInvocationsAMD(double);" + "dvec2 minInvocationsAMD(dvec2);" + "dvec3 minInvocationsAMD(dvec3);" + "dvec4 minInvocationsAMD(dvec4);" + + "int64_t minInvocationsAMD(int64_t);" + "i64vec2 minInvocationsAMD(i64vec2);" + "i64vec3 minInvocationsAMD(i64vec3);" + "i64vec4 minInvocationsAMD(i64vec4);" + + "uint64_t minInvocationsAMD(uint64_t);" + "u64vec2 minInvocationsAMD(u64vec2);" + "u64vec3 minInvocationsAMD(u64vec3);" + "u64vec4 minInvocationsAMD(u64vec4);" + + "float16_t minInvocationsAMD(float16_t);" + "f16vec2 minInvocationsAMD(f16vec2);" + "f16vec3 minInvocationsAMD(f16vec3);" + "f16vec4 minInvocationsAMD(f16vec4);" + + "int16_t minInvocationsAMD(int16_t);" + "i16vec2 minInvocationsAMD(i16vec2);" + "i16vec3 minInvocationsAMD(i16vec3);" + "i16vec4 minInvocationsAMD(i16vec4);" + + "uint16_t minInvocationsAMD(uint16_t);" + "u16vec2 minInvocationsAMD(u16vec2);" + "u16vec3 minInvocationsAMD(u16vec3);" + "u16vec4 minInvocationsAMD(u16vec4);" + + "float minInvocationsInclusiveScanAMD(float);" + "vec2 minInvocationsInclusiveScanAMD(vec2);" + "vec3 
minInvocationsInclusiveScanAMD(vec3);" + "vec4 minInvocationsInclusiveScanAMD(vec4);" + + "int minInvocationsInclusiveScanAMD(int);" + "ivec2 minInvocationsInclusiveScanAMD(ivec2);" + "ivec3 minInvocationsInclusiveScanAMD(ivec3);" + "ivec4 minInvocationsInclusiveScanAMD(ivec4);" + + "uint minInvocationsInclusiveScanAMD(uint);" + "uvec2 minInvocationsInclusiveScanAMD(uvec2);" + "uvec3 minInvocationsInclusiveScanAMD(uvec3);" + "uvec4 minInvocationsInclusiveScanAMD(uvec4);" + + "double minInvocationsInclusiveScanAMD(double);" + "dvec2 minInvocationsInclusiveScanAMD(dvec2);" + "dvec3 minInvocationsInclusiveScanAMD(dvec3);" + "dvec4 minInvocationsInclusiveScanAMD(dvec4);" + + "int64_t minInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 minInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 minInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 minInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t minInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 minInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 minInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 minInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t minInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 minInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 minInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 minInvocationsInclusiveScanAMD(f16vec4);" + + "int16_t minInvocationsInclusiveScanAMD(int16_t);" + "i16vec2 minInvocationsInclusiveScanAMD(i16vec2);" + "i16vec3 minInvocationsInclusiveScanAMD(i16vec3);" + "i16vec4 minInvocationsInclusiveScanAMD(i16vec4);" + + "uint16_t minInvocationsInclusiveScanAMD(uint16_t);" + "u16vec2 minInvocationsInclusiveScanAMD(u16vec2);" + "u16vec3 minInvocationsInclusiveScanAMD(u16vec3);" + "u16vec4 minInvocationsInclusiveScanAMD(u16vec4);" + + "float minInvocationsExclusiveScanAMD(float);" + "vec2 minInvocationsExclusiveScanAMD(vec2);" + "vec3 minInvocationsExclusiveScanAMD(vec3);" + "vec4 minInvocationsExclusiveScanAMD(vec4);" + + "int minInvocationsExclusiveScanAMD(int);" + "ivec2 minInvocationsExclusiveScanAMD(ivec2);" + "ivec3 minInvocationsExclusiveScanAMD(ivec3);" + "ivec4 minInvocationsExclusiveScanAMD(ivec4);" + + "uint minInvocationsExclusiveScanAMD(uint);" + "uvec2 minInvocationsExclusiveScanAMD(uvec2);" + "uvec3 minInvocationsExclusiveScanAMD(uvec3);" + "uvec4 minInvocationsExclusiveScanAMD(uvec4);" + + "double minInvocationsExclusiveScanAMD(double);" + "dvec2 minInvocationsExclusiveScanAMD(dvec2);" + "dvec3 minInvocationsExclusiveScanAMD(dvec3);" + "dvec4 minInvocationsExclusiveScanAMD(dvec4);" + + "int64_t minInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 minInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 minInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 minInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t minInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 minInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 minInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 minInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t minInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 minInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 minInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 minInvocationsExclusiveScanAMD(f16vec4);" + + "int16_t minInvocationsExclusiveScanAMD(int16_t);" + "i16vec2 minInvocationsExclusiveScanAMD(i16vec2);" + "i16vec3 minInvocationsExclusiveScanAMD(i16vec3);" + "i16vec4 minInvocationsExclusiveScanAMD(i16vec4);" + + "uint16_t minInvocationsExclusiveScanAMD(uint16_t);" + "u16vec2 minInvocationsExclusiveScanAMD(u16vec2);" + "u16vec3 
minInvocationsExclusiveScanAMD(u16vec3);" + "u16vec4 minInvocationsExclusiveScanAMD(u16vec4);" + + "float maxInvocationsAMD(float);" + "vec2 maxInvocationsAMD(vec2);" + "vec3 maxInvocationsAMD(vec3);" + "vec4 maxInvocationsAMD(vec4);" + + "int maxInvocationsAMD(int);" + "ivec2 maxInvocationsAMD(ivec2);" + "ivec3 maxInvocationsAMD(ivec3);" + "ivec4 maxInvocationsAMD(ivec4);" + + "uint maxInvocationsAMD(uint);" + "uvec2 maxInvocationsAMD(uvec2);" + "uvec3 maxInvocationsAMD(uvec3);" + "uvec4 maxInvocationsAMD(uvec4);" + + "double maxInvocationsAMD(double);" + "dvec2 maxInvocationsAMD(dvec2);" + "dvec3 maxInvocationsAMD(dvec3);" + "dvec4 maxInvocationsAMD(dvec4);" + + "int64_t maxInvocationsAMD(int64_t);" + "i64vec2 maxInvocationsAMD(i64vec2);" + "i64vec3 maxInvocationsAMD(i64vec3);" + "i64vec4 maxInvocationsAMD(i64vec4);" + + "uint64_t maxInvocationsAMD(uint64_t);" + "u64vec2 maxInvocationsAMD(u64vec2);" + "u64vec3 maxInvocationsAMD(u64vec3);" + "u64vec4 maxInvocationsAMD(u64vec4);" + + "float16_t maxInvocationsAMD(float16_t);" + "f16vec2 maxInvocationsAMD(f16vec2);" + "f16vec3 maxInvocationsAMD(f16vec3);" + "f16vec4 maxInvocationsAMD(f16vec4);" + + "int16_t maxInvocationsAMD(int16_t);" + "i16vec2 maxInvocationsAMD(i16vec2);" + "i16vec3 maxInvocationsAMD(i16vec3);" + "i16vec4 maxInvocationsAMD(i16vec4);" + + "uint16_t maxInvocationsAMD(uint16_t);" + "u16vec2 maxInvocationsAMD(u16vec2);" + "u16vec3 maxInvocationsAMD(u16vec3);" + "u16vec4 maxInvocationsAMD(u16vec4);" + + "float maxInvocationsInclusiveScanAMD(float);" + "vec2 maxInvocationsInclusiveScanAMD(vec2);" + "vec3 maxInvocationsInclusiveScanAMD(vec3);" + "vec4 maxInvocationsInclusiveScanAMD(vec4);" + + "int maxInvocationsInclusiveScanAMD(int);" + "ivec2 maxInvocationsInclusiveScanAMD(ivec2);" + "ivec3 maxInvocationsInclusiveScanAMD(ivec3);" + "ivec4 maxInvocationsInclusiveScanAMD(ivec4);" + + "uint maxInvocationsInclusiveScanAMD(uint);" + "uvec2 maxInvocationsInclusiveScanAMD(uvec2);" + "uvec3 maxInvocationsInclusiveScanAMD(uvec3);" + "uvec4 maxInvocationsInclusiveScanAMD(uvec4);" + + "double maxInvocationsInclusiveScanAMD(double);" + "dvec2 maxInvocationsInclusiveScanAMD(dvec2);" + "dvec3 maxInvocationsInclusiveScanAMD(dvec3);" + "dvec4 maxInvocationsInclusiveScanAMD(dvec4);" + + "int64_t maxInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 maxInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 maxInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 maxInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t maxInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 maxInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 maxInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 maxInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t maxInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 maxInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 maxInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 maxInvocationsInclusiveScanAMD(f16vec4);" + + "int16_t maxInvocationsInclusiveScanAMD(int16_t);" + "i16vec2 maxInvocationsInclusiveScanAMD(i16vec2);" + "i16vec3 maxInvocationsInclusiveScanAMD(i16vec3);" + "i16vec4 maxInvocationsInclusiveScanAMD(i16vec4);" + + "uint16_t maxInvocationsInclusiveScanAMD(uint16_t);" + "u16vec2 maxInvocationsInclusiveScanAMD(u16vec2);" + "u16vec3 maxInvocationsInclusiveScanAMD(u16vec3);" + "u16vec4 maxInvocationsInclusiveScanAMD(u16vec4);" + + "float maxInvocationsExclusiveScanAMD(float);" + "vec2 maxInvocationsExclusiveScanAMD(vec2);" + "vec3 maxInvocationsExclusiveScanAMD(vec3);" + "vec4 maxInvocationsExclusiveScanAMD(vec4);" + + "int 
maxInvocationsExclusiveScanAMD(int);" + "ivec2 maxInvocationsExclusiveScanAMD(ivec2);" + "ivec3 maxInvocationsExclusiveScanAMD(ivec3);" + "ivec4 maxInvocationsExclusiveScanAMD(ivec4);" + + "uint maxInvocationsExclusiveScanAMD(uint);" + "uvec2 maxInvocationsExclusiveScanAMD(uvec2);" + "uvec3 maxInvocationsExclusiveScanAMD(uvec3);" + "uvec4 maxInvocationsExclusiveScanAMD(uvec4);" + + "double maxInvocationsExclusiveScanAMD(double);" + "dvec2 maxInvocationsExclusiveScanAMD(dvec2);" + "dvec3 maxInvocationsExclusiveScanAMD(dvec3);" + "dvec4 maxInvocationsExclusiveScanAMD(dvec4);" + + "int64_t maxInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 maxInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 maxInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 maxInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t maxInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 maxInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 maxInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 maxInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t maxInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 maxInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 maxInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 maxInvocationsExclusiveScanAMD(f16vec4);" + + "int16_t maxInvocationsExclusiveScanAMD(int16_t);" + "i16vec2 maxInvocationsExclusiveScanAMD(i16vec2);" + "i16vec3 maxInvocationsExclusiveScanAMD(i16vec3);" + "i16vec4 maxInvocationsExclusiveScanAMD(i16vec4);" + + "uint16_t maxInvocationsExclusiveScanAMD(uint16_t);" + "u16vec2 maxInvocationsExclusiveScanAMD(u16vec2);" + "u16vec3 maxInvocationsExclusiveScanAMD(u16vec3);" + "u16vec4 maxInvocationsExclusiveScanAMD(u16vec4);" + + "float addInvocationsAMD(float);" + "vec2 addInvocationsAMD(vec2);" + "vec3 addInvocationsAMD(vec3);" + "vec4 addInvocationsAMD(vec4);" + + "int addInvocationsAMD(int);" + "ivec2 addInvocationsAMD(ivec2);" + "ivec3 addInvocationsAMD(ivec3);" + "ivec4 addInvocationsAMD(ivec4);" + + "uint addInvocationsAMD(uint);" + "uvec2 addInvocationsAMD(uvec2);" + "uvec3 addInvocationsAMD(uvec3);" + "uvec4 addInvocationsAMD(uvec4);" + + "double addInvocationsAMD(double);" + "dvec2 addInvocationsAMD(dvec2);" + "dvec3 addInvocationsAMD(dvec3);" + "dvec4 addInvocationsAMD(dvec4);" + + "int64_t addInvocationsAMD(int64_t);" + "i64vec2 addInvocationsAMD(i64vec2);" + "i64vec3 addInvocationsAMD(i64vec3);" + "i64vec4 addInvocationsAMD(i64vec4);" + + "uint64_t addInvocationsAMD(uint64_t);" + "u64vec2 addInvocationsAMD(u64vec2);" + "u64vec3 addInvocationsAMD(u64vec3);" + "u64vec4 addInvocationsAMD(u64vec4);" + + "float16_t addInvocationsAMD(float16_t);" + "f16vec2 addInvocationsAMD(f16vec2);" + "f16vec3 addInvocationsAMD(f16vec3);" + "f16vec4 addInvocationsAMD(f16vec4);" + + "int16_t addInvocationsAMD(int16_t);" + "i16vec2 addInvocationsAMD(i16vec2);" + "i16vec3 addInvocationsAMD(i16vec3);" + "i16vec4 addInvocationsAMD(i16vec4);" + + "uint16_t addInvocationsAMD(uint16_t);" + "u16vec2 addInvocationsAMD(u16vec2);" + "u16vec3 addInvocationsAMD(u16vec3);" + "u16vec4 addInvocationsAMD(u16vec4);" + + "float addInvocationsInclusiveScanAMD(float);" + "vec2 addInvocationsInclusiveScanAMD(vec2);" + "vec3 addInvocationsInclusiveScanAMD(vec3);" + "vec4 addInvocationsInclusiveScanAMD(vec4);" + + "int addInvocationsInclusiveScanAMD(int);" + "ivec2 addInvocationsInclusiveScanAMD(ivec2);" + "ivec3 addInvocationsInclusiveScanAMD(ivec3);" + "ivec4 addInvocationsInclusiveScanAMD(ivec4);" + + "uint addInvocationsInclusiveScanAMD(uint);" + "uvec2 addInvocationsInclusiveScanAMD(uvec2);" + "uvec3 
addInvocationsInclusiveScanAMD(uvec3);" + "uvec4 addInvocationsInclusiveScanAMD(uvec4);" + + "double addInvocationsInclusiveScanAMD(double);" + "dvec2 addInvocationsInclusiveScanAMD(dvec2);" + "dvec3 addInvocationsInclusiveScanAMD(dvec3);" + "dvec4 addInvocationsInclusiveScanAMD(dvec4);" + + "int64_t addInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 addInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 addInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 addInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t addInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 addInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 addInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 addInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t addInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 addInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 addInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 addInvocationsInclusiveScanAMD(f16vec4);" + + "int16_t addInvocationsInclusiveScanAMD(int16_t);" + "i16vec2 addInvocationsInclusiveScanAMD(i16vec2);" + "i16vec3 addInvocationsInclusiveScanAMD(i16vec3);" + "i16vec4 addInvocationsInclusiveScanAMD(i16vec4);" + + "uint16_t addInvocationsInclusiveScanAMD(uint16_t);" + "u16vec2 addInvocationsInclusiveScanAMD(u16vec2);" + "u16vec3 addInvocationsInclusiveScanAMD(u16vec3);" + "u16vec4 addInvocationsInclusiveScanAMD(u16vec4);" + + "float addInvocationsExclusiveScanAMD(float);" + "vec2 addInvocationsExclusiveScanAMD(vec2);" + "vec3 addInvocationsExclusiveScanAMD(vec3);" + "vec4 addInvocationsExclusiveScanAMD(vec4);" + + "int addInvocationsExclusiveScanAMD(int);" + "ivec2 addInvocationsExclusiveScanAMD(ivec2);" + "ivec3 addInvocationsExclusiveScanAMD(ivec3);" + "ivec4 addInvocationsExclusiveScanAMD(ivec4);" + + "uint addInvocationsExclusiveScanAMD(uint);" + "uvec2 addInvocationsExclusiveScanAMD(uvec2);" + "uvec3 addInvocationsExclusiveScanAMD(uvec3);" + "uvec4 addInvocationsExclusiveScanAMD(uvec4);" + + "double addInvocationsExclusiveScanAMD(double);" + "dvec2 addInvocationsExclusiveScanAMD(dvec2);" + "dvec3 addInvocationsExclusiveScanAMD(dvec3);" + "dvec4 addInvocationsExclusiveScanAMD(dvec4);" + + "int64_t addInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 addInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 addInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 addInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t addInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 addInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 addInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 addInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t addInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 addInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 addInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 addInvocationsExclusiveScanAMD(f16vec4);" + + "int16_t addInvocationsExclusiveScanAMD(int16_t);" + "i16vec2 addInvocationsExclusiveScanAMD(i16vec2);" + "i16vec3 addInvocationsExclusiveScanAMD(i16vec3);" + "i16vec4 addInvocationsExclusiveScanAMD(i16vec4);" + + "uint16_t addInvocationsExclusiveScanAMD(uint16_t);" + "u16vec2 addInvocationsExclusiveScanAMD(u16vec2);" + "u16vec3 addInvocationsExclusiveScanAMD(u16vec3);" + "u16vec4 addInvocationsExclusiveScanAMD(u16vec4);" + + "float minInvocationsNonUniformAMD(float);" + "vec2 minInvocationsNonUniformAMD(vec2);" + "vec3 minInvocationsNonUniformAMD(vec3);" + "vec4 minInvocationsNonUniformAMD(vec4);" + + "int minInvocationsNonUniformAMD(int);" + "ivec2 minInvocationsNonUniformAMD(ivec2);" + "ivec3 minInvocationsNonUniformAMD(ivec3);" + "ivec4 
minInvocationsNonUniformAMD(ivec4);" + + "uint minInvocationsNonUniformAMD(uint);" + "uvec2 minInvocationsNonUniformAMD(uvec2);" + "uvec3 minInvocationsNonUniformAMD(uvec3);" + "uvec4 minInvocationsNonUniformAMD(uvec4);" + + "double minInvocationsNonUniformAMD(double);" + "dvec2 minInvocationsNonUniformAMD(dvec2);" + "dvec3 minInvocationsNonUniformAMD(dvec3);" + "dvec4 minInvocationsNonUniformAMD(dvec4);" + + "int64_t minInvocationsNonUniformAMD(int64_t);" + "i64vec2 minInvocationsNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsNonUniformAMD(u64vec4);" + + "float16_t minInvocationsNonUniformAMD(float16_t);" + "f16vec2 minInvocationsNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsNonUniformAMD(f16vec4);" + + "int16_t minInvocationsNonUniformAMD(int16_t);" + "i16vec2 minInvocationsNonUniformAMD(i16vec2);" + "i16vec3 minInvocationsNonUniformAMD(i16vec3);" + "i16vec4 minInvocationsNonUniformAMD(i16vec4);" + + "uint16_t minInvocationsNonUniformAMD(uint16_t);" + "u16vec2 minInvocationsNonUniformAMD(u16vec2);" + "u16vec3 minInvocationsNonUniformAMD(u16vec3);" + "u16vec4 minInvocationsNonUniformAMD(u16vec4);" + + "float minInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 minInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 minInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 minInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int minInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 minInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 minInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 minInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint minInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 minInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 minInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 minInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double minInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 minInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 minInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 minInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t minInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 minInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t minInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 minInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "int16_t minInvocationsInclusiveScanNonUniformAMD(int16_t);" + "i16vec2 minInvocationsInclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 minInvocationsInclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 minInvocationsInclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t minInvocationsInclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 minInvocationsInclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 
minInvocationsInclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 minInvocationsInclusiveScanNonUniformAMD(u16vec4);" + + "float minInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 minInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 minInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 minInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int minInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 minInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 minInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 minInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint minInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 minInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 minInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 minInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double minInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 minInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 minInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 minInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t minInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 minInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t minInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 minInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsExclusiveScanNonUniformAMD(f16vec4);" + + "int16_t minInvocationsExclusiveScanNonUniformAMD(int16_t);" + "i16vec2 minInvocationsExclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 minInvocationsExclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 minInvocationsExclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t minInvocationsExclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 minInvocationsExclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 minInvocationsExclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 minInvocationsExclusiveScanNonUniformAMD(u16vec4);" + + "float maxInvocationsNonUniformAMD(float);" + "vec2 maxInvocationsNonUniformAMD(vec2);" + "vec3 maxInvocationsNonUniformAMD(vec3);" + "vec4 maxInvocationsNonUniformAMD(vec4);" + + "int maxInvocationsNonUniformAMD(int);" + "ivec2 maxInvocationsNonUniformAMD(ivec2);" + "ivec3 maxInvocationsNonUniformAMD(ivec3);" + "ivec4 maxInvocationsNonUniformAMD(ivec4);" + + "uint maxInvocationsNonUniformAMD(uint);" + "uvec2 maxInvocationsNonUniformAMD(uvec2);" + "uvec3 maxInvocationsNonUniformAMD(uvec3);" + "uvec4 maxInvocationsNonUniformAMD(uvec4);" + + "double maxInvocationsNonUniformAMD(double);" + "dvec2 maxInvocationsNonUniformAMD(dvec2);" + "dvec3 maxInvocationsNonUniformAMD(dvec3);" + "dvec4 maxInvocationsNonUniformAMD(dvec4);" + + "int64_t maxInvocationsNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsNonUniformAMD(float16_t);" + "f16vec2 
maxInvocationsNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsNonUniformAMD(f16vec4);" + + "int16_t maxInvocationsNonUniformAMD(int16_t);" + "i16vec2 maxInvocationsNonUniformAMD(i16vec2);" + "i16vec3 maxInvocationsNonUniformAMD(i16vec3);" + "i16vec4 maxInvocationsNonUniformAMD(i16vec4);" + + "uint16_t maxInvocationsNonUniformAMD(uint16_t);" + "u16vec2 maxInvocationsNonUniformAMD(u16vec2);" + "u16vec3 maxInvocationsNonUniformAMD(u16vec3);" + "u16vec4 maxInvocationsNonUniformAMD(u16vec4);" + + "float maxInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 maxInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 maxInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 maxInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int maxInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 maxInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 maxInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 maxInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint maxInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 maxInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 maxInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 maxInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double maxInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 maxInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 maxInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 maxInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t maxInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "int16_t maxInvocationsInclusiveScanNonUniformAMD(int16_t);" + "i16vec2 maxInvocationsInclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 maxInvocationsInclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 maxInvocationsInclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t maxInvocationsInclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 maxInvocationsInclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 maxInvocationsInclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 maxInvocationsInclusiveScanNonUniformAMD(u16vec4);" + + "float maxInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 maxInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 maxInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 maxInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int maxInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 maxInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 maxInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 maxInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint maxInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 maxInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 maxInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 maxInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double maxInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 
maxInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 maxInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 maxInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t maxInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsExclusiveScanNonUniformAMD(f16vec4);" + + "int16_t maxInvocationsExclusiveScanNonUniformAMD(int16_t);" + "i16vec2 maxInvocationsExclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 maxInvocationsExclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 maxInvocationsExclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t maxInvocationsExclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 maxInvocationsExclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 maxInvocationsExclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 maxInvocationsExclusiveScanNonUniformAMD(u16vec4);" + + "float addInvocationsNonUniformAMD(float);" + "vec2 addInvocationsNonUniformAMD(vec2);" + "vec3 addInvocationsNonUniformAMD(vec3);" + "vec4 addInvocationsNonUniformAMD(vec4);" + + "int addInvocationsNonUniformAMD(int);" + "ivec2 addInvocationsNonUniformAMD(ivec2);" + "ivec3 addInvocationsNonUniformAMD(ivec3);" + "ivec4 addInvocationsNonUniformAMD(ivec4);" + + "uint addInvocationsNonUniformAMD(uint);" + "uvec2 addInvocationsNonUniformAMD(uvec2);" + "uvec3 addInvocationsNonUniformAMD(uvec3);" + "uvec4 addInvocationsNonUniformAMD(uvec4);" + + "double addInvocationsNonUniformAMD(double);" + "dvec2 addInvocationsNonUniformAMD(dvec2);" + "dvec3 addInvocationsNonUniformAMD(dvec3);" + "dvec4 addInvocationsNonUniformAMD(dvec4);" + + "int64_t addInvocationsNonUniformAMD(int64_t);" + "i64vec2 addInvocationsNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsNonUniformAMD(u64vec4);" + + "float16_t addInvocationsNonUniformAMD(float16_t);" + "f16vec2 addInvocationsNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsNonUniformAMD(f16vec4);" + + "int16_t addInvocationsNonUniformAMD(int16_t);" + "i16vec2 addInvocationsNonUniformAMD(i16vec2);" + "i16vec3 addInvocationsNonUniformAMD(i16vec3);" + "i16vec4 addInvocationsNonUniformAMD(i16vec4);" + + "uint16_t addInvocationsNonUniformAMD(uint16_t);" + "u16vec2 addInvocationsNonUniformAMD(u16vec2);" + "u16vec3 addInvocationsNonUniformAMD(u16vec3);" + "u16vec4 addInvocationsNonUniformAMD(u16vec4);" + + "float addInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 addInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 addInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 addInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int addInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 addInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 
addInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 addInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint addInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 addInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 addInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 addInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double addInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 addInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 addInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 addInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t addInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 addInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t addInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 addInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "int16_t addInvocationsInclusiveScanNonUniformAMD(int16_t);" + "i16vec2 addInvocationsInclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 addInvocationsInclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 addInvocationsInclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t addInvocationsInclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 addInvocationsInclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 addInvocationsInclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 addInvocationsInclusiveScanNonUniformAMD(u16vec4);" + + "float addInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 addInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 addInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 addInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int addInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 addInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 addInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 addInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint addInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 addInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 addInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 addInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double addInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 addInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 addInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 addInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t addInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 addInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t addInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 addInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 
addInvocationsExclusiveScanNonUniformAMD(f16vec4);" + + "int16_t addInvocationsExclusiveScanNonUniformAMD(int16_t);" + "i16vec2 addInvocationsExclusiveScanNonUniformAMD(i16vec2);" + "i16vec3 addInvocationsExclusiveScanNonUniformAMD(i16vec3);" + "i16vec4 addInvocationsExclusiveScanNonUniformAMD(i16vec4);" + + "uint16_t addInvocationsExclusiveScanNonUniformAMD(uint16_t);" + "u16vec2 addInvocationsExclusiveScanNonUniformAMD(u16vec2);" + "u16vec3 addInvocationsExclusiveScanNonUniformAMD(u16vec3);" + "u16vec4 addInvocationsExclusiveScanNonUniformAMD(u16vec4);" + + "float swizzleInvocationsAMD(float, uvec4);" + "vec2 swizzleInvocationsAMD(vec2, uvec4);" + "vec3 swizzleInvocationsAMD(vec3, uvec4);" + "vec4 swizzleInvocationsAMD(vec4, uvec4);" + + "int swizzleInvocationsAMD(int, uvec4);" + "ivec2 swizzleInvocationsAMD(ivec2, uvec4);" + "ivec3 swizzleInvocationsAMD(ivec3, uvec4);" + "ivec4 swizzleInvocationsAMD(ivec4, uvec4);" + + "uint swizzleInvocationsAMD(uint, uvec4);" + "uvec2 swizzleInvocationsAMD(uvec2, uvec4);" + "uvec3 swizzleInvocationsAMD(uvec3, uvec4);" + "uvec4 swizzleInvocationsAMD(uvec4, uvec4);" + + "float swizzleInvocationsMaskedAMD(float, uvec3);" + "vec2 swizzleInvocationsMaskedAMD(vec2, uvec3);" + "vec3 swizzleInvocationsMaskedAMD(vec3, uvec3);" + "vec4 swizzleInvocationsMaskedAMD(vec4, uvec3);" + + "int swizzleInvocationsMaskedAMD(int, uvec3);" + "ivec2 swizzleInvocationsMaskedAMD(ivec2, uvec3);" + "ivec3 swizzleInvocationsMaskedAMD(ivec3, uvec3);" + "ivec4 swizzleInvocationsMaskedAMD(ivec4, uvec3);" + + "uint swizzleInvocationsMaskedAMD(uint, uvec3);" + "uvec2 swizzleInvocationsMaskedAMD(uvec2, uvec3);" + "uvec3 swizzleInvocationsMaskedAMD(uvec3, uvec3);" + "uvec4 swizzleInvocationsMaskedAMD(uvec4, uvec3);" + + "float writeInvocationAMD(float, float, uint);" + "vec2 writeInvocationAMD(vec2, vec2, uint);" + "vec3 writeInvocationAMD(vec3, vec3, uint);" + "vec4 writeInvocationAMD(vec4, vec4, uint);" + + "int writeInvocationAMD(int, int, uint);" + "ivec2 writeInvocationAMD(ivec2, ivec2, uint);" + "ivec3 writeInvocationAMD(ivec3, ivec3, uint);" + "ivec4 writeInvocationAMD(ivec4, ivec4, uint);" + + "uint writeInvocationAMD(uint, uint, uint);" + "uvec2 writeInvocationAMD(uvec2, uvec2, uint);" + "uvec3 writeInvocationAMD(uvec3, uvec3, uint);" + "uvec4 writeInvocationAMD(uvec4, uvec4, uint);" + + "uint mbcntAMD(uint64_t);" + + "\n"); + } + + // GL_AMD_gcn_shader + if (profile != EEsProfile && version >= 440) { + commonBuiltins.append( + "float cubeFaceIndexAMD(vec3);" + "vec2 cubeFaceCoordAMD(vec3);" + "uint64_t timeAMD();" + + "in int gl_SIMDGroupSizeAMD;" + "\n"); + } + + // GL_AMD_shader_fragment_mask + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "uint fragmentMaskFetchAMD(sampler2DMS, ivec2);" + "uint fragmentMaskFetchAMD(isampler2DMS, ivec2);" + "uint fragmentMaskFetchAMD(usampler2DMS, ivec2);" + + "uint fragmentMaskFetchAMD(sampler2DMSArray, ivec3);" + "uint fragmentMaskFetchAMD(isampler2DMSArray, ivec3);" + "uint fragmentMaskFetchAMD(usampler2DMSArray, ivec3);" + + "vec4 fragmentFetchAMD(sampler2DMS, ivec2, uint);" + "ivec4 fragmentFetchAMD(isampler2DMS, ivec2, uint);" + "uvec4 fragmentFetchAMD(usampler2DMS, ivec2, uint);" + + "vec4 fragmentFetchAMD(sampler2DMSArray, ivec3, uint);" + "ivec4 fragmentFetchAMD(isampler2DMSArray, ivec3, uint);" + "uvec4 fragmentFetchAMD(usampler2DMSArray, ivec3, uint);" + + "\n"); + } + + if ((profile != EEsProfile && version >= 130) || + (profile == EEsProfile && version >= 300)) { + commonBuiltins.append( + "uint 
countLeadingZeros(uint);" + "uvec2 countLeadingZeros(uvec2);" + "uvec3 countLeadingZeros(uvec3);" + "uvec4 countLeadingZeros(uvec4);" + + "uint countTrailingZeros(uint);" + "uvec2 countTrailingZeros(uvec2);" + "uvec3 countTrailingZeros(uvec3);" + "uvec4 countTrailingZeros(uvec4);" + + "uint absoluteDifference(int, int);" + "uvec2 absoluteDifference(ivec2, ivec2);" + "uvec3 absoluteDifference(ivec3, ivec3);" + "uvec4 absoluteDifference(ivec4, ivec4);" + + "uint16_t absoluteDifference(int16_t, int16_t);" + "u16vec2 absoluteDifference(i16vec2, i16vec2);" + "u16vec3 absoluteDifference(i16vec3, i16vec3);" + "u16vec4 absoluteDifference(i16vec4, i16vec4);" + + "uint64_t absoluteDifference(int64_t, int64_t);" + "u64vec2 absoluteDifference(i64vec2, i64vec2);" + "u64vec3 absoluteDifference(i64vec3, i64vec3);" + "u64vec4 absoluteDifference(i64vec4, i64vec4);" + + "uint absoluteDifference(uint, uint);" + "uvec2 absoluteDifference(uvec2, uvec2);" + "uvec3 absoluteDifference(uvec3, uvec3);" + "uvec4 absoluteDifference(uvec4, uvec4);" + + "uint16_t absoluteDifference(uint16_t, uint16_t);" + "u16vec2 absoluteDifference(u16vec2, u16vec2);" + "u16vec3 absoluteDifference(u16vec3, u16vec3);" + "u16vec4 absoluteDifference(u16vec4, u16vec4);" + + "uint64_t absoluteDifference(uint64_t, uint64_t);" + "u64vec2 absoluteDifference(u64vec2, u64vec2);" + "u64vec3 absoluteDifference(u64vec3, u64vec3);" + "u64vec4 absoluteDifference(u64vec4, u64vec4);" + + "int addSaturate(int, int);" + "ivec2 addSaturate(ivec2, ivec2);" + "ivec3 addSaturate(ivec3, ivec3);" + "ivec4 addSaturate(ivec4, ivec4);" + + "int16_t addSaturate(int16_t, int16_t);" + "i16vec2 addSaturate(i16vec2, i16vec2);" + "i16vec3 addSaturate(i16vec3, i16vec3);" + "i16vec4 addSaturate(i16vec4, i16vec4);" + + "int64_t addSaturate(int64_t, int64_t);" + "i64vec2 addSaturate(i64vec2, i64vec2);" + "i64vec3 addSaturate(i64vec3, i64vec3);" + "i64vec4 addSaturate(i64vec4, i64vec4);" + + "uint addSaturate(uint, uint);" + "uvec2 addSaturate(uvec2, uvec2);" + "uvec3 addSaturate(uvec3, uvec3);" + "uvec4 addSaturate(uvec4, uvec4);" + + "uint16_t addSaturate(uint16_t, uint16_t);" + "u16vec2 addSaturate(u16vec2, u16vec2);" + "u16vec3 addSaturate(u16vec3, u16vec3);" + "u16vec4 addSaturate(u16vec4, u16vec4);" + + "uint64_t addSaturate(uint64_t, uint64_t);" + "u64vec2 addSaturate(u64vec2, u64vec2);" + "u64vec3 addSaturate(u64vec3, u64vec3);" + "u64vec4 addSaturate(u64vec4, u64vec4);" + + "int subtractSaturate(int, int);" + "ivec2 subtractSaturate(ivec2, ivec2);" + "ivec3 subtractSaturate(ivec3, ivec3);" + "ivec4 subtractSaturate(ivec4, ivec4);" + + "int16_t subtractSaturate(int16_t, int16_t);" + "i16vec2 subtractSaturate(i16vec2, i16vec2);" + "i16vec3 subtractSaturate(i16vec3, i16vec3);" + "i16vec4 subtractSaturate(i16vec4, i16vec4);" + + "int64_t subtractSaturate(int64_t, int64_t);" + "i64vec2 subtractSaturate(i64vec2, i64vec2);" + "i64vec3 subtractSaturate(i64vec3, i64vec3);" + "i64vec4 subtractSaturate(i64vec4, i64vec4);" + + "uint subtractSaturate(uint, uint);" + "uvec2 subtractSaturate(uvec2, uvec2);" + "uvec3 subtractSaturate(uvec3, uvec3);" + "uvec4 subtractSaturate(uvec4, uvec4);" + + "uint16_t subtractSaturate(uint16_t, uint16_t);" + "u16vec2 subtractSaturate(u16vec2, u16vec2);" + "u16vec3 subtractSaturate(u16vec3, u16vec3);" + "u16vec4 subtractSaturate(u16vec4, u16vec4);" + + "uint64_t subtractSaturate(uint64_t, uint64_t);" + "u64vec2 subtractSaturate(u64vec2, u64vec2);" + "u64vec3 subtractSaturate(u64vec3, u64vec3);" + "u64vec4 subtractSaturate(u64vec4, u64vec4);" + 
+ "int average(int, int);" + "ivec2 average(ivec2, ivec2);" + "ivec3 average(ivec3, ivec3);" + "ivec4 average(ivec4, ivec4);" + + "int16_t average(int16_t, int16_t);" + "i16vec2 average(i16vec2, i16vec2);" + "i16vec3 average(i16vec3, i16vec3);" + "i16vec4 average(i16vec4, i16vec4);" + + "int64_t average(int64_t, int64_t);" + "i64vec2 average(i64vec2, i64vec2);" + "i64vec3 average(i64vec3, i64vec3);" + "i64vec4 average(i64vec4, i64vec4);" + + "uint average(uint, uint);" + "uvec2 average(uvec2, uvec2);" + "uvec3 average(uvec3, uvec3);" + "uvec4 average(uvec4, uvec4);" + + "uint16_t average(uint16_t, uint16_t);" + "u16vec2 average(u16vec2, u16vec2);" + "u16vec3 average(u16vec3, u16vec3);" + "u16vec4 average(u16vec4, u16vec4);" + + "uint64_t average(uint64_t, uint64_t);" + "u64vec2 average(u64vec2, u64vec2);" + "u64vec3 average(u64vec3, u64vec3);" + "u64vec4 average(u64vec4, u64vec4);" + + "int averageRounded(int, int);" + "ivec2 averageRounded(ivec2, ivec2);" + "ivec3 averageRounded(ivec3, ivec3);" + "ivec4 averageRounded(ivec4, ivec4);" + + "int16_t averageRounded(int16_t, int16_t);" + "i16vec2 averageRounded(i16vec2, i16vec2);" + "i16vec3 averageRounded(i16vec3, i16vec3);" + "i16vec4 averageRounded(i16vec4, i16vec4);" + + "int64_t averageRounded(int64_t, int64_t);" + "i64vec2 averageRounded(i64vec2, i64vec2);" + "i64vec3 averageRounded(i64vec3, i64vec3);" + "i64vec4 averageRounded(i64vec4, i64vec4);" + + "uint averageRounded(uint, uint);" + "uvec2 averageRounded(uvec2, uvec2);" + "uvec3 averageRounded(uvec3, uvec3);" + "uvec4 averageRounded(uvec4, uvec4);" + + "uint16_t averageRounded(uint16_t, uint16_t);" + "u16vec2 averageRounded(u16vec2, u16vec2);" + "u16vec3 averageRounded(u16vec3, u16vec3);" + "u16vec4 averageRounded(u16vec4, u16vec4);" + + "uint64_t averageRounded(uint64_t, uint64_t);" + "u64vec2 averageRounded(u64vec2, u64vec2);" + "u64vec3 averageRounded(u64vec3, u64vec3);" + "u64vec4 averageRounded(u64vec4, u64vec4);" + + "int multiply32x16(int, int);" + "ivec2 multiply32x16(ivec2, ivec2);" + "ivec3 multiply32x16(ivec3, ivec3);" + "ivec4 multiply32x16(ivec4, ivec4);" + + "uint multiply32x16(uint, uint);" + "uvec2 multiply32x16(uvec2, uvec2);" + "uvec3 multiply32x16(uvec3, uvec3);" + "uvec4 multiply32x16(uvec4, uvec4);" + "\n"); + } + + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) { + commonBuiltins.append( + "struct gl_TextureFootprint2DNV {" + "uvec2 anchor;" + "uvec2 offset;" + "uvec2 mask;" + "uint lod;" + "uint granularity;" + "};" + + "struct gl_TextureFootprint3DNV {" + "uvec3 anchor;" + "uvec3 offset;" + "uvec2 mask;" + "uint lod;" + "uint granularity;" + "};" + "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV);" + "bool textureFootprintNV(sampler2D, vec2, int, bool, out gl_TextureFootprint2DNV, float);" + "bool textureFootprintNV(sampler3D, vec3, int, bool, out gl_TextureFootprint3DNV, float);" + "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);" + "bool textureFootprintClampNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV, float);" + "bool textureFootprintClampNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV, float);" + "bool textureFootprintLodNV(sampler2D, vec2, float, int, bool, out gl_TextureFootprint2DNV);" + "bool 
textureFootprintLodNV(sampler3D, vec3, float, int, bool, out gl_TextureFootprint3DNV);" + "bool textureFootprintGradNV(sampler2D, vec2, vec2, vec2, int, bool, out gl_TextureFootprint2DNV);" + "bool textureFootprintGradClampNV(sampler2D, vec2, vec2, vec2, float, int, bool, out gl_TextureFootprint2DNV);" + "\n"); + } +#endif // !GLSLANG_ANGLE + + if ((profile == EEsProfile && version >= 300 && version < 310) || + (profile != EEsProfile && version >= 150 && version < 450)) { // GL_EXT_shader_integer_mix + commonBuiltins.append("int mix(int, int, bool);" + "ivec2 mix(ivec2, ivec2, bvec2);" + "ivec3 mix(ivec3, ivec3, bvec3);" + "ivec4 mix(ivec4, ivec4, bvec4);" + "uint mix(uint, uint, bool );" + "uvec2 mix(uvec2, uvec2, bvec2);" + "uvec3 mix(uvec3, uvec3, bvec3);" + "uvec4 mix(uvec4, uvec4, bvec4);" + "bool mix(bool, bool, bool );" + "bvec2 mix(bvec2, bvec2, bvec2);" + "bvec3 mix(bvec3, bvec3, bvec3);" + "bvec4 mix(bvec4, bvec4, bvec4);" + + "\n"); + } + +#ifndef GLSLANG_ANGLE + // GL_AMD_gpu_shader_half_float/Explicit types + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "float16_t radians(float16_t);" + "f16vec2 radians(f16vec2);" + "f16vec3 radians(f16vec3);" + "f16vec4 radians(f16vec4);" + + "float16_t degrees(float16_t);" + "f16vec2 degrees(f16vec2);" + "f16vec3 degrees(f16vec3);" + "f16vec4 degrees(f16vec4);" + + "float16_t sin(float16_t);" + "f16vec2 sin(f16vec2);" + "f16vec3 sin(f16vec3);" + "f16vec4 sin(f16vec4);" + + "float16_t cos(float16_t);" + "f16vec2 cos(f16vec2);" + "f16vec3 cos(f16vec3);" + "f16vec4 cos(f16vec4);" + + "float16_t tan(float16_t);" + "f16vec2 tan(f16vec2);" + "f16vec3 tan(f16vec3);" + "f16vec4 tan(f16vec4);" + + "float16_t asin(float16_t);" + "f16vec2 asin(f16vec2);" + "f16vec3 asin(f16vec3);" + "f16vec4 asin(f16vec4);" + + "float16_t acos(float16_t);" + "f16vec2 acos(f16vec2);" + "f16vec3 acos(f16vec3);" + "f16vec4 acos(f16vec4);" + + "float16_t atan(float16_t, float16_t);" + "f16vec2 atan(f16vec2, f16vec2);" + "f16vec3 atan(f16vec3, f16vec3);" + "f16vec4 atan(f16vec4, f16vec4);" + + "float16_t atan(float16_t);" + "f16vec2 atan(f16vec2);" + "f16vec3 atan(f16vec3);" + "f16vec4 atan(f16vec4);" + + "float16_t sinh(float16_t);" + "f16vec2 sinh(f16vec2);" + "f16vec3 sinh(f16vec3);" + "f16vec4 sinh(f16vec4);" + + "float16_t cosh(float16_t);" + "f16vec2 cosh(f16vec2);" + "f16vec3 cosh(f16vec3);" + "f16vec4 cosh(f16vec4);" + + "float16_t tanh(float16_t);" + "f16vec2 tanh(f16vec2);" + "f16vec3 tanh(f16vec3);" + "f16vec4 tanh(f16vec4);" + + "float16_t asinh(float16_t);" + "f16vec2 asinh(f16vec2);" + "f16vec3 asinh(f16vec3);" + "f16vec4 asinh(f16vec4);" + + "float16_t acosh(float16_t);" + "f16vec2 acosh(f16vec2);" + "f16vec3 acosh(f16vec3);" + "f16vec4 acosh(f16vec4);" + + "float16_t atanh(float16_t);" + "f16vec2 atanh(f16vec2);" + "f16vec3 atanh(f16vec3);" + "f16vec4 atanh(f16vec4);" + + "float16_t pow(float16_t, float16_t);" + "f16vec2 pow(f16vec2, f16vec2);" + "f16vec3 pow(f16vec3, f16vec3);" + "f16vec4 pow(f16vec4, f16vec4);" + + "float16_t exp(float16_t);" + "f16vec2 exp(f16vec2);" + "f16vec3 exp(f16vec3);" + "f16vec4 exp(f16vec4);" + + "float16_t log(float16_t);" + "f16vec2 log(f16vec2);" + "f16vec3 log(f16vec3);" + "f16vec4 log(f16vec4);" + + "float16_t exp2(float16_t);" + "f16vec2 exp2(f16vec2);" + "f16vec3 exp2(f16vec3);" + "f16vec4 exp2(f16vec4);" + + "float16_t log2(float16_t);" + "f16vec2 log2(f16vec2);" + "f16vec3 log2(f16vec3);" + "f16vec4 log2(f16vec4);" + + "float16_t sqrt(float16_t);" + "f16vec2 sqrt(f16vec2);" + "f16vec3 
sqrt(f16vec3);" + "f16vec4 sqrt(f16vec4);" + + "float16_t inversesqrt(float16_t);" + "f16vec2 inversesqrt(f16vec2);" + "f16vec3 inversesqrt(f16vec3);" + "f16vec4 inversesqrt(f16vec4);" + + "float16_t abs(float16_t);" + "f16vec2 abs(f16vec2);" + "f16vec3 abs(f16vec3);" + "f16vec4 abs(f16vec4);" + + "float16_t sign(float16_t);" + "f16vec2 sign(f16vec2);" + "f16vec3 sign(f16vec3);" + "f16vec4 sign(f16vec4);" + + "float16_t floor(float16_t);" + "f16vec2 floor(f16vec2);" + "f16vec3 floor(f16vec3);" + "f16vec4 floor(f16vec4);" + + "float16_t trunc(float16_t);" + "f16vec2 trunc(f16vec2);" + "f16vec3 trunc(f16vec3);" + "f16vec4 trunc(f16vec4);" + + "float16_t round(float16_t);" + "f16vec2 round(f16vec2);" + "f16vec3 round(f16vec3);" + "f16vec4 round(f16vec4);" + + "float16_t roundEven(float16_t);" + "f16vec2 roundEven(f16vec2);" + "f16vec3 roundEven(f16vec3);" + "f16vec4 roundEven(f16vec4);" + + "float16_t ceil(float16_t);" + "f16vec2 ceil(f16vec2);" + "f16vec3 ceil(f16vec3);" + "f16vec4 ceil(f16vec4);" + + "float16_t fract(float16_t);" + "f16vec2 fract(f16vec2);" + "f16vec3 fract(f16vec3);" + "f16vec4 fract(f16vec4);" + + "float16_t mod(float16_t, float16_t);" + "f16vec2 mod(f16vec2, float16_t);" + "f16vec3 mod(f16vec3, float16_t);" + "f16vec4 mod(f16vec4, float16_t);" + "f16vec2 mod(f16vec2, f16vec2);" + "f16vec3 mod(f16vec3, f16vec3);" + "f16vec4 mod(f16vec4, f16vec4);" + + "float16_t modf(float16_t, out float16_t);" + "f16vec2 modf(f16vec2, out f16vec2);" + "f16vec3 modf(f16vec3, out f16vec3);" + "f16vec4 modf(f16vec4, out f16vec4);" + + "float16_t min(float16_t, float16_t);" + "f16vec2 min(f16vec2, float16_t);" + "f16vec3 min(f16vec3, float16_t);" + "f16vec4 min(f16vec4, float16_t);" + "f16vec2 min(f16vec2, f16vec2);" + "f16vec3 min(f16vec3, f16vec3);" + "f16vec4 min(f16vec4, f16vec4);" + + "float16_t max(float16_t, float16_t);" + "f16vec2 max(f16vec2, float16_t);" + "f16vec3 max(f16vec3, float16_t);" + "f16vec4 max(f16vec4, float16_t);" + "f16vec2 max(f16vec2, f16vec2);" + "f16vec3 max(f16vec3, f16vec3);" + "f16vec4 max(f16vec4, f16vec4);" + + "float16_t clamp(float16_t, float16_t, float16_t);" + "f16vec2 clamp(f16vec2, float16_t, float16_t);" + "f16vec3 clamp(f16vec3, float16_t, float16_t);" + "f16vec4 clamp(f16vec4, float16_t, float16_t);" + "f16vec2 clamp(f16vec2, f16vec2, f16vec2);" + "f16vec3 clamp(f16vec3, f16vec3, f16vec3);" + "f16vec4 clamp(f16vec4, f16vec4, f16vec4);" + + "float16_t mix(float16_t, float16_t, float16_t);" + "f16vec2 mix(f16vec2, f16vec2, float16_t);" + "f16vec3 mix(f16vec3, f16vec3, float16_t);" + "f16vec4 mix(f16vec4, f16vec4, float16_t);" + "f16vec2 mix(f16vec2, f16vec2, f16vec2);" + "f16vec3 mix(f16vec3, f16vec3, f16vec3);" + "f16vec4 mix(f16vec4, f16vec4, f16vec4);" + "float16_t mix(float16_t, float16_t, bool);" + "f16vec2 mix(f16vec2, f16vec2, bvec2);" + "f16vec3 mix(f16vec3, f16vec3, bvec3);" + "f16vec4 mix(f16vec4, f16vec4, bvec4);" + + "float16_t step(float16_t, float16_t);" + "f16vec2 step(f16vec2, f16vec2);" + "f16vec3 step(f16vec3, f16vec3);" + "f16vec4 step(f16vec4, f16vec4);" + "f16vec2 step(float16_t, f16vec2);" + "f16vec3 step(float16_t, f16vec3);" + "f16vec4 step(float16_t, f16vec4);" + + "float16_t smoothstep(float16_t, float16_t, float16_t);" + "f16vec2 smoothstep(f16vec2, f16vec2, f16vec2);" + "f16vec3 smoothstep(f16vec3, f16vec3, f16vec3);" + "f16vec4 smoothstep(f16vec4, f16vec4, f16vec4);" + "f16vec2 smoothstep(float16_t, float16_t, f16vec2);" + "f16vec3 smoothstep(float16_t, float16_t, f16vec3);" + "f16vec4 smoothstep(float16_t, float16_t, 
f16vec4);" + + "bool isnan(float16_t);" + "bvec2 isnan(f16vec2);" + "bvec3 isnan(f16vec3);" + "bvec4 isnan(f16vec4);" + + "bool isinf(float16_t);" + "bvec2 isinf(f16vec2);" + "bvec3 isinf(f16vec3);" + "bvec4 isinf(f16vec4);" + + "float16_t fma(float16_t, float16_t, float16_t);" + "f16vec2 fma(f16vec2, f16vec2, f16vec2);" + "f16vec3 fma(f16vec3, f16vec3, f16vec3);" + "f16vec4 fma(f16vec4, f16vec4, f16vec4);" + + "float16_t frexp(float16_t, out int);" + "f16vec2 frexp(f16vec2, out ivec2);" + "f16vec3 frexp(f16vec3, out ivec3);" + "f16vec4 frexp(f16vec4, out ivec4);" + + "float16_t ldexp(float16_t, in int);" + "f16vec2 ldexp(f16vec2, in ivec2);" + "f16vec3 ldexp(f16vec3, in ivec3);" + "f16vec4 ldexp(f16vec4, in ivec4);" + + "uint packFloat2x16(f16vec2);" + "f16vec2 unpackFloat2x16(uint);" + + "float16_t length(float16_t);" + "float16_t length(f16vec2);" + "float16_t length(f16vec3);" + "float16_t length(f16vec4);" + + "float16_t distance(float16_t, float16_t);" + "float16_t distance(f16vec2, f16vec2);" + "float16_t distance(f16vec3, f16vec3);" + "float16_t distance(f16vec4, f16vec4);" + + "float16_t dot(float16_t, float16_t);" + "float16_t dot(f16vec2, f16vec2);" + "float16_t dot(f16vec3, f16vec3);" + "float16_t dot(f16vec4, f16vec4);" + + "f16vec3 cross(f16vec3, f16vec3);" + + "float16_t normalize(float16_t);" + "f16vec2 normalize(f16vec2);" + "f16vec3 normalize(f16vec3);" + "f16vec4 normalize(f16vec4);" + + "float16_t faceforward(float16_t, float16_t, float16_t);" + "f16vec2 faceforward(f16vec2, f16vec2, f16vec2);" + "f16vec3 faceforward(f16vec3, f16vec3, f16vec3);" + "f16vec4 faceforward(f16vec4, f16vec4, f16vec4);" + + "float16_t reflect(float16_t, float16_t);" + "f16vec2 reflect(f16vec2, f16vec2);" + "f16vec3 reflect(f16vec3, f16vec3);" + "f16vec4 reflect(f16vec4, f16vec4);" + + "float16_t refract(float16_t, float16_t, float16_t);" + "f16vec2 refract(f16vec2, f16vec2, float16_t);" + "f16vec3 refract(f16vec3, f16vec3, float16_t);" + "f16vec4 refract(f16vec4, f16vec4, float16_t);" + + "f16mat2 matrixCompMult(f16mat2, f16mat2);" + "f16mat3 matrixCompMult(f16mat3, f16mat3);" + "f16mat4 matrixCompMult(f16mat4, f16mat4);" + "f16mat2x3 matrixCompMult(f16mat2x3, f16mat2x3);" + "f16mat2x4 matrixCompMult(f16mat2x4, f16mat2x4);" + "f16mat3x2 matrixCompMult(f16mat3x2, f16mat3x2);" + "f16mat3x4 matrixCompMult(f16mat3x4, f16mat3x4);" + "f16mat4x2 matrixCompMult(f16mat4x2, f16mat4x2);" + "f16mat4x3 matrixCompMult(f16mat4x3, f16mat4x3);" + + "f16mat2 outerProduct(f16vec2, f16vec2);" + "f16mat3 outerProduct(f16vec3, f16vec3);" + "f16mat4 outerProduct(f16vec4, f16vec4);" + "f16mat2x3 outerProduct(f16vec3, f16vec2);" + "f16mat3x2 outerProduct(f16vec2, f16vec3);" + "f16mat2x4 outerProduct(f16vec4, f16vec2);" + "f16mat4x2 outerProduct(f16vec2, f16vec4);" + "f16mat3x4 outerProduct(f16vec4, f16vec3);" + "f16mat4x3 outerProduct(f16vec3, f16vec4);" + + "f16mat2 transpose(f16mat2);" + "f16mat3 transpose(f16mat3);" + "f16mat4 transpose(f16mat4);" + "f16mat2x3 transpose(f16mat3x2);" + "f16mat3x2 transpose(f16mat2x3);" + "f16mat2x4 transpose(f16mat4x2);" + "f16mat4x2 transpose(f16mat2x4);" + "f16mat3x4 transpose(f16mat4x3);" + "f16mat4x3 transpose(f16mat3x4);" + + "float16_t determinant(f16mat2);" + "float16_t determinant(f16mat3);" + "float16_t determinant(f16mat4);" + + "f16mat2 inverse(f16mat2);" + "f16mat3 inverse(f16mat3);" + "f16mat4 inverse(f16mat4);" + + "bvec2 lessThan(f16vec2, f16vec2);" + "bvec3 lessThan(f16vec3, f16vec3);" + "bvec4 lessThan(f16vec4, f16vec4);" + + "bvec2 lessThanEqual(f16vec2, f16vec2);" 
+ "bvec3 lessThanEqual(f16vec3, f16vec3);" + "bvec4 lessThanEqual(f16vec4, f16vec4);" + + "bvec2 greaterThan(f16vec2, f16vec2);" + "bvec3 greaterThan(f16vec3, f16vec3);" + "bvec4 greaterThan(f16vec4, f16vec4);" + + "bvec2 greaterThanEqual(f16vec2, f16vec2);" + "bvec3 greaterThanEqual(f16vec3, f16vec3);" + "bvec4 greaterThanEqual(f16vec4, f16vec4);" + + "bvec2 equal(f16vec2, f16vec2);" + "bvec3 equal(f16vec3, f16vec3);" + "bvec4 equal(f16vec4, f16vec4);" + + "bvec2 notEqual(f16vec2, f16vec2);" + "bvec3 notEqual(f16vec3, f16vec3);" + "bvec4 notEqual(f16vec4, f16vec4);" + + "\n"); + } + + // Explicit types + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "int8_t abs(int8_t);" + "i8vec2 abs(i8vec2);" + "i8vec3 abs(i8vec3);" + "i8vec4 abs(i8vec4);" + + "int8_t sign(int8_t);" + "i8vec2 sign(i8vec2);" + "i8vec3 sign(i8vec3);" + "i8vec4 sign(i8vec4);" + + "int8_t min(int8_t x, int8_t y);" + "i8vec2 min(i8vec2 x, int8_t y);" + "i8vec3 min(i8vec3 x, int8_t y);" + "i8vec4 min(i8vec4 x, int8_t y);" + "i8vec2 min(i8vec2 x, i8vec2 y);" + "i8vec3 min(i8vec3 x, i8vec3 y);" + "i8vec4 min(i8vec4 x, i8vec4 y);" + + "uint8_t min(uint8_t x, uint8_t y);" + "u8vec2 min(u8vec2 x, uint8_t y);" + "u8vec3 min(u8vec3 x, uint8_t y);" + "u8vec4 min(u8vec4 x, uint8_t y);" + "u8vec2 min(u8vec2 x, u8vec2 y);" + "u8vec3 min(u8vec3 x, u8vec3 y);" + "u8vec4 min(u8vec4 x, u8vec4 y);" + + "int8_t max(int8_t x, int8_t y);" + "i8vec2 max(i8vec2 x, int8_t y);" + "i8vec3 max(i8vec3 x, int8_t y);" + "i8vec4 max(i8vec4 x, int8_t y);" + "i8vec2 max(i8vec2 x, i8vec2 y);" + "i8vec3 max(i8vec3 x, i8vec3 y);" + "i8vec4 max(i8vec4 x, i8vec4 y);" + + "uint8_t max(uint8_t x, uint8_t y);" + "u8vec2 max(u8vec2 x, uint8_t y);" + "u8vec3 max(u8vec3 x, uint8_t y);" + "u8vec4 max(u8vec4 x, uint8_t y);" + "u8vec2 max(u8vec2 x, u8vec2 y);" + "u8vec3 max(u8vec3 x, u8vec3 y);" + "u8vec4 max(u8vec4 x, u8vec4 y);" + + "int8_t clamp(int8_t x, int8_t minVal, int8_t maxVal);" + "i8vec2 clamp(i8vec2 x, int8_t minVal, int8_t maxVal);" + "i8vec3 clamp(i8vec3 x, int8_t minVal, int8_t maxVal);" + "i8vec4 clamp(i8vec4 x, int8_t minVal, int8_t maxVal);" + "i8vec2 clamp(i8vec2 x, i8vec2 minVal, i8vec2 maxVal);" + "i8vec3 clamp(i8vec3 x, i8vec3 minVal, i8vec3 maxVal);" + "i8vec4 clamp(i8vec4 x, i8vec4 minVal, i8vec4 maxVal);" + + "uint8_t clamp(uint8_t x, uint8_t minVal, uint8_t maxVal);" + "u8vec2 clamp(u8vec2 x, uint8_t minVal, uint8_t maxVal);" + "u8vec3 clamp(u8vec3 x, uint8_t minVal, uint8_t maxVal);" + "u8vec4 clamp(u8vec4 x, uint8_t minVal, uint8_t maxVal);" + "u8vec2 clamp(u8vec2 x, u8vec2 minVal, u8vec2 maxVal);" + "u8vec3 clamp(u8vec3 x, u8vec3 minVal, u8vec3 maxVal);" + "u8vec4 clamp(u8vec4 x, u8vec4 minVal, u8vec4 maxVal);" + + "int8_t mix(int8_t, int8_t, bool);" + "i8vec2 mix(i8vec2, i8vec2, bvec2);" + "i8vec3 mix(i8vec3, i8vec3, bvec3);" + "i8vec4 mix(i8vec4, i8vec4, bvec4);" + "uint8_t mix(uint8_t, uint8_t, bool);" + "u8vec2 mix(u8vec2, u8vec2, bvec2);" + "u8vec3 mix(u8vec3, u8vec3, bvec3);" + "u8vec4 mix(u8vec4, u8vec4, bvec4);" + + "bvec2 lessThan(i8vec2, i8vec2);" + "bvec3 lessThan(i8vec3, i8vec3);" + "bvec4 lessThan(i8vec4, i8vec4);" + "bvec2 lessThan(u8vec2, u8vec2);" + "bvec3 lessThan(u8vec3, u8vec3);" + "bvec4 lessThan(u8vec4, u8vec4);" + + "bvec2 lessThanEqual(i8vec2, i8vec2);" + "bvec3 lessThanEqual(i8vec3, i8vec3);" + "bvec4 lessThanEqual(i8vec4, i8vec4);" + "bvec2 lessThanEqual(u8vec2, u8vec2);" + "bvec3 lessThanEqual(u8vec3, u8vec3);" + "bvec4 lessThanEqual(u8vec4, u8vec4);" + + "bvec2 greaterThan(i8vec2, 
i8vec2);" + "bvec3 greaterThan(i8vec3, i8vec3);" + "bvec4 greaterThan(i8vec4, i8vec4);" + "bvec2 greaterThan(u8vec2, u8vec2);" + "bvec3 greaterThan(u8vec3, u8vec3);" + "bvec4 greaterThan(u8vec4, u8vec4);" + + "bvec2 greaterThanEqual(i8vec2, i8vec2);" + "bvec3 greaterThanEqual(i8vec3, i8vec3);" + "bvec4 greaterThanEqual(i8vec4, i8vec4);" + "bvec2 greaterThanEqual(u8vec2, u8vec2);" + "bvec3 greaterThanEqual(u8vec3, u8vec3);" + "bvec4 greaterThanEqual(u8vec4, u8vec4);" + + "bvec2 equal(i8vec2, i8vec2);" + "bvec3 equal(i8vec3, i8vec3);" + "bvec4 equal(i8vec4, i8vec4);" + "bvec2 equal(u8vec2, u8vec2);" + "bvec3 equal(u8vec3, u8vec3);" + "bvec4 equal(u8vec4, u8vec4);" + + "bvec2 notEqual(i8vec2, i8vec2);" + "bvec3 notEqual(i8vec3, i8vec3);" + "bvec4 notEqual(i8vec4, i8vec4);" + "bvec2 notEqual(u8vec2, u8vec2);" + "bvec3 notEqual(u8vec3, u8vec3);" + "bvec4 notEqual(u8vec4, u8vec4);" + + " int8_t bitfieldExtract( int8_t, int8_t, int8_t);" + "i8vec2 bitfieldExtract(i8vec2, int8_t, int8_t);" + "i8vec3 bitfieldExtract(i8vec3, int8_t, int8_t);" + "i8vec4 bitfieldExtract(i8vec4, int8_t, int8_t);" + + " uint8_t bitfieldExtract( uint8_t, int8_t, int8_t);" + "u8vec2 bitfieldExtract(u8vec2, int8_t, int8_t);" + "u8vec3 bitfieldExtract(u8vec3, int8_t, int8_t);" + "u8vec4 bitfieldExtract(u8vec4, int8_t, int8_t);" + + " int8_t bitfieldInsert( int8_t base, int8_t, int8_t, int8_t);" + "i8vec2 bitfieldInsert(i8vec2 base, i8vec2, int8_t, int8_t);" + "i8vec3 bitfieldInsert(i8vec3 base, i8vec3, int8_t, int8_t);" + "i8vec4 bitfieldInsert(i8vec4 base, i8vec4, int8_t, int8_t);" + + " uint8_t bitfieldInsert( uint8_t base, uint8_t, int8_t, int8_t);" + "u8vec2 bitfieldInsert(u8vec2 base, u8vec2, int8_t, int8_t);" + "u8vec3 bitfieldInsert(u8vec3 base, u8vec3, int8_t, int8_t);" + "u8vec4 bitfieldInsert(u8vec4 base, u8vec4, int8_t, int8_t);" + + " int8_t bitCount( int8_t);" + "i8vec2 bitCount(i8vec2);" + "i8vec3 bitCount(i8vec3);" + "i8vec4 bitCount(i8vec4);" + + " int8_t bitCount( uint8_t);" + "i8vec2 bitCount(u8vec2);" + "i8vec3 bitCount(u8vec3);" + "i8vec4 bitCount(u8vec4);" + + " int8_t findLSB( int8_t);" + "i8vec2 findLSB(i8vec2);" + "i8vec3 findLSB(i8vec3);" + "i8vec4 findLSB(i8vec4);" + + " int8_t findLSB( uint8_t);" + "i8vec2 findLSB(u8vec2);" + "i8vec3 findLSB(u8vec3);" + "i8vec4 findLSB(u8vec4);" + + " int8_t findMSB( int8_t);" + "i8vec2 findMSB(i8vec2);" + "i8vec3 findMSB(i8vec3);" + "i8vec4 findMSB(i8vec4);" + + " int8_t findMSB( uint8_t);" + "i8vec2 findMSB(u8vec2);" + "i8vec3 findMSB(u8vec3);" + "i8vec4 findMSB(u8vec4);" + + "int16_t abs(int16_t);" + "i16vec2 abs(i16vec2);" + "i16vec3 abs(i16vec3);" + "i16vec4 abs(i16vec4);" + + "int16_t sign(int16_t);" + "i16vec2 sign(i16vec2);" + "i16vec3 sign(i16vec3);" + "i16vec4 sign(i16vec4);" + + "int16_t min(int16_t x, int16_t y);" + "i16vec2 min(i16vec2 x, int16_t y);" + "i16vec3 min(i16vec3 x, int16_t y);" + "i16vec4 min(i16vec4 x, int16_t y);" + "i16vec2 min(i16vec2 x, i16vec2 y);" + "i16vec3 min(i16vec3 x, i16vec3 y);" + "i16vec4 min(i16vec4 x, i16vec4 y);" + + "uint16_t min(uint16_t x, uint16_t y);" + "u16vec2 min(u16vec2 x, uint16_t y);" + "u16vec3 min(u16vec3 x, uint16_t y);" + "u16vec4 min(u16vec4 x, uint16_t y);" + "u16vec2 min(u16vec2 x, u16vec2 y);" + "u16vec3 min(u16vec3 x, u16vec3 y);" + "u16vec4 min(u16vec4 x, u16vec4 y);" + + "int16_t max(int16_t x, int16_t y);" + "i16vec2 max(i16vec2 x, int16_t y);" + "i16vec3 max(i16vec3 x, int16_t y);" + "i16vec4 max(i16vec4 x, int16_t y);" + "i16vec2 max(i16vec2 x, i16vec2 y);" + "i16vec3 max(i16vec3 x, i16vec3 y);" + 
"i16vec4 max(i16vec4 x, i16vec4 y);" + + "uint16_t max(uint16_t x, uint16_t y);" + "u16vec2 max(u16vec2 x, uint16_t y);" + "u16vec3 max(u16vec3 x, uint16_t y);" + "u16vec4 max(u16vec4 x, uint16_t y);" + "u16vec2 max(u16vec2 x, u16vec2 y);" + "u16vec3 max(u16vec3 x, u16vec3 y);" + "u16vec4 max(u16vec4 x, u16vec4 y);" + + "int16_t clamp(int16_t x, int16_t minVal, int16_t maxVal);" + "i16vec2 clamp(i16vec2 x, int16_t minVal, int16_t maxVal);" + "i16vec3 clamp(i16vec3 x, int16_t minVal, int16_t maxVal);" + "i16vec4 clamp(i16vec4 x, int16_t minVal, int16_t maxVal);" + "i16vec2 clamp(i16vec2 x, i16vec2 minVal, i16vec2 maxVal);" + "i16vec3 clamp(i16vec3 x, i16vec3 minVal, i16vec3 maxVal);" + "i16vec4 clamp(i16vec4 x, i16vec4 minVal, i16vec4 maxVal);" + + "uint16_t clamp(uint16_t x, uint16_t minVal, uint16_t maxVal);" + "u16vec2 clamp(u16vec2 x, uint16_t minVal, uint16_t maxVal);" + "u16vec3 clamp(u16vec3 x, uint16_t minVal, uint16_t maxVal);" + "u16vec4 clamp(u16vec4 x, uint16_t minVal, uint16_t maxVal);" + "u16vec2 clamp(u16vec2 x, u16vec2 minVal, u16vec2 maxVal);" + "u16vec3 clamp(u16vec3 x, u16vec3 minVal, u16vec3 maxVal);" + "u16vec4 clamp(u16vec4 x, u16vec4 minVal, u16vec4 maxVal);" + + "int16_t mix(int16_t, int16_t, bool);" + "i16vec2 mix(i16vec2, i16vec2, bvec2);" + "i16vec3 mix(i16vec3, i16vec3, bvec3);" + "i16vec4 mix(i16vec4, i16vec4, bvec4);" + "uint16_t mix(uint16_t, uint16_t, bool);" + "u16vec2 mix(u16vec2, u16vec2, bvec2);" + "u16vec3 mix(u16vec3, u16vec3, bvec3);" + "u16vec4 mix(u16vec4, u16vec4, bvec4);" + + "float16_t frexp(float16_t, out int16_t);" + "f16vec2 frexp(f16vec2, out i16vec2);" + "f16vec3 frexp(f16vec3, out i16vec3);" + "f16vec4 frexp(f16vec4, out i16vec4);" + + "float16_t ldexp(float16_t, int16_t);" + "f16vec2 ldexp(f16vec2, i16vec2);" + "f16vec3 ldexp(f16vec3, i16vec3);" + "f16vec4 ldexp(f16vec4, i16vec4);" + + "int16_t halfBitsToInt16(float16_t);" + "i16vec2 halfBitsToInt16(f16vec2);" + "i16vec3 halhBitsToInt16(f16vec3);" + "i16vec4 halfBitsToInt16(f16vec4);" + + "uint16_t halfBitsToUint16(float16_t);" + "u16vec2 halfBitsToUint16(f16vec2);" + "u16vec3 halfBitsToUint16(f16vec3);" + "u16vec4 halfBitsToUint16(f16vec4);" + + "int16_t float16BitsToInt16(float16_t);" + "i16vec2 float16BitsToInt16(f16vec2);" + "i16vec3 float16BitsToInt16(f16vec3);" + "i16vec4 float16BitsToInt16(f16vec4);" + + "uint16_t float16BitsToUint16(float16_t);" + "u16vec2 float16BitsToUint16(f16vec2);" + "u16vec3 float16BitsToUint16(f16vec3);" + "u16vec4 float16BitsToUint16(f16vec4);" + + "float16_t int16BitsToFloat16(int16_t);" + "f16vec2 int16BitsToFloat16(i16vec2);" + "f16vec3 int16BitsToFloat16(i16vec3);" + "f16vec4 int16BitsToFloat16(i16vec4);" + + "float16_t uint16BitsToFloat16(uint16_t);" + "f16vec2 uint16BitsToFloat16(u16vec2);" + "f16vec3 uint16BitsToFloat16(u16vec3);" + "f16vec4 uint16BitsToFloat16(u16vec4);" + + "float16_t int16BitsToHalf(int16_t);" + "f16vec2 int16BitsToHalf(i16vec2);" + "f16vec3 int16BitsToHalf(i16vec3);" + "f16vec4 int16BitsToHalf(i16vec4);" + + "float16_t uint16BitsToHalf(uint16_t);" + "f16vec2 uint16BitsToHalf(u16vec2);" + "f16vec3 uint16BitsToHalf(u16vec3);" + "f16vec4 uint16BitsToHalf(u16vec4);" + + "int packInt2x16(i16vec2);" + "uint packUint2x16(u16vec2);" + "int64_t packInt4x16(i16vec4);" + "uint64_t packUint4x16(u16vec4);" + "i16vec2 unpackInt2x16(int);" + "u16vec2 unpackUint2x16(uint);" + "i16vec4 unpackInt4x16(int64_t);" + "u16vec4 unpackUint4x16(uint64_t);" + + "bvec2 lessThan(i16vec2, i16vec2);" + "bvec3 lessThan(i16vec3, i16vec3);" + "bvec4 
lessThan(i16vec4, i16vec4);" + "bvec2 lessThan(u16vec2, u16vec2);" + "bvec3 lessThan(u16vec3, u16vec3);" + "bvec4 lessThan(u16vec4, u16vec4);" + + "bvec2 lessThanEqual(i16vec2, i16vec2);" + "bvec3 lessThanEqual(i16vec3, i16vec3);" + "bvec4 lessThanEqual(i16vec4, i16vec4);" + "bvec2 lessThanEqual(u16vec2, u16vec2);" + "bvec3 lessThanEqual(u16vec3, u16vec3);" + "bvec4 lessThanEqual(u16vec4, u16vec4);" + + "bvec2 greaterThan(i16vec2, i16vec2);" + "bvec3 greaterThan(i16vec3, i16vec3);" + "bvec4 greaterThan(i16vec4, i16vec4);" + "bvec2 greaterThan(u16vec2, u16vec2);" + "bvec3 greaterThan(u16vec3, u16vec3);" + "bvec4 greaterThan(u16vec4, u16vec4);" + + "bvec2 greaterThanEqual(i16vec2, i16vec2);" + "bvec3 greaterThanEqual(i16vec3, i16vec3);" + "bvec4 greaterThanEqual(i16vec4, i16vec4);" + "bvec2 greaterThanEqual(u16vec2, u16vec2);" + "bvec3 greaterThanEqual(u16vec3, u16vec3);" + "bvec4 greaterThanEqual(u16vec4, u16vec4);" + + "bvec2 equal(i16vec2, i16vec2);" + "bvec3 equal(i16vec3, i16vec3);" + "bvec4 equal(i16vec4, i16vec4);" + "bvec2 equal(u16vec2, u16vec2);" + "bvec3 equal(u16vec3, u16vec3);" + "bvec4 equal(u16vec4, u16vec4);" + + "bvec2 notEqual(i16vec2, i16vec2);" + "bvec3 notEqual(i16vec3, i16vec3);" + "bvec4 notEqual(i16vec4, i16vec4);" + "bvec2 notEqual(u16vec2, u16vec2);" + "bvec3 notEqual(u16vec3, u16vec3);" + "bvec4 notEqual(u16vec4, u16vec4);" + + " int16_t bitfieldExtract( int16_t, int16_t, int16_t);" + "i16vec2 bitfieldExtract(i16vec2, int16_t, int16_t);" + "i16vec3 bitfieldExtract(i16vec3, int16_t, int16_t);" + "i16vec4 bitfieldExtract(i16vec4, int16_t, int16_t);" + + " uint16_t bitfieldExtract( uint16_t, int16_t, int16_t);" + "u16vec2 bitfieldExtract(u16vec2, int16_t, int16_t);" + "u16vec3 bitfieldExtract(u16vec3, int16_t, int16_t);" + "u16vec4 bitfieldExtract(u16vec4, int16_t, int16_t);" + + " int16_t bitfieldInsert( int16_t base, int16_t, int16_t, int16_t);" + "i16vec2 bitfieldInsert(i16vec2 base, i16vec2, int16_t, int16_t);" + "i16vec3 bitfieldInsert(i16vec3 base, i16vec3, int16_t, int16_t);" + "i16vec4 bitfieldInsert(i16vec4 base, i16vec4, int16_t, int16_t);" + + " uint16_t bitfieldInsert( uint16_t base, uint16_t, int16_t, int16_t);" + "u16vec2 bitfieldInsert(u16vec2 base, u16vec2, int16_t, int16_t);" + "u16vec3 bitfieldInsert(u16vec3 base, u16vec3, int16_t, int16_t);" + "u16vec4 bitfieldInsert(u16vec4 base, u16vec4, int16_t, int16_t);" + + " int16_t bitCount( int16_t);" + "i16vec2 bitCount(i16vec2);" + "i16vec3 bitCount(i16vec3);" + "i16vec4 bitCount(i16vec4);" + + " int16_t bitCount( uint16_t);" + "i16vec2 bitCount(u16vec2);" + "i16vec3 bitCount(u16vec3);" + "i16vec4 bitCount(u16vec4);" + + " int16_t findLSB( int16_t);" + "i16vec2 findLSB(i16vec2);" + "i16vec3 findLSB(i16vec3);" + "i16vec4 findLSB(i16vec4);" + + " int16_t findLSB( uint16_t);" + "i16vec2 findLSB(u16vec2);" + "i16vec3 findLSB(u16vec3);" + "i16vec4 findLSB(u16vec4);" + + " int16_t findMSB( int16_t);" + "i16vec2 findMSB(i16vec2);" + "i16vec3 findMSB(i16vec3);" + "i16vec4 findMSB(i16vec4);" + + " int16_t findMSB( uint16_t);" + "i16vec2 findMSB(u16vec2);" + "i16vec3 findMSB(u16vec3);" + "i16vec4 findMSB(u16vec4);" + + "int16_t pack16(i8vec2);" + "uint16_t pack16(u8vec2);" + "int32_t pack32(i8vec4);" + "uint32_t pack32(u8vec4);" + "int32_t pack32(i16vec2);" + "uint32_t pack32(u16vec2);" + "int64_t pack64(i16vec4);" + "uint64_t pack64(u16vec4);" + "int64_t pack64(i32vec2);" + "uint64_t pack64(u32vec2);" + + "i8vec2 unpack8(int16_t);" + "u8vec2 unpack8(uint16_t);" + "i8vec4 unpack8(int32_t);" + "u8vec4 
unpack8(uint32_t);" + "i16vec2 unpack16(int32_t);" + "u16vec2 unpack16(uint32_t);" + "i16vec4 unpack16(int64_t);" + "u16vec4 unpack16(uint64_t);" + "i32vec2 unpack32(int64_t);" + "u32vec2 unpack32(uint64_t);" + + "float64_t radians(float64_t);" + "f64vec2 radians(f64vec2);" + "f64vec3 radians(f64vec3);" + "f64vec4 radians(f64vec4);" + + "float64_t degrees(float64_t);" + "f64vec2 degrees(f64vec2);" + "f64vec3 degrees(f64vec3);" + "f64vec4 degrees(f64vec4);" + + "float64_t sin(float64_t);" + "f64vec2 sin(f64vec2);" + "f64vec3 sin(f64vec3);" + "f64vec4 sin(f64vec4);" + + "float64_t cos(float64_t);" + "f64vec2 cos(f64vec2);" + "f64vec3 cos(f64vec3);" + "f64vec4 cos(f64vec4);" + + "float64_t tan(float64_t);" + "f64vec2 tan(f64vec2);" + "f64vec3 tan(f64vec3);" + "f64vec4 tan(f64vec4);" + + "float64_t asin(float64_t);" + "f64vec2 asin(f64vec2);" + "f64vec3 asin(f64vec3);" + "f64vec4 asin(f64vec4);" + + "float64_t acos(float64_t);" + "f64vec2 acos(f64vec2);" + "f64vec3 acos(f64vec3);" + "f64vec4 acos(f64vec4);" + + "float64_t atan(float64_t, float64_t);" + "f64vec2 atan(f64vec2, f64vec2);" + "f64vec3 atan(f64vec3, f64vec3);" + "f64vec4 atan(f64vec4, f64vec4);" + + "float64_t atan(float64_t);" + "f64vec2 atan(f64vec2);" + "f64vec3 atan(f64vec3);" + "f64vec4 atan(f64vec4);" + + "float64_t sinh(float64_t);" + "f64vec2 sinh(f64vec2);" + "f64vec3 sinh(f64vec3);" + "f64vec4 sinh(f64vec4);" + + "float64_t cosh(float64_t);" + "f64vec2 cosh(f64vec2);" + "f64vec3 cosh(f64vec3);" + "f64vec4 cosh(f64vec4);" + + "float64_t tanh(float64_t);" + "f64vec2 tanh(f64vec2);" + "f64vec3 tanh(f64vec3);" + "f64vec4 tanh(f64vec4);" + + "float64_t asinh(float64_t);" + "f64vec2 asinh(f64vec2);" + "f64vec3 asinh(f64vec3);" + "f64vec4 asinh(f64vec4);" + + "float64_t acosh(float64_t);" + "f64vec2 acosh(f64vec2);" + "f64vec3 acosh(f64vec3);" + "f64vec4 acosh(f64vec4);" + + "float64_t atanh(float64_t);" + "f64vec2 atanh(f64vec2);" + "f64vec3 atanh(f64vec3);" + "f64vec4 atanh(f64vec4);" + + "float64_t pow(float64_t, float64_t);" + "f64vec2 pow(f64vec2, f64vec2);" + "f64vec3 pow(f64vec3, f64vec3);" + "f64vec4 pow(f64vec4, f64vec4);" + + "float64_t exp(float64_t);" + "f64vec2 exp(f64vec2);" + "f64vec3 exp(f64vec3);" + "f64vec4 exp(f64vec4);" + + "float64_t log(float64_t);" + "f64vec2 log(f64vec2);" + "f64vec3 log(f64vec3);" + "f64vec4 log(f64vec4);" + + "float64_t exp2(float64_t);" + "f64vec2 exp2(f64vec2);" + "f64vec3 exp2(f64vec3);" + "f64vec4 exp2(f64vec4);" + + "float64_t log2(float64_t);" + "f64vec2 log2(f64vec2);" + "f64vec3 log2(f64vec3);" + "f64vec4 log2(f64vec4);" + "\n"); + } + + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangFragment].append(derivativesAndControl64bits); + stageBuiltins[EShLangFragment].append( + "float64_t interpolateAtCentroid(float64_t);" + "f64vec2 interpolateAtCentroid(f64vec2);" + "f64vec3 interpolateAtCentroid(f64vec3);" + "f64vec4 interpolateAtCentroid(f64vec4);" + + "float64_t interpolateAtSample(float64_t, int);" + "f64vec2 interpolateAtSample(f64vec2, int);" + "f64vec3 interpolateAtSample(f64vec3, int);" + "f64vec4 interpolateAtSample(f64vec4, int);" + + "float64_t interpolateAtOffset(float64_t, f64vec2);" + "f64vec2 interpolateAtOffset(f64vec2, f64vec2);" + "f64vec3 interpolateAtOffset(f64vec3, f64vec2);" + "f64vec4 interpolateAtOffset(f64vec4, f64vec2);" + + "\n"); + + } +#endif // !GLSLANG_ANGLE + + //============================================================================ + // + // Prototypes for built-in functions seen by vertex shaders only. 
+ // (Except legacy lod functions, where it depends which release they are + // vertex only.) + // + //============================================================================ + + // + // Geometric Functions. + // + if (spvVersion.vulkan == 0 && IncludeLegacy(version, profile, spvVersion)) + stageBuiltins[EShLangVertex].append("vec4 ftransform();"); + +#ifndef GLSLANG_ANGLE + // + // Original-style texture Functions with lod. + // + TString* s; + if (version == 100) + s = &stageBuiltins[EShLangVertex]; + else + s = &commonBuiltins; + if ((profile == EEsProfile && version == 100) || + profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + s->append( + "vec4 texture2DLod(sampler2D, vec2, float);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjLod(sampler2D, vec3, float);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjLod(sampler2D, vec4, float);" // GL_ARB_shader_texture_lod + "vec4 texture3DLod(sampler3D, vec3, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check + "vec4 texture3DProjLod(sampler3D, vec4, float);" // GL_ARB_shader_texture_lod // OES_texture_3D, but caught by keyword check + "vec4 textureCubeLod(samplerCube, vec3, float);" // GL_ARB_shader_texture_lod + + "\n"); + } + } + if ( profile == ECompatibilityProfile || + (profile == ECoreProfile && version < 420) || + profile == ENoProfile) { + if (spvVersion.spv == 0) { + s->append( + "vec4 texture1DLod(sampler1D, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjLod(sampler1D, vec2, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjLod(sampler1D, vec4, float);" // GL_ARB_shader_texture_lod + "vec4 shadow1DLod(sampler1DShadow, vec3, float);" // GL_ARB_shader_texture_lod + "vec4 shadow2DLod(sampler2DShadow, vec3, float);" // GL_ARB_shader_texture_lod + "vec4 shadow1DProjLod(sampler1DShadow, vec4, float);" // GL_ARB_shader_texture_lod + "vec4 shadow2DProjLod(sampler2DShadow, vec4, float);" // GL_ARB_shader_texture_lod + + "vec4 texture1DGradARB(sampler1D, float, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjGradARB(sampler1D, vec2, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture1DProjGradARB(sampler1D, vec4, float, float);" // GL_ARB_shader_texture_lod + "vec4 texture2DGradARB(sampler2D, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjGradARB(sampler2D, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DProjGradARB(sampler2D, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture3DGradARB(sampler3D, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod + "vec4 texture3DProjGradARB(sampler3D, vec4, vec3, vec3);" // GL_ARB_shader_texture_lod + "vec4 textureCubeGradARB(samplerCube, vec3, vec3, vec3);" // GL_ARB_shader_texture_lod + "vec4 shadow1DGradARB(sampler1DShadow, vec3, float, float);" // GL_ARB_shader_texture_lod + "vec4 shadow1DProjGradARB( sampler1DShadow, vec4, float, float);" // GL_ARB_shader_texture_lod + "vec4 shadow2DGradARB(sampler2DShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 shadow2DProjGradARB( sampler2DShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DRectGradARB(sampler2DRect, vec2, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DRectProjGradARB( sampler2DRect, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 texture2DRectProjGradARB( sampler2DRect, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 shadow2DRectGradARB( 
sampler2DRectShadow, vec3, vec2, vec2);" // GL_ARB_shader_texture_lod + "vec4 shadow2DRectProjGradARB(sampler2DRectShadow, vec4, vec2, vec2);" // GL_ARB_shader_texture_lod + + "\n"); + } + } +#endif // !GLSLANG_ANGLE + + if ((profile != EEsProfile && version >= 150) || + (profile == EEsProfile && version >= 310)) { + //============================================================================ + // + // Prototypes for built-in functions seen by geometry shaders only. + // + //============================================================================ + + if (profile != EEsProfile && version >= 400) { + stageBuiltins[EShLangGeometry].append( + "void EmitStreamVertex(int);" + "void EndStreamPrimitive(int);" + ); + } + stageBuiltins[EShLangGeometry].append( + "void EmitVertex();" + "void EndPrimitive();" + "\n"); + } +#endif // !GLSLANG_WEB + + //============================================================================ + // + // Prototypes for all control functions. + // + //============================================================================ + bool esBarrier = (profile == EEsProfile && version >= 310); + if ((profile != EEsProfile && version >= 150) || esBarrier) + stageBuiltins[EShLangTessControl].append( + "void barrier();" + ); + if ((profile != EEsProfile && version >= 420) || esBarrier) + stageBuiltins[EShLangCompute].append( + "void barrier();" + ); + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + stageBuiltins[EShLangMeshNV].append( + "void barrier();" + ); + stageBuiltins[EShLangTaskNV].append( + "void barrier();" + ); + } + if ((profile != EEsProfile && version >= 130) || esBarrier) + commonBuiltins.append( + "void memoryBarrier();" + ); + if ((profile != EEsProfile && version >= 420) || esBarrier) { + commonBuiltins.append( + "void memoryBarrierBuffer();" + ); + stageBuiltins[EShLangCompute].append( + "void memoryBarrierShared();" + "void groupMemoryBarrier();" + ); + } +#ifndef GLSLANG_WEB + if ((profile != EEsProfile && version >= 420) || esBarrier) { + if (spvVersion.vulkan == 0) { + commonBuiltins.append("void memoryBarrierAtomicCounter();"); + } + commonBuiltins.append("void memoryBarrierImage();"); + } + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + stageBuiltins[EShLangMeshNV].append( + "void memoryBarrierShared();" + "void groupMemoryBarrier();" + ); + stageBuiltins[EShLangTaskNV].append( + "void memoryBarrierShared();" + "void groupMemoryBarrier();" + ); + } + + commonBuiltins.append("void controlBarrier(int, int, int, int);\n" + "void memoryBarrier(int, int, int);\n"); + + commonBuiltins.append("void debugPrintfEXT();\n"); + +#ifndef GLSLANG_ANGLE + if (profile != EEsProfile && version >= 450) { + // coopMatStoreNV perhaps ought to have "out" on the buf parameter, but + // adding it introduces undesirable tempArgs on the stack. What we want + // is more like "buf" thought of as a pointer value being an in parameter. 
+ stageBuiltins[EShLangCompute].append( + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n" + + "void coopMatStoreNV(fcoopmatNV m, volatile coherent float16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent float[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent float64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(fcoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n" + + "fcoopmatNV coopMatMulAddNV(fcoopmatNV A, fcoopmatNV B, fcoopmatNV C);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out icoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n" + + "void coopMatLoadNV(out 
ucoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatLoadNV(out ucoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n" + + "void coopMatStoreNV(icoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(icoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n" + + "void coopMatStoreNV(ucoopmatNV m, volatile coherent int8_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent int16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent int[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent int64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent ivec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent ivec4[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint8_t[] buf, uint element, uint stride, 
bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint16_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent uint64_t[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent uvec2[] buf, uint element, uint stride, bool colMajor);\n" + "void coopMatStoreNV(ucoopmatNV m, volatile coherent uvec4[] buf, uint element, uint stride, bool colMajor);\n" + + "icoopmatNV coopMatMulAddNV(icoopmatNV A, icoopmatNV B, icoopmatNV C);\n" + "ucoopmatNV coopMatMulAddNV(ucoopmatNV A, ucoopmatNV B, ucoopmatNV C);\n" + ); + } + + //============================================================================ + // + // Prototypes for built-in functions seen by fragment shaders only. + // + //============================================================================ + + // + // Original-style texture Functions with bias. + // + if (spvVersion.spv == 0 && (profile != EEsProfile || version == 100)) { + stageBuiltins[EShLangFragment].append( + "vec4 texture2D(sampler2D, vec2, float);" + "vec4 texture2DProj(sampler2D, vec3, float);" + "vec4 texture2DProj(sampler2D, vec4, float);" + "vec4 texture3D(sampler3D, vec3, float);" // OES_texture_3D + "vec4 texture3DProj(sampler3D, vec4, float);" // OES_texture_3D + "vec4 textureCube(samplerCube, vec3, float);" + + "\n"); + } + if (spvVersion.spv == 0 && (profile != EEsProfile && version > 100)) { + stageBuiltins[EShLangFragment].append( + "vec4 texture1D(sampler1D, float, float);" + "vec4 texture1DProj(sampler1D, vec2, float);" + "vec4 texture1DProj(sampler1D, vec4, float);" + "vec4 shadow1D(sampler1DShadow, vec3, float);" + "vec4 shadow2D(sampler2DShadow, vec3, float);" + "vec4 shadow1DProj(sampler1DShadow, vec4, float);" + "vec4 shadow2DProj(sampler2DShadow, vec4, float);" + + "\n"); + } + if (spvVersion.spv == 0 && profile == EEsProfile) { + stageBuiltins[EShLangFragment].append( + "vec4 texture2DLodEXT(sampler2D, vec2, float);" // GL_EXT_shader_texture_lod + "vec4 texture2DProjLodEXT(sampler2D, vec3, float);" // GL_EXT_shader_texture_lod + "vec4 texture2DProjLodEXT(sampler2D, vec4, float);" // GL_EXT_shader_texture_lod + "vec4 textureCubeLodEXT(samplerCube, vec3, float);" // GL_EXT_shader_texture_lod + + "\n"); + } +#endif // !GLSLANG_ANGLE + + // GL_ARB_derivative_control + if (profile != EEsProfile && version >= 400) { + stageBuiltins[EShLangFragment].append(derivativeControls); + stageBuiltins[EShLangFragment].append("\n"); + } + + // GL_OES_shader_multisample_interpolation + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 400)) { + stageBuiltins[EShLangFragment].append( + "float interpolateAtCentroid(float);" + "vec2 interpolateAtCentroid(vec2);" + "vec3 interpolateAtCentroid(vec3);" + "vec4 interpolateAtCentroid(vec4);" + + "float interpolateAtSample(float, int);" + "vec2 interpolateAtSample(vec2, int);" + "vec3 interpolateAtSample(vec3, int);" + "vec4 interpolateAtSample(vec4, int);" + + "float interpolateAtOffset(float, vec2);" + "vec2 interpolateAtOffset(vec2, vec2);" + "vec3 interpolateAtOffset(vec3, vec2);" + "vec4 interpolateAtOffset(vec4, vec2);" + + "\n"); + } + + stageBuiltins[EShLangFragment].append( + "void beginInvocationInterlockARB(void);" + "void endInvocationInterlockARB(void);"); + + stageBuiltins[EShLangFragment].append( + "bool helperInvocationEXT();" + "\n"); + 
+#ifndef GLSLANG_ANGLE + // GL_AMD_shader_explicit_vertex_parameter + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangFragment].append( + "float interpolateAtVertexAMD(float, uint);" + "vec2 interpolateAtVertexAMD(vec2, uint);" + "vec3 interpolateAtVertexAMD(vec3, uint);" + "vec4 interpolateAtVertexAMD(vec4, uint);" + + "int interpolateAtVertexAMD(int, uint);" + "ivec2 interpolateAtVertexAMD(ivec2, uint);" + "ivec3 interpolateAtVertexAMD(ivec3, uint);" + "ivec4 interpolateAtVertexAMD(ivec4, uint);" + + "uint interpolateAtVertexAMD(uint, uint);" + "uvec2 interpolateAtVertexAMD(uvec2, uint);" + "uvec3 interpolateAtVertexAMD(uvec3, uint);" + "uvec4 interpolateAtVertexAMD(uvec4, uint);" + + "float16_t interpolateAtVertexAMD(float16_t, uint);" + "f16vec2 interpolateAtVertexAMD(f16vec2, uint);" + "f16vec3 interpolateAtVertexAMD(f16vec3, uint);" + "f16vec4 interpolateAtVertexAMD(f16vec4, uint);" + + "\n"); + } + + // GL_AMD_gpu_shader_half_float + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangFragment].append(derivativesAndControl16bits); + stageBuiltins[EShLangFragment].append("\n"); + + stageBuiltins[EShLangFragment].append( + "float16_t interpolateAtCentroid(float16_t);" + "f16vec2 interpolateAtCentroid(f16vec2);" + "f16vec3 interpolateAtCentroid(f16vec3);" + "f16vec4 interpolateAtCentroid(f16vec4);" + + "float16_t interpolateAtSample(float16_t, int);" + "f16vec2 interpolateAtSample(f16vec2, int);" + "f16vec3 interpolateAtSample(f16vec3, int);" + "f16vec4 interpolateAtSample(f16vec4, int);" + + "float16_t interpolateAtOffset(float16_t, f16vec2);" + "f16vec2 interpolateAtOffset(f16vec2, f16vec2);" + "f16vec3 interpolateAtOffset(f16vec3, f16vec2);" + "f16vec4 interpolateAtOffset(f16vec4, f16vec2);" + + "\n"); + } + + // GL_ARB_shader_clock & GL_EXT_shader_realtime_clock + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append( + "uvec2 clock2x32ARB();" + "uint64_t clockARB();" + "uvec2 clockRealtime2x32EXT();" + "uint64_t clockRealtimeEXT();" + "\n"); + } + + // GL_AMD_shader_fragment_mask + if (profile != EEsProfile && version >= 450 && spvVersion.vulkan > 0) { + stageBuiltins[EShLangFragment].append( + "uint fragmentMaskFetchAMD(subpassInputMS);" + "uint fragmentMaskFetchAMD(isubpassInputMS);" + "uint fragmentMaskFetchAMD(usubpassInputMS);" + + "vec4 fragmentFetchAMD(subpassInputMS, uint);" + "ivec4 fragmentFetchAMD(isubpassInputMS, uint);" + "uvec4 fragmentFetchAMD(usubpassInputMS, uint);" + + "\n"); + } + + // Builtins for GL_NV_ray_tracing/GL_EXT_ray_tracing/GL_EXT_ray_query + if (profile != EEsProfile && version >= 460) { + commonBuiltins.append("void rayQueryInitializeEXT(rayQueryEXT, accelerationStructureEXT, uint, uint, vec3, float, vec3, float);" + "void rayQueryTerminateEXT(rayQueryEXT);" + "void rayQueryGenerateIntersectionEXT(rayQueryEXT, float);" + "void rayQueryConfirmIntersectionEXT(rayQueryEXT);" + "bool rayQueryProceedEXT(rayQueryEXT);" + "uint rayQueryGetIntersectionTypeEXT(rayQueryEXT, bool);" + "float rayQueryGetRayTMinEXT(rayQueryEXT);" + "uint rayQueryGetRayFlagsEXT(rayQueryEXT);" + "vec3 rayQueryGetWorldRayOriginEXT(rayQueryEXT);" + "vec3 rayQueryGetWorldRayDirectionEXT(rayQueryEXT);" + "float rayQueryGetIntersectionTEXT(rayQueryEXT, bool);" + "int rayQueryGetIntersectionInstanceCustomIndexEXT(rayQueryEXT, bool);" + "int rayQueryGetIntersectionInstanceIdEXT(rayQueryEXT, bool);" + "uint rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT(rayQueryEXT, bool);" + "int 
rayQueryGetIntersectionGeometryIndexEXT(rayQueryEXT, bool);" + "int rayQueryGetIntersectionPrimitiveIndexEXT(rayQueryEXT, bool);" + "vec2 rayQueryGetIntersectionBarycentricsEXT(rayQueryEXT, bool);" + "bool rayQueryGetIntersectionFrontFaceEXT(rayQueryEXT, bool);" + "bool rayQueryGetIntersectionCandidateAABBOpaqueEXT(rayQueryEXT);" + "vec3 rayQueryGetIntersectionObjectRayDirectionEXT(rayQueryEXT, bool);" + "vec3 rayQueryGetIntersectionObjectRayOriginEXT(rayQueryEXT, bool);" + "mat4x3 rayQueryGetIntersectionObjectToWorldEXT(rayQueryEXT, bool);" + "mat4x3 rayQueryGetIntersectionWorldToObjectEXT(rayQueryEXT, bool);" + "\n"); + + stageBuiltins[EShLangRayGen].append( + "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void traceRayEXT(accelerationStructureEXT,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + stageBuiltins[EShLangIntersect].append( + "bool reportIntersectionNV(float, uint);" + "bool reportIntersectionEXT(float, uint);" + "\n"); + stageBuiltins[EShLangAnyHit].append( + "void ignoreIntersectionNV();" + "void ignoreIntersectionEXT();" + "void terminateRayNV();" + "void terminateRayEXT();" + "\n"); + stageBuiltins[EShLangClosestHit].append( + "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void traceRayEXT(accelerationStructureEXT,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + stageBuiltins[EShLangMiss].append( + "void traceNV(accelerationStructureNV,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void traceRayEXT(accelerationStructureEXT,uint,uint,uint,uint,uint,vec3,float,vec3,float,int);" + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + stageBuiltins[EShLangCallable].append( + "void executeCallableNV(uint, int);" + "void executeCallableEXT(uint, int);" + "\n"); + } +#endif // !GLSLANG_ANGLE + + //E_SPV_NV_compute_shader_derivatives + if ((profile == EEsProfile && version >= 320) || (profile != EEsProfile && version >= 450)) { + stageBuiltins[EShLangCompute].append(derivativeControls); + stageBuiltins[EShLangCompute].append("\n"); + } +#ifndef GLSLANG_ANGLE + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangCompute].append(derivativesAndControl16bits); + stageBuiltins[EShLangCompute].append(derivativesAndControl64bits); + stageBuiltins[EShLangCompute].append("\n"); + } + + // Builtins for GL_NV_mesh_shader + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + stageBuiltins[EShLangMeshNV].append( + "void writePackedPrimitiveIndices4x8NV(uint, uint);" + "\n"); + } +#endif // !GLSLANG_ANGLE +#endif // !GLSLANG_WEB + + //============================================================================ + // + // Standard Uniforms + // + //============================================================================ + + // + // Depth range in window coordinates, p. 
33 + // + if (spvVersion.spv == 0) { + commonBuiltins.append( + "struct gl_DepthRangeParameters {" + ); + if (profile == EEsProfile) { + commonBuiltins.append( + "highp float near;" // n + "highp float far;" // f + "highp float diff;" // f - n + ); + } else { +#ifndef GLSLANG_WEB + commonBuiltins.append( + "float near;" // n + "float far;" // f + "float diff;" // f - n + ); +#endif + } + + commonBuiltins.append( + "};" + "uniform gl_DepthRangeParameters gl_DepthRange;" + "\n"); + } + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) { + // + // Matrix state. p. 31, 32, 37, 39, 40. + // + commonBuiltins.append( + "uniform mat4 gl_ModelViewMatrix;" + "uniform mat4 gl_ProjectionMatrix;" + "uniform mat4 gl_ModelViewProjectionMatrix;" + + // + // Derived matrix state that provides inverse and transposed versions + // of the matrices above. + // + "uniform mat3 gl_NormalMatrix;" + + "uniform mat4 gl_ModelViewMatrixInverse;" + "uniform mat4 gl_ProjectionMatrixInverse;" + "uniform mat4 gl_ModelViewProjectionMatrixInverse;" + + "uniform mat4 gl_ModelViewMatrixTranspose;" + "uniform mat4 gl_ProjectionMatrixTranspose;" + "uniform mat4 gl_ModelViewProjectionMatrixTranspose;" + + "uniform mat4 gl_ModelViewMatrixInverseTranspose;" + "uniform mat4 gl_ProjectionMatrixInverseTranspose;" + "uniform mat4 gl_ModelViewProjectionMatrixInverseTranspose;" + + // + // Normal scaling p. 39. + // + "uniform float gl_NormalScale;" + + // + // Point Size, p. 66, 67. + // + "struct gl_PointParameters {" + "float size;" + "float sizeMin;" + "float sizeMax;" + "float fadeThresholdSize;" + "float distanceConstantAttenuation;" + "float distanceLinearAttenuation;" + "float distanceQuadraticAttenuation;" + "};" + + "uniform gl_PointParameters gl_Point;" + + // + // Material State p. 50, 55. + // + "struct gl_MaterialParameters {" + "vec4 emission;" // Ecm + "vec4 ambient;" // Acm + "vec4 diffuse;" // Dcm + "vec4 specular;" // Scm + "float shininess;" // Srm + "};" + "uniform gl_MaterialParameters gl_FrontMaterial;" + "uniform gl_MaterialParameters gl_BackMaterial;" + + // + // Light State p 50, 53, 55. + // + "struct gl_LightSourceParameters {" + "vec4 ambient;" // Acli + "vec4 diffuse;" // Dcli + "vec4 specular;" // Scli + "vec4 position;" // Ppli + "vec4 halfVector;" // Derived: Hi + "vec3 spotDirection;" // Sdli + "float spotExponent;" // Srli + "float spotCutoff;" // Crli + // (range: [0.0,90.0], 180.0) + "float spotCosCutoff;" // Derived: cos(Crli) + // (range: [1.0,0.0],-1.0) + "float constantAttenuation;" // K0 + "float linearAttenuation;" // K1 + "float quadraticAttenuation;"// K2 + "};" + + "struct gl_LightModelParameters {" + "vec4 ambient;" // Acs + "};" + + "uniform gl_LightModelParameters gl_LightModel;" + + // + // Derived state from products of light and material. + // + "struct gl_LightModelProducts {" + "vec4 sceneColor;" // Derived. Ecm + Acm * Acs + "};" + + "uniform gl_LightModelProducts gl_FrontLightModelProduct;" + "uniform gl_LightModelProducts gl_BackLightModelProduct;" + + "struct gl_LightProducts {" + "vec4 ambient;" // Acm * Acli + "vec4 diffuse;" // Dcm * Dcli + "vec4 specular;" // Scm * Scli + "};" + + // + // Fog p. 
161 + // + "struct gl_FogParameters {" + "vec4 color;" + "float density;" + "float start;" + "float end;" + "float scale;" // 1 / (gl_FogEnd - gl_FogStart) + "};" + + "uniform gl_FogParameters gl_Fog;" + + "\n"); + } +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE + + //============================================================================ + // + // Define the interface to the compute shader. + // + //============================================================================ + + if ((profile != EEsProfile && version >= 420) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangCompute].append( + "in highp uvec3 gl_NumWorkGroups;" + "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);" + + "in highp uvec3 gl_WorkGroupID;" + "in highp uvec3 gl_LocalInvocationID;" + + "in highp uvec3 gl_GlobalInvocationID;" + "in highp uint gl_LocalInvocationIndex;" + + "\n"); + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangCompute].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "\n"); + } + +#ifndef GLSLANG_WEB +#ifndef GLSLANG_ANGLE + //============================================================================ + // + // Define the interface to the mesh/task shader. + // + //============================================================================ + + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + // per-vertex attributes + stageBuiltins[EShLangMeshNV].append( + "out gl_MeshPerVertexNV {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + "float gl_CullDistance[];" + "perviewNV vec4 gl_PositionPerViewNV[];" + "perviewNV float gl_ClipDistancePerViewNV[][];" + "perviewNV float gl_CullDistancePerViewNV[][];" + "} gl_MeshVerticesNV[];" + ); + + // per-primitive attributes + stageBuiltins[EShLangMeshNV].append( + "perprimitiveNV out gl_MeshPerPrimitiveNV {" + "int gl_PrimitiveID;" + "int gl_Layer;" + "int gl_ViewportIndex;" + "int gl_ViewportMask[];" + "perviewNV int gl_LayerPerViewNV[];" + "perviewNV int gl_ViewportMaskPerViewNV[][];" + "} gl_MeshPrimitivesNV[];" + ); + + stageBuiltins[EShLangMeshNV].append( + "out uint gl_PrimitiveCountNV;" + "out uint gl_PrimitiveIndicesNV[];" + + "in uint gl_MeshViewCountNV;" + "in uint gl_MeshViewIndicesNV[4];" + + "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);" + + "in highp uvec3 gl_WorkGroupID;" + "in highp uvec3 gl_LocalInvocationID;" + + "in highp uvec3 gl_GlobalInvocationID;" + "in highp uint gl_LocalInvocationIndex;" + + "\n"); + + stageBuiltins[EShLangTaskNV].append( + "out uint gl_TaskCountNV;" + + "const highp uvec3 gl_WorkGroupSize = uvec3(1,1,1);" + + "in highp uvec3 gl_WorkGroupID;" + "in highp uvec3 gl_LocalInvocationID;" + + "in highp uvec3 gl_GlobalInvocationID;" + "in highp uint gl_LocalInvocationIndex;" + + "in uint gl_MeshViewCountNV;" + "in uint gl_MeshViewIndicesNV[4];" + + "\n"); + } + + if (profile != EEsProfile && version >= 450) { + stageBuiltins[EShLangMeshNV].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters + "\n"); + + stageBuiltins[EShLangTaskNV].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in int gl_DrawIDARB;" // GL_ARB_shader_draw_parameters + "\n"); + + if (version >= 460) { + stageBuiltins[EShLangMeshNV].append( + "in int gl_DrawID;" + "\n"); + + stageBuiltins[EShLangTaskNV].append( + "in int gl_DrawID;" + "\n"); + } + } +#endif // !GLSLANG_ANGLE + 
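+ // A minimal GL_NV_mesh_shader usage sketch (illustrative only, not part of the upstream
+ // source) of the mesh-stage interface declared above; it emits one full-screen triangle:
+ //
+ //   #version 450
+ //   #extension GL_NV_mesh_shader : require
+ //   layout(local_size_x = 1) in;
+ //   layout(triangles, max_vertices = 3, max_primitives = 1) out;
+ //   void main()
+ //   {
+ //       gl_MeshVerticesNV[0].gl_Position = vec4(-1.0, -1.0, 0.0, 1.0);
+ //       gl_MeshVerticesNV[1].gl_Position = vec4( 3.0, -1.0, 0.0, 1.0);
+ //       gl_MeshVerticesNV[2].gl_Position = vec4(-1.0,  3.0, 0.0, 1.0);
+ //       gl_PrimitiveIndicesNV[0] = 0u;   // indices into gl_MeshVerticesNV
+ //       gl_PrimitiveIndicesNV[1] = 1u;
+ //       gl_PrimitiveIndicesNV[2] = 2u;
+ //       gl_PrimitiveCountNV = 1u;        // one primitive written by this workgroup
+ //   }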
+ //============================================================================ + // + // Define the interface to the vertex shader. + // + //============================================================================ + + if (profile != EEsProfile) { + if (version < 130) { + stageBuiltins[EShLangVertex].append( + "attribute vec4 gl_Color;" + "attribute vec4 gl_SecondaryColor;" + "attribute vec3 gl_Normal;" + "attribute vec4 gl_Vertex;" + "attribute vec4 gl_MultiTexCoord0;" + "attribute vec4 gl_MultiTexCoord1;" + "attribute vec4 gl_MultiTexCoord2;" + "attribute vec4 gl_MultiTexCoord3;" + "attribute vec4 gl_MultiTexCoord4;" + "attribute vec4 gl_MultiTexCoord5;" + "attribute vec4 gl_MultiTexCoord6;" + "attribute vec4 gl_MultiTexCoord7;" + "attribute float gl_FogCoord;" + "\n"); + } else if (IncludeLegacy(version, profile, spvVersion)) { + stageBuiltins[EShLangVertex].append( + "in vec4 gl_Color;" + "in vec4 gl_SecondaryColor;" + "in vec3 gl_Normal;" + "in vec4 gl_Vertex;" + "in vec4 gl_MultiTexCoord0;" + "in vec4 gl_MultiTexCoord1;" + "in vec4 gl_MultiTexCoord2;" + "in vec4 gl_MultiTexCoord3;" + "in vec4 gl_MultiTexCoord4;" + "in vec4 gl_MultiTexCoord5;" + "in vec4 gl_MultiTexCoord6;" + "in vec4 gl_MultiTexCoord7;" + "in float gl_FogCoord;" + "\n"); + } + + if (version < 150) { + if (version < 130) { + stageBuiltins[EShLangVertex].append( + " vec4 gl_ClipVertex;" // needs qualifier fixed later + "varying vec4 gl_FrontColor;" + "varying vec4 gl_BackColor;" + "varying vec4 gl_FrontSecondaryColor;" + "varying vec4 gl_BackSecondaryColor;" + "varying vec4 gl_TexCoord[];" + "varying float gl_FogFragCoord;" + "\n"); + } else if (IncludeLegacy(version, profile, spvVersion)) { + stageBuiltins[EShLangVertex].append( + " vec4 gl_ClipVertex;" // needs qualifier fixed later + "out vec4 gl_FrontColor;" + "out vec4 gl_BackColor;" + "out vec4 gl_FrontSecondaryColor;" + "out vec4 gl_BackSecondaryColor;" + "out vec4 gl_TexCoord[];" + "out float gl_FogFragCoord;" + "\n"); + } + stageBuiltins[EShLangVertex].append( + "vec4 gl_Position;" // needs qualifier fixed later + "float gl_PointSize;" // needs qualifier fixed later + ); + + if (version == 130 || version == 140) + stageBuiltins[EShLangVertex].append( + "out float gl_ClipDistance[];" + ); + } else { + // version >= 150 + stageBuiltins[EShLangVertex].append( + "out gl_PerVertex {" + "vec4 gl_Position;" // needs qualifier fixed later + "float gl_PointSize;" // needs qualifier fixed later + "float gl_ClipDistance[];" + ); + if (IncludeLegacy(version, profile, spvVersion)) + stageBuiltins[EShLangVertex].append( + "vec4 gl_ClipVertex;" // needs qualifier fixed later + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangVertex].append( + "float gl_CullDistance[];" + ); + stageBuiltins[EShLangVertex].append( + "};" + "\n"); + } + if (version >= 130 && spvVersion.vulkan == 0) + stageBuiltins[EShLangVertex].append( + "int gl_VertexID;" // needs qualifier fixed later + ); + if (version >= 140 && spvVersion.vulkan == 0) + stageBuiltins[EShLangVertex].append( + "int gl_InstanceID;" // needs qualifier fixed later + ); + if (spvVersion.vulkan > 0 && version >= 140) + stageBuiltins[EShLangVertex].append( + "in int gl_VertexIndex;" + "in int gl_InstanceIndex;" + ); + if (version >= 440) { + stageBuiltins[EShLangVertex].append( + "in int gl_BaseVertexARB;" + "in int gl_BaseInstanceARB;" + "in int gl_DrawIDARB;" + ); 
+ } + if (version >= 410) { + stageBuiltins[EShLangVertex].append( + "out int gl_ViewportIndex;" + "out int gl_Layer;" + ); + } + if (version >= 460) { + stageBuiltins[EShLangVertex].append( + "in int gl_BaseVertex;" + "in int gl_BaseInstance;" + "in int gl_DrawID;" + ); + } + + if (version >= 450) + stageBuiltins[EShLangVertex].append( + "out int gl_ViewportMask[];" // GL_NV_viewport_array2 + "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering + "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + } else { + // ES profile + if (version == 100) { + stageBuiltins[EShLangVertex].append( + "highp vec4 gl_Position;" // needs qualifier fixed later + "mediump float gl_PointSize;" // needs qualifier fixed later + ); + } else { + if (spvVersion.vulkan == 0) + stageBuiltins[EShLangVertex].append( + "in highp int gl_VertexID;" // needs qualifier fixed later + "in highp int gl_InstanceID;" // needs qualifier fixed later + ); + if (spvVersion.vulkan > 0) +#endif + stageBuiltins[EShLangVertex].append( + "in highp int gl_VertexIndex;" + "in highp int gl_InstanceIndex;" + ); +#ifndef GLSLANG_WEB + if (version < 310) +#endif + stageBuiltins[EShLangVertex].append( + "highp vec4 gl_Position;" // needs qualifier fixed later + "highp float gl_PointSize;" // needs qualifier fixed later + ); +#ifndef GLSLANG_WEB + else + stageBuiltins[EShLangVertex].append( + "out gl_PerVertex {" + "highp vec4 gl_Position;" // needs qualifier fixed later + "highp float gl_PointSize;" // needs qualifier fixed later + "};" + ); + } + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangVertex].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + if (version >= 300 /* both ES and non-ES */) { + stageBuiltins[EShLangVertex].append( + "in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2 + "\n"); + } + + + //============================================================================ + // + // Define the interface to the geometry shader. 
+ // + //============================================================================ + + if (profile == ECoreProfile || profile == ECompatibilityProfile) { + stageBuiltins[EShLangGeometry].append( + "in gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (profile == ECompatibilityProfile) + stageBuiltins[EShLangGeometry].append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangGeometry].append( + "float gl_CullDistance[];" + "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + stageBuiltins[EShLangGeometry].append( + "} gl_in[];" + + "in int gl_PrimitiveIDIn;" + "out gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + "\n"); + if (profile == ECompatibilityProfile && version >= 400) + stageBuiltins[EShLangGeometry].append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangGeometry].append( + "float gl_CullDistance[];" + ); + stageBuiltins[EShLangGeometry].append( + "};" + + "out int gl_PrimitiveID;" + "out int gl_Layer;"); + + if (version >= 150) + stageBuiltins[EShLangGeometry].append( + "out int gl_ViewportIndex;" + ); + + if (profile == ECompatibilityProfile && version < 400) + stageBuiltins[EShLangGeometry].append( + "out vec4 gl_ClipVertex;" + ); + + if (version >= 400) + stageBuiltins[EShLangGeometry].append( + "in int gl_InvocationID;" + ); + + if (version >= 450) + stageBuiltins[EShLangGeometry].append( + "out int gl_ViewportMask[];" // GL_NV_viewport_array2 + "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering + "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + + stageBuiltins[EShLangGeometry].append("\n"); + } else if (profile == EEsProfile && version >= 310) { + stageBuiltins[EShLangGeometry].append( + "in gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + "} gl_in[];" + "\n" + "in highp int gl_PrimitiveIDIn;" + "in highp int gl_InvocationID;" + "\n" + "out gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + "};" + "\n" + "out highp int gl_PrimitiveID;" + "out highp int gl_Layer;" + "\n" + ); + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangGeometry].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + //============================================================================ + // + // Define the interface to the tessellation control shader. + // + //============================================================================ + + if (profile != EEsProfile && version >= 150) { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. 
+ + stageBuiltins[EShLangTessControl].append( + "in int gl_PatchVerticesIn;" + "in int gl_PrimitiveID;" + "in int gl_InvocationID;" + + "out gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (profile == ECompatibilityProfile) + stageBuiltins[EShLangTessControl].append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangTessControl].append( + "float gl_CullDistance[];" + "int gl_ViewportMask[];" // GL_NV_viewport_array2 + "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering + "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + stageBuiltins[EShLangTessControl].append( + "} gl_out[];" + + "patch out float gl_TessLevelOuter[4];" + "patch out float gl_TessLevelInner[2];" + "\n"); + + if (version >= 410) + stageBuiltins[EShLangTessControl].append( + "out int gl_ViewportIndex;" + "out int gl_Layer;" + "\n"); + + } else { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. + + stageBuiltins[EShLangTessControl].append( + "in highp int gl_PatchVerticesIn;" + "in highp int gl_PrimitiveID;" + "in highp int gl_InvocationID;" + + "out gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + ); + stageBuiltins[EShLangTessControl].append( + "} gl_out[];" + + "patch out highp float gl_TessLevelOuter[4];" + "patch out highp float gl_TessLevelInner[2];" + "patch out highp vec4 gl_BoundingBoxOES[2];" + "patch out highp vec4 gl_BoundingBoxEXT[2];" + "\n"); + if (profile == EEsProfile && version >= 320) { + stageBuiltins[EShLangTessControl].append( + "patch out highp vec4 gl_BoundingBox[2];" + "\n" + ); + } + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangTessControl].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + //============================================================================ + // + // Define the interface to the tessellation evaluation shader. + // + //============================================================================ + + if (profile != EEsProfile && version >= 150) { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. 
+ + stageBuiltins[EShLangTessEvaluation].append( + "in int gl_PatchVerticesIn;" + "in int gl_PrimitiveID;" + "in vec3 gl_TessCoord;" + + "patch in float gl_TessLevelOuter[4];" + "patch in float gl_TessLevelInner[2];" + + "out gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (version >= 400 && profile == ECompatibilityProfile) + stageBuiltins[EShLangTessEvaluation].append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (version >= 450) + stageBuiltins[EShLangTessEvaluation].append( + "float gl_CullDistance[];" + ); + stageBuiltins[EShLangTessEvaluation].append( + "};" + "\n"); + + if (version >= 410) + stageBuiltins[EShLangTessEvaluation].append( + "out int gl_ViewportIndex;" + "out int gl_Layer;" + "\n"); + + if (version >= 450) + stageBuiltins[EShLangTessEvaluation].append( + "out int gl_ViewportMask[];" // GL_NV_viewport_array2 + "out vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "out int gl_SecondaryViewportMaskNV[];" // GL_NV_stereo_view_rendering + "out vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "out int gl_ViewportMaskPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + + } else if (profile == EEsProfile && version >= 310) { + // Note: "in gl_PerVertex {...} gl_in[gl_MaxPatchVertices];" is declared in initialize() below, + // as it depends on the resource sizing of gl_MaxPatchVertices. + + stageBuiltins[EShLangTessEvaluation].append( + "in highp int gl_PatchVerticesIn;" + "in highp int gl_PrimitiveID;" + "in highp vec3 gl_TessCoord;" + + "patch in highp float gl_TessLevelOuter[4];" + "patch in highp float gl_TessLevelInner[2];" + + "out gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + ); + stageBuiltins[EShLangTessEvaluation].append( + "};" + "\n"); + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangTessEvaluation].append( + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + + //============================================================================ + // + // Define the interface to the fragment shader. + // + //============================================================================ + + if (profile != EEsProfile) { + + stageBuiltins[EShLangFragment].append( + "vec4 gl_FragCoord;" // needs qualifier fixed later + "bool gl_FrontFacing;" // needs qualifier fixed later + "float gl_FragDepth;" // needs qualifier fixed later + ); + if (version >= 120) + stageBuiltins[EShLangFragment].append( + "vec2 gl_PointCoord;" // needs qualifier fixed later + ); + if (version >= 140) + stageBuiltins[EShLangFragment].append( + "out int gl_FragStencilRefARB;" + ); + if (IncludeLegacy(version, profile, spvVersion) || (! 
ForwardCompatibility && version < 420)) + stageBuiltins[EShLangFragment].append( + "vec4 gl_FragColor;" // needs qualifier fixed later + ); + + if (version < 130) { + stageBuiltins[EShLangFragment].append( + "varying vec4 gl_Color;" + "varying vec4 gl_SecondaryColor;" + "varying vec4 gl_TexCoord[];" + "varying float gl_FogFragCoord;" + ); + } else { + stageBuiltins[EShLangFragment].append( + "in float gl_ClipDistance[];" + ); + + if (IncludeLegacy(version, profile, spvVersion)) { + if (version < 150) + stageBuiltins[EShLangFragment].append( + "in float gl_FogFragCoord;" + "in vec4 gl_TexCoord[];" + "in vec4 gl_Color;" + "in vec4 gl_SecondaryColor;" + ); + else + stageBuiltins[EShLangFragment].append( + "in gl_PerFragment {" + "in float gl_FogFragCoord;" + "in vec4 gl_TexCoord[];" + "in vec4 gl_Color;" + "in vec4 gl_SecondaryColor;" + "};" + ); + } + } + + if (version >= 150) + stageBuiltins[EShLangFragment].append( + "flat in int gl_PrimitiveID;" + ); + + if (version >= 130) { // ARB_sample_shading + stageBuiltins[EShLangFragment].append( + "flat in int gl_SampleID;" + " in vec2 gl_SamplePosition;" + " out int gl_SampleMask[];" + ); + + if (spvVersion.spv == 0) { + stageBuiltins[EShLangFragment].append( + "uniform int gl_NumSamples;" + ); + } + } + + if (version >= 400) + stageBuiltins[EShLangFragment].append( + "flat in int gl_SampleMaskIn[];" + ); + + if (version >= 430) + stageBuiltins[EShLangFragment].append( + "flat in int gl_Layer;" + "flat in int gl_ViewportIndex;" + ); + + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "in float gl_CullDistance[];" + "bool gl_HelperInvocation;" // needs qualifier fixed later + ); + + if (version >= 450) + stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density + "flat in ivec2 gl_FragSizeEXT;" + "flat in int gl_FragInvocationCountEXT;" + ); + + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "in vec2 gl_BaryCoordNoPerspAMD;" + "in vec2 gl_BaryCoordNoPerspCentroidAMD;" + "in vec2 gl_BaryCoordNoPerspSampleAMD;" + "in vec2 gl_BaryCoordSmoothAMD;" + "in vec2 gl_BaryCoordSmoothCentroidAMD;" + "in vec2 gl_BaryCoordSmoothSampleAMD;" + "in vec3 gl_BaryCoordPullModelAMD;" + ); + + if (version >= 430) + stageBuiltins[EShLangFragment].append( + "in bool gl_FragFullyCoveredNV;" + ); + if (version >= 450) + stageBuiltins[EShLangFragment].append( + "flat in ivec2 gl_FragmentSizeNV;" // GL_NV_shading_rate_image + "flat in int gl_InvocationsPerPixelNV;" + "in vec3 gl_BaryCoordNV;" // GL_NV_fragment_shader_barycentric + "in vec3 gl_BaryCoordNoPerspNV;" + ); + + } else { + // ES profile + + if (version == 100) { + stageBuiltins[EShLangFragment].append( + "mediump vec4 gl_FragCoord;" // needs qualifier fixed later + " bool gl_FrontFacing;" // needs qualifier fixed later + "mediump vec4 gl_FragColor;" // needs qualifier fixed later + "mediump vec2 gl_PointCoord;" // needs qualifier fixed later + ); + } +#endif + if (version >= 300) { + stageBuiltins[EShLangFragment].append( + "highp vec4 gl_FragCoord;" // needs qualifier fixed later + " bool gl_FrontFacing;" // needs qualifier fixed later + "mediump vec2 gl_PointCoord;" // needs qualifier fixed later + "highp float gl_FragDepth;" // needs qualifier fixed later + ); + } +#ifndef GLSLANG_WEB + if (version >= 310) { + stageBuiltins[EShLangFragment].append( + "bool gl_HelperInvocation;" // needs qualifier fixed later + "flat in highp int gl_PrimitiveID;" // needs qualifier fixed later + "flat in highp int gl_Layer;" // needs qualifier fixed later + ); + + 
stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables + "flat in lowp int gl_SampleID;" + " in mediump vec2 gl_SamplePosition;" + "flat in highp int gl_SampleMaskIn[];" + " out highp int gl_SampleMask[];" + ); + if (spvVersion.spv == 0) + stageBuiltins[EShLangFragment].append( // GL_OES_sample_variables + "uniform lowp int gl_NumSamples;" + ); + } + stageBuiltins[EShLangFragment].append( + "highp float gl_FragDepthEXT;" // GL_EXT_frag_depth + ); + + if (version >= 310) + stageBuiltins[EShLangFragment].append( // GL_EXT_fragment_invocation_density + "flat in ivec2 gl_FragSizeEXT;" + "flat in int gl_FragInvocationCountEXT;" + ); + if (version >= 320) + stageBuiltins[EShLangFragment].append( // GL_NV_shading_rate_image + "flat in ivec2 gl_FragmentSizeNV;" + "flat in int gl_InvocationsPerPixelNV;" + ); + if (version >= 320) + stageBuiltins[EShLangFragment].append( + "in vec3 gl_BaryCoordNV;" + "in vec3 gl_BaryCoordNoPerspNV;" + ); + } +#endif + + stageBuiltins[EShLangFragment].append("\n"); + + if (version >= 130) + add2ndGenerationSamplingImaging(version, profile, spvVersion); + +#ifndef GLSLANG_WEB + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + stageBuiltins[EShLangFragment].append( + "flat in highp int gl_DeviceIndex;" // GL_EXT_device_group + "flat in highp int gl_ViewIndex;" // GL_EXT_multiview + "\n"); + } + +#ifndef GLSLANG_ANGLE + // GL_ARB_shader_ballot + if (profile != EEsProfile && version >= 450) { + const char* ballotDecls = + "uniform uint gl_SubGroupSizeARB;" + "in uint gl_SubGroupInvocationARB;" + "in uint64_t gl_SubGroupEqMaskARB;" + "in uint64_t gl_SubGroupGeMaskARB;" + "in uint64_t gl_SubGroupGtMaskARB;" + "in uint64_t gl_SubGroupLeMaskARB;" + "in uint64_t gl_SubGroupLtMaskARB;" + "\n"; + const char* fragmentBallotDecls = + "uniform uint gl_SubGroupSizeARB;" + "flat in uint gl_SubGroupInvocationARB;" + "flat in uint64_t gl_SubGroupEqMaskARB;" + "flat in uint64_t gl_SubGroupGeMaskARB;" + "flat in uint64_t gl_SubGroupGtMaskARB;" + "flat in uint64_t gl_SubGroupLeMaskARB;" + "flat in uint64_t gl_SubGroupLtMaskARB;" + "\n"; + stageBuiltins[EShLangVertex] .append(ballotDecls); + stageBuiltins[EShLangTessControl] .append(ballotDecls); + stageBuiltins[EShLangTessEvaluation].append(ballotDecls); + stageBuiltins[EShLangGeometry] .append(ballotDecls); + stageBuiltins[EShLangCompute] .append(ballotDecls); + stageBuiltins[EShLangFragment] .append(fragmentBallotDecls); + stageBuiltins[EShLangMeshNV] .append(ballotDecls); + stageBuiltins[EShLangTaskNV] .append(ballotDecls); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + const char* subgroupDecls = + "in mediump uint gl_SubgroupSize;" + "in mediump uint gl_SubgroupInvocationID;" + "in highp uvec4 gl_SubgroupEqMask;" + "in highp uvec4 gl_SubgroupGeMask;" + "in highp uvec4 gl_SubgroupGtMask;" + "in highp uvec4 gl_SubgroupLeMask;" + "in highp uvec4 gl_SubgroupLtMask;" + // GL_NV_shader_sm_builtins + "in highp uint gl_WarpsPerSMNV;" + "in highp uint gl_SMCountNV;" + "in highp uint gl_WarpIDNV;" + "in highp uint gl_SMIDNV;" + "\n"; + const char* fragmentSubgroupDecls = + "flat in mediump uint gl_SubgroupSize;" + "flat in mediump uint gl_SubgroupInvocationID;" + "flat in highp uvec4 gl_SubgroupEqMask;" + "flat in highp uvec4 gl_SubgroupGeMask;" + "flat in highp uvec4 gl_SubgroupGtMask;" + "flat in highp uvec4 gl_SubgroupLeMask;" + "flat in highp uvec4 gl_SubgroupLtMask;" + // 
GL_NV_shader_sm_builtins + "flat in highp uint gl_WarpsPerSMNV;" + "flat in highp uint gl_SMCountNV;" + "flat in highp uint gl_WarpIDNV;" + "flat in highp uint gl_SMIDNV;" + "\n"; + const char* computeSubgroupDecls = + "in highp uint gl_NumSubgroups;" + "in highp uint gl_SubgroupID;" + "\n"; + + stageBuiltins[EShLangVertex] .append(subgroupDecls); + stageBuiltins[EShLangTessControl] .append(subgroupDecls); + stageBuiltins[EShLangTessEvaluation].append(subgroupDecls); + stageBuiltins[EShLangGeometry] .append(subgroupDecls); + stageBuiltins[EShLangCompute] .append(subgroupDecls); + stageBuiltins[EShLangCompute] .append(computeSubgroupDecls); + stageBuiltins[EShLangFragment] .append(fragmentSubgroupDecls); + stageBuiltins[EShLangMeshNV] .append(subgroupDecls); + stageBuiltins[EShLangMeshNV] .append(computeSubgroupDecls); + stageBuiltins[EShLangTaskNV] .append(subgroupDecls); + stageBuiltins[EShLangTaskNV] .append(computeSubgroupDecls); + stageBuiltins[EShLangRayGen] .append(subgroupDecls); + stageBuiltins[EShLangIntersect] .append(subgroupDecls); + stageBuiltins[EShLangAnyHit] .append(subgroupDecls); + stageBuiltins[EShLangClosestHit] .append(subgroupDecls); + stageBuiltins[EShLangMiss] .append(subgroupDecls); + stageBuiltins[EShLangCallable] .append(subgroupDecls); + } + + // GL_NV_ray_tracing/GL_EXT_ray_tracing + if (profile != EEsProfile && version >= 460) { + + const char *constRayFlags = + "const uint gl_RayFlagsNoneNV = 0U;" + "const uint gl_RayFlagsNoneEXT = 0U;" + "const uint gl_RayFlagsOpaqueNV = 1U;" + "const uint gl_RayFlagsOpaqueEXT = 1U;" + "const uint gl_RayFlagsNoOpaqueNV = 2U;" + "const uint gl_RayFlagsNoOpaqueEXT = 2U;" + "const uint gl_RayFlagsTerminateOnFirstHitNV = 4U;" + "const uint gl_RayFlagsTerminateOnFirstHitEXT = 4U;" + "const uint gl_RayFlagsSkipClosestHitShaderNV = 8U;" + "const uint gl_RayFlagsSkipClosestHitShaderEXT = 8U;" + "const uint gl_RayFlagsCullBackFacingTrianglesNV = 16U;" + "const uint gl_RayFlagsCullBackFacingTrianglesEXT = 16U;" + "const uint gl_RayFlagsCullFrontFacingTrianglesNV = 32U;" + "const uint gl_RayFlagsCullFrontFacingTrianglesEXT = 32U;" + "const uint gl_RayFlagsCullOpaqueNV = 64U;" + "const uint gl_RayFlagsCullOpaqueEXT = 64U;" + "const uint gl_RayFlagsCullNoOpaqueNV = 128U;" + "const uint gl_RayFlagsCullNoOpaqueEXT = 128U;" + "const uint gl_RayFlagsSkipTrianglesEXT = 256U;" + "const uint gl_RayFlagsSkipAABBEXT = 512U;" + "const uint gl_HitKindFrontFacingTriangleEXT = 254U;" + "const uint gl_HitKindBackFacingTriangleEXT = 255U;" + "\n"; + + const char *constRayQueryIntersection = + "const uint gl_RayQueryCandidateIntersectionEXT = 0U;" + "const uint gl_RayQueryCommittedIntersectionEXT = 1U;" + "const uint gl_RayQueryCommittedIntersectionNoneEXT = 0U;" + "const uint gl_RayQueryCommittedIntersectionTriangleEXT = 1U;" + "const uint gl_RayQueryCommittedIntersectionGeneratedEXT = 2U;" + "const uint gl_RayQueryCandidateIntersectionTriangleEXT = 0U;" + "const uint gl_RayQueryCandidateIntersectionAABBEXT = 1U;" + "\n"; + + const char *rayGenDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "\n"; + const char *intersectDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "in int gl_PrimitiveID;" + "in int gl_InstanceID;" + "in int gl_InstanceCustomIndexNV;" + "in int gl_InstanceCustomIndexEXT;" + "in int gl_GeometryIndexEXT;" + "in vec3 gl_WorldRayOriginNV;" + "in vec3 gl_WorldRayOriginEXT;" + "in 
vec3 gl_WorldRayDirectionNV;" + "in vec3 gl_WorldRayDirectionEXT;" + "in vec3 gl_ObjectRayOriginNV;" + "in vec3 gl_ObjectRayOriginEXT;" + "in vec3 gl_ObjectRayDirectionNV;" + "in vec3 gl_ObjectRayDirectionEXT;" + "in float gl_RayTminNV;" + "in float gl_RayTminEXT;" + "in float gl_RayTmaxNV;" + "in float gl_RayTmaxEXT;" + "in mat4x3 gl_ObjectToWorldNV;" + "in mat4x3 gl_ObjectToWorldEXT;" + "in mat3x4 gl_ObjectToWorld3x4EXT;" + "in mat4x3 gl_WorldToObjectNV;" + "in mat4x3 gl_WorldToObjectEXT;" + "in mat3x4 gl_WorldToObject3x4EXT;" + "in uint gl_IncomingRayFlagsNV;" + "in uint gl_IncomingRayFlagsEXT;" + "\n"; + const char *hitDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "in int gl_PrimitiveID;" + "in int gl_InstanceID;" + "in int gl_InstanceCustomIndexNV;" + "in int gl_InstanceCustomIndexEXT;" + "in int gl_GeometryIndexEXT;" + "in vec3 gl_WorldRayOriginNV;" + "in vec3 gl_WorldRayOriginEXT;" + "in vec3 gl_WorldRayDirectionNV;" + "in vec3 gl_WorldRayDirectionEXT;" + "in vec3 gl_ObjectRayOriginNV;" + "in vec3 gl_ObjectRayOriginEXT;" + "in vec3 gl_ObjectRayDirectionNV;" + "in vec3 gl_ObjectRayDirectionEXT;" + "in float gl_RayTminNV;" + "in float gl_RayTminEXT;" + "in float gl_RayTmaxNV;" + "in float gl_RayTmaxEXT;" + "in float gl_HitTNV;" + "in float gl_HitTEXT;" + "in uint gl_HitKindNV;" + "in uint gl_HitKindEXT;" + "in mat4x3 gl_ObjectToWorldNV;" + "in mat4x3 gl_ObjectToWorldEXT;" + "in mat3x4 gl_ObjectToWorld3x4EXT;" + "in mat4x3 gl_WorldToObjectNV;" + "in mat4x3 gl_WorldToObjectEXT;" + "in mat3x4 gl_WorldToObject3x4EXT;" + "in uint gl_IncomingRayFlagsNV;" + "in uint gl_IncomingRayFlagsEXT;" + "\n"; + const char *missDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "in vec3 gl_WorldRayOriginNV;" + "in vec3 gl_WorldRayOriginEXT;" + "in vec3 gl_WorldRayDirectionNV;" + "in vec3 gl_WorldRayDirectionEXT;" + "in vec3 gl_ObjectRayOriginNV;" + "in vec3 gl_ObjectRayDirectionNV;" + "in float gl_RayTminNV;" + "in float gl_RayTminEXT;" + "in float gl_RayTmaxNV;" + "in float gl_RayTmaxEXT;" + "in uint gl_IncomingRayFlagsNV;" + "in uint gl_IncomingRayFlagsEXT;" + "\n"; + + const char *callableDecls = + "in uvec3 gl_LaunchIDNV;" + "in uvec3 gl_LaunchIDEXT;" + "in uvec3 gl_LaunchSizeNV;" + "in uvec3 gl_LaunchSizeEXT;" + "\n"; + + + commonBuiltins.append(constRayQueryIntersection); + commonBuiltins.append(constRayFlags); + + stageBuiltins[EShLangRayGen].append(rayGenDecls); + stageBuiltins[EShLangIntersect].append(intersectDecls); + stageBuiltins[EShLangAnyHit].append(hitDecls); + stageBuiltins[EShLangClosestHit].append(hitDecls); + stageBuiltins[EShLangMiss].append(missDecls); + stageBuiltins[EShLangCallable].append(callableDecls); + + } + + if ((profile != EEsProfile && version >= 140)) { + const char *deviceIndex = + "in highp int gl_DeviceIndex;" // GL_EXT_device_group + "\n"; + + stageBuiltins[EShLangRayGen].append(deviceIndex); + stageBuiltins[EShLangIntersect].append(deviceIndex); + stageBuiltins[EShLangAnyHit].append(deviceIndex); + stageBuiltins[EShLangClosestHit].append(deviceIndex); + stageBuiltins[EShLangMiss].append(deviceIndex); + } + + if ((profile != EEsProfile && version >= 420) || + (profile == EEsProfile && version >= 310)) { + commonBuiltins.append("const int gl_ScopeDevice = 1;\n"); + commonBuiltins.append("const int gl_ScopeWorkgroup = 2;\n"); + commonBuiltins.append("const int gl_ScopeSubgroup = 3;\n"); + 
commonBuiltins.append("const int gl_ScopeInvocation = 4;\n"); + commonBuiltins.append("const int gl_ScopeQueueFamily = 5;\n"); + commonBuiltins.append("const int gl_ScopeShaderCallEXT = 6;\n"); + + commonBuiltins.append("const int gl_SemanticsRelaxed = 0x0;\n"); + commonBuiltins.append("const int gl_SemanticsAcquire = 0x2;\n"); + commonBuiltins.append("const int gl_SemanticsRelease = 0x4;\n"); + commonBuiltins.append("const int gl_SemanticsAcquireRelease = 0x8;\n"); + commonBuiltins.append("const int gl_SemanticsMakeAvailable = 0x2000;\n"); + commonBuiltins.append("const int gl_SemanticsMakeVisible = 0x4000;\n"); + commonBuiltins.append("const int gl_SemanticsVolatile = 0x8000;\n"); + + commonBuiltins.append("const int gl_StorageSemanticsNone = 0x0;\n"); + commonBuiltins.append("const int gl_StorageSemanticsBuffer = 0x40;\n"); + commonBuiltins.append("const int gl_StorageSemanticsShared = 0x100;\n"); + commonBuiltins.append("const int gl_StorageSemanticsImage = 0x800;\n"); + commonBuiltins.append("const int gl_StorageSemanticsOutput = 0x1000;\n"); + } + +#endif // !GLSLANG_ANGLE + + if (version >= 300 /* both ES and non-ES */) { + stageBuiltins[EShLangFragment].append( + "flat in highp uint gl_ViewID_OVR;" // GL_OVR_multiview, GL_OVR_multiview2 + "\n"); + } +#endif // !GLSLANG_WEB + + // printf("%s\n", commonBuiltins.c_str()); + // printf("%s\n", stageBuiltins[EShLangFragment].c_str()); +} + +// +// Helper function for initialize(), to add the second set of names for texturing, +// when adding context-independent built-in functions. +// +void TBuiltIns::add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion) +{ + // + // In this function proper, enumerate the types, then calls the next set of functions + // to enumerate all the uses for that type. 
+    //
+
+    // enumerate all the types
+    const TBasicType bTypes[] = { EbtFloat, EbtInt, EbtUint,
+#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
+        EbtFloat16
+#endif
+    };
+#ifdef GLSLANG_WEB
+    bool skipBuffer = true;
+    bool skipCubeArrayed = true;
+    const int image = 0;
+#else
+    bool skipBuffer = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 140);
+    bool skipCubeArrayed = (profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 130);
+    for (int image = 0; image <= 1; ++image) // loop over "bool" image vs sampler
+#endif
+    {
+        for (int shadow = 0; shadow <= 1; ++shadow) { // loop over "bool" shadow or not
+#ifdef GLSLANG_WEB
+            const int ms = 0;
+#else
+            for (int ms = 0; ms <= 1; ++ms) // loop over "bool" multisample or not
+#endif
+            {
+                if ((ms || image) && shadow)
+                    continue;
+                if (ms && profile != EEsProfile && version < 150)
+                    continue;
+                if (ms && image && profile == EEsProfile)
+                    continue;
+                if (ms && profile == EEsProfile && version < 310)
+                    continue;
+
+                for (int arrayed = 0; arrayed <= 1; ++arrayed) { // loop over "bool" arrayed or not
+#ifdef GLSLANG_WEB
+                    for (int dim = Esd2D; dim <= EsdCube; ++dim) { // 2D, 3D, and Cube
+#else
+#if defined(GLSLANG_ANGLE)
+                    for (int dim = Esd2D; dim < EsdNumDims; ++dim) { // 2D, ..., buffer, subpass
+#else
+                    for (int dim = Esd1D; dim < EsdNumDims; ++dim) { // 1D, ..., buffer, subpass
+#endif
+                        if (dim == EsdSubpass && spvVersion.vulkan == 0)
+                            continue;
+                        if (dim == EsdSubpass && (image || shadow || arrayed))
+                            continue;
+                        if ((dim == Esd1D || dim == EsdRect) && profile == EEsProfile)
+                            continue;
+                        if (dim != Esd2D && dim != EsdSubpass && ms)
+                            continue;
+                        if (dim == EsdBuffer && skipBuffer)
+                            continue;
+                        if (dim == EsdBuffer && (shadow || arrayed || ms))
+                            continue;
+                        if (ms && arrayed && profile == EEsProfile && version < 310)
+                            continue;
+#endif
+                        if (dim == Esd3D && shadow)
+                            continue;
+                        if (dim == EsdCube && arrayed && skipCubeArrayed)
+                            continue;
+                        if ((dim == Esd3D || dim == EsdRect) && arrayed)
+                            continue;
+
+                        // Loop over the bTypes
+                        for (size_t bType = 0; bType < sizeof(bTypes)/sizeof(TBasicType); ++bType) {
+#ifndef GLSLANG_WEB
+                            if (bTypes[bType] == EbtFloat16 && (profile == EEsProfile || version < 450))
+                                continue;
+                            if (dim == EsdRect && version < 140 && bType > 0)
+                                continue;
+#endif
+                            if (shadow && (bTypes[bType] == EbtInt || bTypes[bType] == EbtUint))
+                                continue;
+
+                            //
+                            // Now, make all the function prototypes for the type we just built...
+                            //
+                            TSampler sampler;
+#ifndef GLSLANG_WEB
+                            if (dim == EsdSubpass) {
+                                sampler.setSubpass(bTypes[bType], ms ? true : false);
+                            } else
+#endif
+                            if (image) {
+                                sampler.setImage(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
+                                                 shadow ? true : false,
+                                                 ms ? true : false);
+                            } else {
+                                sampler.set(bTypes[bType], (TSamplerDim)dim, arrayed ? true : false,
+                                            shadow ? true : false,
+                                            ms ?
true : false); + } + + TString typeName = sampler.getString(); + +#ifndef GLSLANG_WEB + if (dim == EsdSubpass) { + addSubpassSampling(sampler, typeName, version, profile); + continue; + } +#endif + + addQueryFunctions(sampler, typeName, version, profile); + + if (image) + addImageFunctions(sampler, typeName, version, profile); + else { + addSamplingFunctions(sampler, typeName, version, profile); +#ifndef GLSLANG_WEB + addGatherFunctions(sampler, typeName, version, profile); + if (spvVersion.vulkan > 0 && sampler.isCombined() && !sampler.shadow) { + // Base Vulkan allows texelFetch() for + // textureBuffer (i.e. without sampler). + // + // GL_EXT_samplerless_texture_functions + // allows texelFetch() and query functions + // (other than textureQueryLod()) for all + // texture types. + sampler.setTexture(sampler.type, sampler.dim, sampler.arrayed, sampler.shadow, + sampler.ms); + TString textureTypeName = sampler.getString(); + addSamplingFunctions(sampler, textureTypeName, version, profile); + addQueryFunctions(sampler, textureTypeName, version, profile); + } +#endif + } + } + } + } + } + } + } + + // + // sparseTexelsResidentARB() + // + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append("bool sparseTexelsResidentARB(int code);\n"); + } +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the query functions for the given type. +// +void TBuiltIns::addQueryFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ + // + // textureSize() and imageSize() + // + + int sizeDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0) - (sampler.dim == EsdCube ? 1 : 0); + +#ifdef GLSLANG_WEB + commonBuiltins.append("highp "); + commonBuiltins.append("ivec"); + commonBuiltins.append(postfixes[sizeDims]); + commonBuiltins.append(" textureSize("); + commonBuiltins.append(typeName); + commonBuiltins.append(",int);\n"); + return; +#endif + + if (sampler.isImage() && ((profile == EEsProfile && version < 310) || (profile != EEsProfile && version < 420))) + return; + + if (profile == EEsProfile) + commonBuiltins.append("highp "); + if (sizeDims == 1) + commonBuiltins.append("int"); + else { + commonBuiltins.append("ivec"); + commonBuiltins.append(postfixes[sizeDims]); + } + if (sampler.isImage()) + commonBuiltins.append(" imageSize(readonly writeonly volatile coherent "); + else + commonBuiltins.append(" textureSize("); + commonBuiltins.append(typeName); + if (! sampler.isImage() && ! sampler.isRect() && ! sampler.isBuffer() && ! sampler.isMultiSample()) + commonBuiltins.append(",int);\n"); + else + commonBuiltins.append(");\n"); + + // + // textureSamples() and imageSamples() + // + + // GL_ARB_shader_texture_image_samples + // TODO: spec issue? there are no memory qualifiers; how to query a writeonly/readonly image, etc? + if (profile != EEsProfile && version >= 430 && sampler.isMultiSample()) { + commonBuiltins.append("int "); + if (sampler.isImage()) + commonBuiltins.append("imageSamples(readonly writeonly volatile coherent "); + else + commonBuiltins.append("textureSamples("); + commonBuiltins.append(typeName); + commonBuiltins.append(");\n"); + } + + // + // textureQueryLod(), fragment stage only + // Also enabled with extension GL_ARB_texture_query_lod + + if (profile != EEsProfile && version >= 150 && sampler.isCombined() && sampler.dim != EsdRect && + ! sampler.isMultiSample() && ! 
sampler.isBuffer()) { + for (int f16TexAddr = 0; f16TexAddr < 2; ++f16TexAddr) { + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + stageBuiltins[EShLangFragment].append("vec2 textureQueryLod("); + stageBuiltins[EShLangFragment].append(typeName); + if (dimMap[sampler.dim] == 1) + if (f16TexAddr) + stageBuiltins[EShLangFragment].append(", float16_t"); + else + stageBuiltins[EShLangFragment].append(", float"); + else { + if (f16TexAddr) + stageBuiltins[EShLangFragment].append(", f16vec"); + else + stageBuiltins[EShLangFragment].append(", vec"); + stageBuiltins[EShLangFragment].append(postfixes[dimMap[sampler.dim]]); + } + stageBuiltins[EShLangFragment].append(");\n"); + } + + stageBuiltins[EShLangCompute].append("vec2 textureQueryLod("); + stageBuiltins[EShLangCompute].append(typeName); + if (dimMap[sampler.dim] == 1) + stageBuiltins[EShLangCompute].append(", float"); + else { + stageBuiltins[EShLangCompute].append(", vec"); + stageBuiltins[EShLangCompute].append(postfixes[dimMap[sampler.dim]]); + } + stageBuiltins[EShLangCompute].append(");\n"); + } + + // + // textureQueryLevels() + // + + if (profile != EEsProfile && version >= 430 && ! sampler.isImage() && sampler.dim != EsdRect && + ! sampler.isMultiSample() && ! sampler.isBuffer()) { + commonBuiltins.append("int textureQueryLevels("); + commonBuiltins.append(typeName); + commonBuiltins.append(");\n"); + } +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the image access functions for the given type. +// +void TBuiltIns::addImageFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ + int dims = dimMap[sampler.dim]; + // most things with an array add a dimension, except for cubemaps + if (sampler.arrayed && sampler.dim != EsdCube) + ++dims; + + TString imageParams = typeName; + if (dims == 1) + imageParams.append(", int"); + else { + imageParams.append(", ivec"); + imageParams.append(postfixes[dims]); + } + if (sampler.isMultiSample()) + imageParams.append(", int"); + + if (profile == EEsProfile) + commonBuiltins.append("highp "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4 imageLoad(readonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(");\n"); + + commonBuiltins.append("void imageStore(writeonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4);\n"); + + if (! sampler.is1D() && ! sampler.isBuffer() && profile != EEsProfile && version >= 450) { + commonBuiltins.append("int sparseImageLoadARB(readonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", out "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4"); + commonBuiltins.append(");\n"); + } + + if ( profile != EEsProfile || + (profile == EEsProfile && version >= 310)) { + if (sampler.type == EbtInt || sampler.type == EbtUint) { + const char* dataType = sampler.type == EbtInt ? 
"highp int" : "highp uint"; + + const int numBuiltins = 7; + + static const char* atomicFunc[numBuiltins] = { + " imageAtomicAdd(volatile coherent ", + " imageAtomicMin(volatile coherent ", + " imageAtomicMax(volatile coherent ", + " imageAtomicAnd(volatile coherent ", + " imageAtomicOr(volatile coherent ", + " imageAtomicXor(volatile coherent ", + " imageAtomicExchange(volatile coherent " + }; + + // Loop twice to add prototypes with/without scope/semantics + for (int j = 0; j < 2; ++j) { + for (size_t i = 0; i < numBuiltins; ++i) { + commonBuiltins.append(dataType); + commonBuiltins.append(atomicFunc[i]); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + if (j == 1) { + commonBuiltins.append(", int, int, int"); + } + commonBuiltins.append(");\n"); + } + + commonBuiltins.append(dataType); + commonBuiltins.append(" imageAtomicCompSwap(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + if (j == 1) { + commonBuiltins.append(", int, int, int, int, int"); + } + commonBuiltins.append(");\n"); + } + + commonBuiltins.append(dataType); + commonBuiltins.append(" imageAtomicLoad(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", int, int, int);\n"); + + commonBuiltins.append("void imageAtomicStore(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", "); + commonBuiltins.append(dataType); + commonBuiltins.append(", int, int, int);\n"); + + } else { + // not int or uint + // GL_ARB_ES3_1_compatibility + // TODO: spec issue: are there restrictions on the kind of layout() that can be used? what about dropping memory qualifiers? 
+ if (profile == EEsProfile && version >= 310) { + commonBuiltins.append("float imageAtomicExchange(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float);\n"); + } + if (profile != EEsProfile && version >= 450) { + commonBuiltins.append("float imageAtomicAdd(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float);\n"); + + commonBuiltins.append("float imageAtomicAdd(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float"); + commonBuiltins.append(", int, int, int);\n"); + + commonBuiltins.append("float imageAtomicExchange(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float);\n"); + + commonBuiltins.append("float imageAtomicExchange(volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float"); + commonBuiltins.append(", int, int, int);\n"); + + commonBuiltins.append("float imageAtomicLoad(readonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", int, int, int);\n"); + + commonBuiltins.append("void imageAtomicStore(writeonly volatile coherent "); + commonBuiltins.append(imageParams); + commonBuiltins.append(", float"); + commonBuiltins.append(", int, int, int);\n"); + } + } + } + + if (sampler.dim == EsdRect || sampler.dim == EsdBuffer || sampler.shadow || sampler.isMultiSample()) + return; + + if (profile == EEsProfile || version < 450) + return; + + TString imageLodParams = typeName; + if (dims == 1) + imageLodParams.append(", int"); + else { + imageLodParams.append(", ivec"); + imageLodParams.append(postfixes[dims]); + } + imageLodParams.append(", int"); + + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4 imageLoadLodAMD(readonly volatile coherent "); + commonBuiltins.append(imageLodParams); + commonBuiltins.append(");\n"); + + commonBuiltins.append("void imageStoreLodAMD(writeonly volatile coherent "); + commonBuiltins.append(imageLodParams); + commonBuiltins.append(", "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4);\n"); + + if (! sampler.is1D()) { + commonBuiltins.append("int sparseImageLoadLodAMD(readonly volatile coherent "); + commonBuiltins.append(imageLodParams); + commonBuiltins.append(", out "); + commonBuiltins.append(prefixes[sampler.type]); + commonBuiltins.append("vec4"); + commonBuiltins.append(");\n"); + } +} + +// +// Helper function for initialize(), +// when adding context-independent built-in functions. +// +// Add all the subpass access functions for the given type. +// +void TBuiltIns::addSubpassSampling(TSampler sampler, const TString& typeName, int /*version*/, EProfile /*profile*/) +{ + stageBuiltins[EShLangFragment].append(prefixes[sampler.type]); + stageBuiltins[EShLangFragment].append("vec4 subpassLoad"); + stageBuiltins[EShLangFragment].append("("); + stageBuiltins[EShLangFragment].append(typeName.c_str()); + if (sampler.isMultiSample()) + stageBuiltins[EShLangFragment].append(", int"); + stageBuiltins[EShLangFragment].append(");\n"); +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the texture lookup functions for the given type. 
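+// For orientation: the loops below assemble names piecewise (texture/texel,
+// then Proj, Lod, Grad, Fetch, Offset, Clamp, and the sparse.../...ARB
+// variants). A 2D shadow sampler with the lod and offset axes taken, for
+// instance, comes out roughly as
+//     float textureLodOffset(sampler2DShadow,vec3,float,ivec2);
+// and a sparse offset lookup on a plain 2D sampler roughly as
+//     int sparseTextureOffsetARB(sampler2D,vec2,ivec2,out vec4);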
+// +void TBuiltIns::addSamplingFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#elif defined(GLSLANG_ANGLE) + profile = ECoreProfile; + version = 450; +#endif + + // + // texturing + // + for (int proj = 0; proj <= 1; ++proj) { // loop over "bool" projective or not + + if (proj && (sampler.dim == EsdCube || sampler.isBuffer() || sampler.arrayed || sampler.isMultiSample() + || !sampler.isCombined())) + continue; + + for (int lod = 0; lod <= 1; ++lod) { + + if (lod && (sampler.isBuffer() || sampler.isRect() || sampler.isMultiSample() || !sampler.isCombined())) + continue; + if (lod && sampler.dim == Esd2D && sampler.arrayed && sampler.shadow) + continue; + if (lod && sampler.dim == EsdCube && sampler.shadow) + continue; + + for (int bias = 0; bias <= 1; ++bias) { + + if (bias && (lod || sampler.isMultiSample() || !sampler.isCombined())) + continue; + if (bias && (sampler.dim == Esd2D || sampler.dim == EsdCube) && sampler.shadow && sampler.arrayed) + continue; + if (bias && (sampler.isRect() || sampler.isBuffer())) + continue; + + for (int offset = 0; offset <= 1; ++offset) { // loop over "bool" offset or not + + if (proj + offset + bias + lod > 3) + continue; + if (offset && (sampler.dim == EsdCube || sampler.isBuffer() || sampler.isMultiSample())) + continue; + + for (int fetch = 0; fetch <= 1; ++fetch) { // loop over "bool" fetch or not + + if (proj + offset + fetch + bias + lod > 3) + continue; + if (fetch && (lod || bias)) + continue; + if (fetch && (sampler.shadow || sampler.dim == EsdCube)) + continue; + if (fetch == 0 && (sampler.isMultiSample() || sampler.isBuffer() + || !sampler.isCombined())) + continue; + + for (int grad = 0; grad <= 1; ++grad) { // loop over "bool" grad or not + + if (grad && (lod || bias || sampler.isMultiSample() || !sampler.isCombined())) + continue; + if (grad && sampler.isBuffer()) + continue; + if (proj + offset + fetch + grad + bias + lod > 3) + continue; + + for (int extraProj = 0; extraProj <= 1; ++extraProj) { + bool compare = false; + int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0); + // skip dummy unused second component for 1D non-array shadows + if (sampler.shadow && totalDims < 2) + totalDims = 2; + totalDims += (sampler.shadow ? 1 : 0) + proj; + if (totalDims > 4 && sampler.shadow) { + compare = true; + totalDims = 4; + } + assert(totalDims <= 4); + + if (extraProj && ! proj) + continue; + if (extraProj && (sampler.dim == Esd3D || sampler.shadow || !sampler.isCombined())) + continue; + + // loop over 16-bit floating-point texel addressing +#if defined(GLSLANG_WEB) || defined(GLSLANG_ANGLE) + const int f16TexAddr = 0; +#else + for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) +#endif + { + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + if (f16TexAddr && sampler.shadow && ! 
compare) { + compare = true; // compare argument is always present + totalDims--; + } + // loop over "bool" lod clamp +#if defined(GLSLANG_WEB) || defined(GLSLANG_ANGLE) + const int lodClamp = 0; +#else + for (int lodClamp = 0; lodClamp <= 1 ;++lodClamp) +#endif + { + if (lodClamp && (profile == EEsProfile || version < 450)) + continue; + if (lodClamp && (proj || lod || fetch)) + continue; + + // loop over "bool" sparse or not +#if defined(GLSLANG_WEB) || defined(GLSLANG_ANGLE) + const int sparse = 0; +#else + for (int sparse = 0; sparse <= 1; ++sparse) +#endif + { + if (sparse && (profile == EEsProfile || version < 450)) + continue; + // Sparse sampling is not for 1D/1D array texture, buffer texture, and + // projective texture + if (sparse && (sampler.is1D() || sampler.isBuffer() || proj)) + continue; + + TString s; + + // return type + if (sparse) + s.append("int "); + else { + if (sampler.shadow) + if (sampler.type == EbtFloat16) + s.append("float16_t "); + else + s.append("float "); + else { + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + } + + // name + if (sparse) { + if (fetch) + s.append("sparseTexel"); + else + s.append("sparseTexture"); + } + else { + if (fetch) + s.append("texel"); + else + s.append("texture"); + } + if (proj) + s.append("Proj"); + if (lod) + s.append("Lod"); + if (grad) + s.append("Grad"); + if (fetch) + s.append("Fetch"); + if (offset) + s.append("Offset"); + if (lodClamp) + s.append("Clamp"); + if (lodClamp != 0 || sparse) + s.append("ARB"); + s.append("("); + + // sampler type + s.append(typeName); + // P coordinate + if (extraProj) { + if (f16TexAddr) + s.append(",f16vec4"); + else + s.append(",vec4"); + } else { + s.append(","); + TBasicType t = fetch ? EbtInt : (f16TexAddr ? EbtFloat16 : EbtFloat); + if (totalDims == 1) + s.append(TType::getBasicString(t)); + else { + s.append(prefixes[t]); + s.append("vec"); + s.append(postfixes[totalDims]); + } + } + // non-optional compare + if (compare) + s.append(",float"); + + // non-optional lod argument (lod that's not driven by lod loop) or sample + if ((fetch && !sampler.isBuffer() && + !sampler.isRect() && !sampler.isMultiSample()) + || (sampler.isMultiSample() && fetch)) + s.append(",int"); + // non-optional lod + if (lod) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + + // gradient arguments + if (grad) { + if (dimMap[sampler.dim] == 1) { + if (f16TexAddr) + s.append(",float16_t,float16_t"); + else + s.append(",float,float"); + } else { + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + s.append(postfixes[dimMap[sampler.dim]]); + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + s.append(postfixes[dimMap[sampler.dim]]); + } + } + // offset + if (offset) { + if (dimMap[sampler.dim] == 1) + s.append(",int"); + else { + s.append(",ivec"); + s.append(postfixes[dimMap[sampler.dim]]); + } + } + + // lod clamp + if (lodClamp) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + // texel out (for sparse texture) + if (sparse) { + s.append(",out "); + if (sampler.shadow) + if (sampler.type == EbtFloat16) + s.append("float16_t"); + else + s.append("float"); + else { + s.append(prefixes[sampler.type]); + s.append("vec4"); + } + } + // optional bias + if (bias) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + s.append(");\n"); + + // Add to the per-language set of built-ins + if (bias || lodClamp != 0) { + stageBuiltins[EShLangFragment].append(s); + 
stageBuiltins[EShLangCompute].append(s); + } else + commonBuiltins.append(s); + + } + } + } + } + } + } + } + } + } + } +} + +// +// Helper function for add2ndGenerationSamplingImaging(), +// when adding context-independent built-in functions. +// +// Add all the texture gather functions for the given type. +// +void TBuiltIns::addGatherFunctions(TSampler sampler, const TString& typeName, int version, EProfile profile) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#elif defined(GLSLANG_ANGLE) + profile = ECoreProfile; + version = 450; +#endif + + switch (sampler.dim) { + case Esd2D: + case EsdRect: + case EsdCube: + break; + default: + return; + } + + if (sampler.isMultiSample()) + return; + + if (version < 140 && sampler.dim == EsdRect && sampler.type != EbtFloat) + return; + + for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing + + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets + + for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument + + if (comp > 0 && sampler.shadow) + continue; + + if (offset > 0 && sampler.dim == EsdCube) + continue; + + for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not + if (sparse && (profile == EEsProfile || version < 450)) + continue; + + TString s; + + // return type + if (sparse) + s.append("int "); + else { + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // name + if (sparse) + s.append("sparseTextureGather"); + else + s.append("textureGather"); + switch (offset) { + case 1: + s.append("Offset"); + break; + case 2: + s.append("Offsets"); + break; + default: + break; + } + if (sparse) + s.append("ARB"); + s.append("("); + + // sampler type argument + s.append(typeName); + + // P coordinate argument + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 
1 : 0); + s.append(postfixes[totalDims]); + + // refZ argument + if (sampler.shadow) + s.append(",float"); + + // offset argument + if (offset > 0) { + s.append(",ivec2"); + if (offset == 2) + s.append("[4]"); + } + + // texel out (for sparse texture) + if (sparse) { + s.append(",out "); + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // comp argument + if (comp) + s.append(",int"); + + s.append(");\n"); + commonBuiltins.append(s); + } + } + } + } + + if (sampler.dim == EsdRect || sampler.shadow) + return; + + if (profile == EEsProfile || version < 450) + return; + + for (int bias = 0; bias < 2; ++bias) { // loop over presence of bias argument + + for (int lod = 0; lod < 2; ++lod) { // loop over presence of lod argument + + if ((lod && bias) || (lod == 0 && bias == 0)) + continue; + + for (int f16TexAddr = 0; f16TexAddr <= 1; ++f16TexAddr) { // loop over 16-bit floating-point texel addressing + + if (f16TexAddr && sampler.type != EbtFloat16) + continue; + + for (int offset = 0; offset < 3; ++offset) { // loop over three forms of offset in the call name: none, Offset, and Offsets + + for (int comp = 0; comp < 2; ++comp) { // loop over presence of comp argument + + if (comp == 0 && bias) + continue; + + if (offset > 0 && sampler.dim == EsdCube) + continue; + + for (int sparse = 0; sparse <= 1; ++sparse) { // loop over "bool" sparse or not + if (sparse && (profile == EEsProfile || version < 450)) + continue; + + TString s; + + // return type + if (sparse) + s.append("int "); + else { + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // name + if (sparse) + s.append("sparseTextureGather"); + else + s.append("textureGather"); + + if (lod) + s.append("Lod"); + + switch (offset) { + case 1: + s.append("Offset"); + break; + case 2: + s.append("Offsets"); + break; + default: + break; + } + + if (lod) + s.append("AMD"); + else if (sparse) + s.append("ARB"); + + s.append("("); + + // sampler type argument + s.append(typeName); + + // P coordinate argument + if (f16TexAddr) + s.append(",f16vec"); + else + s.append(",vec"); + int totalDims = dimMap[sampler.dim] + (sampler.arrayed ? 1 : 0); + s.append(postfixes[totalDims]); + + // lod argument + if (lod) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + + // offset argument + if (offset > 0) { + s.append(",ivec2"); + if (offset == 2) + s.append("[4]"); + } + + // texel out (for sparse texture) + if (sparse) { + s.append(",out "); + s.append(prefixes[sampler.type]); + s.append("vec4 "); + } + + // comp argument + if (comp) + s.append(",int"); + + // bias argument + if (bias) { + if (f16TexAddr) + s.append(",float16_t"); + else + s.append(",float"); + } + + s.append(");\n"); + if (bias) + stageBuiltins[EShLangFragment].append(s); + else + commonBuiltins.append(s); + } + } + } + } + } + } +} + +// +// Add context-dependent built-in functions and variables that are present +// for the given version and profile. All the results are put into just the +// commonBuiltins, because it is called for just a specific stage. So, +// add stage-specific entries to the commonBuiltins, and only if that stage +// was requested. 
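+// The limits interpolated below come from whatever TBuiltInResource the client
+// hands to glslang (for example through TShader::parse); a maxVertexAttribs of
+// 64 on the ES path, say, becomes the literal text
+//     const mediump int gl_MaxVertexAttribs = 64;
+// which is then parsed like any other built-in declaration.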
+// +void TBuiltIns::initialize(const TBuiltInResource &resources, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language) +{ +#ifdef GLSLANG_WEB + version = 310; + profile = EEsProfile; +#elif defined(GLSLANG_ANGLE) + version = 450; + profile = ECoreProfile; +#endif + + // + // Initialize the context-dependent (resource-dependent) built-in strings for parsing. + // + + //============================================================================ + // + // Standard Uniforms + // + //============================================================================ + + TString& s = commonBuiltins; + const int maxSize = 200; + char builtInConstant[maxSize]; + + // + // Build string of implementation dependent constants. + // + + if (profile == EEsProfile) { + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexUniformVectors = %d;", resources.maxVertexUniformVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentUniformVectors = %d;", resources.maxFragmentUniformVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers); + s.append(builtInConstant); + + if (version == 100) { + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVaryingVectors = %d;", resources.maxVaryingVectors); + s.append(builtInConstant); + } else { + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxVertexOutputVectors = %d;", resources.maxVertexOutputVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxFragmentInputVectors = %d;", resources.maxFragmentInputVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset); + s.append(builtInConstant); + } + +#ifndef GLSLANG_WEB + if (version >= 310) { + // geometry + + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, 
"const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources.maxGeometryAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources.maxGeometryAtomicCounterBuffers); + s.append(builtInConstant); + + // tessellation + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel); + s.append(builtInConstant); + + // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices + if (language == EShLangTessControl || language == EShLangTessEvaluation) { + s.append( + "in gl_PerVertex {" + "highp vec4 gl_Position;" + "highp float gl_PointSize;" + "highp vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "highp vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + "} gl_in[gl_MaxPatchVertices];" + "\n"); + } + } + + if (version >= 320) { + // tessellation + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlImageUniforms = %d;", resources.maxTessControlImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationImageUniforms = %d;", resources.maxTessEvaluationImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounters = %d;", resources.maxTessControlAtomicCounters); + s.append(builtInConstant); 
+ snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounters = %d;", resources.maxTessEvaluationAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounterBuffers = %d;", resources.maxTessControlAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounterBuffers = %d;", resources.maxTessEvaluationAtomicCounterBuffers); + s.append(builtInConstant); + } + + if (version >= 100) { + // GL_EXT_blend_func_extended + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxDualSourceDrawBuffersEXT = %d;", resources.maxDualSourceDrawBuffersEXT); + s.append(builtInConstant); + // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxDualSourceDrawBuffersEXT + if (language == EShLangFragment) { + s.append( + "mediump vec4 gl_SecondaryFragColorEXT;" + "mediump vec4 gl_SecondaryFragDataEXT[gl_MaxDualSourceDrawBuffersEXT];" + "\n"); + } + } + } else { + // non-ES profile + + if (version > 400) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexUniformVectors = %d;", resources.maxVertexUniformVectors); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentUniformVectors = %d;", resources.maxFragmentUniformVectors); + s.append(builtInConstant); + } + + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAttribs = %d;", resources.maxVertexAttribs); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexTextureImageUnits = %d;", resources.maxVertexTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedTextureImageUnits = %d;", resources.maxCombinedTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTextureImageUnits = %d;", resources.maxTextureImageUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxDrawBuffers = %d;", resources.maxDrawBuffers); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxLights = %d;", resources.maxLights); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxClipPlanes = %d;", resources.maxClipPlanes); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTextureUnits = %d;", resources.maxTextureUnits); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTextureCoords = %d;", resources.maxTextureCoords); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexUniformComponents = %d;", resources.maxVertexUniformComponents); + s.append(builtInConstant); + + if (version < 150 || ARBCompatibility) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingFloats = %d;", resources.maxVaryingFloats); + s.append(builtInConstant); + } + + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentUniformComponents = %d;", resources.maxFragmentUniformComponents); + s.append(builtInConstant); + + if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) { + // + // OpenGL'uniform' state. Page numbers are in reference to version + // 1.4 of the OpenGL specification. + // + + // + // Matrix state. p. 31, 32, 37, 39, 40. 
+ // + s.append("uniform mat4 gl_TextureMatrix[gl_MaxTextureCoords];" + + // + // Derived matrix state that provides inverse and transposed versions + // of the matrices above. + // + "uniform mat4 gl_TextureMatrixInverse[gl_MaxTextureCoords];" + + "uniform mat4 gl_TextureMatrixTranspose[gl_MaxTextureCoords];" + + "uniform mat4 gl_TextureMatrixInverseTranspose[gl_MaxTextureCoords];" + + // + // Clip planes p. 42. + // + "uniform vec4 gl_ClipPlane[gl_MaxClipPlanes];" + + // + // Light State p 50, 53, 55. + // + "uniform gl_LightSourceParameters gl_LightSource[gl_MaxLights];" + + // + // Derived state from products of light. + // + "uniform gl_LightProducts gl_FrontLightProduct[gl_MaxLights];" + "uniform gl_LightProducts gl_BackLightProduct[gl_MaxLights];" + + // + // Texture Environment and Generation, p. 152, p. 40-42. + // + "uniform vec4 gl_TextureEnvColor[gl_MaxTextureImageUnits];" + "uniform vec4 gl_EyePlaneS[gl_MaxTextureCoords];" + "uniform vec4 gl_EyePlaneT[gl_MaxTextureCoords];" + "uniform vec4 gl_EyePlaneR[gl_MaxTextureCoords];" + "uniform vec4 gl_EyePlaneQ[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneS[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneT[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneR[gl_MaxTextureCoords];" + "uniform vec4 gl_ObjectPlaneQ[gl_MaxTextureCoords];"); + } + + if (version >= 130) { + snprintf(builtInConstant, maxSize, "const int gl_MaxClipDistances = %d;", resources.maxClipDistances); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxVaryingComponents = %d;", resources.maxVaryingComponents); + s.append(builtInConstant); + + // GL_ARB_shading_language_420pack + snprintf(builtInConstant, maxSize, "const mediump int gl_MinProgramTexelOffset = %d;", resources.minProgramTexelOffset); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const mediump int gl_MaxProgramTexelOffset = %d;", resources.maxProgramTexelOffset); + s.append(builtInConstant); + } + + // geometry + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryInputComponents = %d;", resources.maxGeometryInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputComponents = %d;", resources.maxGeometryOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTextureImageUnits = %d;", resources.maxGeometryTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryOutputVertices = %d;", resources.maxGeometryOutputVertices); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryTotalOutputComponents = %d;", resources.maxGeometryTotalOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryUniformComponents = %d;", resources.maxGeometryUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryVaryingComponents = %d;", resources.maxGeometryVaryingComponents); + s.append(builtInConstant); + + } + + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexOutputComponents = %d;", resources.maxVertexOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentInputComponents = %d;", resources.maxFragmentInputComponents); + s.append(builtInConstant); + } + + // tessellation + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const 
int gl_MaxTessControlInputComponents = %d;", resources.maxTessControlInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlOutputComponents = %d;", resources.maxTessControlOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTextureImageUnits = %d;", resources.maxTessControlTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlUniformComponents = %d;", resources.maxTessControlUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlTotalOutputComponents = %d;", resources.maxTessControlTotalOutputComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationInputComponents = %d;", resources.maxTessEvaluationInputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationOutputComponents = %d;", resources.maxTessEvaluationOutputComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationTextureImageUnits = %d;", resources.maxTessEvaluationTextureImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationUniformComponents = %d;", resources.maxTessEvaluationUniformComponents); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxTessPatchComponents = %d;", resources.maxTessPatchComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessGenLevel = %d;", resources.maxTessGenLevel); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxPatchVertices = %d;", resources.maxPatchVertices); + s.append(builtInConstant); + + // this is here instead of with the others in initialize(version, profile) due to the dependence on gl_MaxPatchVertices + if (language == EShLangTessControl || language == EShLangTessEvaluation) { + s.append( + "in gl_PerVertex {" + "vec4 gl_Position;" + "float gl_PointSize;" + "float gl_ClipDistance[];" + ); + if (profile == ECompatibilityProfile) + s.append( + "vec4 gl_ClipVertex;" + "vec4 gl_FrontColor;" + "vec4 gl_BackColor;" + "vec4 gl_FrontSecondaryColor;" + "vec4 gl_BackSecondaryColor;" + "vec4 gl_TexCoord[];" + "float gl_FogFragCoord;" + ); + if (profile != EEsProfile && version >= 450) + s.append( + "float gl_CullDistance[];" + "vec4 gl_SecondaryPositionNV;" // GL_NV_stereo_view_rendering + "vec4 gl_PositionPerViewNV[];" // GL_NVX_multiview_per_view_attributes + ); + s.append( + "} gl_in[gl_MaxPatchVertices];" + "\n"); + } + } + + if (version >= 150) { + snprintf(builtInConstant, maxSize, "const int gl_MaxViewports = %d;", resources.maxViewports); + s.append(builtInConstant); + } + + // images + if (version >= 130) { + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUnitsAndFragmentOutputs = %d;", resources.maxCombinedImageUnitsAndFragmentOutputs); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxImageSamples = %d;", resources.maxImageSamples); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlImageUniforms = %d;", resources.maxTessControlImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationImageUniforms = %d;", resources.maxTessEvaluationImageUniforms); + s.append(builtInConstant); + 
snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryImageUniforms = %d;", resources.maxGeometryImageUniforms); + s.append(builtInConstant); + } + + // enhanced layouts + if (version >= 430) { + snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackBuffers = %d;", resources.maxTransformFeedbackBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTransformFeedbackInterleavedComponents = %d;", resources.maxTransformFeedbackInterleavedComponents); + s.append(builtInConstant); + } +#endif + } + + // compute + if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) { + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupCount = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupCountX, + resources.maxComputeWorkGroupCountY, + resources.maxComputeWorkGroupCountZ); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxComputeWorkGroupSize = ivec3(%d,%d,%d);", resources.maxComputeWorkGroupSizeX, + resources.maxComputeWorkGroupSizeY, + resources.maxComputeWorkGroupSizeZ); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeUniformComponents = %d;", resources.maxComputeUniformComponents); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeTextureImageUnits = %d;", resources.maxComputeTextureImageUnits); + s.append(builtInConstant); + + s.append("\n"); + } + +#ifndef GLSLANG_WEB + // images (some in compute below) + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 130)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxImageUnits = %d;", resources.maxImageUnits); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedShaderOutputResources = %d;", resources.maxCombinedShaderOutputResources); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexImageUniforms = %d;", resources.maxVertexImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentImageUniforms = %d;", resources.maxFragmentImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedImageUniforms = %d;", resources.maxCombinedImageUniforms); + s.append(builtInConstant); + } + + // compute + if ((profile == EEsProfile && version >= 310) || (profile != EEsProfile && version >= 420)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeImageUniforms = %d;", resources.maxComputeImageUniforms); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounters = %d;", resources.maxComputeAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxComputeAtomicCounterBuffers = %d;", resources.maxComputeAtomicCounterBuffers); + s.append(builtInConstant); + + s.append("\n"); + } + +#ifndef GLSLANG_ANGLE + // atomic counters (some in compute below) + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 420)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounters = %d;", resources. maxVertexAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounters = %d;", resources. maxFragmentAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounters = %d;", resources. 
maxCombinedAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBindings = %d;", resources. maxAtomicCounterBindings); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxVertexAtomicCounterBuffers = %d;", resources. maxVertexAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxFragmentAtomicCounterBuffers = %d;", resources. maxFragmentAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedAtomicCounterBuffers = %d;", resources. maxCombinedAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxAtomicCounterBufferSize = %d;", resources. maxAtomicCounterBufferSize); + s.append(builtInConstant); + } + if (profile != EEsProfile && version >= 420) { + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounters = %d;", resources. maxTessControlAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounters = %d;", resources. maxTessEvaluationAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounters = %d;", resources. maxGeometryAtomicCounters); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessControlAtomicCounterBuffers = %d;", resources. maxTessControlAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxTessEvaluationAtomicCounterBuffers = %d;", resources. maxTessEvaluationAtomicCounterBuffers); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxGeometryAtomicCounterBuffers = %d;", resources. 
maxGeometryAtomicCounterBuffers); + s.append(builtInConstant); + + s.append("\n"); + } +#endif // !GLSLANG_ANGLE + + // GL_ARB_cull_distance + if (profile != EEsProfile && version >= 450) { + snprintf(builtInConstant, maxSize, "const int gl_MaxCullDistances = %d;", resources.maxCullDistances); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const int gl_MaxCombinedClipAndCullDistances = %d;", resources.maxCombinedClipAndCullDistances); + s.append(builtInConstant); + } + + // GL_ARB_ES3_1_compatibility + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 310)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxSamples = %d;", resources.maxSamples); + s.append(builtInConstant); + } + +#ifndef GLSLANG_ANGLE + // SPV_NV_mesh_shader + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputVerticesNV = %d;", resources.maxMeshOutputVerticesNV); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxMeshOutputPrimitivesNV = %d;", resources.maxMeshOutputPrimitivesNV); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxMeshWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxMeshWorkGroupSizeX_NV, + resources.maxMeshWorkGroupSizeY_NV, + resources.maxMeshWorkGroupSizeZ_NV); + s.append(builtInConstant); + snprintf(builtInConstant, maxSize, "const ivec3 gl_MaxTaskWorkGroupSizeNV = ivec3(%d,%d,%d);", resources.maxTaskWorkGroupSizeX_NV, + resources.maxTaskWorkGroupSizeY_NV, + resources.maxTaskWorkGroupSizeZ_NV); + s.append(builtInConstant); + + snprintf(builtInConstant, maxSize, "const int gl_MaxMeshViewCountNV = %d;", resources.maxMeshViewCountNV); + s.append(builtInConstant); + + s.append("\n"); + } +#endif +#endif + + s.append("\n"); +} + +// +// To support special built-ins that have a special qualifier that cannot be declared textually +// in a shader, like gl_Position. +// +// This lets the type of the built-in be declared textually, and then have just its qualifier be +// updated afterward. +// +// Safe to call even if name is not present. +// +// Only use this for built-in variables that have a special qualifier in TStorageQualifier. +// New built-in variables should use a generic (textually declarable) qualifier in +// TStorageQualifier and only call BuiltInVariable(). +// +static void SpecialQualifier(const char* name, TStorageQualifier qualifier, TBuiltInVariable builtIn, TSymbolTable& symbolTable) +{ + TSymbol* symbol = symbolTable.find(name); + if (symbol == nullptr) + return; + + TQualifier& symQualifier = symbol->getWritableType().getQualifier(); + symQualifier.storage = qualifier; + symQualifier.builtIn = builtIn; +} + +// +// To tag built-in variables with their TBuiltInVariable enum. Use this when the +// normal declaration text already gets the qualifier right, and all that's needed +// is setting the builtIn field. This should be the normal way for all new +// built-in variables. +// +// If SpecialQualifier() was called, this does not need to be called. +// +// Safe to call even if name is not present. +// +static void BuiltInVariable(const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable) +{ + TSymbol* symbol = symbolTable.find(name); + if (symbol == nullptr) + return; + + TQualifier& symQualifier = symbol->getWritableType().getQualifier(); + symQualifier.builtIn = builtIn; +} + +// +// For built-in variables inside a named block.
+// SpecialQualifier() won't ever go inside a block; their members' qualifiers come +// from the qualification of the block. +// +// See comments above for other detail. +// +static void BuiltInVariable(const char* blockName, const char* name, TBuiltInVariable builtIn, TSymbolTable& symbolTable) +{ + TSymbol* symbol = symbolTable.find(blockName); + if (symbol == nullptr) + return; + + TTypeList& structure = *symbol->getWritableType().getWritableStruct(); + for (int i = 0; i < (int)structure.size(); ++i) { + if (structure[i].type->getFieldName().compare(name) == 0) { + structure[i].type->getQualifier().builtIn = builtIn; + return; + } + } +} + +// +// Finish adding/processing context-independent built-in symbols. +// 1) Programmatically add symbols that could not be added by simple text strings above. +// 2) Map built-in functions to operators, for those that will turn into an operation node +// instead of remaining a function call. +// 3) Tag extension-related symbols added to their base version with their extensions, so +// that if an early version has the extension turned off, there is an error reported on use. +// +void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) +{ +#ifdef GLSLANG_WEB + version = 310; + profile = EEsProfile; +#elif defined(GLSLANG_ANGLE) + version = 450; + profile = ECoreProfile; +#endif + + // + // Tag built-in variables and functions with additional qualifier and extension information + // that cannot be declared with the text strings. + // + + // N.B.: a symbol should only be tagged once, and this function is called multiple times, once + // per stage that's used for this profile. So + // - generally, stick common ones in the fragment stage to ensure they are tagged exactly once + // - for ES, which has different precisions for different stages, the coarsest-grained tagging + // for a built-in used in many stages needs to be once for the fragment stage and once for + // the vertex stage + + switch(language) { + case EShLangVertex: + if (spvVersion.vulkan > 0) { + BuiltInVariable("gl_VertexIndex", EbvVertexIndex, symbolTable); + BuiltInVariable("gl_InstanceIndex", EbvInstanceIndex, symbolTable); + } + +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0) { + SpecialQualifier("gl_VertexID", EvqVertexId, EbvVertexId, symbolTable); + SpecialQualifier("gl_InstanceID", EvqInstanceId, EbvInstanceId, symbolTable); + } + + if (profile != EEsProfile) { + if (version >= 440) { + symbolTable.setVariableExtensions("gl_BaseVertexARB", 1, &E_GL_ARB_shader_draw_parameters); + symbolTable.setVariableExtensions("gl_BaseInstanceARB", 1, &E_GL_ARB_shader_draw_parameters); + symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters); + BuiltInVariable("gl_BaseVertexARB", EbvBaseVertex, symbolTable); + BuiltInVariable("gl_BaseInstanceARB", EbvBaseInstance, symbolTable); + BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable); + } + if (version >= 460) { + BuiltInVariable("gl_BaseVertex", EbvBaseVertex, symbolTable); + BuiltInVariable("gl_BaseInstance", EbvBaseInstance, symbolTable); + BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable); + } + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1,
&E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + symbolTable.setFunctionExtensions("ballotARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setFunctionExtensions("readInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setFunctionExtensions("readFirstInvocationARB", 1, &E_GL_ARB_shader_ballot); + + if (version >= 430) { + symbolTable.setFunctionExtensions("anyInvocationARB", 1, &E_GL_ARB_shader_group_vote); + symbolTable.setFunctionExtensions("allInvocationsARB", 1, &E_GL_ARB_shader_group_vote); + symbolTable.setFunctionExtensions("allInvocationsEqualARB", 1, &E_GL_ARB_shader_group_vote); + } + } + + + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("minInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("swizzleInvocationsAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("swizzleInvocationsWithPatternAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("writeInvocationAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("mbcntAMD", 1, &E_GL_AMD_shader_ballot); + + symbolTable.setFunctionExtensions("minInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + } + + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("min3", 1, &E_GL_AMD_shader_trinary_minmax); + symbolTable.setFunctionExtensions("max3", 1, &E_GL_AMD_shader_trinary_minmax); + symbolTable.setFunctionExtensions("mid3", 1, &E_GL_AMD_shader_trinary_minmax); + } + + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_SIMDGroupSizeAMD", 1, &E_GL_AMD_gcn_shader); + SpecialQualifier("gl_SIMDGroupSizeAMD", EvqVaryingIn, EbvSubGroupSize, symbolTable); + + 
symbolTable.setFunctionExtensions("cubeFaceIndexAMD", 1, &E_GL_AMD_gcn_shader); + symbolTable.setFunctionExtensions("cubeFaceCoordAMD", 1, &E_GL_AMD_gcn_shader); + symbolTable.setFunctionExtensions("timeAMD", 1, &E_GL_AMD_gcn_shader); + } + + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("fragmentMaskFetchAMD", 1, &E_GL_AMD_shader_fragment_mask); + symbolTable.setFunctionExtensions("fragmentFetchAMD", 1, &E_GL_AMD_shader_fragment_mask); + } + + symbolTable.setFunctionExtensions("countLeadingZeros", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("countTrailingZeros", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("absoluteDifference", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("addSaturate", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("subtractSaturate", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("average", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("averageRounded", 1, &E_GL_INTEL_shader_integer_functions2); + symbolTable.setFunctionExtensions("multiply32x16", 1, &E_GL_INTEL_shader_integer_functions2); + + symbolTable.setFunctionExtensions("textureFootprintNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintClampNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintLodNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintGradNV", 1, &E_GL_NV_shader_texture_footprint); + symbolTable.setFunctionExtensions("textureFootprintGradClampNV", 1, &E_GL_NV_shader_texture_footprint); + // Compatibility variables, vertex only + if (spvVersion.spv == 0) { + BuiltInVariable("gl_Color", EbvColor, symbolTable); + BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable); + BuiltInVariable("gl_Normal", EbvNormal, symbolTable); + BuiltInVariable("gl_Vertex", EbvVertex, symbolTable); + BuiltInVariable("gl_MultiTexCoord0", EbvMultiTexCoord0, symbolTable); + BuiltInVariable("gl_MultiTexCoord1", EbvMultiTexCoord1, symbolTable); + BuiltInVariable("gl_MultiTexCoord2", EbvMultiTexCoord2, symbolTable); + BuiltInVariable("gl_MultiTexCoord3", EbvMultiTexCoord3, symbolTable); + BuiltInVariable("gl_MultiTexCoord4", EbvMultiTexCoord4, symbolTable); + BuiltInVariable("gl_MultiTexCoord5", EbvMultiTexCoord5, symbolTable); + BuiltInVariable("gl_MultiTexCoord6", EbvMultiTexCoord6, symbolTable); + BuiltInVariable("gl_MultiTexCoord7", EbvMultiTexCoord7, symbolTable); + BuiltInVariable("gl_FogCoord", EbvFogFragCoord, symbolTable); + } + + if (profile == EEsProfile) { + if (spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod); + if (version == 310) + symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5); + } + if (version == 310) + symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5); + } + + if (profile == EEsProfile && version < 320) { + symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic); + 
symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic); + } + + if (version >= 300 /* both ES and non-ES */) { + symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs); + BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable); + } + + if (profile == EEsProfile) { + symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers); + symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers); + } + // Fall through + + case EShLangTessControl: + if (profile == EEsProfile && version >= 310) { + BuiltInVariable("gl_BoundingBoxEXT", EbvBoundingBox, symbolTable); + symbolTable.setVariableExtensions("gl_BoundingBoxEXT", 1, + &E_GL_EXT_primitive_bounding_box); + BuiltInVariable("gl_BoundingBoxOES", EbvBoundingBox, symbolTable); + symbolTable.setVariableExtensions("gl_BoundingBoxOES", 1, + &E_GL_OES_primitive_bounding_box); + + if (version >= 320) { + BuiltInVariable("gl_BoundingBox", EbvBoundingBox, symbolTable); + } + } + // Fall through + + case EShLangTessEvaluation: + case EShLangGeometry: +#endif // !GLSLANG_WEB + SpecialQualifier("gl_Position", EvqPosition, EbvPosition, symbolTable); + SpecialQualifier("gl_PointSize", EvqPointSize, EbvPointSize, symbolTable); + + BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable); + + BuiltInVariable("gl_out", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_out", "gl_PointSize", EbvPointSize, symbolTable); + +#ifndef GLSLANG_WEB + SpecialQualifier("gl_ClipVertex", EvqClipVertex, EbvClipVertex, symbolTable); + + BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable); + + BuiltInVariable("gl_out", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_out", "gl_CullDistance", EbvCullDistance, symbolTable); + + BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable); + BuiltInVariable("gl_PrimitiveIDIn", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_InvocationID", EbvInvocationId, symbolTable); + BuiltInVariable("gl_Layer", EbvLayer, symbolTable); + BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable); + + if (language != EShLangGeometry) { + symbolTable.setVariableExtensions("gl_Layer", Num_viewportEXTs, viewportEXTs); + symbolTable.setVariableExtensions("gl_ViewportIndex", Num_viewportEXTs, viewportEXTs); + } + symbolTable.setVariableExtensions("gl_ViewportMask", 1, &E_GL_NV_viewport_array2); + symbolTable.setVariableExtensions("gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + 
symbolTable.setVariableExtensions("gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_ViewportMask", EbvViewportMaskNV, symbolTable); + BuiltInVariable("gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable); + BuiltInVariable("gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + BuiltInVariable("gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable); + + if (language == EShLangVertex || language == EShLangGeometry) { + symbolTable.setVariableExtensions("gl_in", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_in", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_in", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_in", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + } + symbolTable.setVariableExtensions("gl_out", "gl_ViewportMask", 1, &E_GL_NV_viewport_array2); + symbolTable.setVariableExtensions("gl_out", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_out", "gl_SecondaryViewportMaskNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_out", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + symbolTable.setVariableExtensions("gl_out", "gl_ViewportMaskPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_out", "gl_ViewportMask", EbvViewportMaskNV, symbolTable); + BuiltInVariable("gl_out", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_out", "gl_SecondaryViewportMaskNV", EbvSecondaryViewportMaskNV, symbolTable); + BuiltInVariable("gl_out", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + BuiltInVariable("gl_out", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable); + + BuiltInVariable("gl_PatchVerticesIn", EbvPatchVertices, symbolTable); + BuiltInVariable("gl_TessLevelOuter", EbvTessLevelOuter, symbolTable); + BuiltInVariable("gl_TessLevelInner", EbvTessLevelInner, symbolTable); + BuiltInVariable("gl_TessCoord", EbvTessCoord, symbolTable); + + if (version < 410) + symbolTable.setVariableExtensions("gl_ViewportIndex", 1, &E_GL_ARB_viewport_array); + + // Compatibility variables + + BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + BuiltInVariable("gl_out", "gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_out", "gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_out", "gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_out", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_out", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_out", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_out", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + BuiltInVariable("gl_ClipVertex", 
EbvClipVertex, symbolTable); + BuiltInVariable("gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + // gl_PointSize, when it needs to be tied to an extension, is always a member of a block. + // (Sometimes with an instance name, sometimes anonymous). + if (profile == EEsProfile) { + if (language == EShLangGeometry) { + symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size); + symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_geometry_point_size, AEP_geometry_point_size); + } else if (language == EShLangTessEvaluation || language == EShLangTessControl) { + // gl_in tessellation settings of gl_PointSize are in the context-dependent paths + symbolTable.setVariableExtensions("gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size); + symbolTable.setVariableExtensions("gl_out", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size); + } + } + + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview); + BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable); + } + + if (profile != EEsProfile) { + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, 
symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } +#endif // !GLSLANG_WEB + break; + + case EShLangFragment: + SpecialQualifier("gl_FrontFacing", EvqFace, EbvFace, symbolTable); + SpecialQualifier("gl_FragCoord", EvqFragCoord, EbvFragCoord, symbolTable); + SpecialQualifier("gl_PointCoord", EvqPointCoord, EbvPointCoord, symbolTable); + if (spvVersion.spv == 0) + SpecialQualifier("gl_FragColor", EvqFragColor, EbvFragColor, symbolTable); + else { + TSymbol* symbol = symbolTable.find("gl_FragColor"); + if (symbol) { + symbol->getWritableType().getQualifier().storage = EvqVaryingOut; + symbol->getWritableType().getQualifier().layoutLocation = 0; + } + } + SpecialQualifier("gl_FragDepth", EvqFragDepth, EbvFragDepth, symbolTable); +#ifndef GLSLANG_WEB + SpecialQualifier("gl_FragDepthEXT", EvqFragDepth, EbvFragDepth, symbolTable); + SpecialQualifier("gl_HelperInvocation", EvqVaryingIn, EbvHelperInvocation, symbolTable); + + BuiltInVariable("gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_CullDistance", EbvCullDistance, symbolTable); + BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable); + + if (profile != EEsProfile && version >= 140) { + symbolTable.setVariableExtensions("gl_FragStencilRefARB", 1, &E_GL_ARB_shader_stencil_export); + BuiltInVariable("gl_FragStencilRefARB", EbvFragStencilRef, symbolTable); + } + + if (profile != EEsProfile && version < 400) { + symbolTable.setFunctionExtensions("textureQueryLod", 1, &E_GL_ARB_texture_query_lod); + } + + if (profile != EEsProfile && version >= 460) { + symbolTable.setFunctionExtensions("rayQueryInitializeEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryTerminateEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGenerateIntersectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryConfirmIntersectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryProceedEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionTypeEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionTEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetRayFlagsEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetRayTMinEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionInstanceCustomIndexEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionInstanceIdEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT", 1, &E_GL_EXT_ray_query); + 
symbolTable.setFunctionExtensions("rayQueryGetIntersectionGeometryIndexEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionPrimitiveIndexEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionBarycentricsEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionFrontFaceEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionCandidateAABBOpaqueEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionObjectRayDirectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionObjectRayOriginEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionObjectToWorldEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetIntersectionWorldToObjectEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetWorldRayOriginEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setFunctionExtensions("rayQueryGetWorldRayDirectionEXT", 1, &E_GL_EXT_ray_query); + symbolTable.setVariableExtensions("gl_RayFlagsSkipAABBEXT", 1, &E_GL_EXT_ray_flags_primitive_culling); + symbolTable.setVariableExtensions("gl_RayFlagsSkipTrianglesEXT", 1, &E_GL_EXT_ray_flags_primitive_culling); + } + + if ((profile != EEsProfile && version >= 130) || + (profile == EEsProfile && version >= 310)) { + BuiltInVariable("gl_SampleID", EbvSampleId, symbolTable); + BuiltInVariable("gl_SamplePosition", EbvSamplePosition, symbolTable); + BuiltInVariable("gl_SampleMask", EbvSampleMask, symbolTable); + + if (profile != EEsProfile && version < 400) { + BuiltInVariable("gl_NumSamples", EbvSampleMask, symbolTable); + + symbolTable.setVariableExtensions("gl_SampleMask", 1, &E_GL_ARB_sample_shading); + symbolTable.setVariableExtensions("gl_SampleID", 1, &E_GL_ARB_sample_shading); + symbolTable.setVariableExtensions("gl_SamplePosition", 1, &E_GL_ARB_sample_shading); + symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_ARB_sample_shading); + } else { + BuiltInVariable("gl_SampleMaskIn", EbvSampleMask, symbolTable); + + if (profile == EEsProfile && version < 320) { + symbolTable.setVariableExtensions("gl_SampleID", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_SamplePosition", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_SampleMaskIn", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_SampleMask", 1, &E_GL_OES_sample_variables); + symbolTable.setVariableExtensions("gl_NumSamples", 1, &E_GL_OES_sample_variables); + } + } + } + + BuiltInVariable("gl_Layer", EbvLayer, symbolTable); + BuiltInVariable("gl_ViewportIndex", EbvViewportIndex, symbolTable); + + // Compatibility variables + + BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_in", "gl_Color", EbvColor, symbolTable); + BuiltInVariable("gl_in", "gl_SecondaryColor", EbvSecondaryColor, symbolTable); + + BuiltInVariable("gl_FogFragCoord", EbvFogFragCoord, symbolTable); + BuiltInVariable("gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_Color", EbvColor, symbolTable); + BuiltInVariable("gl_SecondaryColor", EbvSecondaryColor, symbolTable); + + // built-in functions + + if (profile == EEsProfile) { + if (spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture2DLodEXT", 1, &E_GL_EXT_shader_texture_lod); + 
symbolTable.setFunctionExtensions("texture2DProjLodEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeLodEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjGradEXT", 1, &E_GL_EXT_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeGradEXT", 1, &E_GL_EXT_shader_texture_lod); + if (version < 320) + symbolTable.setFunctionExtensions("textureGatherOffsets", Num_AEP_gpu_shader5, AEP_gpu_shader5); + } + if (version == 100) { + symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_OES_standard_derivatives); + symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_OES_standard_derivatives); + symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_OES_standard_derivatives); + } + if (version == 310) { + symbolTable.setFunctionExtensions("fma", Num_AEP_gpu_shader5, AEP_gpu_shader5); + symbolTable.setFunctionExtensions("interpolateAtCentroid", 1, &E_GL_OES_shader_multisample_interpolation); + symbolTable.setFunctionExtensions("interpolateAtSample", 1, &E_GL_OES_shader_multisample_interpolation); + symbolTable.setFunctionExtensions("interpolateAtOffset", 1, &E_GL_OES_shader_multisample_interpolation); + } + } else if (version < 130) { + if (spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture1DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture1DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DProjLod", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DProjLod", 1, &E_GL_ARB_shader_texture_lod); + } + } + + // E_GL_ARB_shader_texture_lod functions usable only with the extension enabled + if (profile != EEsProfile && spvVersion.spv == 0) { + symbolTable.setFunctionExtensions("texture1DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture3DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("textureCubeGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow1DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DRectGradARB", 1, 
&E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("texture2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DRectGradARB", 1, &E_GL_ARB_shader_texture_lod); + symbolTable.setFunctionExtensions("shadow2DRectProjGradARB", 1, &E_GL_ARB_shader_texture_lod); + } + + // E_GL_ARB_shader_image_load_store + if (profile != EEsProfile && version < 420) + symbolTable.setFunctionExtensions("memoryBarrier", 1, &E_GL_ARB_shader_image_load_store); + // All the image access functions are protected by checks on the type of the first argument. + + // E_GL_ARB_shader_atomic_counters + if (profile != EEsProfile && version < 420) { + symbolTable.setFunctionExtensions("atomicCounterIncrement", 1, &E_GL_ARB_shader_atomic_counters); + symbolTable.setFunctionExtensions("atomicCounterDecrement", 1, &E_GL_ARB_shader_atomic_counters); + symbolTable.setFunctionExtensions("atomicCounter" , 1, &E_GL_ARB_shader_atomic_counters); + } + + // E_GL_ARB_derivative_control + if (profile != EEsProfile && version < 450) { + symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_ARB_derivative_control); + symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_ARB_derivative_control); + } + + // E_GL_ARB_sparse_texture2 + if (profile != EEsProfile) + { + symbolTable.setFunctionExtensions("sparseTextureARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureLodARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTexelFetchARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTexelFetchOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureLodOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGradARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGradOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGatherARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGatherOffsetARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTextureGatherOffsetsARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseImageLoadARB", 1, &E_GL_ARB_sparse_texture2); + symbolTable.setFunctionExtensions("sparseTexelsResident", 1, &E_GL_ARB_sparse_texture2); + } + + // E_GL_ARB_sparse_texture_clamp + if (profile != EEsProfile) + { + symbolTable.setFunctionExtensions("sparseTextureClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("sparseTextureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("sparseTextureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("sparseTextureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + 
symbolTable.setFunctionExtensions("textureGradClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + symbolTable.setFunctionExtensions("textureGradOffsetClampARB", 1, &E_GL_ARB_sparse_texture_clamp); + } + + // E_GL_AMD_shader_explicit_vertex_parameter + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordSmoothAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordSmoothCentroidAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordSmoothSampleAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + symbolTable.setVariableExtensions("gl_BaryCoordPullModelAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + + symbolTable.setFunctionExtensions("interpolateAtVertexAMD", 1, &E_GL_AMD_shader_explicit_vertex_parameter); + + BuiltInVariable("gl_BaryCoordNoPerspAMD", EbvBaryCoordNoPersp, symbolTable); + BuiltInVariable("gl_BaryCoordNoPerspCentroidAMD", EbvBaryCoordNoPerspCentroid, symbolTable); + BuiltInVariable("gl_BaryCoordNoPerspSampleAMD", EbvBaryCoordNoPerspSample, symbolTable); + BuiltInVariable("gl_BaryCoordSmoothAMD", EbvBaryCoordSmooth, symbolTable); + BuiltInVariable("gl_BaryCoordSmoothCentroidAMD", EbvBaryCoordSmoothCentroid, symbolTable); + BuiltInVariable("gl_BaryCoordSmoothSampleAMD", EbvBaryCoordSmoothSample, symbolTable); + BuiltInVariable("gl_BaryCoordPullModelAMD", EbvBaryCoordPullModel, symbolTable); + } + + // E_GL_AMD_texture_gather_bias_lod + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("textureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("textureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("textureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("sparseTextureGatherLodAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + symbolTable.setFunctionExtensions("sparseTextureGatherLodOffsetsAMD", 1, &E_GL_AMD_texture_gather_bias_lod); + } + + // E_GL_AMD_shader_image_load_store_lod + if (profile != EEsProfile) { + symbolTable.setFunctionExtensions("imageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod); + symbolTable.setFunctionExtensions("imageStoreLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod); + symbolTable.setFunctionExtensions("sparseImageLoadLodAMD", 1, &E_GL_AMD_shader_image_load_store_lod); + } + if (profile != EEsProfile && version >= 430) { + symbolTable.setVariableExtensions("gl_FragFullyCoveredNV", 1, &E_GL_NV_conservative_raster_underestimation); + BuiltInVariable("gl_FragFullyCoveredNV", EbvFragFullyCoveredNV, symbolTable); + } + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) { + symbolTable.setVariableExtensions("gl_FragmentSizeNV", 1, &E_GL_NV_shading_rate_image); + symbolTable.setVariableExtensions("gl_InvocationsPerPixelNV", 1, &E_GL_NV_shading_rate_image); + BuiltInVariable("gl_FragmentSizeNV", EbvFragmentSizeNV, symbolTable); + BuiltInVariable("gl_InvocationsPerPixelNV", 
EbvInvocationsPerPixelNV, symbolTable); + symbolTable.setVariableExtensions("gl_BaryCoordNV", 1, &E_GL_NV_fragment_shader_barycentric); + symbolTable.setVariableExtensions("gl_BaryCoordNoPerspNV", 1, &E_GL_NV_fragment_shader_barycentric); + BuiltInVariable("gl_BaryCoordNV", EbvBaryCoordNV, symbolTable); + BuiltInVariable("gl_BaryCoordNoPerspNV", EbvBaryCoordNoPerspNV, symbolTable); + } + + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 310)) { + symbolTable.setVariableExtensions("gl_FragSizeEXT", 1, &E_GL_EXT_fragment_invocation_density); + symbolTable.setVariableExtensions("gl_FragInvocationCountEXT", 1, &E_GL_EXT_fragment_invocation_density); + BuiltInVariable("gl_FragSizeEXT", EbvFragSizeEXT, symbolTable); + BuiltInVariable("gl_FragInvocationCountEXT", EbvFragInvocationCountEXT, symbolTable); + } + + symbolTable.setVariableExtensions("gl_FragDepthEXT", 1, &E_GL_EXT_frag_depth); + + symbolTable.setFunctionExtensions("clockARB", 1, &E_GL_ARB_shader_clock); + symbolTable.setFunctionExtensions("clock2x32ARB", 1, &E_GL_ARB_shader_clock); + + symbolTable.setFunctionExtensions("clockRealtimeEXT", 1, &E_GL_EXT_shader_realtime_clock); + symbolTable.setFunctionExtensions("clockRealtime2x32EXT", 1, &E_GL_EXT_shader_realtime_clock); + + if (profile == EEsProfile && version < 320) { + symbolTable.setVariableExtensions("gl_PrimitiveID", Num_AEP_geometry_shader, AEP_geometry_shader); + symbolTable.setVariableExtensions("gl_Layer", Num_AEP_geometry_shader, AEP_geometry_shader); + } + + if (profile == EEsProfile && version < 320) { + symbolTable.setFunctionExtensions("imageAtomicAdd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMin", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicMax", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicAnd", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicOr", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicXor", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicExchange", 1, &E_GL_OES_shader_image_atomic); + symbolTable.setFunctionExtensions("imageAtomicCompSwap", 1, &E_GL_OES_shader_image_atomic); + } + + if (profile != EEsProfile && version < 330 ) { + symbolTable.setFunctionExtensions("floatBitsToInt", 1, &E_GL_ARB_shader_bit_encoding); + symbolTable.setFunctionExtensions("floatBitsToUint", 1, &E_GL_ARB_shader_bit_encoding); + symbolTable.setFunctionExtensions("intBitsToFloat", 1, &E_GL_ARB_shader_bit_encoding); + symbolTable.setFunctionExtensions("uintBitsToFloat", 1, &E_GL_ARB_shader_bit_encoding); + } + + if (profile != EEsProfile && version < 430 ) { + symbolTable.setFunctionExtensions("imageSize", 1, &E_GL_ARB_shader_image_size); + } + + // GL_ARB_shader_storage_buffer_object + if (profile != EEsProfile && version < 430 ) { + symbolTable.setFunctionExtensions("atomicAdd", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicMin", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicMax", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicAnd", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicOr", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicXor", 1, &E_GL_ARB_shader_storage_buffer_object); + 
symbolTable.setFunctionExtensions("atomicExchange", 1, &E_GL_ARB_shader_storage_buffer_object); + symbolTable.setFunctionExtensions("atomicCompSwap", 1, &E_GL_ARB_shader_storage_buffer_object); + } + + // GL_ARB_shading_language_packing + if (profile != EEsProfile && version < 400 ) { + symbolTable.setFunctionExtensions("packUnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackUnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("packSnorm4x8", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("packUnorm4x8", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackSnorm4x8", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackUnorm4x8", 1, &E_GL_ARB_shading_language_packing); + } + if (profile != EEsProfile && version < 420 ) { + symbolTable.setFunctionExtensions("packSnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackSnorm2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("unpackHalf2x16", 1, &E_GL_ARB_shading_language_packing); + symbolTable.setFunctionExtensions("packHalf2x16", 1, &E_GL_ARB_shading_language_packing); + } + + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview); + BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable); + if (version >= 300 /* both ES and non-ES */) { + symbolTable.setVariableExtensions("gl_ViewID_OVR", Num_OVR_multiview_EXTs, OVR_multiview_EXTs); + BuiltInVariable("gl_ViewID_OVR", EbvViewIndex, symbolTable); + } + + // GL_ARB_shader_ballot + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + 
symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + symbolTable.setFunctionExtensions("subgroupBarrier", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupMemoryBarrier", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupMemoryBarrierBuffer", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupMemoryBarrierImage", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupElect", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setFunctionExtensions("subgroupAll", 1, &E_GL_KHR_shader_subgroup_vote); + symbolTable.setFunctionExtensions("subgroupAny", 1, &E_GL_KHR_shader_subgroup_vote); + symbolTable.setFunctionExtensions("subgroupAllEqual", 1, &E_GL_KHR_shader_subgroup_vote); + symbolTable.setFunctionExtensions("subgroupBroadcast", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBroadcastFirst", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallot", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupInverseBallot", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotBitExtract", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotBitCount", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotInclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotExclusiveBitCount", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotFindLSB", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupBallotFindMSB", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setFunctionExtensions("subgroupShuffle", 1, &E_GL_KHR_shader_subgroup_shuffle); + symbolTable.setFunctionExtensions("subgroupShuffleXor", 1, &E_GL_KHR_shader_subgroup_shuffle); + symbolTable.setFunctionExtensions("subgroupShuffleUp", 1, &E_GL_KHR_shader_subgroup_shuffle_relative); + symbolTable.setFunctionExtensions("subgroupShuffleDown", 1, &E_GL_KHR_shader_subgroup_shuffle_relative); + symbolTable.setFunctionExtensions("subgroupAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupMul", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupMin", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupMax", 1, &E_GL_KHR_shader_subgroup_arithmetic); + 
symbolTable.setFunctionExtensions("subgroupAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupOr", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupXor", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupInclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveAdd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveMul", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveMin", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveMax", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveAnd", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveOr", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupExclusiveXor", 1, &E_GL_KHR_shader_subgroup_arithmetic); + symbolTable.setFunctionExtensions("subgroupClusteredAdd", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredMul", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredMin", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredMax", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredAnd", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredOr", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupClusteredXor", 1, &E_GL_KHR_shader_subgroup_clustered); + symbolTable.setFunctionExtensions("subgroupQuadBroadcast", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupQuadSwapHorizontal", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupQuadSwapVertical", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupQuadSwapDiagonal", 1, &E_GL_KHR_shader_subgroup_quad); + symbolTable.setFunctionExtensions("subgroupPartitionNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedAddNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedMulNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedMinNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedAndNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedOrNV", 1, &E_GL_NV_shader_subgroup_partitioned); + 
symbolTable.setFunctionExtensions("subgroupPartitionedXorNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedInclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAddNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMulNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMinNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveMaxNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveAndNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveOrNV", 1, &E_GL_NV_shader_subgroup_partitioned); + symbolTable.setFunctionExtensions("subgroupPartitionedExclusiveXorNV", 1, &E_GL_NV_shader_subgroup_partitioned); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + + if (profile == EEsProfile) { + symbolTable.setFunctionExtensions("shadow2DEXT", 1, &E_GL_EXT_shadow_samplers); + symbolTable.setFunctionExtensions("shadow2DProjEXT", 1, &E_GL_EXT_shadow_samplers); + } + + if (spvVersion.vulkan > 0) { + symbolTable.setVariableExtensions("gl_ScopeDevice", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_ScopeWorkgroup", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_ScopeSubgroup", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_ScopeInvocation", 1, &E_GL_KHR_memory_scope_semantics); + + symbolTable.setVariableExtensions("gl_SemanticsRelaxed", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsAcquire", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsRelease", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsAcquireRelease", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsMakeAvailable", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_SemanticsMakeVisible", 1, &E_GL_KHR_memory_scope_semantics); + 
symbolTable.setVariableExtensions("gl_SemanticsVolatile", 1, &E_GL_KHR_memory_scope_semantics); + + symbolTable.setVariableExtensions("gl_StorageSemanticsNone", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsBuffer", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsShared", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsImage", 1, &E_GL_KHR_memory_scope_semantics); + symbolTable.setVariableExtensions("gl_StorageSemanticsOutput", 1, &E_GL_KHR_memory_scope_semantics); + } + + symbolTable.setFunctionExtensions("helperInvocationEXT", 1, &E_GL_EXT_demote_to_helper_invocation); +#endif // !GLSLANG_WEB + break; + + case EShLangCompute: + BuiltInVariable("gl_NumWorkGroups", EbvNumWorkGroups, symbolTable); + BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable); + BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable); + BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable); + BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable); + BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + BuiltInVariable("gl_ViewIndex", EbvViewIndex, symbolTable); + +#ifndef GLSLANG_WEB + if ((profile != EEsProfile && version >= 140) || + (profile == EEsProfile && version >= 310)) { + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + symbolTable.setVariableExtensions("gl_ViewIndex", 1, &E_GL_EXT_multiview); + } + + if (profile != EEsProfile && version < 430) { + symbolTable.setVariableExtensions("gl_NumWorkGroups", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_ARB_compute_shader); + + symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupCount", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeWorkGroupSize", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeUniformComponents", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeTextureImageUnits", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeImageUniforms", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounters", 1, &E_GL_ARB_compute_shader); + symbolTable.setVariableExtensions("gl_MaxComputeAtomicCounterBuffers", 1, &E_GL_ARB_compute_shader); + + symbolTable.setFunctionExtensions("barrier", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierAtomicCounter", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierBuffer", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierImage", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_ARB_compute_shader); + symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_ARB_compute_shader); + } + + + symbolTable.setFunctionExtensions("controlBarrier", 1, &E_GL_KHR_memory_scope_semantics); + 
symbolTable.setFunctionExtensions("debugPrintfEXT", 1, &E_GL_EXT_debug_printf); + + // GL_ARB_shader_ballot + if (profile != EEsProfile) { + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) 
{ + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + + symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic); + } + + { + const char *coopExt[2] = { E_GL_NV_cooperative_matrix, E_GL_NV_integer_cooperative_matrix }; + symbolTable.setFunctionExtensions("coopMatLoadNV", 2, coopExt); + symbolTable.setFunctionExtensions("coopMatStoreNV", 2, coopExt); + symbolTable.setFunctionExtensions("coopMatMulAddNV", 2, coopExt); + } + + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.setFunctionExtensions("dFdx", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdy", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("fwidth", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdxFine", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdyFine", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("fwidthFine", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdxCoarse", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("dFdyCoarse", 1, &E_GL_NV_compute_shader_derivatives); + symbolTable.setFunctionExtensions("fwidthCoarse", 1, &E_GL_NV_compute_shader_derivatives); + } +#endif // !GLSLANG_WEB + break; + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + if (profile != EEsProfile && version >= 460) { + const char *rtexts[] = { E_GL_NV_ray_tracing, E_GL_EXT_ray_tracing }; + symbolTable.setVariableExtensions("gl_LaunchIDNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_LaunchIDEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_LaunchSizeNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_LaunchSizeEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_PrimitiveID", 2, rtexts); + symbolTable.setVariableExtensions("gl_InstanceID", 2, rtexts); + symbolTable.setVariableExtensions("gl_InstanceCustomIndexNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_InstanceCustomIndexEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_GeometryIndexEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayOriginNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayOriginEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayDirectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldRayDirectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayOriginNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayOriginEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayDirectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectRayDirectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTminNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTminEXT", 1, 
&E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTmaxNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_RayTmaxEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_HitTNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_HitTEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_HitKindNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_HitKindEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectToWorldNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectToWorldEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_ObjectToWorld3x4EXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldToObjectNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldToObjectEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_WorldToObject3x4EXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setVariableExtensions("gl_IncomingRayFlagsNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setVariableExtensions("gl_IncomingRayFlagsEXT", 1, &E_GL_EXT_ray_tracing); + + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + + + symbolTable.setFunctionExtensions("traceNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("traceRayEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("reportIntersectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("reportIntersectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("ignoreIntersectionNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("ignoreIntersectionEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("terminateRayNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("terminateRayEXT", 1, &E_GL_EXT_ray_tracing); + symbolTable.setFunctionExtensions("executeCallableNV", 1, &E_GL_NV_ray_tracing); + symbolTable.setFunctionExtensions("executeCallableEXT", 1, &E_GL_EXT_ray_tracing); + + + BuiltInVariable("gl_LaunchIDNV", EbvLaunchId, symbolTable); + BuiltInVariable("gl_LaunchIDEXT", EbvLaunchId, symbolTable); + BuiltInVariable("gl_LaunchSizeNV", EbvLaunchSize, symbolTable); + BuiltInVariable("gl_LaunchSizeEXT", EbvLaunchSize, symbolTable); + BuiltInVariable("gl_PrimitiveID", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_InstanceID", EbvInstanceId, symbolTable); + BuiltInVariable("gl_InstanceCustomIndexNV", EbvInstanceCustomIndex,symbolTable); + BuiltInVariable("gl_InstanceCustomIndexEXT", EbvInstanceCustomIndex,symbolTable); + BuiltInVariable("gl_GeometryIndexEXT", EbvGeometryIndex, symbolTable); + BuiltInVariable("gl_WorldRayOriginNV", EbvWorldRayOrigin, symbolTable); + BuiltInVariable("gl_WorldRayOriginEXT", EbvWorldRayOrigin, symbolTable); + BuiltInVariable("gl_WorldRayDirectionNV", EbvWorldRayDirection, symbolTable); + BuiltInVariable("gl_WorldRayDirectionEXT", EbvWorldRayDirection, symbolTable); + BuiltInVariable("gl_ObjectRayOriginNV", EbvObjectRayOrigin, symbolTable); + BuiltInVariable("gl_ObjectRayOriginEXT", EbvObjectRayOrigin, symbolTable); + BuiltInVariable("gl_ObjectRayDirectionNV", EbvObjectRayDirection, symbolTable); + BuiltInVariable("gl_ObjectRayDirectionEXT", EbvObjectRayDirection, symbolTable); + BuiltInVariable("gl_RayTminNV", EbvRayTmin, symbolTable); + BuiltInVariable("gl_RayTminEXT", EbvRayTmin, symbolTable); + BuiltInVariable("gl_RayTmaxNV", EbvRayTmax, symbolTable); + 
BuiltInVariable("gl_RayTmaxEXT", EbvRayTmax, symbolTable); + BuiltInVariable("gl_HitTNV", EbvHitT, symbolTable); + BuiltInVariable("gl_HitTEXT", EbvHitT, symbolTable); + BuiltInVariable("gl_HitKindNV", EbvHitKind, symbolTable); + BuiltInVariable("gl_HitKindEXT", EbvHitKind, symbolTable); + BuiltInVariable("gl_ObjectToWorldNV", EbvObjectToWorld, symbolTable); + BuiltInVariable("gl_ObjectToWorldEXT", EbvObjectToWorld, symbolTable); + BuiltInVariable("gl_ObjectToWorld3x4EXT", EbvObjectToWorld3x4, symbolTable); + BuiltInVariable("gl_WorldToObjectNV", EbvWorldToObject, symbolTable); + BuiltInVariable("gl_WorldToObjectEXT", EbvWorldToObject, symbolTable); + BuiltInVariable("gl_WorldToObject3x4EXT", EbvWorldToObject3x4, symbolTable); + BuiltInVariable("gl_IncomingRayFlagsNV", EbvIncomingRayFlags, symbolTable); + BuiltInVariable("gl_IncomingRayFlagsEXT", EbvIncomingRayFlags, symbolTable); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + + // GL_ARB_shader_ballot + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + + // GL_KHR_shader_subgroup + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", 
EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + break; + + case EShLangMeshNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + // per-vertex builtins + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_Position", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PointSize", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistance", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistance", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshVerticesNV", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_PointSize", EbvPointSize, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistance", EbvCullDistance, symbolTable); + + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_PositionPerViewNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshVerticesNV", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_ClipDistancePerViewNV", EbvClipDistancePerViewNV, symbolTable); + BuiltInVariable("gl_MeshVerticesNV", "gl_CullDistancePerViewNV", EbvCullDistancePerViewNV, symbolTable); + + // per-primitive builtins + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_PrimitiveID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_Layer", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportIndex", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMask", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_MeshPrimitivesNV", "gl_PrimitiveID", EbvPrimitiveId, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_Layer", EbvLayer, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportIndex", EbvViewportIndex, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMask", EbvViewportMaskNV, symbolTable); + + // per-view per-primitive builtins + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", 1, &E_GL_NV_mesh_shader); + + 
BuiltInVariable("gl_MeshPrimitivesNV", "gl_LayerPerViewNV", EbvLayerPerViewNV, symbolTable); + BuiltInVariable("gl_MeshPrimitivesNV", "gl_ViewportMaskPerViewNV", EbvViewportMaskPerViewNV, symbolTable); + + // other builtins + symbolTable.setVariableExtensions("gl_PrimitiveCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_PrimitiveIndicesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_GlobalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_PrimitiveCountNV", EbvPrimitiveCountNV, symbolTable); + BuiltInVariable("gl_PrimitiveIndicesNV", EbvPrimitiveIndicesNV, symbolTable); + BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable); + BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable); + BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable); + BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable); + BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable); + BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable); + BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable); + + // builtin constants + symbolTable.setVariableExtensions("gl_MaxMeshOutputVerticesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshOutputPrimitivesNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader); + + // builtin functions + symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader); + } + + if (profile != EEsProfile && version >= 450) { + // GL_EXT_device_group + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + + // GL_ARB_shader_draw_parameters + symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters); + BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable); + if (version >= 460) { + BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable); + } + + // GL_ARB_shader_ballot + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", 
EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + break; + + case EShLangTaskNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.setVariableExtensions("gl_TaskCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupSize", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_WorkGroupID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationID", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_GlobalInvocationID", 
1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_LocalInvocationIndex", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewCountNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MeshViewIndicesNV", 1, &E_GL_NV_mesh_shader); + + BuiltInVariable("gl_TaskCountNV", EbvTaskCountNV, symbolTable); + BuiltInVariable("gl_WorkGroupSize", EbvWorkGroupSize, symbolTable); + BuiltInVariable("gl_WorkGroupID", EbvWorkGroupId, symbolTable); + BuiltInVariable("gl_LocalInvocationID", EbvLocalInvocationId, symbolTable); + BuiltInVariable("gl_GlobalInvocationID", EbvGlobalInvocationId, symbolTable); + BuiltInVariable("gl_LocalInvocationIndex", EbvLocalInvocationIndex, symbolTable); + BuiltInVariable("gl_MeshViewCountNV", EbvMeshViewCountNV, symbolTable); + BuiltInVariable("gl_MeshViewIndicesNV", EbvMeshViewIndicesNV, symbolTable); + + symbolTable.setVariableExtensions("gl_MaxTaskWorkGroupSizeNV", 1, &E_GL_NV_mesh_shader); + symbolTable.setVariableExtensions("gl_MaxMeshViewCountNV", 1, &E_GL_NV_mesh_shader); + + symbolTable.setFunctionExtensions("barrier", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("memoryBarrierShared", 1, &E_GL_NV_mesh_shader); + symbolTable.setFunctionExtensions("groupMemoryBarrier", 1, &E_GL_NV_mesh_shader); + } + + if (profile != EEsProfile && version >= 450) { + // GL_EXT_device_group + symbolTable.setVariableExtensions("gl_DeviceIndex", 1, &E_GL_EXT_device_group); + BuiltInVariable("gl_DeviceIndex", EbvDeviceIndex, symbolTable); + + // GL_ARB_shader_draw_parameters + symbolTable.setVariableExtensions("gl_DrawIDARB", 1, &E_GL_ARB_shader_draw_parameters); + BuiltInVariable("gl_DrawIDARB", EbvDrawId, symbolTable); + if (version >= 460) { + BuiltInVariable("gl_DrawID", EbvDrawId, symbolTable); + } + + // GL_ARB_shader_ballot + symbolTable.setVariableExtensions("gl_SubGroupSizeARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupInvocationARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupEqMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupGtMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLeMaskARB", 1, &E_GL_ARB_shader_ballot); + symbolTable.setVariableExtensions("gl_SubGroupLtMaskARB", 1, &E_GL_ARB_shader_ballot); + + BuiltInVariable("gl_SubGroupInvocationARB", EbvSubGroupInvocation, symbolTable); + BuiltInVariable("gl_SubGroupEqMaskARB", EbvSubGroupEqMask, symbolTable); + BuiltInVariable("gl_SubGroupGeMaskARB", EbvSubGroupGeMask, symbolTable); + BuiltInVariable("gl_SubGroupGtMaskARB", EbvSubGroupGtMask, symbolTable); + BuiltInVariable("gl_SubGroupLeMaskARB", EbvSubGroupLeMask, symbolTable); + BuiltInVariable("gl_SubGroupLtMaskARB", EbvSubGroupLtMask, symbolTable); + + if (spvVersion.vulkan > 0) + // Treat "gl_SubGroupSizeARB" as shader input instead of uniform for Vulkan + SpecialQualifier("gl_SubGroupSizeARB", EvqVaryingIn, EbvSubGroupSize, symbolTable); + else + BuiltInVariable("gl_SubGroupSizeARB", EbvSubGroupSize, symbolTable); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.setVariableExtensions("gl_NumSubgroups", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupID", 1, &E_GL_KHR_shader_subgroup_basic); + 
symbolTable.setVariableExtensions("gl_SubgroupSize", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupInvocationID", 1, &E_GL_KHR_shader_subgroup_basic); + symbolTable.setVariableExtensions("gl_SubgroupEqMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupGtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLeMask", 1, &E_GL_KHR_shader_subgroup_ballot); + symbolTable.setVariableExtensions("gl_SubgroupLtMask", 1, &E_GL_KHR_shader_subgroup_ballot); + + BuiltInVariable("gl_NumSubgroups", EbvNumSubgroups, symbolTable); + BuiltInVariable("gl_SubgroupID", EbvSubgroupID, symbolTable); + BuiltInVariable("gl_SubgroupSize", EbvSubgroupSize2, symbolTable); + BuiltInVariable("gl_SubgroupInvocationID", EbvSubgroupInvocation2, symbolTable); + BuiltInVariable("gl_SubgroupEqMask", EbvSubgroupEqMask2, symbolTable); + BuiltInVariable("gl_SubgroupGeMask", EbvSubgroupGeMask2, symbolTable); + BuiltInVariable("gl_SubgroupGtMask", EbvSubgroupGtMask2, symbolTable); + BuiltInVariable("gl_SubgroupLeMask", EbvSubgroupLeMask2, symbolTable); + BuiltInVariable("gl_SubgroupLtMask", EbvSubgroupLtMask2, symbolTable); + + symbolTable.setFunctionExtensions("subgroupMemoryBarrierShared", 1, &E_GL_KHR_shader_subgroup_basic); + + // GL_NV_shader_sm_builtins + symbolTable.setVariableExtensions("gl_WarpsPerSMNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMCountNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_WarpIDNV", 1, &E_GL_NV_shader_sm_builtins); + symbolTable.setVariableExtensions("gl_SMIDNV", 1, &E_GL_NV_shader_sm_builtins); + BuiltInVariable("gl_WarpsPerSMNV", EbvWarpsPerSM, symbolTable); + BuiltInVariable("gl_SMCountNV", EbvSMCount, symbolTable); + BuiltInVariable("gl_WarpIDNV", EbvWarpID, symbolTable); + BuiltInVariable("gl_SMIDNV", EbvSMID, symbolTable); + } + break; +#endif + + default: + assert(false && "Language not supported"); + break; + } + + // + // Next, identify which built-ins have a mapping to an operator. + // If PureOperatorBuiltins is false, those that are not identified as such are + // expected to be resolved through a library of functions, versus as + // operations. 
+ // + + relateTabledBuiltins(version, profile, spvVersion, language, symbolTable); + +#ifndef GLSLANG_WEB + symbolTable.relateToOperator("doubleBitsToInt64", EOpDoubleBitsToInt64); + symbolTable.relateToOperator("doubleBitsToUint64", EOpDoubleBitsToUint64); + symbolTable.relateToOperator("int64BitsToDouble", EOpInt64BitsToDouble); + symbolTable.relateToOperator("uint64BitsToDouble", EOpUint64BitsToDouble); + symbolTable.relateToOperator("halfBitsToInt16", EOpFloat16BitsToInt16); + symbolTable.relateToOperator("halfBitsToUint16", EOpFloat16BitsToUint16); + symbolTable.relateToOperator("float16BitsToInt16", EOpFloat16BitsToInt16); + symbolTable.relateToOperator("float16BitsToUint16", EOpFloat16BitsToUint16); + symbolTable.relateToOperator("int16BitsToFloat16", EOpInt16BitsToFloat16); + symbolTable.relateToOperator("uint16BitsToFloat16", EOpUint16BitsToFloat16); + + symbolTable.relateToOperator("int16BitsToHalf", EOpInt16BitsToFloat16); + symbolTable.relateToOperator("uint16BitsToHalf", EOpUint16BitsToFloat16); + + symbolTable.relateToOperator("packSnorm4x8", EOpPackSnorm4x8); + symbolTable.relateToOperator("unpackSnorm4x8", EOpUnpackSnorm4x8); + symbolTable.relateToOperator("packUnorm4x8", EOpPackUnorm4x8); + symbolTable.relateToOperator("unpackUnorm4x8", EOpUnpackUnorm4x8); + + symbolTable.relateToOperator("packDouble2x32", EOpPackDouble2x32); + symbolTable.relateToOperator("unpackDouble2x32", EOpUnpackDouble2x32); + + symbolTable.relateToOperator("packInt2x32", EOpPackInt2x32); + symbolTable.relateToOperator("unpackInt2x32", EOpUnpackInt2x32); + symbolTable.relateToOperator("packUint2x32", EOpPackUint2x32); + symbolTable.relateToOperator("unpackUint2x32", EOpUnpackUint2x32); + + symbolTable.relateToOperator("packInt2x16", EOpPackInt2x16); + symbolTable.relateToOperator("unpackInt2x16", EOpUnpackInt2x16); + symbolTable.relateToOperator("packUint2x16", EOpPackUint2x16); + symbolTable.relateToOperator("unpackUint2x16", EOpUnpackUint2x16); + + symbolTable.relateToOperator("packInt4x16", EOpPackInt4x16); + symbolTable.relateToOperator("unpackInt4x16", EOpUnpackInt4x16); + symbolTable.relateToOperator("packUint4x16", EOpPackUint4x16); + symbolTable.relateToOperator("unpackUint4x16", EOpUnpackUint4x16); + symbolTable.relateToOperator("packFloat2x16", EOpPackFloat2x16); + symbolTable.relateToOperator("unpackFloat2x16", EOpUnpackFloat2x16); + + symbolTable.relateToOperator("pack16", EOpPack16); + symbolTable.relateToOperator("pack32", EOpPack32); + symbolTable.relateToOperator("pack64", EOpPack64); + + symbolTable.relateToOperator("unpack32", EOpUnpack32); + symbolTable.relateToOperator("unpack16", EOpUnpack16); + symbolTable.relateToOperator("unpack8", EOpUnpack8); + + symbolTable.relateToOperator("controlBarrier", EOpBarrier); + symbolTable.relateToOperator("memoryBarrierAtomicCounter", EOpMemoryBarrierAtomicCounter); + symbolTable.relateToOperator("memoryBarrierImage", EOpMemoryBarrierImage); + + symbolTable.relateToOperator("atomicLoad", EOpAtomicLoad); + symbolTable.relateToOperator("atomicStore", EOpAtomicStore); + + symbolTable.relateToOperator("atomicCounterIncrement", EOpAtomicCounterIncrement); + symbolTable.relateToOperator("atomicCounterDecrement", EOpAtomicCounterDecrement); + symbolTable.relateToOperator("atomicCounter", EOpAtomicCounter); + + symbolTable.relateToOperator("clockARB", EOpReadClockSubgroupKHR); + symbolTable.relateToOperator("clock2x32ARB", EOpReadClockSubgroupKHR); + + symbolTable.relateToOperator("clockRealtimeEXT", EOpReadClockDeviceKHR); + 
symbolTable.relateToOperator("clockRealtime2x32EXT", EOpReadClockDeviceKHR); + + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("atomicCounterAdd", EOpAtomicCounterAdd); + symbolTable.relateToOperator("atomicCounterSubtract", EOpAtomicCounterSubtract); + symbolTable.relateToOperator("atomicCounterMin", EOpAtomicCounterMin); + symbolTable.relateToOperator("atomicCounterMax", EOpAtomicCounterMax); + symbolTable.relateToOperator("atomicCounterAnd", EOpAtomicCounterAnd); + symbolTable.relateToOperator("atomicCounterOr", EOpAtomicCounterOr); + symbolTable.relateToOperator("atomicCounterXor", EOpAtomicCounterXor); + symbolTable.relateToOperator("atomicCounterExchange", EOpAtomicCounterExchange); + symbolTable.relateToOperator("atomicCounterCompSwap", EOpAtomicCounterCompSwap); + } + + symbolTable.relateToOperator("fma", EOpFma); + symbolTable.relateToOperator("frexp", EOpFrexp); + symbolTable.relateToOperator("ldexp", EOpLdexp); + symbolTable.relateToOperator("uaddCarry", EOpAddCarry); + symbolTable.relateToOperator("usubBorrow", EOpSubBorrow); + symbolTable.relateToOperator("umulExtended", EOpUMulExtended); + symbolTable.relateToOperator("imulExtended", EOpIMulExtended); + symbolTable.relateToOperator("bitfieldExtract", EOpBitfieldExtract); + symbolTable.relateToOperator("bitfieldInsert", EOpBitfieldInsert); + symbolTable.relateToOperator("bitfieldReverse", EOpBitFieldReverse); + symbolTable.relateToOperator("bitCount", EOpBitCount); + symbolTable.relateToOperator("findLSB", EOpFindLSB); + symbolTable.relateToOperator("findMSB", EOpFindMSB); + + symbolTable.relateToOperator("helperInvocationEXT", EOpIsHelperInvocation); + + symbolTable.relateToOperator("countLeadingZeros", EOpCountLeadingZeros); + symbolTable.relateToOperator("countTrailingZeros", EOpCountTrailingZeros); + symbolTable.relateToOperator("absoluteDifference", EOpAbsDifference); + symbolTable.relateToOperator("addSaturate", EOpAddSaturate); + symbolTable.relateToOperator("subtractSaturate", EOpSubSaturate); + symbolTable.relateToOperator("average", EOpAverage); + symbolTable.relateToOperator("averageRounded", EOpAverageRounded); + symbolTable.relateToOperator("multiply32x16", EOpMul32x16); + symbolTable.relateToOperator("debugPrintfEXT", EOpDebugPrintf); + + + if (PureOperatorBuiltins) { + symbolTable.relateToOperator("imageSize", EOpImageQuerySize); + symbolTable.relateToOperator("imageSamples", EOpImageQuerySamples); + symbolTable.relateToOperator("imageLoad", EOpImageLoad); + symbolTable.relateToOperator("imageStore", EOpImageStore); + symbolTable.relateToOperator("imageAtomicAdd", EOpImageAtomicAdd); + symbolTable.relateToOperator("imageAtomicMin", EOpImageAtomicMin); + symbolTable.relateToOperator("imageAtomicMax", EOpImageAtomicMax); + symbolTable.relateToOperator("imageAtomicAnd", EOpImageAtomicAnd); + symbolTable.relateToOperator("imageAtomicOr", EOpImageAtomicOr); + symbolTable.relateToOperator("imageAtomicXor", EOpImageAtomicXor); + symbolTable.relateToOperator("imageAtomicExchange", EOpImageAtomicExchange); + symbolTable.relateToOperator("imageAtomicCompSwap", EOpImageAtomicCompSwap); + symbolTable.relateToOperator("imageAtomicLoad", EOpImageAtomicLoad); + symbolTable.relateToOperator("imageAtomicStore", EOpImageAtomicStore); + + symbolTable.relateToOperator("subpassLoad", EOpSubpassLoad); + symbolTable.relateToOperator("subpassLoadMS", EOpSubpassLoadMS); + + symbolTable.relateToOperator("textureGather", EOpTextureGather); + symbolTable.relateToOperator("textureGatherOffset", 
EOpTextureGatherOffset); + symbolTable.relateToOperator("textureGatherOffsets", EOpTextureGatherOffsets); + + symbolTable.relateToOperator("noise1", EOpNoise); + symbolTable.relateToOperator("noise2", EOpNoise); + symbolTable.relateToOperator("noise3", EOpNoise); + symbolTable.relateToOperator("noise4", EOpNoise); + + symbolTable.relateToOperator("textureFootprintNV", EOpImageSampleFootprintNV); + symbolTable.relateToOperator("textureFootprintClampNV", EOpImageSampleFootprintClampNV); + symbolTable.relateToOperator("textureFootprintLodNV", EOpImageSampleFootprintLodNV); + symbolTable.relateToOperator("textureFootprintGradNV", EOpImageSampleFootprintGradNV); + symbolTable.relateToOperator("textureFootprintGradClampNV", EOpImageSampleFootprintGradClampNV); + + if (spvVersion.spv == 0 && IncludeLegacy(version, profile, spvVersion)) + symbolTable.relateToOperator("ftransform", EOpFtransform); + + if (spvVersion.spv == 0 && (IncludeLegacy(version, profile, spvVersion) || + (profile == EEsProfile && version == 100))) { + + symbolTable.relateToOperator("texture1D", EOpTexture); + symbolTable.relateToOperator("texture1DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture1DProj", EOpTextureProj); + symbolTable.relateToOperator("texture1DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("texture1DLod", EOpTextureLod); + symbolTable.relateToOperator("texture1DProjLod", EOpTextureProjLod); + + symbolTable.relateToOperator("texture2DRect", EOpTexture); + symbolTable.relateToOperator("texture2DRectProj", EOpTextureProj); + symbolTable.relateToOperator("texture2DRectGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture2DRectProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("shadow2DRect", EOpTexture); + symbolTable.relateToOperator("shadow2DRectProj", EOpTextureProj); + symbolTable.relateToOperator("shadow2DRectGradARB", EOpTextureGrad); + symbolTable.relateToOperator("shadow2DRectProjGradARB", EOpTextureProjGrad); + + symbolTable.relateToOperator("texture2D", EOpTexture); + symbolTable.relateToOperator("texture2DProj", EOpTextureProj); + symbolTable.relateToOperator("texture2DGradEXT", EOpTextureGrad); + symbolTable.relateToOperator("texture2DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture2DProjGradEXT", EOpTextureProjGrad); + symbolTable.relateToOperator("texture2DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("texture2DLod", EOpTextureLod); + symbolTable.relateToOperator("texture2DLodEXT", EOpTextureLod); + symbolTable.relateToOperator("texture2DProjLod", EOpTextureProjLod); + symbolTable.relateToOperator("texture2DProjLodEXT", EOpTextureProjLod); + + symbolTable.relateToOperator("texture3D", EOpTexture); + symbolTable.relateToOperator("texture3DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("texture3DProj", EOpTextureProj); + symbolTable.relateToOperator("texture3DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("texture3DLod", EOpTextureLod); + symbolTable.relateToOperator("texture3DProjLod", EOpTextureProjLod); + symbolTable.relateToOperator("textureCube", EOpTexture); + symbolTable.relateToOperator("textureCubeGradEXT", EOpTextureGrad); + symbolTable.relateToOperator("textureCubeGradARB", EOpTextureGrad); + symbolTable.relateToOperator("textureCubeLod", EOpTextureLod); + symbolTable.relateToOperator("textureCubeLodEXT", EOpTextureLod); + symbolTable.relateToOperator("shadow1D", EOpTexture); + symbolTable.relateToOperator("shadow1DGradARB", EOpTextureGrad); + 
symbolTable.relateToOperator("shadow2D", EOpTexture); + symbolTable.relateToOperator("shadow2DGradARB", EOpTextureGrad); + symbolTable.relateToOperator("shadow1DProj", EOpTextureProj); + symbolTable.relateToOperator("shadow2DProj", EOpTextureProj); + symbolTable.relateToOperator("shadow1DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("shadow2DProjGradARB", EOpTextureProjGrad); + symbolTable.relateToOperator("shadow1DLod", EOpTextureLod); + symbolTable.relateToOperator("shadow2DLod", EOpTextureLod); + symbolTable.relateToOperator("shadow1DProjLod", EOpTextureProjLod); + symbolTable.relateToOperator("shadow2DProjLod", EOpTextureProjLod); + } + + if (profile != EEsProfile) { + symbolTable.relateToOperator("sparseTextureARB", EOpSparseTexture); + symbolTable.relateToOperator("sparseTextureLodARB", EOpSparseTextureLod); + symbolTable.relateToOperator("sparseTextureOffsetARB", EOpSparseTextureOffset); + symbolTable.relateToOperator("sparseTexelFetchARB", EOpSparseTextureFetch); + symbolTable.relateToOperator("sparseTexelFetchOffsetARB", EOpSparseTextureFetchOffset); + symbolTable.relateToOperator("sparseTextureLodOffsetARB", EOpSparseTextureLodOffset); + symbolTable.relateToOperator("sparseTextureGradARB", EOpSparseTextureGrad); + symbolTable.relateToOperator("sparseTextureGradOffsetARB", EOpSparseTextureGradOffset); + symbolTable.relateToOperator("sparseTextureGatherARB", EOpSparseTextureGather); + symbolTable.relateToOperator("sparseTextureGatherOffsetARB", EOpSparseTextureGatherOffset); + symbolTable.relateToOperator("sparseTextureGatherOffsetsARB", EOpSparseTextureGatherOffsets); + symbolTable.relateToOperator("sparseImageLoadARB", EOpSparseImageLoad); + symbolTable.relateToOperator("sparseTexelsResidentARB", EOpSparseTexelsResident); + + symbolTable.relateToOperator("sparseTextureClampARB", EOpSparseTextureClamp); + symbolTable.relateToOperator("sparseTextureOffsetClampARB", EOpSparseTextureOffsetClamp); + symbolTable.relateToOperator("sparseTextureGradClampARB", EOpSparseTextureGradClamp); + symbolTable.relateToOperator("sparseTextureGradOffsetClampARB", EOpSparseTextureGradOffsetClamp); + symbolTable.relateToOperator("textureClampARB", EOpTextureClamp); + symbolTable.relateToOperator("textureOffsetClampARB", EOpTextureOffsetClamp); + symbolTable.relateToOperator("textureGradClampARB", EOpTextureGradClamp); + symbolTable.relateToOperator("textureGradOffsetClampARB", EOpTextureGradOffsetClamp); + + symbolTable.relateToOperator("ballotARB", EOpBallot); + symbolTable.relateToOperator("readInvocationARB", EOpReadInvocation); + symbolTable.relateToOperator("readFirstInvocationARB", EOpReadFirstInvocation); + + if (version >= 430) { + symbolTable.relateToOperator("anyInvocationARB", EOpAnyInvocation); + symbolTable.relateToOperator("allInvocationsARB", EOpAllInvocations); + symbolTable.relateToOperator("allInvocationsEqualARB", EOpAllInvocationsEqual); + } + if (version >= 460) { + symbolTable.relateToOperator("anyInvocation", EOpAnyInvocation); + symbolTable.relateToOperator("allInvocations", EOpAllInvocations); + symbolTable.relateToOperator("allInvocationsEqual", EOpAllInvocationsEqual); + } + symbolTable.relateToOperator("minInvocationsAMD", EOpMinInvocations); + symbolTable.relateToOperator("maxInvocationsAMD", EOpMaxInvocations); + symbolTable.relateToOperator("addInvocationsAMD", EOpAddInvocations); + symbolTable.relateToOperator("minInvocationsNonUniformAMD", EOpMinInvocationsNonUniform); + symbolTable.relateToOperator("maxInvocationsNonUniformAMD", 
EOpMaxInvocationsNonUniform); + symbolTable.relateToOperator("addInvocationsNonUniformAMD", EOpAddInvocationsNonUniform); + symbolTable.relateToOperator("minInvocationsInclusiveScanAMD", EOpMinInvocationsInclusiveScan); + symbolTable.relateToOperator("maxInvocationsInclusiveScanAMD", EOpMaxInvocationsInclusiveScan); + symbolTable.relateToOperator("addInvocationsInclusiveScanAMD", EOpAddInvocationsInclusiveScan); + symbolTable.relateToOperator("minInvocationsInclusiveScanNonUniformAMD", EOpMinInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("maxInvocationsInclusiveScanNonUniformAMD", EOpMaxInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("addInvocationsInclusiveScanNonUniformAMD", EOpAddInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("minInvocationsExclusiveScanAMD", EOpMinInvocationsExclusiveScan); + symbolTable.relateToOperator("maxInvocationsExclusiveScanAMD", EOpMaxInvocationsExclusiveScan); + symbolTable.relateToOperator("addInvocationsExclusiveScanAMD", EOpAddInvocationsExclusiveScan); + symbolTable.relateToOperator("minInvocationsExclusiveScanNonUniformAMD", EOpMinInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("maxInvocationsExclusiveScanNonUniformAMD", EOpMaxInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("addInvocationsExclusiveScanNonUniformAMD", EOpAddInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("swizzleInvocationsAMD", EOpSwizzleInvocations); + symbolTable.relateToOperator("swizzleInvocationsMaskedAMD", EOpSwizzleInvocationsMasked); + symbolTable.relateToOperator("writeInvocationAMD", EOpWriteInvocation); + symbolTable.relateToOperator("mbcntAMD", EOpMbcnt); + + symbolTable.relateToOperator("min3", EOpMin3); + symbolTable.relateToOperator("max3", EOpMax3); + symbolTable.relateToOperator("mid3", EOpMid3); + + symbolTable.relateToOperator("cubeFaceIndexAMD", EOpCubeFaceIndex); + symbolTable.relateToOperator("cubeFaceCoordAMD", EOpCubeFaceCoord); + symbolTable.relateToOperator("timeAMD", EOpTime); + + symbolTable.relateToOperator("textureGatherLodAMD", EOpTextureGatherLod); + symbolTable.relateToOperator("textureGatherLodOffsetAMD", EOpTextureGatherLodOffset); + symbolTable.relateToOperator("textureGatherLodOffsetsAMD", EOpTextureGatherLodOffsets); + symbolTable.relateToOperator("sparseTextureGatherLodAMD", EOpSparseTextureGatherLod); + symbolTable.relateToOperator("sparseTextureGatherLodOffsetAMD", EOpSparseTextureGatherLodOffset); + symbolTable.relateToOperator("sparseTextureGatherLodOffsetsAMD", EOpSparseTextureGatherLodOffsets); + + symbolTable.relateToOperator("imageLoadLodAMD", EOpImageLoadLod); + symbolTable.relateToOperator("imageStoreLodAMD", EOpImageStoreLod); + symbolTable.relateToOperator("sparseImageLoadLodAMD", EOpSparseImageLoadLod); + + symbolTable.relateToOperator("fragmentMaskFetchAMD", EOpFragmentMaskFetch); + symbolTable.relateToOperator("fragmentFetchAMD", EOpFragmentFetch); + } + + // GL_KHR_shader_subgroup + if ((profile == EEsProfile && version >= 310) || + (profile != EEsProfile && version >= 140)) { + symbolTable.relateToOperator("subgroupBarrier", EOpSubgroupBarrier); + symbolTable.relateToOperator("subgroupMemoryBarrier", EOpSubgroupMemoryBarrier); + symbolTable.relateToOperator("subgroupMemoryBarrierBuffer", EOpSubgroupMemoryBarrierBuffer); + symbolTable.relateToOperator("subgroupMemoryBarrierImage", EOpSubgroupMemoryBarrierImage); + symbolTable.relateToOperator("subgroupElect", EOpSubgroupElect); + 
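[Editorial note] The long runs of relateToOperator() calls above and below all follow one pattern: a GLSL built-in function name is bound to an internal operator enum so that, at parse time, a recognized call becomes an operator node instead of remaining a plain function call. The following is only an illustrative, self-contained sketch of that lookup idea; the Op enum, SymbolTable class, and relate()/lookup() names are invented for the example and are not glslang's actual API.

    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Illustrative only: a toy operator enum and mapping table, not glslang's types.
    enum class Op { Null, SubgroupElect, SubgroupAll, SubgroupAny };

    class SymbolTable {
    public:
        // Bind a built-in function name to an operator.
        void relate(const std::string& name, Op op) { table[name] = op; }

        // Look up the operator for a call; Op::Null means "ordinary function call".
        Op lookup(const std::string& name) const {
            auto it = table.find(name);
            return it == table.end() ? Op::Null : it->second;
        }

    private:
        std::unordered_map<std::string, Op> table;
    };

    int main() {
        SymbolTable symbols;
        symbols.relate("subgroupElect", Op::SubgroupElect);
        symbols.relate("subgroupAll", Op::SubgroupAll);
        std::cout << (symbols.lookup("subgroupElect") == Op::SubgroupElect) << "\n"; // prints 1
        std::cout << (symbols.lookup("textureLod") == Op::Null) << "\n";             // prints 1
    }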
symbolTable.relateToOperator("subgroupAll", EOpSubgroupAll); + symbolTable.relateToOperator("subgroupAny", EOpSubgroupAny); + symbolTable.relateToOperator("subgroupAllEqual", EOpSubgroupAllEqual); + symbolTable.relateToOperator("subgroupBroadcast", EOpSubgroupBroadcast); + symbolTable.relateToOperator("subgroupBroadcastFirst", EOpSubgroupBroadcastFirst); + symbolTable.relateToOperator("subgroupBallot", EOpSubgroupBallot); + symbolTable.relateToOperator("subgroupInverseBallot", EOpSubgroupInverseBallot); + symbolTable.relateToOperator("subgroupBallotBitExtract", EOpSubgroupBallotBitExtract); + symbolTable.relateToOperator("subgroupBallotBitCount", EOpSubgroupBallotBitCount); + symbolTable.relateToOperator("subgroupBallotInclusiveBitCount", EOpSubgroupBallotInclusiveBitCount); + symbolTable.relateToOperator("subgroupBallotExclusiveBitCount", EOpSubgroupBallotExclusiveBitCount); + symbolTable.relateToOperator("subgroupBallotFindLSB", EOpSubgroupBallotFindLSB); + symbolTable.relateToOperator("subgroupBallotFindMSB", EOpSubgroupBallotFindMSB); + symbolTable.relateToOperator("subgroupShuffle", EOpSubgroupShuffle); + symbolTable.relateToOperator("subgroupShuffleXor", EOpSubgroupShuffleXor); + symbolTable.relateToOperator("subgroupShuffleUp", EOpSubgroupShuffleUp); + symbolTable.relateToOperator("subgroupShuffleDown", EOpSubgroupShuffleDown); + symbolTable.relateToOperator("subgroupAdd", EOpSubgroupAdd); + symbolTable.relateToOperator("subgroupMul", EOpSubgroupMul); + symbolTable.relateToOperator("subgroupMin", EOpSubgroupMin); + symbolTable.relateToOperator("subgroupMax", EOpSubgroupMax); + symbolTable.relateToOperator("subgroupAnd", EOpSubgroupAnd); + symbolTable.relateToOperator("subgroupOr", EOpSubgroupOr); + symbolTable.relateToOperator("subgroupXor", EOpSubgroupXor); + symbolTable.relateToOperator("subgroupInclusiveAdd", EOpSubgroupInclusiveAdd); + symbolTable.relateToOperator("subgroupInclusiveMul", EOpSubgroupInclusiveMul); + symbolTable.relateToOperator("subgroupInclusiveMin", EOpSubgroupInclusiveMin); + symbolTable.relateToOperator("subgroupInclusiveMax", EOpSubgroupInclusiveMax); + symbolTable.relateToOperator("subgroupInclusiveAnd", EOpSubgroupInclusiveAnd); + symbolTable.relateToOperator("subgroupInclusiveOr", EOpSubgroupInclusiveOr); + symbolTable.relateToOperator("subgroupInclusiveXor", EOpSubgroupInclusiveXor); + symbolTable.relateToOperator("subgroupExclusiveAdd", EOpSubgroupExclusiveAdd); + symbolTable.relateToOperator("subgroupExclusiveMul", EOpSubgroupExclusiveMul); + symbolTable.relateToOperator("subgroupExclusiveMin", EOpSubgroupExclusiveMin); + symbolTable.relateToOperator("subgroupExclusiveMax", EOpSubgroupExclusiveMax); + symbolTable.relateToOperator("subgroupExclusiveAnd", EOpSubgroupExclusiveAnd); + symbolTable.relateToOperator("subgroupExclusiveOr", EOpSubgroupExclusiveOr); + symbolTable.relateToOperator("subgroupExclusiveXor", EOpSubgroupExclusiveXor); + symbolTable.relateToOperator("subgroupClusteredAdd", EOpSubgroupClusteredAdd); + symbolTable.relateToOperator("subgroupClusteredMul", EOpSubgroupClusteredMul); + symbolTable.relateToOperator("subgroupClusteredMin", EOpSubgroupClusteredMin); + symbolTable.relateToOperator("subgroupClusteredMax", EOpSubgroupClusteredMax); + symbolTable.relateToOperator("subgroupClusteredAnd", EOpSubgroupClusteredAnd); + symbolTable.relateToOperator("subgroupClusteredOr", EOpSubgroupClusteredOr); + symbolTable.relateToOperator("subgroupClusteredXor", EOpSubgroupClusteredXor); + symbolTable.relateToOperator("subgroupQuadBroadcast", 
EOpSubgroupQuadBroadcast); + symbolTable.relateToOperator("subgroupQuadSwapHorizontal", EOpSubgroupQuadSwapHorizontal); + symbolTable.relateToOperator("subgroupQuadSwapVertical", EOpSubgroupQuadSwapVertical); + symbolTable.relateToOperator("subgroupQuadSwapDiagonal", EOpSubgroupQuadSwapDiagonal); + + symbolTable.relateToOperator("subgroupPartitionNV", EOpSubgroupPartition); + symbolTable.relateToOperator("subgroupPartitionedAddNV", EOpSubgroupPartitionedAdd); + symbolTable.relateToOperator("subgroupPartitionedMulNV", EOpSubgroupPartitionedMul); + symbolTable.relateToOperator("subgroupPartitionedMinNV", EOpSubgroupPartitionedMin); + symbolTable.relateToOperator("subgroupPartitionedMaxNV", EOpSubgroupPartitionedMax); + symbolTable.relateToOperator("subgroupPartitionedAndNV", EOpSubgroupPartitionedAnd); + symbolTable.relateToOperator("subgroupPartitionedOrNV", EOpSubgroupPartitionedOr); + symbolTable.relateToOperator("subgroupPartitionedXorNV", EOpSubgroupPartitionedXor); + symbolTable.relateToOperator("subgroupPartitionedInclusiveAddNV", EOpSubgroupPartitionedInclusiveAdd); + symbolTable.relateToOperator("subgroupPartitionedInclusiveMulNV", EOpSubgroupPartitionedInclusiveMul); + symbolTable.relateToOperator("subgroupPartitionedInclusiveMinNV", EOpSubgroupPartitionedInclusiveMin); + symbolTable.relateToOperator("subgroupPartitionedInclusiveMaxNV", EOpSubgroupPartitionedInclusiveMax); + symbolTable.relateToOperator("subgroupPartitionedInclusiveAndNV", EOpSubgroupPartitionedInclusiveAnd); + symbolTable.relateToOperator("subgroupPartitionedInclusiveOrNV", EOpSubgroupPartitionedInclusiveOr); + symbolTable.relateToOperator("subgroupPartitionedInclusiveXorNV", EOpSubgroupPartitionedInclusiveXor); + symbolTable.relateToOperator("subgroupPartitionedExclusiveAddNV", EOpSubgroupPartitionedExclusiveAdd); + symbolTable.relateToOperator("subgroupPartitionedExclusiveMulNV", EOpSubgroupPartitionedExclusiveMul); + symbolTable.relateToOperator("subgroupPartitionedExclusiveMinNV", EOpSubgroupPartitionedExclusiveMin); + symbolTable.relateToOperator("subgroupPartitionedExclusiveMaxNV", EOpSubgroupPartitionedExclusiveMax); + symbolTable.relateToOperator("subgroupPartitionedExclusiveAndNV", EOpSubgroupPartitionedExclusiveAnd); + symbolTable.relateToOperator("subgroupPartitionedExclusiveOrNV", EOpSubgroupPartitionedExclusiveOr); + symbolTable.relateToOperator("subgroupPartitionedExclusiveXorNV", EOpSubgroupPartitionedExclusiveXor); + } + + if (profile == EEsProfile) { + symbolTable.relateToOperator("shadow2DEXT", EOpTexture); + symbolTable.relateToOperator("shadow2DProjEXT", EOpTextureProj); + } + } + + switch(language) { + case EShLangVertex: + break; + + case EShLangTessControl: + case EShLangTessEvaluation: + break; + + case EShLangGeometry: + symbolTable.relateToOperator("EmitStreamVertex", EOpEmitStreamVertex); + symbolTable.relateToOperator("EndStreamPrimitive", EOpEndStreamPrimitive); + symbolTable.relateToOperator("EmitVertex", EOpEmitVertex); + symbolTable.relateToOperator("EndPrimitive", EOpEndPrimitive); + break; + + case EShLangFragment: + if (profile != EEsProfile && version >= 400) { + symbolTable.relateToOperator("dFdxFine", EOpDPdxFine); + symbolTable.relateToOperator("dFdyFine", EOpDPdyFine); + symbolTable.relateToOperator("fwidthFine", EOpFwidthFine); + symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse); + symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse); + symbolTable.relateToOperator("fwidthCoarse", EOpFwidthCoarse); + } + + if (profile != EEsProfile && version >= 460) { + 
symbolTable.relateToOperator("rayQueryInitializeEXT", EOpRayQueryInitialize); + symbolTable.relateToOperator("rayQueryTerminateEXT", EOpRayQueryTerminate); + symbolTable.relateToOperator("rayQueryGenerateIntersectionEXT", EOpRayQueryGenerateIntersection); + symbolTable.relateToOperator("rayQueryConfirmIntersectionEXT", EOpRayQueryConfirmIntersection); + symbolTable.relateToOperator("rayQueryProceedEXT", EOpRayQueryProceed); + symbolTable.relateToOperator("rayQueryGetIntersectionTypeEXT", EOpRayQueryGetIntersectionType); + symbolTable.relateToOperator("rayQueryGetRayTMinEXT", EOpRayQueryGetRayTMin); + symbolTable.relateToOperator("rayQueryGetRayFlagsEXT", EOpRayQueryGetRayFlags); + symbolTable.relateToOperator("rayQueryGetIntersectionTEXT", EOpRayQueryGetIntersectionT); + symbolTable.relateToOperator("rayQueryGetIntersectionInstanceCustomIndexEXT", EOpRayQueryGetIntersectionInstanceCustomIndex); + symbolTable.relateToOperator("rayQueryGetIntersectionInstanceIdEXT", EOpRayQueryGetIntersectionInstanceId); + symbolTable.relateToOperator("rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT", EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset); + symbolTable.relateToOperator("rayQueryGetIntersectionGeometryIndexEXT", EOpRayQueryGetIntersectionGeometryIndex); + symbolTable.relateToOperator("rayQueryGetIntersectionPrimitiveIndexEXT", EOpRayQueryGetIntersectionPrimitiveIndex); + symbolTable.relateToOperator("rayQueryGetIntersectionBarycentricsEXT", EOpRayQueryGetIntersectionBarycentrics); + symbolTable.relateToOperator("rayQueryGetIntersectionFrontFaceEXT", EOpRayQueryGetIntersectionFrontFace); + symbolTable.relateToOperator("rayQueryGetIntersectionCandidateAABBOpaqueEXT", EOpRayQueryGetIntersectionCandidateAABBOpaque); + symbolTable.relateToOperator("rayQueryGetIntersectionObjectRayDirectionEXT", EOpRayQueryGetIntersectionObjectRayDirection); + symbolTable.relateToOperator("rayQueryGetIntersectionObjectRayOriginEXT", EOpRayQueryGetIntersectionObjectRayOrigin); + symbolTable.relateToOperator("rayQueryGetWorldRayDirectionEXT", EOpRayQueryGetWorldRayDirection); + symbolTable.relateToOperator("rayQueryGetWorldRayOriginEXT", EOpRayQueryGetWorldRayOrigin); + symbolTable.relateToOperator("rayQueryGetIntersectionObjectToWorldEXT", EOpRayQueryGetIntersectionObjectToWorld); + symbolTable.relateToOperator("rayQueryGetIntersectionWorldToObjectEXT", EOpRayQueryGetIntersectionWorldToObject); + } + + symbolTable.relateToOperator("interpolateAtCentroid", EOpInterpolateAtCentroid); + symbolTable.relateToOperator("interpolateAtSample", EOpInterpolateAtSample); + symbolTable.relateToOperator("interpolateAtOffset", EOpInterpolateAtOffset); + + if (profile != EEsProfile) + symbolTable.relateToOperator("interpolateAtVertexAMD", EOpInterpolateAtVertex); + + symbolTable.relateToOperator("beginInvocationInterlockARB", EOpBeginInvocationInterlock); + symbolTable.relateToOperator("endInvocationInterlockARB", EOpEndInvocationInterlock); + + break; + + case EShLangCompute: + symbolTable.relateToOperator("subgroupMemoryBarrierShared", EOpSubgroupMemoryBarrierShared); + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) { + symbolTable.relateToOperator("dFdx", EOpDPdx); + symbolTable.relateToOperator("dFdy", EOpDPdy); + symbolTable.relateToOperator("fwidth", EOpFwidth); + symbolTable.relateToOperator("dFdxFine", EOpDPdxFine); + symbolTable.relateToOperator("dFdyFine", EOpDPdyFine); + symbolTable.relateToOperator("fwidthFine", EOpFwidthFine); + 
symbolTable.relateToOperator("dFdxCoarse", EOpDPdxCoarse); + symbolTable.relateToOperator("dFdyCoarse", EOpDPdyCoarse); + symbolTable.relateToOperator("fwidthCoarse",EOpFwidthCoarse); + } + symbolTable.relateToOperator("coopMatLoadNV", EOpCooperativeMatrixLoad); + symbolTable.relateToOperator("coopMatStoreNV", EOpCooperativeMatrixStore); + symbolTable.relateToOperator("coopMatMulAddNV", EOpCooperativeMatrixMulAdd); + break; + + case EShLangRayGen: + case EShLangClosestHit: + case EShLangMiss: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("traceNV", EOpTrace); + symbolTable.relateToOperator("traceRayEXT", EOpTrace); + symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable); + symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable); + } + break; + case EShLangIntersect: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("reportIntersectionNV", EOpReportIntersection); + symbolTable.relateToOperator("reportIntersectionEXT", EOpReportIntersection); + } + break; + case EShLangAnyHit: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("ignoreIntersectionNV", EOpIgnoreIntersection); + symbolTable.relateToOperator("ignoreIntersectionEXT", EOpIgnoreIntersection); + symbolTable.relateToOperator("terminateRayNV", EOpTerminateRay); + symbolTable.relateToOperator("terminateRayEXT", EOpTerminateRay); + } + break; + case EShLangCallable: + if (profile != EEsProfile && version >= 460) { + symbolTable.relateToOperator("executeCallableNV", EOpExecuteCallable); + symbolTable.relateToOperator("executeCallableEXT", EOpExecuteCallable); + } + break; + case EShLangMeshNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.relateToOperator("writePackedPrimitiveIndices4x8NV", EOpWritePackedPrimitiveIndices4x8NV); + } + // fall through + case EShLangTaskNV: + if ((profile != EEsProfile && version >= 450) || (profile == EEsProfile && version >= 320)) { + symbolTable.relateToOperator("memoryBarrierShared", EOpMemoryBarrierShared); + symbolTable.relateToOperator("groupMemoryBarrier", EOpGroupMemoryBarrier); + symbolTable.relateToOperator("subgroupMemoryBarrierShared", EOpSubgroupMemoryBarrierShared); + } + break; + + default: + assert(false && "Language not supported"); + } +#endif // !GLSLANG_WEB +} + +// +// Add context-dependent (resource-specific) built-ins not handled by the above. These +// would be ones that need to be programmatically added because they cannot +// be added by simple text strings. For these, also +// 1) Map built-in functions to operators, for those that will turn into an operation node +// instead of remaining a function call. +// 2) Tag extension-related symbols added to their base version with their extensions, so +// that if an early version has the extension turned off, there is an error reported on use. 
+// +void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) +{ +#ifndef GLSLANG_WEB +#if defined(GLSLANG_ANGLE) + profile = ECoreProfile; + version = 450; +#endif + if (profile != EEsProfile && version >= 430 && version < 440) { + symbolTable.setVariableExtensions("gl_MaxTransformFeedbackBuffers", 1, &E_GL_ARB_enhanced_layouts); + symbolTable.setVariableExtensions("gl_MaxTransformFeedbackInterleavedComponents", 1, &E_GL_ARB_enhanced_layouts); + } + if (profile != EEsProfile && version >= 130 && version < 420) { + symbolTable.setVariableExtensions("gl_MinProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack); + symbolTable.setVariableExtensions("gl_MaxProgramTexelOffset", 1, &E_GL_ARB_shading_language_420pack); + } + if (profile != EEsProfile && version >= 150 && version < 410) + symbolTable.setVariableExtensions("gl_MaxViewports", 1, &E_GL_ARB_viewport_array); + + switch(language) { + case EShLangFragment: + // Set up gl_FragData based on current array size. + if (version == 100 || IncludeLegacy(version, profile, spvVersion) || (! ForwardCompatibility && profile != EEsProfile && version < 420)) { + TPrecisionQualifier pq = profile == EEsProfile ? EpqMedium : EpqNone; + TType fragData(EbtFloat, EvqFragColor, pq, 4); + TArraySizes* arraySizes = new TArraySizes; + arraySizes->addInnerSize(resources.maxDrawBuffers); + fragData.transferArraySizes(arraySizes); + symbolTable.insert(*new TVariable(NewPoolTString("gl_FragData"), fragData)); + SpecialQualifier("gl_FragData", EvqFragColor, EbvFragData, symbolTable); + } + + // GL_EXT_blend_func_extended + if (profile == EEsProfile && version >= 100) { + symbolTable.setVariableExtensions("gl_MaxDualSourceDrawBuffersEXT", 1, &E_GL_EXT_blend_func_extended); + symbolTable.setVariableExtensions("gl_SecondaryFragColorEXT", 1, &E_GL_EXT_blend_func_extended); + symbolTable.setVariableExtensions("gl_SecondaryFragDataEXT", 1, &E_GL_EXT_blend_func_extended); + SpecialQualifier("gl_SecondaryFragColorEXT", EvqVaryingOut, EbvSecondaryFragColorEXT, symbolTable); + SpecialQualifier("gl_SecondaryFragDataEXT", EvqVaryingOut, EbvSecondaryFragDataEXT, symbolTable); + } + + break; + + case EShLangTessControl: + case EShLangTessEvaluation: + // Because of the context-dependent array size (gl_MaxPatchVertices), + // these variables were added later than the others and need to be mapped now. 
+ + // standard members + BuiltInVariable("gl_in", "gl_Position", EbvPosition, symbolTable); + BuiltInVariable("gl_in", "gl_PointSize", EbvPointSize, symbolTable); + BuiltInVariable("gl_in", "gl_ClipDistance", EbvClipDistance, symbolTable); + BuiltInVariable("gl_in", "gl_CullDistance", EbvCullDistance, symbolTable); + + // compatibility members + BuiltInVariable("gl_in", "gl_ClipVertex", EbvClipVertex, symbolTable); + BuiltInVariable("gl_in", "gl_FrontColor", EbvFrontColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackColor", EbvBackColor, symbolTable); + BuiltInVariable("gl_in", "gl_FrontSecondaryColor", EbvFrontSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_BackSecondaryColor", EbvBackSecondaryColor, symbolTable); + BuiltInVariable("gl_in", "gl_TexCoord", EbvTexCoord, symbolTable); + BuiltInVariable("gl_in", "gl_FogFragCoord", EbvFogFragCoord, symbolTable); + + symbolTable.setVariableExtensions("gl_in", "gl_SecondaryPositionNV", 1, &E_GL_NV_stereo_view_rendering); + symbolTable.setVariableExtensions("gl_in", "gl_PositionPerViewNV", 1, &E_GL_NVX_multiview_per_view_attributes); + + BuiltInVariable("gl_in", "gl_SecondaryPositionNV", EbvSecondaryPositionNV, symbolTable); + BuiltInVariable("gl_in", "gl_PositionPerViewNV", EbvPositionPerViewNV, symbolTable); + + // extension requirements + if (profile == EEsProfile) { + symbolTable.setVariableExtensions("gl_in", "gl_PointSize", Num_AEP_tessellation_point_size, AEP_tessellation_point_size); + } + + break; + + default: + break; + } +#endif +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/Initialize.h b/src/third_party/glslang/glslang/MachineIndependent/Initialize.h new file mode 100644 index 0000000..ac8ec33 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Initialize.h @@ -0,0 +1,112 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013-2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#ifndef _INITIALIZE_INCLUDED_ +#define _INITIALIZE_INCLUDED_ + +#include "../Include/ResourceLimits.h" +#include "../Include/Common.h" +#include "../Include/ShHandle.h" +#include "SymbolTable.h" +#include "Versions.h" + +namespace glslang { + +// +// This is made to hold parseable strings for almost all the built-in +// functions and variables for one specific combination of version +// and profile. (Some still need to be added programmatically.) +// This is a base class for language-specific derivations, which +// can be used for language independent builtins. +// +// The strings are organized by +// commonBuiltins: intersection of all stages' built-ins, processed just once +// stageBuiltins[]: anything a stage needs that's not in commonBuiltins +// +class TBuiltInParseables { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + TBuiltInParseables(); + virtual ~TBuiltInParseables(); + virtual void initialize(int version, EProfile, const SpvVersion& spvVersion) = 0; + virtual void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage) = 0; + virtual const TString& getCommonString() const { return commonBuiltins; } + virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; } + + virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0; + virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0; + +protected: + TString commonBuiltins; + TString stageBuiltins[EShLangCount]; +}; + +// +// This is a GLSL specific derivation of TBuiltInParseables. To present a stable +// interface and match other similar code, it is called TBuiltIns, rather +// than TBuiltInParseablesGlsl. +// +class TBuiltIns : public TBuiltInParseables { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + TBuiltIns(); + virtual ~TBuiltIns(); + void initialize(int version, EProfile, const SpvVersion& spvVersion); + void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage); + + void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable); + void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources); + +protected: + void addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion); + void relateTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage, TSymbolTable&); + void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion); + void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile); + void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile); + void addImageFunctions(TSampler, const TString& typeName, int version, EProfile profile); + void addSamplingFunctions(TSampler, const TString& typeName, int version, EProfile profile); + void addGatherFunctions(TSampler, const TString& typeName, int version, EProfile profile); + + // Helpers for making textual representations of the permutations + // of texturing/imaging functions. 
+ const char* postfixes[5]; + const char* prefixes[EbtNumTypes]; + int dimMap[EsdNumDims]; +}; + +} // end namespace glslang + +#endif // _INITIALIZE_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/IntermTraverse.cpp b/src/third_party/glslang/glslang/MachineIndependent/IntermTraverse.cpp new file mode 100644 index 0000000..f46010b --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/IntermTraverse.cpp @@ -0,0 +1,302 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (c) 2002-2010 The ANGLE Project Authors. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/intermediate.h" + +namespace glslang { + +// +// Traverse the intermediate representation tree, and +// call a node type specific function for each node. +// Done recursively through the member function Traverse(). +// Node types can be skipped if their function to call is 0, +// but their subtree will still be traversed. +// Nodes with children can have their whole subtree skipped +// if preVisit is turned on and the type specific function +// returns false. +// +// preVisit, postVisit, and rightToLeft control what order +// nodes are visited in. +// + +// +// Traversal functions for terminals are straightforward.... +// +void TIntermMethod::traverse(TIntermTraverser*) +{ + // Tree should always resolve all methods as a non-method. +} + +void TIntermSymbol::traverse(TIntermTraverser *it) +{ + it->visitSymbol(this); +} + +void TIntermConstantUnion::traverse(TIntermTraverser *it) +{ + it->visitConstantUnion(this); +} + +// +// Traverse a binary node. +// +void TIntermBinary::traverse(TIntermTraverser *it) +{ + bool visit = true; + + // + // visit the node before children if pre-visiting. + // + if (it->preVisit) + visit = it->visitBinary(EvPreVisit, this); + + // + // Visit the children, in the right order. 
+ // + if (visit) { + it->incrementDepth(this); + + if (it->rightToLeft) { + if (right) + right->traverse(it); + + if (it->inVisit) + visit = it->visitBinary(EvInVisit, this); + + if (visit && left) + left->traverse(it); + } else { + if (left) + left->traverse(it); + + if (it->inVisit) + visit = it->visitBinary(EvInVisit, this); + + if (visit && right) + right->traverse(it); + } + + it->decrementDepth(); + } + + // + // Visit the node after the children, if requested and the traversal + // hasn't been canceled yet. + // + if (visit && it->postVisit) + it->visitBinary(EvPostVisit, this); +} + +// +// Traverse a unary node. Same comments in binary node apply here. +// +void TIntermUnary::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitUnary(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + operand->traverse(it); + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitUnary(EvPostVisit, this); +} + +// +// Traverse an aggregate node. Same comments in binary node apply here. +// +void TIntermAggregate::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitAggregate(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + + if (it->rightToLeft) { + for (TIntermSequence::reverse_iterator sit = sequence.rbegin(); sit != sequence.rend(); sit++) { + (*sit)->traverse(it); + + if (visit && it->inVisit) { + if (*sit != sequence.front()) + visit = it->visitAggregate(EvInVisit, this); + } + } + } else { + for (TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++) { + (*sit)->traverse(it); + + if (visit && it->inVisit) { + if (*sit != sequence.back()) + visit = it->visitAggregate(EvInVisit, this); + } + } + } + + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitAggregate(EvPostVisit, this); +} + +// +// Traverse a selection node. Same comments in binary node apply here. +// +void TIntermSelection::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitSelection(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + if (it->rightToLeft) { + if (falseBlock) + falseBlock->traverse(it); + if (trueBlock) + trueBlock->traverse(it); + condition->traverse(it); + } else { + condition->traverse(it); + if (trueBlock) + trueBlock->traverse(it); + if (falseBlock) + falseBlock->traverse(it); + } + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitSelection(EvPostVisit, this); +} + +// +// Traverse a loop node. Same comments in binary node apply here. +// +void TIntermLoop::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitLoop(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + + if (it->rightToLeft) { + if (terminal) + terminal->traverse(it); + + if (body) + body->traverse(it); + + if (test) + test->traverse(it); + } else { + if (test) + test->traverse(it); + + if (body) + body->traverse(it); + + if (terminal) + terminal->traverse(it); + } + + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitLoop(EvPostVisit, this); +} + +// +// Traverse a branch node. Same comments in binary node apply here. 
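[Editorial note] The traverse() methods in this file all share the shape described in the header comment above: optionally visit the node before its children (preVisit), recurse into the children while tracking depth, and optionally visit again afterwards (postVisit), with a false return from the pre-visit callback pruning the subtree. Below is a minimal, self-contained sketch of that pattern on a toy binary tree; the Node and Visitor types are illustrative and are not glslang's classes.

    #include <iostream>

    // Illustrative only: a toy expression node, not glslang's TIntermNode hierarchy.
    struct Node {
        int value;
        Node* left = nullptr;
        Node* right = nullptr;
    };

    struct Visitor {
        bool preVisit = true;
        bool postVisit = true;
        int depth = 0;

        // Returning false from the pre-visit would skip the node's children.
        bool visit(bool before, Node* n) {
            std::cout << (before ? "pre  " : "post ") << n->value
                      << " at depth " << depth << "\n";
            return true;
        }
    };

    void traverse(Node* n, Visitor& v) {
        if (n == nullptr)
            return;

        bool descend = true;
        if (v.preVisit)
            descend = v.visit(true, n);   // may prune the subtree

        if (descend) {
            ++v.depth;                    // mirrors incrementDepth()/decrementDepth()
            traverse(n->left, v);
            traverse(n->right, v);
            --v.depth;
        }

        if (descend && v.postVisit)
            v.visit(false, n);
    }

    int main() {
        Node leaf1{1}, leaf2{2}, root{0, &leaf1, &leaf2};
        Visitor v;
        traverse(&root, v);
    }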
+// +void TIntermBranch::traverse(TIntermTraverser *it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitBranch(EvPreVisit, this); + + if (visit && expression) { + it->incrementDepth(this); + expression->traverse(it); + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitBranch(EvPostVisit, this); +} + +// +// Traverse a switch node. +// +void TIntermSwitch::traverse(TIntermTraverser* it) +{ + bool visit = true; + + if (it->preVisit) + visit = it->visitSwitch(EvPreVisit, this); + + if (visit) { + it->incrementDepth(this); + if (it->rightToLeft) { + body->traverse(it); + condition->traverse(it); + } else { + condition->traverse(it); + body->traverse(it); + } + it->decrementDepth(); + } + + if (visit && it->postVisit) + it->visitSwitch(EvPostVisit, this); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/Intermediate.cpp b/src/third_party/glslang/glslang/MachineIndependent/Intermediate.cpp new file mode 100644 index 0000000..b8c220d --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Intermediate.cpp @@ -0,0 +1,3986 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2015 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Build the intermediate representation. +// + +#include "localintermediate.h" +#include "RemoveTree.h" +#include "SymbolTable.h" +#include "propagateNoContraction.h" + +#include +#include +#include + +namespace glslang { + +//////////////////////////////////////////////////////////////////////////// +// +// First set of functions are to help build the intermediate representation. +// These functions are not member functions of the nodes. +// They are called from parser productions. +// +///////////////////////////////////////////////////////////////////////////// + +// +// Add a terminal node for an identifier in an expression. +// +// Returns the added node. 
+// + +TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray, + TIntermTyped* constSubtree, const TSourceLoc& loc) +{ + TIntermSymbol* node = new TIntermSymbol(id, name, type); + node->setLoc(loc); + node->setConstArray(constArray); + node->setConstSubtree(constSubtree); + + return node; +} + +TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol) +{ + return addSymbol(intermSymbol.getId(), + intermSymbol.getName(), + intermSymbol.getType(), + intermSymbol.getConstArray(), + intermSymbol.getConstSubtree(), + intermSymbol.getLoc()); +} + +TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable) +{ + glslang::TSourceLoc loc; // just a null location + loc.init(); + + return addSymbol(variable, loc); +} + +TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc) +{ + return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc); +} + +TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc) +{ + TConstUnionArray unionArray; // just a null constant + + return addSymbol(0, "", type, unionArray, nullptr, loc); +} + +// +// Connect two nodes with a new parent that does a binary operation on the nodes. +// +// Returns the added node. +// +// Returns nullptr if the working conversions and promotions could not be found. +// +TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc) +{ + // No operations work on blocks + if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock) + return nullptr; + + // Convert "reference +/- int" and "reference - reference" to integer math + if (op == EOpAdd || op == EOpSub) { + + // No addressing math on struct with unsized array. 
+ if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) || + (right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) { + return nullptr; + } + + if (left->isReference() && isTypeInt(right->getBasicType())) { + const TType& referenceType = left->getType(); + TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true); + left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64)); + + right = createConversion(EbtInt64, right); + right = addBinaryMath(EOpMul, right, size, loc); + + TIntermTyped *node = addBinaryMath(op, left, right, loc); + node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType); + return node; + } + } + + if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) { + const TType& referenceType = right->getType(); + TIntermConstantUnion* size = + addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true); + right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64)); + + left = createConversion(EbtInt64, left); + left = addBinaryMath(EOpMul, left, size, loc); + + TIntermTyped *node = addBinaryMath(op, left, right, loc); + node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType); + return node; + } + + if (op == EOpSub && left->isReference() && right->isReference()) { + TIntermConstantUnion* size = + addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true); + + left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64)); + right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64)); + + left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64)); + right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64)); + + left = addBinaryMath(EOpSub, left, right, loc); + + TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc); + return node; + } + + // No other math operators supported on references + if (left->isReference() || right->isReference()) + return nullptr; + + // Try converting the children's base types to compatible types. + auto children = addPairConversion(op, left, right); + left = std::get<0>(children); + right = std::get<1>(children); + + if (left == nullptr || right == nullptr) + return nullptr; + + // Convert the children's type shape to be compatible. + addBiShapeConversion(op, left, right); + if (left == nullptr || right == nullptr) + return nullptr; + + // + // Need a new node holding things together. Make + // one and promote it to the right type. + // + TIntermBinary* node = addBinaryNode(op, left, right, loc); + if (! promote(node)) + return nullptr; + + node->updatePrecision(); + + // + // If they are both (non-specialization) constants, they must be folded. + // (Unless it's the sequence (comma) operator, but that's handled in addComma().) + // + TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion(); + TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion(); + if (leftTempConstant && rightTempConstant) { + TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant); + if (folded) + return folded; + } + + // If can propagate spec-constantness and if the operation is an allowed + // specialization-constant operation, make a spec-constant. 
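[Editorial note] Earlier in addBinaryMath() above, "reference + int", "int + reference", and "reference - reference" are rewritten into plain 64-bit integer math, scaling the integer operand (or dividing the difference) by the size of the referenced type, much like C pointer arithmetic. A minimal standalone sketch of that scaling follows; it uses a made-up element size rather than glslang's computeBufferReferenceTypeSize(), and the Reference alias is purely illustrative.

    #include <cstdint>
    #include <iostream>

    // Illustrative only: model a buffer reference as a raw 64-bit address.
    using Reference = uint64_t;

    // "reference + int": advance by index * size-of-referenced-type.
    Reference addOffset(Reference base, int64_t index, uint64_t elementSize) {
        return base + static_cast<uint64_t>(index * static_cast<int64_t>(elementSize));
    }

    // "reference - reference": element distance between two references.
    int64_t elementDistance(Reference a, Reference b, uint64_t elementSize) {
        return (static_cast<int64_t>(a) - static_cast<int64_t>(b)) /
               static_cast<int64_t>(elementSize);
    }

    int main() {
        const uint64_t size = 16;        // hypothetical referenced-type size in bytes
        Reference base = 0x1000;
        Reference third = addOffset(base, 3, size);
        std::cout << std::hex << third << "\n";                               // 1030
        std::cout << std::dec << elementDistance(third, base, size) << "\n";  // 3
    }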
+ if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node)) + node->getWritableType().getQualifier().makeSpecConstant(); + + // If must propagate nonuniform, make a nonuniform. + if ((node->getLeft()->getQualifier().isNonUniform() || node->getRight()->getQualifier().isNonUniform()) && + isNonuniformPropagating(node->getOp())) + node->getWritableType().getQualifier().nonUniform = true; + + return node; +} + +// +// Low level: add binary node (no promotions or other argument modifications) +// +TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, + const TSourceLoc& loc) const +{ + // build the node + TIntermBinary* node = new TIntermBinary(op); + node->setLoc(loc.line != 0 ? loc : left->getLoc()); + node->setLeft(left); + node->setRight(right); + + return node; +} + +// +// like non-type form, but sets node's type. +// +TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, + const TSourceLoc& loc, const TType& type) const +{ + TIntermBinary* node = addBinaryNode(op, left, right, loc); + node->setType(type); + return node; +} + +// +// Low level: add unary node (no promotions or other argument modifications) +// +TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc) const +{ + TIntermUnary* node = new TIntermUnary(op); + node->setLoc(loc.line != 0 ? loc : child->getLoc()); + node->setOperand(child); + + return node; +} + +// +// like non-type form, but sets node's type. +// +TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc, const TType& type) + const +{ + TIntermUnary* node = addUnaryNode(op, child, loc); + node->setType(type); + return node; +} + +// +// Connect two nodes through an assignment. +// +// Returns the added node. +// +// Returns nullptr if the 'right' type could not be converted to match the 'left' type, +// or the resulting operation cannot be properly promoted. +// +TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, + const TSourceLoc& loc) +{ + // No block assignment + if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock) + return nullptr; + + // Convert "reference += int" to "reference = reference + int". We need this because the + // "reference + int" calculation involves a cast back to the original type, which makes it + // not an lvalue. + if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference()) { + if (!(right->getType().isScalar() && right->getType().isIntegerDomain())) + return nullptr; + + TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc); + if (!node) + return nullptr; + + TIntermSymbol* symbol = left->getAsSymbolNode(); + left = addSymbol(*symbol); + + node = addAssign(EOpAssign, left, node, loc); + return node; + } + + // + // Like adding binary math, except the conversion can only go + // from right to left. + // + + // convert base types, nullptr return means not possible + right = addConversion(op, left->getType(), right); + if (right == nullptr) + return nullptr; + + // convert shape + right = addUniShapeConversion(op, left->getType(), right); + + // build the node + TIntermBinary* node = addBinaryNode(op, left, right, loc); + + if (! 
promote(node)) + return nullptr; + + node->updatePrecision(); + + return node; +} + +// +// Connect two nodes through an index operator, where the left node is the base +// of an array or struct, and the right node is a direct or indirect offset. +// +// Returns the added node. +// The caller should set the type of the returned node. +// +TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, + const TSourceLoc& loc) +{ + // caller should set the type + return addBinaryNode(op, base, index, loc); +} + +// +// Add one node as the parent of another that it operates on. +// +// Returns the added node. +// +TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, + const TSourceLoc& loc) +{ + if (child == 0) + return nullptr; + + if (child->getType().getBasicType() == EbtBlock) + return nullptr; + + switch (op) { + case EOpLogicalNot: + if (getSource() == EShSourceHlsl) { + break; // HLSL can promote logical not + } + + if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) { + return nullptr; + } + break; + + case EOpPostIncrement: + case EOpPreIncrement: + case EOpPostDecrement: + case EOpPreDecrement: + case EOpNegative: + if (child->getType().getBasicType() == EbtStruct || child->getType().isArray()) + return nullptr; + default: break; // some compilers want this + } + + // + // Do we need to promote the operand? + // + TBasicType newType = EbtVoid; + switch (op) { + case EOpConstructBool: newType = EbtBool; break; + case EOpConstructFloat: newType = EbtFloat; break; + case EOpConstructInt: newType = EbtInt; break; + case EOpConstructUint: newType = EbtUint; break; +#ifndef GLSLANG_WEB + case EOpConstructInt8: newType = EbtInt8; break; + case EOpConstructUint8: newType = EbtUint8; break; + case EOpConstructInt16: newType = EbtInt16; break; + case EOpConstructUint16: newType = EbtUint16; break; + case EOpConstructInt64: newType = EbtInt64; break; + case EOpConstructUint64: newType = EbtUint64; break; + case EOpConstructDouble: newType = EbtDouble; break; + case EOpConstructFloat16: newType = EbtFloat16; break; +#endif + default: break; // some compilers want this + } + + if (newType != EbtVoid) { + child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(), + child->getMatrixCols(), + child->getMatrixRows(), + child->isVector()), + child); + if (child == nullptr) + return nullptr; + } + + // + // For constructors, we are now done, it was all in the conversion. + // TODO: but, did this bypass constant folding? + // + switch (op) { + case EOpConstructInt8: + case EOpConstructUint8: + case EOpConstructInt16: + case EOpConstructUint16: + case EOpConstructInt: + case EOpConstructUint: + case EOpConstructInt64: + case EOpConstructUint64: + case EOpConstructBool: + case EOpConstructFloat: + case EOpConstructDouble: + case EOpConstructFloat16: + return child; + default: break; // some compilers want this + } + + // + // Make a new node for the operator. + // + TIntermUnary* node = addUnaryNode(op, child, loc); + + if (! promote(node)) + return nullptr; + + node->updatePrecision(); + + // If it's a (non-specialization) constant, it must be folded. + if (node->getOperand()->getAsConstantUnion()) + return node->getOperand()->getAsConstantUnion()->fold(op, node->getType()); + + // If it's a specialization constant, the result is too, + // if the operation is allowed for specialization constants. 
+ if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node)) + node->getWritableType().getQualifier().makeSpecConstant(); + + // If must propagate nonuniform, make a nonuniform. + if (node->getOperand()->getQualifier().isNonUniform() && isNonuniformPropagating(node->getOp())) + node->getWritableType().getQualifier().nonUniform = true; + + return node; +} + +TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary, + TIntermNode* childNode, const TType& returnType) +{ + if (unary) { + // + // Treat it like a unary operator. + // addUnaryMath() should get the type correct on its own; + // including constness (which would differ from the prototype). + // + TIntermTyped* child = childNode->getAsTyped(); + if (child == nullptr) + return nullptr; + + if (child->getAsConstantUnion()) { + TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType); + if (folded) + return folded; + } + + return addUnaryNode(op, child, child->getLoc(), returnType); + } else { + // setAggregateOperater() calls fold() for constant folding + TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc); + + return node; + } +} + +// +// This is the safe way to change the operator on an aggregate, as it +// does lots of error checking and fixing. Especially for establishing +// a function call's operation on its set of parameters. Sequences +// of instructions are also aggregates, but they just directly set +// their operator to EOpSequence. +// +// Returns an aggregate node, which could be the one passed in if +// it was already an aggregate. +// +TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, + const TSourceLoc& loc) +{ + TIntermAggregate* aggNode; + + // + // Make sure we have an aggregate. If not turn it into one. + // + if (node != nullptr) { + aggNode = node->getAsAggregate(); + if (aggNode == nullptr || aggNode->getOp() != EOpNull) { + // + // Make an aggregate containing this node. + // + aggNode = new TIntermAggregate(); + aggNode->getSequence().push_back(node); + } + } else + aggNode = new TIntermAggregate(); + + // + // Set the operator. + // + aggNode->setOperator(op); + if (loc.line != 0 || node != nullptr) + aggNode->setLoc(loc.line != 0 ? loc : node->getLoc()); + + aggNode->setType(type); + + return fold(aggNode); +} + +bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const +{ + // + // Does the base type even allow the operation? 
+ // + switch (node->getBasicType()) { + case EbtVoid: + return false; + case EbtAtomicUint: + case EbtSampler: + case EbtAccStruct: + // opaque types can be passed to functions + if (op == EOpFunction) + break; + + // HLSL can assign samplers directly (no constructor) + if (getSource() == EShSourceHlsl && node->getBasicType() == EbtSampler) + break; + + // samplers can get assigned via a sampler constructor + // (well, not yet, but code in the rest of this function is ready for it) + if (node->getBasicType() == EbtSampler && op == EOpAssign && + node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler) + break; + + // otherwise, opaque types can't even be operated on, let alone converted + return false; + default: + break; + } + + return true; +} + +bool TIntermediate::buildConvertOp(TBasicType dst, TBasicType src, TOperator& newOp) const +{ + switch (dst) { +#ifndef GLSLANG_WEB + case EbtDouble: + switch (src) { + case EbtUint: newOp = EOpConvUintToDouble; break; + case EbtBool: newOp = EOpConvBoolToDouble; break; + case EbtFloat: newOp = EOpConvFloatToDouble; break; + case EbtInt: newOp = EOpConvIntToDouble; break; + case EbtInt8: newOp = EOpConvInt8ToDouble; break; + case EbtUint8: newOp = EOpConvUint8ToDouble; break; + case EbtInt16: newOp = EOpConvInt16ToDouble; break; + case EbtUint16: newOp = EOpConvUint16ToDouble; break; + case EbtFloat16: newOp = EOpConvFloat16ToDouble; break; + case EbtInt64: newOp = EOpConvInt64ToDouble; break; + case EbtUint64: newOp = EOpConvUint64ToDouble; break; + default: + return false; + } + break; +#endif + case EbtFloat: + switch (src) { + case EbtInt: newOp = EOpConvIntToFloat; break; + case EbtUint: newOp = EOpConvUintToFloat; break; + case EbtBool: newOp = EOpConvBoolToFloat; break; +#ifndef GLSLANG_WEB + case EbtDouble: newOp = EOpConvDoubleToFloat; break; + case EbtInt8: newOp = EOpConvInt8ToFloat; break; + case EbtUint8: newOp = EOpConvUint8ToFloat; break; + case EbtInt16: newOp = EOpConvInt16ToFloat; break; + case EbtUint16: newOp = EOpConvUint16ToFloat; break; + case EbtFloat16: newOp = EOpConvFloat16ToFloat; break; + case EbtInt64: newOp = EOpConvInt64ToFloat; break; + case EbtUint64: newOp = EOpConvUint64ToFloat; break; +#endif + default: + return false; + } + break; +#ifndef GLSLANG_WEB + case EbtFloat16: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToFloat16; break; + case EbtUint8: newOp = EOpConvUint8ToFloat16; break; + case EbtInt16: newOp = EOpConvInt16ToFloat16; break; + case EbtUint16: newOp = EOpConvUint16ToFloat16; break; + case EbtInt: newOp = EOpConvIntToFloat16; break; + case EbtUint: newOp = EOpConvUintToFloat16; break; + case EbtBool: newOp = EOpConvBoolToFloat16; break; + case EbtFloat: newOp = EOpConvFloatToFloat16; break; + case EbtDouble: newOp = EOpConvDoubleToFloat16; break; + case EbtInt64: newOp = EOpConvInt64ToFloat16; break; + case EbtUint64: newOp = EOpConvUint64ToFloat16; break; + default: + return false; + } + break; +#endif + case EbtBool: + switch (src) { + case EbtInt: newOp = EOpConvIntToBool; break; + case EbtUint: newOp = EOpConvUintToBool; break; + case EbtFloat: newOp = EOpConvFloatToBool; break; +#ifndef GLSLANG_WEB + case EbtDouble: newOp = EOpConvDoubleToBool; break; + case EbtInt8: newOp = EOpConvInt8ToBool; break; + case EbtUint8: newOp = EOpConvUint8ToBool; break; + case EbtInt16: newOp = EOpConvInt16ToBool; break; + case EbtUint16: newOp = EOpConvUint16ToBool; break; + case EbtFloat16: newOp = EOpConvFloat16ToBool; break; + case EbtInt64: newOp = 
EOpConvInt64ToBool; break; + case EbtUint64: newOp = EOpConvUint64ToBool; break; +#endif + default: + return false; + } + break; +#ifndef GLSLANG_WEB + case EbtInt8: + switch (src) { + case EbtUint8: newOp = EOpConvUint8ToInt8; break; + case EbtInt16: newOp = EOpConvInt16ToInt8; break; + case EbtUint16: newOp = EOpConvUint16ToInt8; break; + case EbtInt: newOp = EOpConvIntToInt8; break; + case EbtUint: newOp = EOpConvUintToInt8; break; + case EbtInt64: newOp = EOpConvInt64ToInt8; break; + case EbtUint64: newOp = EOpConvUint64ToInt8; break; + case EbtBool: newOp = EOpConvBoolToInt8; break; + case EbtFloat: newOp = EOpConvFloatToInt8; break; + case EbtDouble: newOp = EOpConvDoubleToInt8; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt8; break; + default: + return false; + } + break; + case EbtUint8: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToUint8; break; + case EbtInt16: newOp = EOpConvInt16ToUint8; break; + case EbtUint16: newOp = EOpConvUint16ToUint8; break; + case EbtInt: newOp = EOpConvIntToUint8; break; + case EbtUint: newOp = EOpConvUintToUint8; break; + case EbtInt64: newOp = EOpConvInt64ToUint8; break; + case EbtUint64: newOp = EOpConvUint64ToUint8; break; + case EbtBool: newOp = EOpConvBoolToUint8; break; + case EbtFloat: newOp = EOpConvFloatToUint8; break; + case EbtDouble: newOp = EOpConvDoubleToUint8; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint8; break; + default: + return false; + } + break; + + case EbtInt16: + switch (src) { + case EbtUint8: newOp = EOpConvUint8ToInt16; break; + case EbtInt8: newOp = EOpConvInt8ToInt16; break; + case EbtUint16: newOp = EOpConvUint16ToInt16; break; + case EbtInt: newOp = EOpConvIntToInt16; break; + case EbtUint: newOp = EOpConvUintToInt16; break; + case EbtInt64: newOp = EOpConvInt64ToInt16; break; + case EbtUint64: newOp = EOpConvUint64ToInt16; break; + case EbtBool: newOp = EOpConvBoolToInt16; break; + case EbtFloat: newOp = EOpConvFloatToInt16; break; + case EbtDouble: newOp = EOpConvDoubleToInt16; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt16; break; + default: + return false; + } + break; + case EbtUint16: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToUint16; break; + case EbtUint8: newOp = EOpConvUint8ToUint16; break; + case EbtInt16: newOp = EOpConvInt16ToUint16; break; + case EbtInt: newOp = EOpConvIntToUint16; break; + case EbtUint: newOp = EOpConvUintToUint16; break; + case EbtInt64: newOp = EOpConvInt64ToUint16; break; + case EbtUint64: newOp = EOpConvUint64ToUint16; break; + case EbtBool: newOp = EOpConvBoolToUint16; break; + case EbtFloat: newOp = EOpConvFloatToUint16; break; + case EbtDouble: newOp = EOpConvDoubleToUint16; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint16; break; + default: + return false; + } + break; +#endif + + case EbtInt: + switch (src) { + case EbtUint: newOp = EOpConvUintToInt; break; + case EbtBool: newOp = EOpConvBoolToInt; break; + case EbtFloat: newOp = EOpConvFloatToInt; break; +#ifndef GLSLANG_WEB + case EbtInt8: newOp = EOpConvInt8ToInt; break; + case EbtUint8: newOp = EOpConvUint8ToInt; break; + case EbtInt16: newOp = EOpConvInt16ToInt; break; + case EbtUint16: newOp = EOpConvUint16ToInt; break; + case EbtDouble: newOp = EOpConvDoubleToInt; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt; break; + case EbtInt64: newOp = EOpConvInt64ToInt; break; + case EbtUint64: newOp = EOpConvUint64ToInt; break; +#endif + default: + return false; + } + break; + case EbtUint: + switch (src) { + case EbtInt: newOp = EOpConvIntToUint; break; + case EbtBool: 
newOp = EOpConvBoolToUint; break; + case EbtFloat: newOp = EOpConvFloatToUint; break; +#ifndef GLSLANG_WEB + case EbtInt8: newOp = EOpConvInt8ToUint; break; + case EbtUint8: newOp = EOpConvUint8ToUint; break; + case EbtInt16: newOp = EOpConvInt16ToUint; break; + case EbtUint16: newOp = EOpConvUint16ToUint; break; + case EbtDouble: newOp = EOpConvDoubleToUint; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint; break; + case EbtInt64: newOp = EOpConvInt64ToUint; break; + case EbtUint64: newOp = EOpConvUint64ToUint; break; +#endif + default: + return false; + } + break; +#ifndef GLSLANG_WEB + case EbtInt64: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToInt64; break; + case EbtUint8: newOp = EOpConvUint8ToInt64; break; + case EbtInt16: newOp = EOpConvInt16ToInt64; break; + case EbtUint16: newOp = EOpConvUint16ToInt64; break; + case EbtInt: newOp = EOpConvIntToInt64; break; + case EbtUint: newOp = EOpConvUintToInt64; break; + case EbtBool: newOp = EOpConvBoolToInt64; break; + case EbtFloat: newOp = EOpConvFloatToInt64; break; + case EbtDouble: newOp = EOpConvDoubleToInt64; break; + case EbtFloat16: newOp = EOpConvFloat16ToInt64; break; + case EbtUint64: newOp = EOpConvUint64ToInt64; break; + default: + return false; + } + break; + case EbtUint64: + switch (src) { + case EbtInt8: newOp = EOpConvInt8ToUint64; break; + case EbtUint8: newOp = EOpConvUint8ToUint64; break; + case EbtInt16: newOp = EOpConvInt16ToUint64; break; + case EbtUint16: newOp = EOpConvUint16ToUint64; break; + case EbtInt: newOp = EOpConvIntToUint64; break; + case EbtUint: newOp = EOpConvUintToUint64; break; + case EbtBool: newOp = EOpConvBoolToUint64; break; + case EbtFloat: newOp = EOpConvFloatToUint64; break; + case EbtDouble: newOp = EOpConvDoubleToUint64; break; + case EbtFloat16: newOp = EOpConvFloat16ToUint64; break; + case EbtInt64: newOp = EOpConvInt64ToUint64; break; + default: + return false; + } + break; +#endif + default: + return false; + } + return true; +} + +// This is 'mechanism' here, it does any conversion told. +// It is about basic type, not about shape. +// The policy comes from the shader or the calling code. +TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const +{ + // + // Add a new newNode for the conversion. + // + +#ifndef GLSLANG_WEB + bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 || + convertTo == EbtInt16 || convertTo == EbtUint16 || + convertTo == EbtInt || convertTo == EbtUint || + convertTo == EbtInt64 || convertTo == EbtUint64); + + bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 || + node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 || + node->getBasicType() == EbtInt || node->getBasicType() == EbtUint || + node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64); + + bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble); + + bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 || + node->getBasicType() == EbtFloat || + node->getBasicType() == EbtDouble); + + if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) || + ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes)) { + if (! getArithemeticInt8Enabled()) { + return nullptr; + } + } + + if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) || + ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! 
convertToIntTypes)) { + if (! getArithemeticInt16Enabled()) { + return nullptr; + } + } + + if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) || + (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes)) { + if (! getArithemeticFloat16Enabled()) { + return nullptr; + } + } +#endif + + TIntermUnary* newNode = nullptr; + TOperator newOp = EOpNull; + if (!buildConvertOp(convertTo, node->getBasicType(), newOp)) { + return nullptr; + } + + TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows()); + newNode = addUnaryNode(newOp, node, node->getLoc(), newType); + + if (node->getAsConstantUnion()) { +#ifndef GLSLANG_WEB + // 8/16-bit storage extensions don't support 8/16-bit constants, so don't fold conversions + // to those types + if ((getArithemeticInt8Enabled() || !(convertTo == EbtInt8 || convertTo == EbtUint8)) && + (getArithemeticInt16Enabled() || !(convertTo == EbtInt16 || convertTo == EbtUint16)) && + (getArithemeticFloat16Enabled() || !(convertTo == EbtFloat16))) +#endif + { + TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType); + if (folded) + return folded; + } + } + + // Propagate specialization-constant-ness, if allowed + if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode)) + newNode->getWritableType().getQualifier().makeSpecConstant(); + + return newNode; +} + +TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const +{ + return createConversion(convertTo, node); +} + +// For converting a pair of operands to a binary operation to compatible +// types with each other, relative to the operation in 'op'. +// This does not cover assignment operations, which is asymmetric in that the +// left type is not changeable. +// See addConversion(op, type, node) for assignments and unary operation +// conversions. +// +// Generally, this is focused on basic type conversion, not shape conversion. +// See addShapeConversion() for shape conversions. +// +// Returns the converted pair of nodes. +// Returns when there is no conversion. +std::tuple +TIntermediate::addPairConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1) +{ + if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1)) + return std::make_tuple(nullptr, nullptr); + + if (node0->getType() != node1->getType()) { + // If differing structure, then no conversions. + if (node0->isStruct() || node1->isStruct()) + return std::make_tuple(nullptr, nullptr); + + // If differing arrays, then no conversions. + if (node0->getType().isArray() || node1->getType().isArray()) + return std::make_tuple(nullptr, nullptr); + + // No implicit conversions for operations involving cooperative matrices + if (node0->getType().isCoopMat() || node1->getType().isCoopMat()) + return std::make_tuple(node0, node1); + } + + auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes); + + switch (op) { + // + // List all the binary ops that can implicitly convert one operand to the other's type; + // This implements the 'policy' for implicit type conversion. 
+ // + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + case EOpEqual: + case EOpNotEqual: + + case EOpAdd: + case EOpSub: + case EOpMul: + case EOpDiv: + case EOpMod: + + case EOpVectorTimesScalar: + case EOpVectorTimesMatrix: + case EOpMatrixTimesVector: + case EOpMatrixTimesScalar: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + + case EOpSequence: // used by ?: + + if (node0->getBasicType() == node1->getBasicType()) + return std::make_tuple(node0, node1); + + promoteTo = getConversionDestinationType(node0->getBasicType(), node1->getBasicType(), op); + if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes) + return std::make_tuple(nullptr, nullptr); + + break; + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + if (getSource() == EShSourceHlsl) + promoteTo = std::make_tuple(EbtBool, EbtBool); + else + return std::make_tuple(node0, node1); + break; + + // There are no conversions needed for GLSL; the shift amount just needs to be an + // integer type, as does the base. + // HLSL can promote bools to ints to make this work. + case EOpLeftShift: + case EOpRightShift: + if (getSource() == EShSourceHlsl) { + TBasicType node0BasicType = node0->getBasicType(); + if (node0BasicType == EbtBool) + node0BasicType = EbtInt; + if (node1->getBasicType() == EbtBool) + promoteTo = std::make_tuple(node0BasicType, EbtInt); + else + promoteTo = std::make_tuple(node0BasicType, node1->getBasicType()); + } else { + if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType())) + return std::make_tuple(node0, node1); + else + return std::make_tuple(nullptr, nullptr); + } + break; + + default: + if (node0->getType() == node1->getType()) + return std::make_tuple(node0, node1); + + return std::make_tuple(nullptr, nullptr); + } + + TIntermTyped* newNode0; + TIntermTyped* newNode1; + + if (std::get<0>(promoteTo) != node0->getType().getBasicType()) { + if (node0->getAsConstantUnion()) + newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion()); + else + newNode0 = createConversion(std::get<0>(promoteTo), node0); + } else + newNode0 = node0; + + if (std::get<1>(promoteTo) != node1->getType().getBasicType()) { + if (node1->getAsConstantUnion()) + newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion()); + else + newNode1 = createConversion(std::get<1>(promoteTo), node1); + } else + newNode1 = node1; + + return std::make_tuple(newNode0, newNode1); +} + +// +// Convert the node's type to the given type, as allowed by the operation involved: 'op'. +// For implicit conversions, 'op' is not the requested conversion, it is the explicit +// operation requiring the implicit conversion. +// +// Binary operation conversions should be handled by addConversion(op, node, node), not here. +// +// Returns a node representing the conversion, which could be the same +// node passed in if no conversion was needed. +// +// Generally, this is focused on basic type conversion, not shape conversion. +// See addShapeConversion() for shape conversions. +// +// Return nullptr if a conversion can't be done. +// +TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node) +{ + if (!isConversionAllowed(op, node)) + return nullptr; + + // Otherwise, if types are identical, no problem + if (type == node->getType()) + return node; + + // If one's a structure, then no conversions. 
+ if (type.isStruct() || node->isStruct()) + return nullptr; + + // If one's an array, then no conversions. + if (type.isArray() || node->getType().isArray()) + return nullptr; + + // Note: callers are responsible for other aspects of shape, + // like vector and matrix sizes. + + switch (op) { + // + // Explicit conversions (unary operations) + // + case EOpConstructBool: + case EOpConstructFloat: + case EOpConstructInt: + case EOpConstructUint: +#ifndef GLSLANG_WEB + case EOpConstructDouble: + case EOpConstructFloat16: + case EOpConstructInt8: + case EOpConstructUint8: + case EOpConstructInt16: + case EOpConstructUint16: + case EOpConstructInt64: + case EOpConstructUint64: + break; + +#endif + + // + // Implicit conversions + // + case EOpLogicalNot: + + case EOpFunctionCall: + + case EOpReturn: + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpVectorTimesScalarAssign: + case EOpMatrixTimesScalarAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + + case EOpAtan: + case EOpClamp: + case EOpCross: + case EOpDistance: + case EOpDot: + case EOpDst: + case EOpFaceForward: + case EOpFma: + case EOpFrexp: + case EOpLdexp: + case EOpMix: + case EOpLit: + case EOpMax: + case EOpMin: + case EOpMod: + case EOpModf: + case EOpPow: + case EOpReflect: + case EOpRefract: + case EOpSmoothStep: + case EOpStep: + + case EOpSequence: + case EOpConstructStruct: + case EOpConstructCooperativeMatrix: + + if (type.isReference() || node->getType().isReference()) { + // types must match to assign a reference + if (type == node->getType()) + return node; + else + return nullptr; + } + + if (type.getBasicType() == node->getType().getBasicType()) + return node; + + if (! canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op)) + return nullptr; + break; + + // For GLSL, there are no conversions needed; the shift amount just needs to be an + // integer type, as do the base/result. + // HLSL can convert the shift from a bool to an int. + case EOpLeftShiftAssign: + case EOpRightShiftAssign: + { + if (!(getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)) { + if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType())) + return node; + else + return nullptr; + } + break; + } + + default: + // default is to require a match; all exceptions should have case statements above + + if (type.getBasicType() == node->getType().getBasicType()) + return node; + else + return nullptr; + } + + bool canPromoteConstant = true; +#ifndef GLSLANG_WEB + // GL_EXT_shader_16bit_storage can't do OpConstantComposite with + // 16-bit types, so disable promotion for those types. + // Many issues with this, from JohnK: + // - this isn't really right to discuss SPIR-V here + // - this could easily be entirely about scalars, so is overstepping + // - we should be looking at what the shader asked for, and saying whether or + // not it can be done, in the parser, by calling requireExtensions(), not + // changing language sementics on the fly by asking what extensions are in use + // - at the time of this writing (14-Aug-2020), no test results are changed by this. 
+ switch (op) { + case EOpConstructFloat16: + canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16); + break; + case EOpConstructInt8: + case EOpConstructUint8: + canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8); + break; + case EOpConstructInt16: + case EOpConstructUint16: + canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16); + break; + default: + break; + } +#endif + + if (canPromoteConstant && node->getAsConstantUnion()) + return promoteConstantUnion(type.getBasicType(), node->getAsConstantUnion()); + + // + // Add a new newNode for the conversion. + // + TIntermTyped* newNode = createConversion(type.getBasicType(), node); + + return newNode; +} + +// Convert the node's shape of type for the given type, as allowed by the +// operation involved: 'op'. This is for situations where there is only one +// direction to consider doing the shape conversion. +// +// This implements policy, it call addShapeConversion() for the mechanism. +// +// Generally, the AST represents allowed GLSL shapes, so this isn't needed +// for GLSL. Bad shapes are caught in conversion or promotion. +// +// Return 'node' if no conversion was done. Promotion handles final shape +// checking. +// +TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node) +{ + // some source languages don't do this + switch (getSource()) { + case EShSourceHlsl: + break; + case EShSourceGlsl: + default: + return node; + } + + // some operations don't do this + switch (op) { + case EOpFunctionCall: + case EOpReturn: + break; + + case EOpMulAssign: + // want to support vector *= scalar native ops in AST and lower, not smear, similarly for + // matrix *= scalar, etc. + + case EOpAddAssign: + case EOpSubAssign: + case EOpDivAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + case EOpRightShiftAssign: + case EOpLeftShiftAssign: + if (node->getVectorSize() == 1) + return node; + break; + + case EOpAssign: + break; + + case EOpMix: + break; + + default: + return node; + } + + return addShapeConversion(type, node); +} + +// Convert the nodes' shapes to be compatible for the operation 'op'. +// +// This implements policy, it call addShapeConversion() for the mechanism. +// +// Generally, the AST represents allowed GLSL shapes, so this isn't needed +// for GLSL. Bad shapes are caught in conversion or promotion. 
+// +void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode) +{ + // some source languages don't do this + switch (getSource()) { + case EShSourceHlsl: + break; + case EShSourceGlsl: + default: + return; + } + + // some operations don't do this + // 'break' will mean attempt bidirectional conversion + switch (op) { + case EOpMulAssign: + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpDivAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + case EOpRightShiftAssign: + case EOpLeftShiftAssign: + // switch to unidirectional conversion (the lhs can't change) + rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode); + return; + + case EOpMul: + // matrix multiply does not change shapes + if (lhsNode->isMatrix() && rhsNode->isMatrix()) + return; + case EOpAdd: + case EOpSub: + case EOpDiv: + // want to support vector * scalar native ops in AST and lower, not smear, similarly for + // matrix * vector, etc. + if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1) + return; + break; + + case EOpRightShift: + case EOpLeftShift: + // can natively support the right operand being a scalar and the left a vector, + // but not the reverse + if (rhsNode->getVectorSize() == 1) + return; + break; + + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + + case EOpEqual: + case EOpNotEqual: + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + + case EOpMix: + break; + + default: + return; + } + + // Do bidirectional conversions + if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) { + if (lhsNode->getType().isScalarOrVec1()) + lhsNode = addShapeConversion(rhsNode->getType(), lhsNode); + else + rhsNode = addShapeConversion(lhsNode->getType(), rhsNode); + } + lhsNode = addShapeConversion(rhsNode->getType(), lhsNode); + rhsNode = addShapeConversion(lhsNode->getType(), rhsNode); +} + +// Convert the node's shape of type for the given type, as allowed by the +// operation involved: 'op'. +// +// Generally, the AST represents allowed GLSL shapes, so this isn't needed +// for GLSL. Bad shapes are caught in conversion or promotion. +// +// Return 'node' if no conversion was done. Promotion handles final shape +// checking. 
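The two shape-policy functions above only choose which operand gets reshaped; addShapeConversion() below is the mechanism. A rough standalone sketch of that policy follows, using a hypothetical Shape struct and OpKind enum instead of the real TType/TOperator, and ignoring the cases the real code deliberately leaves alone (native vector*scalar arithmetic, matrix multiply):

    #include <algorithm>

    struct Shape { int vectorSize; };          // 1 means scalar (or vec1); hypothetical stand-in

    enum class OpKind { Assignment, Componentwise, Other };

    // Assignments: only the rhs may change shape, since the lhs type is fixed.
    // Componentwise ops (comparisons, logical ops, mix, ...): smear a scalar
    // operand up to the other operand's vector size.
    static void applyShapePolicy(OpKind kind, Shape& lhs, Shape& rhs) {
        switch (kind) {
        case OpKind::Assignment:
            rhs.vectorSize = lhs.vectorSize;
            break;
        case OpKind::Componentwise:
            if (lhs.vectorSize == 1 || rhs.vectorSize == 1)
                lhs.vectorSize = rhs.vectorSize = std::max(lhs.vectorSize, rhs.vectorSize);
            break;
        case OpKind::Other:
            break;                             // leave both shapes as they are
        }
    }

    int main() {
        Shape lhs{4}, rhs{1};
        applyShapePolicy(OpKind::Componentwise, lhs, rhs);   // rhs smeared to vec4
        return rhs.vectorSize == 4 ? 0 : 1;
    }

Note that, as in the functions above, this policy only applies when the source language is HLSL; for GLSL the front end already produced legal shapes and both functions return immediately.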
+// +TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node) +{ + // no conversion needed + if (node->getType() == type) + return node; + + // structures and arrays don't change shape, either to or from + if (node->getType().isStruct() || node->getType().isArray() || + type.isStruct() || type.isArray()) + return node; + + // The new node that handles the conversion + TOperator constructorOp = mapTypeToConstructorOp(type); + + if (getSource() == EShSourceHlsl) { + // HLSL rules for scalar, vector and matrix conversions: + // 1) scalar can become anything, initializing every component with its value + // 2) vector and matrix can become scalar, first element is used (warning: truncation) + // 3) matrix can become matrix with less rows and/or columns (warning: truncation) + // 4) vector can become vector with less rows size (warning: truncation) + // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout, its a reinterpret) + // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout, its a reinterpret) + + const TType &sourceType = node->getType(); + + // rule 1 for scalar to matrix is special + if (sourceType.isScalarOrVec1() && type.isMatrix()) { + + // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its + // own devices, the constructor from a scalar would populate the diagonal. This forces replication + // to every matrix element. + + // Note that if the node is complex (e.g, a function call), we don't want to duplicate it here + // repeatedly, so we copy it to a temp, then use the temp. + const int matSize = type.computeNumComponents(); + TIntermAggregate* rhsAggregate = new TIntermAggregate(); + + const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr); + + if (!isSimple) { + assert(0); // TODO: use node replicator service when available. 
+ } + + for (int x = 0; x < matSize; ++x) + rhsAggregate->getSequence().push_back(node); + + return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc()); + } + + // rule 1 and 2 + if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar())) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + + // rule 3 and 5b + if (sourceType.isMatrix()) { + // rule 3 + if (type.isMatrix()) { + if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) && + sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows()) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + // rule 5b + } else if (type.isVector()) { + if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + } + } + + // rule 4 and 5a + if (sourceType.isVector()) { + // rule 4 + if (type.isVector()) + { + if (sourceType.getVectorSize() > type.getVectorSize()) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + // rule 5a + } else if (type.isMatrix()) { + if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + } + } + } + + // scalar -> vector or vec1 -> vector or + // vector -> scalar or + // bigger vector -> smaller vector + if ((node->getType().isScalarOrVec1() && type.isVector()) || + (node->getType().isVector() && type.isScalar()) || + (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize())) + return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); + + return node; +} + +bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const +{ + // integral promotions + if (to == EbtInt) { + switch(from) { + case EbtInt8: + case EbtInt16: + case EbtUint8: + case EbtUint16: + return true; + default: + break; + } + } + return false; +} + +bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const +{ + // floating-point promotions + if (to == EbtDouble) { + switch(from) { + case EbtFloat16: + case EbtFloat: + return true; + default: + break; + } + } + return false; +} + +bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const +{ +#ifdef GLSLANG_WEB + return false; +#endif + + switch (from) { + case EbtInt: + switch(to) { + case EbtUint: + return version >= 400 || getSource() == EShSourceHlsl; + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtUint: + switch(to) { + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtInt8: + switch (to) { + case EbtUint8: + case EbtInt16: + case EbtUint16: + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtUint8: + switch (to) { + case EbtInt16: + case EbtUint16: + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtInt16: + switch(to) { + case EbtUint16: + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case EbtUint16: + switch(to) { + case EbtUint: + case EbtInt64: + case EbtUint64: + return true; + default: + break; + } + break; + case 
EbtInt64: + if (to == EbtUint64) { + return true; + } + break; + default: + break; + } + return false; +} + +bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const +{ +#ifdef GLSLANG_WEB + return false; +#endif + + if (to == EbtFloat && from == EbtFloat16) { + return true; + } else { + return false; + } +} + +bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const +{ + switch (from) { + case EbtInt: + case EbtUint: + switch(to) { + case EbtFloat: + case EbtDouble: + return true; + default: + break; + } + break; +#ifndef GLSLANG_WEB + case EbtInt8: + case EbtUint8: + case EbtInt16: + case EbtUint16: + switch (to) { + case EbtFloat16: + case EbtFloat: + case EbtDouble: + return true; + default: + break; + } + break; + case EbtInt64: + case EbtUint64: + if (to == EbtDouble) { + return true; + } + break; +#endif + default: + break; + } + return false; +} + +// +// See if the 'from' type is allowed to be implicitly converted to the +// 'to' type. This is not about vector/array/struct, only about basic type. +// +bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const +{ + if ((isEsProfile() && version < 310 ) || version == 110) + return false; + + if (from == to) + return true; + + // TODO: Move more policies into language-specific handlers. + // Some languages allow more general (or potentially, more specific) conversions under some conditions. + if (getSource() == EShSourceHlsl) { + const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool); + const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool); + + if (fromConvertable && toConvertable) { + switch (op) { + case EOpAndAssign: // assignments can perform arbitrary conversions + case EOpInclusiveOrAssign: // ... + case EOpExclusiveOrAssign: // ... + case EOpAssign: // ... + case EOpAddAssign: // ... + case EOpSubAssign: // ... + case EOpMulAssign: // ... + case EOpVectorTimesScalarAssign: // ... + case EOpMatrixTimesScalarAssign: // ... + case EOpDivAssign: // ... + case EOpModAssign: // ... 
+ case EOpReturn: // function returns can also perform arbitrary conversions + case EOpFunctionCall: // conversion of a calling parameter + case EOpLogicalNot: + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + case EOpConstructStruct: + return true; + default: + break; + } + } + } + + if (getSource() == EShSourceHlsl) { + // HLSL + if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat)) + return true; + } else { + // GLSL + if (isIntegralPromotion(from, to) || + isFPPromotion(from, to) || + isIntegralConversion(from, to) || + isFPConversion(from, to) || + isFPIntegralConversion(from, to)) { + + if (numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int32) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int64) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float32) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float64)) { + return true; + } + } + } + + if (isEsProfile()) { + switch (to) { + case EbtFloat: + switch (from) { + case EbtInt: + case EbtUint: + return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions); + default: + return false; + } + case EbtUint: + switch (from) { + case EbtInt: + return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions); + default: + return false; + } + default: + return false; + } + } else { + switch (to) { + case EbtDouble: + switch (from) { + case EbtInt: + case EbtUint: + case EbtInt64: + case EbtUint64: + case EbtFloat: + return version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64); + case EbtInt16: + case EbtUint16: + return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) && + numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + case EbtFloat16: + return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) && + numericFeatures.contains(TNumericFeatures::gpu_shader_half_float); + default: + return false; + } + case EbtFloat: + switch (from) { + case EbtInt: + case EbtUint: + return true; + case EbtBool: + return getSource() == EShSourceHlsl; + case EbtInt16: + case EbtUint16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + case EbtFloat16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_half_float) || + getSource() == EShSourceHlsl; + default: + return false; + } + case EbtUint: + switch (from) { + case EbtInt: + return version >= 400 || getSource() == EShSourceHlsl; + case EbtBool: + return getSource() == EShSourceHlsl; + case EbtInt16: + case EbtUint16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + default: + return false; + } + case EbtInt: + switch (from) { + case EbtBool: + return getSource() == EShSourceHlsl; + case EbtInt16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + default: + return false; + } + case EbtUint64: + switch (from) { + case EbtInt: + case EbtUint: + case EbtInt64: + return true; + case EbtInt16: + case EbtUint16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + default: + return 
false; + } + case EbtInt64: + switch (from) { + case EbtInt: + return true; + case EbtInt16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + default: + return false; + } + case EbtFloat16: + switch (from) { + case EbtInt16: + case EbtUint16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + default: + break; + } + return false; + case EbtUint16: + switch (from) { + case EbtInt16: + return numericFeatures.contains(TNumericFeatures::gpu_shader_int16); + default: + break; + } + return false; + default: + return false; + } + } + + return false; +} + +static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType) +{ +#ifdef GLSLANG_WEB + return false; +#endif + + switch(sintType) { + case EbtInt8: + switch(uintType) { + case EbtUint8: + case EbtUint16: + case EbtUint: + case EbtUint64: + return false; + default: + assert(false); + return false; + } + break; + case EbtInt16: + switch(uintType) { + case EbtUint8: + return true; + case EbtUint16: + case EbtUint: + case EbtUint64: + return false; + default: + assert(false); + return false; + } + break; + case EbtInt: + switch(uintType) { + case EbtUint8: + case EbtUint16: + return true; + case EbtUint: + return false; + default: + assert(false); + return false; + } + break; + case EbtInt64: + switch(uintType) { + case EbtUint8: + case EbtUint16: + case EbtUint: + return true; + case EbtUint64: + return false; + default: + assert(false); + return false; + } + break; + default: + assert(false); + return false; + } +} + + +static TBasicType getCorrespondingUnsignedType(TBasicType type) +{ +#ifdef GLSLANG_WEB + assert(type == EbtInt); + return EbtUint; +#endif + + switch(type) { + case EbtInt8: + return EbtUint8; + case EbtInt16: + return EbtUint16; + case EbtInt: + return EbtUint; + case EbtInt64: + return EbtUint64; + default: + assert(false); + return EbtNumTypes; + } +} + +// Implements the following rules +// - If either operand has type float64_t or derived from float64_t, +// the other shall be converted to float64_t or derived type. +// - Otherwise, if either operand has type float32_t or derived from +// float32_t, the other shall be converted to float32_t or derived type. +// - Otherwise, if either operand has type float16_t or derived from +// float16_t, the other shall be converted to float16_t or derived type. +// - Otherwise, if both operands have integer types the following rules +// shall be applied to the operands: +// - If both operands have the same type, no further conversion +// is needed. +// - Otherwise, if both operands have signed integer types or both +// have unsigned integer types, the operand with the type of lesser +// integer conversion rank shall be converted to the type of the +// operand with greater rank. +// - Otherwise, if the operand that has unsigned integer type has rank +// greater than or equal to the rank of the type of the other +// operand, the operand with signed integer type shall be converted +// to the type of the operand with unsigned integer type. +// - Otherwise, if the type of the operand with signed integer type can +// represent all of the values of the type of the operand with +// unsigned integer type, the operand with unsigned integer type +// shall be converted to the type of the operand with signed +// integer type. +// - Otherwise, both operands shall be converted to the unsigned +// integer type corresponding to the type of the operand with signed +// integer type. 
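The rule list above is essentially C's "usual arithmetic conversions" restricted to GLSL's explicit integer widths. A minimal standalone sketch of the integer portion, using a hypothetical BasicType enum with rank(), isSigned(), and toUnsigned() helpers rather than the real glslang TBasicType / getTypeRank machinery:

    #include <cassert>

    // Hypothetical stand-ins for illustration; not the glslang types.
    enum class BasicType { Int8, Uint8, Int16, Uint16, Int, Uint, Int64, Uint64 };

    static int rank(BasicType t) {                       // conversion rank by width
        switch (t) {
        case BasicType::Int8:  case BasicType::Uint8:  return 1;
        case BasicType::Int16: case BasicType::Uint16: return 2;
        case BasicType::Int:   case BasicType::Uint:   return 3;
        default:                                       return 4;   // 64-bit
        }
    }
    static bool isSigned(BasicType t) {
        return t == BasicType::Int8 || t == BasicType::Int16 ||
               t == BasicType::Int  || t == BasicType::Int64;
    }
    static BasicType toUnsigned(BasicType t) {           // signed -> same-width unsigned
        switch (t) {
        case BasicType::Int8:  return BasicType::Uint8;
        case BasicType::Int16: return BasicType::Uint16;
        case BasicType::Int:   return BasicType::Uint;
        default:               return BasicType::Uint64;
        }
    }
    static bool signedCanRepresentAllOf(BasicType s, BasicType u) {
        return rank(s) > rank(u);                        // a strictly wider signed type suffices here
    }

    // Pick the common type for two integer operands per the rules above.
    static BasicType commonIntegerType(BasicType a, BasicType b) {
        if (a == b) return a;                            // same type: nothing to do
        if (isSigned(a) == isSigned(b))                  // same signedness: greater rank wins
            return rank(a) < rank(b) ? b : a;
        BasicType u = isSigned(a) ? b : a;               // the unsigned operand
        BasicType s = isSigned(a) ? a : b;               // the signed operand
        if (rank(u) >= rank(s)) return u;                // unsigned rank >= signed rank: go unsigned
        if (signedCanRepresentAllOf(s, u)) return s;     // signed type covers all unsigned values
        return toUnsigned(s);                            // otherwise: unsigned version of the signed type
    }

    int main() {
        assert(commonIntegerType(BasicType::Int,   BasicType::Uint) == BasicType::Uint);
        assert(commonIntegerType(BasicType::Int64, BasicType::Uint) == BasicType::Int64);
        assert(commonIntegerType(BasicType::Int16, BasicType::Int)  == BasicType::Int);
        return 0;
    }

For these power-of-two widths a signed type can hold every value of any strictly narrower unsigned type, which is why signedCanRepresentAllOf() reduces to a rank comparison; the canSignedIntTypeRepresentAllUnsignedValues() table above spells the same fact out per type pair.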
+ +std::tuple TIntermediate::getConversionDestinationType(TBasicType type0, TBasicType type1, TOperator op) const +{ + TBasicType res0 = EbtNumTypes; + TBasicType res1 = EbtNumTypes; + + if ((isEsProfile() && + (version < 310 || !numericFeatures.contains(TNumericFeatures::shader_implicit_conversions))) || + version == 110) + return std::make_tuple(res0, res1); + + if (getSource() == EShSourceHlsl) { + if (canImplicitlyPromote(type1, type0, op)) { + res0 = type0; + res1 = type0; + } else if (canImplicitlyPromote(type0, type1, op)) { + res0 = type1; + res1 = type1; + } + return std::make_tuple(res0, res1); + } + + if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) || + (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) { + res0 = EbtDouble; + res1 = EbtDouble; + } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) || + (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) { + res0 = EbtFloat; + res1 = EbtFloat; + } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) || + (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) { + res0 = EbtFloat16; + res1 = EbtFloat16; + } else if (isTypeInt(type0) && isTypeInt(type1) && + (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) { + if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) || + (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) { + if (getTypeRank(type0) < getTypeRank(type1)) { + res0 = type1; + res1 = type1; + } else { + res0 = type0; + res1 = type0; + } + } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) { + res0 = type0; + res1 = type0; + } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) { + res0 = type1; + res1 = type1; + } else if (isTypeSignedInt(type0)) { + if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) { + res0 = type0; + res1 = type0; + } else { + res0 = getCorrespondingUnsignedType(type0); + res1 = getCorrespondingUnsignedType(type0); + } + } else if (isTypeSignedInt(type1)) { + if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) { + res0 = type1; + res1 = type1; + } else { + res0 = getCorrespondingUnsignedType(type1); + res1 = getCorrespondingUnsignedType(type1); + } + } + } + + return std::make_tuple(res0, res1); +} + +// +// Given a type, find what operation would fully construct it. 
+// +TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const +{ + TOperator op = EOpNull; + + if (type.getQualifier().isNonUniform()) + return EOpConstructNonuniform; + + if (type.isCoopMat()) + return EOpConstructCooperativeMatrix; + + switch (type.getBasicType()) { + case EbtStruct: + op = EOpConstructStruct; + break; + case EbtSampler: + if (type.getSampler().isCombined()) + op = EOpConstructTextureSampler; + break; + case EbtFloat: + if (type.isMatrix()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructMat2x2; break; + case 3: op = EOpConstructMat2x3; break; + case 4: op = EOpConstructMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructMat3x2; break; + case 3: op = EOpConstructMat3x3; break; + case 4: op = EOpConstructMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructMat4x2; break; + case 3: op = EOpConstructMat4x3; break; + case 4: op = EOpConstructMat4x4; break; + default: break; // some compilers want this + } + break; + default: break; // some compilers want this + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructFloat; break; + case 2: op = EOpConstructVec2; break; + case 3: op = EOpConstructVec3; break; + case 4: op = EOpConstructVec4; break; + default: break; // some compilers want this + } + } + break; + case EbtInt: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructIMat2x2; break; + case 3: op = EOpConstructIMat2x3; break; + case 4: op = EOpConstructIMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructIMat3x2; break; + case 3: op = EOpConstructIMat3x3; break; + case 4: op = EOpConstructIMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructIMat4x2; break; + case 3: op = EOpConstructIMat4x3; break; + case 4: op = EOpConstructIMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructInt; break; + case 2: op = EOpConstructIVec2; break; + case 3: op = EOpConstructIVec3; break; + case 4: op = EOpConstructIVec4; break; + default: break; // some compilers want this + } + } + break; + case EbtUint: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructUMat2x2; break; + case 3: op = EOpConstructUMat2x3; break; + case 4: op = EOpConstructUMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructUMat3x2; break; + case 3: op = EOpConstructUMat3x3; break; + case 4: op = EOpConstructUMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructUMat4x2; break; + case 3: op = EOpConstructUMat4x3; break; + case 4: op = EOpConstructUMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructUint; break; + case 2: op = EOpConstructUVec2; break; + case 3: op = EOpConstructUVec3; break; + case 4: op = EOpConstructUVec4; 
break; + default: break; // some compilers want this + } + } + break; + case EbtBool: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructBMat2x2; break; + case 3: op = EOpConstructBMat2x3; break; + case 4: op = EOpConstructBMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructBMat3x2; break; + case 3: op = EOpConstructBMat3x3; break; + case 4: op = EOpConstructBMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructBMat4x2; break; + case 3: op = EOpConstructBMat4x3; break; + case 4: op = EOpConstructBMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructBool; break; + case 2: op = EOpConstructBVec2; break; + case 3: op = EOpConstructBVec3; break; + case 4: op = EOpConstructBVec4; break; + default: break; // some compilers want this + } + } + break; +#ifndef GLSLANG_WEB + case EbtDouble: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructDMat2x2; break; + case 3: op = EOpConstructDMat2x3; break; + case 4: op = EOpConstructDMat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructDMat3x2; break; + case 3: op = EOpConstructDMat3x3; break; + case 4: op = EOpConstructDMat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructDMat4x2; break; + case 3: op = EOpConstructDMat4x3; break; + case 4: op = EOpConstructDMat4x4; break; + default: break; // some compilers want this + } + break; + } + } else { + switch(type.getVectorSize()) { + case 1: op = EOpConstructDouble; break; + case 2: op = EOpConstructDVec2; break; + case 3: op = EOpConstructDVec3; break; + case 4: op = EOpConstructDVec4; break; + default: break; // some compilers want this + } + } + break; + case EbtFloat16: + if (type.getMatrixCols()) { + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructF16Mat2x2; break; + case 3: op = EOpConstructF16Mat2x3; break; + case 4: op = EOpConstructF16Mat2x4; break; + default: break; // some compilers want this + } + break; + case 3: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructF16Mat3x2; break; + case 3: op = EOpConstructF16Mat3x3; break; + case 4: op = EOpConstructF16Mat3x4; break; + default: break; // some compilers want this + } + break; + case 4: + switch (type.getMatrixRows()) { + case 2: op = EOpConstructF16Mat4x2; break; + case 3: op = EOpConstructF16Mat4x3; break; + case 4: op = EOpConstructF16Mat4x4; break; + default: break; // some compilers want this + } + break; + } + } + else { + switch (type.getVectorSize()) { + case 1: op = EOpConstructFloat16; break; + case 2: op = EOpConstructF16Vec2; break; + case 3: op = EOpConstructF16Vec3; break; + case 4: op = EOpConstructF16Vec4; break; + default: break; // some compilers want this + } + } + break; + case EbtInt8: + switch(type.getVectorSize()) { + case 1: op = EOpConstructInt8; break; + case 2: op = EOpConstructI8Vec2; break; + case 3: op = EOpConstructI8Vec3; break; + case 4: op = EOpConstructI8Vec4; break; + default: break; // some compilers want this + } + break; + 
case EbtUint8: + switch(type.getVectorSize()) { + case 1: op = EOpConstructUint8; break; + case 2: op = EOpConstructU8Vec2; break; + case 3: op = EOpConstructU8Vec3; break; + case 4: op = EOpConstructU8Vec4; break; + default: break; // some compilers want this + } + break; + case EbtInt16: + switch(type.getVectorSize()) { + case 1: op = EOpConstructInt16; break; + case 2: op = EOpConstructI16Vec2; break; + case 3: op = EOpConstructI16Vec3; break; + case 4: op = EOpConstructI16Vec4; break; + default: break; // some compilers want this + } + break; + case EbtUint16: + switch(type.getVectorSize()) { + case 1: op = EOpConstructUint16; break; + case 2: op = EOpConstructU16Vec2; break; + case 3: op = EOpConstructU16Vec3; break; + case 4: op = EOpConstructU16Vec4; break; + default: break; // some compilers want this + } + break; + case EbtInt64: + switch(type.getVectorSize()) { + case 1: op = EOpConstructInt64; break; + case 2: op = EOpConstructI64Vec2; break; + case 3: op = EOpConstructI64Vec3; break; + case 4: op = EOpConstructI64Vec4; break; + default: break; // some compilers want this + } + break; + case EbtUint64: + switch(type.getVectorSize()) { + case 1: op = EOpConstructUint64; break; + case 2: op = EOpConstructU64Vec2; break; + case 3: op = EOpConstructU64Vec3; break; + case 4: op = EOpConstructU64Vec4; break; + default: break; // some compilers want this + } + break; + case EbtReference: + op = EOpConstructReference; + break; +#endif + default: + break; + } + + return op; +} + +// +// Safe way to combine two nodes into an aggregate. Works with null pointers, +// a node that's not a aggregate yet, etc. +// +// Returns the resulting aggregate, unless nullptr was passed in for +// both existing nodes. +// +TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right) +{ + if (left == nullptr && right == nullptr) + return nullptr; + + TIntermAggregate* aggNode = nullptr; + if (left != nullptr) + aggNode = left->getAsAggregate(); + if (aggNode == nullptr || aggNode->getOp() != EOpNull) { + aggNode = new TIntermAggregate; + if (left != nullptr) + aggNode->getSequence().push_back(left); + } + + if (right != nullptr) + aggNode->getSequence().push_back(right); + + return aggNode; +} + +TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc) +{ + TIntermAggregate* aggNode = growAggregate(left, right); + if (aggNode) + aggNode->setLoc(loc); + + return aggNode; +} + +// +// Turn an existing node into an aggregate. +// +// Returns an aggregate, unless nullptr was passed in for the existing node. +// +TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node) +{ + if (node == nullptr) + return nullptr; + + TIntermAggregate* aggNode = new TIntermAggregate; + aggNode->getSequence().push_back(node); + aggNode->setLoc(node->getLoc()); + + return aggNode; +} + +TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc) +{ + if (node == nullptr) + return nullptr; + + TIntermAggregate* aggNode = new TIntermAggregate; + aggNode->getSequence().push_back(node); + aggNode->setLoc(loc); + + return aggNode; +} + +// +// Make an aggregate with an empty sequence. +// +TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc) +{ + TIntermAggregate* aggNode = new TIntermAggregate; + aggNode->setLoc(loc); + + return aggNode; +} + +// +// For "if" test nodes. There are three children; a condition, +// a true path, and a false path. The two paths are in the +// nodePair. 
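growAggregate() above is the usual way a sequence is accumulated one node at a time. A miniature model of that pattern, with throwaway Node/Aggregate stand-ins in place of TIntermNode/TIntermAggregate and none of the pool allocation or EOpNull bookkeeping:

    #include <cassert>
    #include <vector>

    struct Node { virtual ~Node() = default; };
    struct Aggregate : Node {
        std::vector<Node*> sequence;            // children, in order
    };

    // Append 'right' to 'left', wrapping 'left' in a new aggregate when needed.
    static Aggregate* grow(Node* left, Node* right) {
        if (left == nullptr && right == nullptr)
            return nullptr;
        Aggregate* agg = left ? dynamic_cast<Aggregate*>(left) : nullptr;
        if (agg == nullptr) {
            agg = new Aggregate;                // leaked in this sketch; glslang uses a pool allocator
            if (left != nullptr)
                agg->sequence.push_back(left);
        }
        if (right != nullptr)
            agg->sequence.push_back(right);
        return agg;
    }

    int main() {
        Node a, b, c;
        Aggregate* seq = grow(grow(&a, &b), &c);   // typical accumulation pattern
        assert(seq->sequence.size() == 3);
        return 0;
    }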
+// +// Returns the selection node created. +// +TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc) +{ + // + // Don't prune the false path for compile-time constants; it's needed + // for static access analysis. + // + + TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2); + node->setLoc(loc); + + return node; +} + +TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc) +{ + // However, the lowest precedence operators of the sequence operator ( , ) and the assignment operators + // ... are not included in the operators that can create a constant expression. + // + // if (left->getType().getQualifier().storage == EvqConst && + // right->getType().getQualifier().storage == EvqConst) { + + // return right; + //} + + TIntermTyped *commaAggregate = growAggregate(left, right, loc); + commaAggregate->getAsAggregate()->setOperator(EOpComma); + commaAggregate->setType(right->getType()); + commaAggregate->getWritableType().getQualifier().makeTemporary(); + + return commaAggregate; +} + +TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc) +{ + TIntermMethod* method = new TIntermMethod(object, type, *name); + method->setLoc(loc); + + return method; +} + +// +// For "?:" test nodes. There are three children; a condition, +// a true path, and a false path. The two paths are specified +// as separate parameters. For vector 'cond', the true and false +// are not paths, but vectors to mix. +// +// Specialization constant operations include +// - The ternary operator ( ? : ) +// +// Returns the selection node created, or nullptr if one could not be. +// +TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, + const TSourceLoc& loc) +{ + // If it's void, go to the if-then-else selection() + if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) { + TIntermNodePair pair = { trueBlock, falseBlock }; + TIntermSelection* selection = addSelection(cond, pair, loc); + if (getSource() == EShSourceHlsl) + selection->setNoShortCircuit(); + + return selection; + } + + // + // Get compatible types. + // + auto children = addPairConversion(EOpSequence, trueBlock, falseBlock); + trueBlock = std::get<0>(children); + falseBlock = std::get<1>(children); + + if (trueBlock == nullptr || falseBlock == nullptr) + return nullptr; + + // Handle a vector condition as a mix + if (!cond->getType().isScalarOrVec1()) { + TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary, + cond->getType().getVectorSize()); + // smear true/false operands as needed + trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock); + falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock); + + // After conversion, types have to match. + if (falseBlock->getType() != trueBlock->getType()) + return nullptr; + + // make the mix operation + TIntermAggregate* mix = makeAggregate(loc); + mix = growAggregate(mix, falseBlock); + mix = growAggregate(mix, trueBlock); + mix = growAggregate(mix, cond); + mix->setType(targetVectorType); + mix->setOp(EOpMix); + + return mix; + } + + // Now have a scalar condition... + + // Convert true and false expressions to matching types + addBiShapeConversion(EOpMix, trueBlock, falseBlock); + + // After conversion, types have to match. 
+ if (falseBlock->getType() != trueBlock->getType()) + return nullptr; + + // Eliminate the selection when the condition is a scalar and all operands are constant. + if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) { + if (cond->getAsConstantUnion()->getConstArray()[0].getBConst()) + return trueBlock; + else + return falseBlock; + } + + // + // Make a selection node. + // + TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType()); + node->setLoc(loc); + node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision); + + if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) || + (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() && + falseBlock->getQualifier().isConstant())) + node->getQualifier().makeSpecConstant(); + else + node->getQualifier().makeTemporary(); + + if (getSource() == EShSourceHlsl) + node->setNoShortCircuit(); + + return node; +} + +// +// Constant terminal nodes. Has a union that contains bool, float or int constants +// +// Returns the constant union node created. +// + +TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const +{ + TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t); + node->getQualifier().storage = EvqConst; + node->setLoc(loc); + if (literal) + node->setLiteral(); + + return node; +} +TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setI8Const(i8); + + return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setUConst(u8); + + return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setI16Const(i16); + + return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setU16Const(u16); + + return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setIConst(i); + + return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setUConst(u); + + return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const +{ + TConstUnionArray unionArray(1); + unionArray[0].setI64Const(i64); + + return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal); +} + +TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, 
bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setU64Const(u64);
+
+    return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setBConst(b);
+
+    return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const
+{
+    assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16);
+
+    TConstUnionArray unionArray(1);
+    unionArray[0].setDConst(d);
+
+    return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal);
+}
+
+TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const
+{
+    TConstUnionArray unionArray(1);
+    unionArray[0].setSConst(s);
+
+    return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal);
+}
+
+// Put vector swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc)
+{
+    TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc);
+    sequence.push_back(constIntNode);
+}
+
+// Put matrix swizzle selectors onto the given sequence
+void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc)
+{
+    TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc);
+    sequence.push_back(constIntNode);
+    constIntNode = addConstantUnion(selector.coord2, loc);
+    sequence.push_back(constIntNode);
+}
+
+// Make an aggregate node that has a sequence of all selectors.
+template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc);
+template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc);
+template<class selectorType>
+TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc)
+{
+    TIntermAggregate* node = new TIntermAggregate(EOpSequence);
+
+    node->setLoc(loc);
+    TIntermSequence &sequenceVector = node->getSequence();
+
+    for (int i = 0; i < selector.size(); i++)
+        pushSelector(sequenceVector, selector[i], loc);
+
+    return node;
+}
+
+//
+// Follow the left branches down to the root of an l-value
+// expression (just "." and []).
+//
+// Return the base of the l-value (where following indexing quits working).
+// Return nullptr if a chain following dereferences cannot be followed.
+//
+// 'swizzleOkay' says whether or not it is okay to consider a swizzle
+// a valid part of the dereference chain.
+//
+const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay)
+{
+    do {
+        const TIntermBinary* binary = node->getAsBinaryNode();
+        if (binary == nullptr)
+            return node;
+        TOperator op = binary->getOp();
+        if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle)
+            return nullptr;
+        if (! swizzleOkay) {
+            if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle)
+                return nullptr;
+            if ((op == EOpIndexDirect || op == EOpIndexIndirect) &&
+                (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) &&
+                !
binary->getLeft()->getType().isArray()) + return nullptr; + } + node = node->getAsBinaryNode()->getLeft(); + } while (true); +} + +// +// Create while and do-while loop nodes. +// +TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, + const TSourceLoc& loc) +{ + TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst); + node->setLoc(loc); + + return node; +} + +// +// Create a for-loop sequence. +// +TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test, + TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node) +{ + node = new TIntermLoop(body, test, terminal, testFirst); + node->setLoc(loc); + + // make a sequence of the initializer and statement, but try to reuse the + // aggregate already created for whatever is in the initializer, if there is one + TIntermAggregate* loopSequence = (initializer == nullptr || + initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc) + : initializer->getAsAggregate(); + if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence) + loopSequence->setOp(EOpNull); + loopSequence = growAggregate(loopSequence, node); + loopSequence->setOperator(EOpSequence); + + return loopSequence; +} + +// +// Add branches. +// +TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc) +{ + return addBranch(branchOp, nullptr, loc); +} + +TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc) +{ + TIntermBranch* node = new TIntermBranch(branchOp, expression); + node->setLoc(loc); + + return node; +} + +// Propagate precision from formal function return type to actual return type, +// and on to its subtree. +void TIntermBranch::updatePrecision(TPrecisionQualifier parentPrecision) +{ + TIntermTyped* exp = getExpression(); + if (exp == nullptr) + return; + + if (exp->getBasicType() == EbtInt || exp->getBasicType() == EbtUint || + exp->getBasicType() == EbtFloat || exp->getBasicType() == EbtFloat16) { + if (parentPrecision != EpqNone && exp->getQualifier().precision == EpqNone) { + exp->propagatePrecision(parentPrecision); + } + } +} + +// +// This is to be executed after the final root is put on top by the parsing +// process. +// +bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/) +{ + if (root == nullptr) + return true; + + // Finish off the top-level sequence + TIntermAggregate* aggRoot = root->getAsAggregate(); + if (aggRoot && aggRoot->getOp() == EOpNull) + aggRoot->setOperator(EOpSequence); + +#ifndef GLSLANG_WEB + // Propagate 'noContraction' label in backward from 'precise' variables. + glslang::PropagateNoContraction(*this); + + switch (textureSamplerTransformMode) { + case EShTexSampTransKeep: + break; + case EShTexSampTransUpgradeTextureRemoveSampler: + performTextureUpgradeAndSamplerRemovalTransformation(root); + break; + case EShTexSampTransCount: + assert(0); + break; + } +#endif + + return true; +} + +void TIntermediate::addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage language, TSymbolTable& symbolTable) +{ + // Add top-level nodes for declarations that must be checked cross + // compilation unit by a linker, yet might not have been referenced + // by the AST. + // + // Almost entirely, translation of symbols is driven by what's present + // in the AST traversal, not by translating the symbol table. 
+ // + // However, there are some special cases: + // - From the specification: "Special built-in inputs gl_VertexID and + // gl_InstanceID are also considered active vertex attributes." + // - Linker-based type mismatch error reporting needs to see all + // uniforms/ins/outs variables and blocks. + // - ftransform() can make gl_Vertex and gl_ModelViewProjectionMatrix active. + // + + // if (ftransformUsed) { + // TODO: 1.1 lowering functionality: track ftransform() usage + // addSymbolLinkageNode(root, symbolTable, "gl_Vertex"); + // addSymbolLinkageNode(root, symbolTable, "gl_ModelViewProjectionMatrix"); + //} + + if (language == EShLangVertex) { + // the names won't be found in the symbol table unless the versions are right, + // so version logic does not need to be repeated here + addSymbolLinkageNode(linkage, symbolTable, "gl_VertexID"); + addSymbolLinkageNode(linkage, symbolTable, "gl_InstanceID"); + } + + // Add a child to the root node for the linker objects + linkage->setOperator(EOpLinkerObjects); + treeRoot = growAggregate(treeRoot, linkage); +} + +// +// Add the given name or symbol to the list of nodes at the end of the tree used +// for link-time checking and external linkage. +// + +void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable& symbolTable, const TString& name) +{ + TSymbol* symbol = symbolTable.find(name); + if (symbol) + addSymbolLinkageNode(linkage, *symbol->getAsVariable()); +} + +void TIntermediate::addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol& symbol) +{ + const TVariable* variable = symbol.getAsVariable(); + if (! variable) { + // This must be a member of an anonymous block, and we need to add the whole block + const TAnonMember* anon = symbol.getAsAnonMember(); + variable = &anon->getAnonContainer(); + } + TIntermSymbol* node = addSymbol(*variable); + linkage = growAggregate(linkage, node); +} + +// +// Add a caller->callee relationship to the call graph. +// Assumes the strings are unique per signature. +// +void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& caller, const TString& callee) +{ + // Duplicates are okay, but faster to not keep them, and they come grouped by caller, + // as long as new ones are push on the same end we check on for duplicates + for (TGraph::const_iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->caller != caller) + break; + if (call->callee == callee) + return; + } + + callGraph.push_front(TCall(caller, callee)); +} + +// +// This deletes the tree. +// +void TIntermediate::removeTree() +{ + if (treeRoot) + RemoveAllTreeNodes(treeRoot); +} + +// +// Implement the part of KHR_vulkan_glsl that lists the set of operations +// that can result in a specialization constant operation. +// +// "5.x Specialization Constant Operations" +// +// Only some operations discussed in this section may be applied to a +// specialization constant and still yield a result that is as +// specialization constant. The operations allowed are listed below. +// When a specialization constant is operated on with one of these +// operators and with another constant or specialization constant, the +// result is implicitly a specialization constant. 
+// +// - int(), uint(), and bool() constructors for type conversions +// from any of the following types to any of the following types: +// * int +// * uint +// * bool +// - vector versions of the above conversion constructors +// - allowed implicit conversions of the above +// - swizzles (e.g., foo.yx) +// - The following when applied to integer or unsigned integer types: +// * unary negative ( - ) +// * binary operations ( + , - , * , / , % ) +// * shift ( <<, >> ) +// * bitwise operations ( & , | , ^ ) +// - The following when applied to integer or unsigned integer scalar types: +// * comparison ( == , != , > , >= , < , <= ) +// - The following when applied to the Boolean scalar type: +// * not ( ! ) +// * logical operations ( && , || , ^^ ) +// * comparison ( == , != )" +// +// This function just handles binary and unary nodes. Construction +// rules are handled in construction paths that are not covered by the unary +// and binary paths, while required conversions will still show up here +// as unary converters in the from a construction operator. +// +bool TIntermediate::isSpecializationOperation(const TIntermOperator& node) const +{ + // The operations resulting in floating point are quite limited + // (However, some floating-point operations result in bool, like ">", + // so are handled later.) + if (node.getType().isFloatingDomain()) { + switch (node.getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + case EOpConvFloatToDouble: + case EOpConvDoubleToFloat: + case EOpConvFloat16ToFloat: + case EOpConvFloatToFloat16: + case EOpConvFloat16ToDouble: + case EOpConvDoubleToFloat16: + return true; + default: + return false; + } + } + + // Check for floating-point arguments + if (const TIntermBinary* bin = node.getAsBinaryNode()) + if (bin->getLeft() ->getType().isFloatingDomain() || + bin->getRight()->getType().isFloatingDomain()) + return false; + + // So, for now, we can assume everything left is non-floating-point... 
+ + // Now check for integer/bool-based operations + switch (node.getOp()) { + + // dereference/swizzle + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + + // (u)int* -> bool + case EOpConvInt8ToBool: + case EOpConvInt16ToBool: + case EOpConvIntToBool: + case EOpConvInt64ToBool: + case EOpConvUint8ToBool: + case EOpConvUint16ToBool: + case EOpConvUintToBool: + case EOpConvUint64ToBool: + + // bool -> (u)int* + case EOpConvBoolToInt8: + case EOpConvBoolToInt16: + case EOpConvBoolToInt: + case EOpConvBoolToInt64: + case EOpConvBoolToUint8: + case EOpConvBoolToUint16: + case EOpConvBoolToUint: + case EOpConvBoolToUint64: + + // int8_t -> (u)int* + case EOpConvInt8ToInt16: + case EOpConvInt8ToInt: + case EOpConvInt8ToInt64: + case EOpConvInt8ToUint8: + case EOpConvInt8ToUint16: + case EOpConvInt8ToUint: + case EOpConvInt8ToUint64: + + // int16_t -> (u)int* + case EOpConvInt16ToInt8: + case EOpConvInt16ToInt: + case EOpConvInt16ToInt64: + case EOpConvInt16ToUint8: + case EOpConvInt16ToUint16: + case EOpConvInt16ToUint: + case EOpConvInt16ToUint64: + + // int32_t -> (u)int* + case EOpConvIntToInt8: + case EOpConvIntToInt16: + case EOpConvIntToInt64: + case EOpConvIntToUint8: + case EOpConvIntToUint16: + case EOpConvIntToUint: + case EOpConvIntToUint64: + + // int64_t -> (u)int* + case EOpConvInt64ToInt8: + case EOpConvInt64ToInt16: + case EOpConvInt64ToInt: + case EOpConvInt64ToUint8: + case EOpConvInt64ToUint16: + case EOpConvInt64ToUint: + case EOpConvInt64ToUint64: + + // uint8_t -> (u)int* + case EOpConvUint8ToInt8: + case EOpConvUint8ToInt16: + case EOpConvUint8ToInt: + case EOpConvUint8ToInt64: + case EOpConvUint8ToUint16: + case EOpConvUint8ToUint: + case EOpConvUint8ToUint64: + + // uint16_t -> (u)int* + case EOpConvUint16ToInt8: + case EOpConvUint16ToInt16: + case EOpConvUint16ToInt: + case EOpConvUint16ToInt64: + case EOpConvUint16ToUint8: + case EOpConvUint16ToUint: + case EOpConvUint16ToUint64: + + // uint32_t -> (u)int* + case EOpConvUintToInt8: + case EOpConvUintToInt16: + case EOpConvUintToInt: + case EOpConvUintToInt64: + case EOpConvUintToUint8: + case EOpConvUintToUint16: + case EOpConvUintToUint64: + + // uint64_t -> (u)int* + case EOpConvUint64ToInt8: + case EOpConvUint64ToInt16: + case EOpConvUint64ToInt: + case EOpConvUint64ToInt64: + case EOpConvUint64ToUint8: + case EOpConvUint64ToUint16: + case EOpConvUint64ToUint: + + // unary operations + case EOpNegative: + case EOpLogicalNot: + case EOpBitwiseNot: + + // binary operations + case EOpAdd: + case EOpSub: + case EOpMul: + case EOpVectorTimesScalar: + case EOpDiv: + case EOpMod: + case EOpRightShift: + case EOpLeftShift: + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpLogicalOr: + case EOpLogicalXor: + case EOpLogicalAnd: + case EOpEqual: + case EOpNotEqual: + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + return true; + default: + return false; + } +} + +// Is the operation one that must propagate nonuniform? +bool TIntermediate::isNonuniformPropagating(TOperator op) const +{ + // "* All Operators in Section 5.1 (Operators), except for assignment, + // arithmetic assignment, and sequence + // * Component selection in Section 5.5 + // * Matrix components in Section 5.6 + // * Structure and Array Operations in Section 5.7, except for the length + // method." 
+ switch (op) { + case EOpPostIncrement: + case EOpPostDecrement: + case EOpPreIncrement: + case EOpPreDecrement: + + case EOpNegative: + case EOpLogicalNot: + case EOpVectorLogicalNot: + case EOpBitwiseNot: + + case EOpAdd: + case EOpSub: + case EOpMul: + case EOpDiv: + case EOpMod: + case EOpRightShift: + case EOpLeftShift: + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpEqual: + case EOpNotEqual: + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + case EOpVectorTimesScalar: + case EOpVectorTimesMatrix: + case EOpMatrixTimesVector: + case EOpMatrixTimesScalar: + + case EOpLogicalOr: + case EOpLogicalXor: + case EOpLogicalAnd: + + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + return true; + + default: + break; + } + + return false; +} + +//////////////////////////////////////////////////////////////// +// +// Member functions of the nodes used for building the tree. +// +//////////////////////////////////////////////////////////////// + +// +// Say whether or not an operation node changes the value of a variable. +// +// Returns true if state is modified. +// +bool TIntermOperator::modifiesState() const +{ + switch (op) { + case EOpPostIncrement: + case EOpPostDecrement: + case EOpPreIncrement: + case EOpPreDecrement: + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpVectorTimesMatrixAssign: + case EOpVectorTimesScalarAssign: + case EOpMatrixTimesScalarAssign: + case EOpMatrixTimesMatrixAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + case EOpLeftShiftAssign: + case EOpRightShiftAssign: + return true; + default: + return false; + } +} + +// +// returns true if the operator is for one of the constructors +// +bool TIntermOperator::isConstructor() const +{ + return op > EOpConstructGuardStart && op < EOpConstructGuardEnd; +} + +// +// Make sure the type of an operator is appropriate for its +// combination of operation and operand type. This will invoke +// promoteUnary, promoteBinary, etc as needed. +// +// Returns false if nothing makes sense. +// +bool TIntermediate::promote(TIntermOperator* node) +{ + if (node == nullptr) + return false; + + if (node->getAsUnaryNode()) + return promoteUnary(*node->getAsUnaryNode()); + + if (node->getAsBinaryNode()) + return promoteBinary(*node->getAsBinaryNode()); + + if (node->getAsAggregate()) + return promoteAggregate(*node->getAsAggregate()); + + return false; +} + +// +// See TIntermediate::promote +// +bool TIntermediate::promoteUnary(TIntermUnary& node) +{ + const TOperator op = node.getOp(); + TIntermTyped* operand = node.getOperand(); + + switch (op) { + case EOpLogicalNot: + // Convert operand to a boolean type + if (operand->getBasicType() != EbtBool) { + // Add constructor to boolean type. If that fails, we can't do it, so return false. + TIntermTyped* converted = addConversion(op, TType(EbtBool), operand); + if (converted == nullptr) + return false; + + // Use the result of converting the node to a bool. 
+ node.setOperand(operand = converted); // also updates stack variable + } + break; + case EOpBitwiseNot: + if (!isTypeInt(operand->getBasicType())) + return false; + break; + case EOpNegative: + case EOpPostIncrement: + case EOpPostDecrement: + case EOpPreIncrement: + case EOpPreDecrement: + if (!isTypeInt(operand->getBasicType()) && + operand->getBasicType() != EbtFloat && + operand->getBasicType() != EbtFloat16 && + operand->getBasicType() != EbtDouble) + + return false; + break; + default: + // HLSL uses this path for initial function signature finding for built-ins + // taking a single argument, which generally don't participate in + // operator-based type promotion (type conversion will occur later). + // For now, scalar argument cases are relying on the setType() call below. + if (getSource() == EShSourceHlsl) + break; + + // GLSL only allows integer arguments for the cases identified above in the + // case statements. + if (operand->getBasicType() != EbtFloat) + return false; + } + + node.setType(operand->getType()); + node.getWritableType().getQualifier().makeTemporary(); + + return true; +} + +// Propagate precision qualifiers *up* from children to parent. +void TIntermUnary::updatePrecision() +{ + if (getBasicType() == EbtInt || getBasicType() == EbtUint || + getBasicType() == EbtFloat || getBasicType() == EbtFloat16) { + if (operand->getQualifier().precision > getQualifier().precision) + getQualifier().precision = operand->getQualifier().precision; + } +} + +// +// See TIntermediate::promote +// +bool TIntermediate::promoteBinary(TIntermBinary& node) +{ + TOperator op = node.getOp(); + TIntermTyped* left = node.getLeft(); + TIntermTyped* right = node.getRight(); + + // Arrays and structures have to be exact matches. + if ((left->isArray() || right->isArray() || left->getBasicType() == EbtStruct || right->getBasicType() == EbtStruct) + && left->getType() != right->getType()) + return false; + + // Base assumption: just make the type the same as the left + // operand. Only deviations from this will be coded. + node.setType(left->getType()); + node.getWritableType().getQualifier().clear(); + + // Composite and opaque types don't having pending operator changes, e.g., + // array, structure, and samplers. Just establish final type and correctness. + if (left->isArray() || left->getBasicType() == EbtStruct || left->getBasicType() == EbtSampler) { + switch (op) { + case EOpEqual: + case EOpNotEqual: + if (left->getBasicType() == EbtSampler) { + // can't compare samplers + return false; + } else { + // Promote to conditional + node.setType(TType(EbtBool)); + } + + return true; + + case EOpAssign: + // Keep type from above + + return true; + + default: + return false; + } + } + + // + // We now have only scalars, vectors, and matrices to worry about. + // + + // HLSL implicitly promotes bool -> int for numeric operations. + // (Implicit conversions to make the operands match each other's types were already done.) 
+ if (getSource() == EShSourceHlsl && + (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool)) { + switch (op) { + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + + case EOpRightShift: + case EOpLeftShift: + + case EOpMod: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpMul: + if (left->getBasicType() == EbtBool) + left = createConversion(EbtInt, left); + if (right->getBasicType() == EbtBool) + right = createConversion(EbtInt, right); + if (left == nullptr || right == nullptr) + return false; + node.setLeft(left); + node.setRight(right); + + // Update the original base assumption on result type.. + node.setType(left->getType()); + node.getWritableType().getQualifier().clear(); + + break; + + default: + break; + } + } + + // Do general type checks against individual operands (comparing left and right is coming up, checking mixed shapes after that) + switch (op) { + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + // Relational comparisons need numeric types and will promote to scalar Boolean. + if (left->getBasicType() == EbtBool) + return false; + + node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize())); + break; + + case EOpEqual: + case EOpNotEqual: + if (getSource() == EShSourceHlsl) { + const int resultWidth = std::max(left->getVectorSize(), right->getVectorSize()); + + // In HLSL, == or != on vectors means component-wise comparison. + if (resultWidth > 1) { + op = (op == EOpEqual) ? EOpVectorEqual : EOpVectorNotEqual; + node.setOp(op); + } + + node.setType(TType(EbtBool, EvqTemporary, resultWidth)); + } else { + // All the above comparisons result in a bool (but not the vector compares) + node.setType(TType(EbtBool)); + } + break; + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + // logical ops operate only on Booleans or vectors of Booleans. + if (left->getBasicType() != EbtBool || left->isMatrix()) + return false; + + if (getSource() == EShSourceGlsl) { + // logical ops operate only on scalar Booleans and will promote to scalar Boolean. + if (left->isVector()) + return false; + } + + node.setType(TType(EbtBool, EvqTemporary, left->getVectorSize())); + break; + + case EOpRightShift: + case EOpLeftShift: + case EOpRightShiftAssign: + case EOpLeftShiftAssign: + + case EOpMod: + case EOpModAssign: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + if (getSource() == EShSourceHlsl) + break; + + // Check for integer-only operands. 
+ if (!isTypeInt(left->getBasicType()) && !isTypeInt(right->getBasicType())) + return false; + if (left->isMatrix() || right->isMatrix()) + return false; + + break; + + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpMul: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpDivAssign: + // check for non-Boolean operands + if (left->getBasicType() == EbtBool || right->getBasicType() == EbtBool) + return false; + + default: + break; + } + + // Compare left and right, and finish with the cases where the operand types must match + switch (op) { + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + + case EOpEqual: + case EOpNotEqual: + case EOpVectorEqual: + case EOpVectorNotEqual: + + case EOpLogicalAnd: + case EOpLogicalOr: + case EOpLogicalXor: + return left->getType() == right->getType(); + + case EOpMod: + case EOpModAssign: + + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + + case EOpAdd: + case EOpSub: + case EOpDiv: + + case EOpAddAssign: + case EOpSubAssign: + case EOpDivAssign: + // Quick out in case the types do match + if (left->getType() == right->getType()) + return true; + + // Fall through + + case EOpMul: + case EOpMulAssign: + // At least the basic type has to match + if (left->getBasicType() != right->getBasicType()) + return false; + + default: + break; + } + + if (left->getType().isCoopMat() || right->getType().isCoopMat()) { + if (left->getType().isCoopMat() && right->getType().isCoopMat() && + *left->getType().getTypeParameters() != *right->getType().getTypeParameters()) { + return false; + } + switch (op) { + case EOpMul: + case EOpMulAssign: + if (left->getType().isCoopMat() && right->getType().isCoopMat()) { + return false; + } + if (op == EOpMulAssign && right->getType().isCoopMat()) { + return false; + } + node.setOp(op == EOpMulAssign ? EOpMatrixTimesScalarAssign : EOpMatrixTimesScalar); + if (right->getType().isCoopMat()) { + node.setType(right->getType()); + } + return true; + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpAssign: + // These require both to be cooperative matrices + if (!left->getType().isCoopMat() || !right->getType().isCoopMat()) { + return false; + } + return true; + default: + break; + } + return false; + } + + // Finish handling the case, for all ops, where both operands are scalars. + if (left->isScalar() && right->isScalar()) + return true; + + // Finish handling the case, for all ops, where there are two vectors of different sizes + if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize() && right->getVectorSize() > 1) + return false; + + // + // We now have a mix of scalars, vectors, or matrices, for non-relational operations. + // + + // Can these two operands be combined, what is the resulting type? 
+ TBasicType basicType = left->getBasicType(); + switch (op) { + case EOpMul: + if (!left->isMatrix() && right->isMatrix()) { + if (left->isVector()) { + if (left->getVectorSize() != right->getMatrixRows()) + return false; + node.setOp(op = EOpVectorTimesMatrix); + node.setType(TType(basicType, EvqTemporary, right->getMatrixCols())); + } else { + node.setOp(op = EOpMatrixTimesScalar); + node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), right->getMatrixRows())); + } + } else if (left->isMatrix() && !right->isMatrix()) { + if (right->isVector()) { + if (left->getMatrixCols() != right->getVectorSize()) + return false; + node.setOp(op = EOpMatrixTimesVector); + node.setType(TType(basicType, EvqTemporary, left->getMatrixRows())); + } else { + node.setOp(op = EOpMatrixTimesScalar); + } + } else if (left->isMatrix() && right->isMatrix()) { + if (left->getMatrixCols() != right->getMatrixRows()) + return false; + node.setOp(op = EOpMatrixTimesMatrix); + node.setType(TType(basicType, EvqTemporary, 0, right->getMatrixCols(), left->getMatrixRows())); + } else if (! left->isMatrix() && ! right->isMatrix()) { + if (left->isVector() && right->isVector()) { + ; // leave as component product + } else if (left->isVector() || right->isVector()) { + node.setOp(op = EOpVectorTimesScalar); + if (right->isVector()) + node.setType(TType(basicType, EvqTemporary, right->getVectorSize())); + } + } else { + return false; + } + break; + case EOpMulAssign: + if (! left->isMatrix() && right->isMatrix()) { + if (left->isVector()) { + if (left->getVectorSize() != right->getMatrixRows() || left->getVectorSize() != right->getMatrixCols()) + return false; + node.setOp(op = EOpVectorTimesMatrixAssign); + } else { + return false; + } + } else if (left->isMatrix() && !right->isMatrix()) { + if (right->isVector()) { + return false; + } else { + node.setOp(op = EOpMatrixTimesScalarAssign); + } + } else if (left->isMatrix() && right->isMatrix()) { + if (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixCols() != right->getMatrixRows()) + return false; + node.setOp(op = EOpMatrixTimesMatrixAssign); + } else if (!left->isMatrix() && !right->isMatrix()) { + if (left->isVector() && right->isVector()) { + // leave as component product + } else if (left->isVector() || right->isVector()) { + if (! left->isVector()) + return false; + node.setOp(op = EOpVectorTimesScalarAssign); + } + } else { + return false; + } + break; + + case EOpRightShift: + case EOpLeftShift: + case EOpRightShiftAssign: + case EOpLeftShiftAssign: + if (right->isVector() && (! 
left->isVector() || right->getVectorSize() != left->getVectorSize())) + return false; + break; + + case EOpAssign: + if (left->getVectorSize() != right->getVectorSize() || left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows()) + return false; + // fall through + + case EOpAdd: + case EOpSub: + case EOpDiv: + case EOpMod: + case EOpAnd: + case EOpInclusiveOr: + case EOpExclusiveOr: + case EOpAddAssign: + case EOpSubAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + + if ((left->isMatrix() && right->isVector()) || + (left->isVector() && right->isMatrix()) || + left->getBasicType() != right->getBasicType()) + return false; + if (left->isMatrix() && right->isMatrix() && (left->getMatrixCols() != right->getMatrixCols() || left->getMatrixRows() != right->getMatrixRows())) + return false; + if (left->isVector() && right->isVector() && left->getVectorSize() != right->getVectorSize()) + return false; + if (right->isVector() || right->isMatrix()) { + node.getWritableType().shallowCopy(right->getType()); + node.getWritableType().getQualifier().makeTemporary(); + } + break; + + default: + return false; + } + + // + // One more check for assignment. + // + switch (op) { + // The resulting type has to match the left operand. + case EOpAssign: + case EOpAddAssign: + case EOpSubAssign: + case EOpMulAssign: + case EOpDivAssign: + case EOpModAssign: + case EOpAndAssign: + case EOpInclusiveOrAssign: + case EOpExclusiveOrAssign: + case EOpLeftShiftAssign: + case EOpRightShiftAssign: + if (node.getType() != left->getType()) + return false; + break; + default: + break; + } + + return true; +} + +// +// See TIntermediate::promote +// +bool TIntermediate::promoteAggregate(TIntermAggregate& node) +{ + TOperator op = node.getOp(); + TIntermSequence& args = node.getSequence(); + const int numArgs = static_cast(args.size()); + + // Presently, only hlsl does intrinsic promotions. + if (getSource() != EShSourceHlsl) + return true; + + // set of opcodes that can be promoted in this manner. + switch (op) { + case EOpAtan: + case EOpClamp: + case EOpCross: + case EOpDistance: + case EOpDot: + case EOpDst: + case EOpFaceForward: + // case EOpFindMSB: TODO: + // case EOpFindLSB: TODO: + case EOpFma: + case EOpMod: + case EOpFrexp: + case EOpLdexp: + case EOpMix: + case EOpLit: + case EOpMax: + case EOpMin: + case EOpModf: + // case EOpGenMul: TODO: + case EOpPow: + case EOpReflect: + case EOpRefract: + // case EOpSinCos: TODO: + case EOpSmoothStep: + case EOpStep: + break; + default: + return true; + } + + // TODO: array and struct behavior + + // Try converting all nodes to the given node's type + TIntermSequence convertedArgs(numArgs, nullptr); + + // Try to convert all types to the nonConvArg type. + for (int nonConvArg = 0; nonConvArg < numArgs; ++nonConvArg) { + // Try converting all args to this arg's type + for (int convArg = 0; convArg < numArgs; ++convArg) { + convertedArgs[convArg] = addConversion(op, args[nonConvArg]->getAsTyped()->getType(), + args[convArg]->getAsTyped()); + } + + // If we successfully converted all the args, use the result. + if (std::all_of(convertedArgs.begin(), convertedArgs.end(), + [](const TIntermNode* node) { return node != nullptr; })) { + + std::swap(args, convertedArgs); + return true; + } + } + + return false; +} + +// Propagate precision qualifiers *up* from children to parent, and then +// back *down* again to the children's subtrees. 
+void TIntermBinary::updatePrecision() +{ + if (getBasicType() == EbtInt || getBasicType() == EbtUint || + getBasicType() == EbtFloat || getBasicType() == EbtFloat16) { + getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision); + if (getQualifier().precision != EpqNone) { + left->propagatePrecision(getQualifier().precision); + right->propagatePrecision(getQualifier().precision); + } + } +} + +// Recursively propagate precision qualifiers *down* the subtree of the current node, +// until reaching a node that already has a precision qualifier or otherwise does +// not participate in precision propagation. +void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision) +{ + if (getQualifier().precision != EpqNone || + (getBasicType() != EbtInt && getBasicType() != EbtUint && + getBasicType() != EbtFloat && getBasicType() != EbtFloat16)) + return; + + getQualifier().precision = newPrecision; + + TIntermBinary* binaryNode = getAsBinaryNode(); + if (binaryNode) { + binaryNode->getLeft()->propagatePrecision(newPrecision); + binaryNode->getRight()->propagatePrecision(newPrecision); + + return; + } + + TIntermUnary* unaryNode = getAsUnaryNode(); + if (unaryNode) { + unaryNode->getOperand()->propagatePrecision(newPrecision); + + return; + } + + TIntermAggregate* aggregateNode = getAsAggregate(); + if (aggregateNode) { + TIntermSequence operands = aggregateNode->getSequence(); + for (unsigned int i = 0; i < operands.size(); ++i) { + TIntermTyped* typedNode = operands[i]->getAsTyped(); + if (! typedNode) + break; + typedNode->propagatePrecision(newPrecision); + } + + return; + } + + TIntermSelection* selectionNode = getAsSelectionNode(); + if (selectionNode) { + TIntermTyped* typedNode = selectionNode->getTrueBlock()->getAsTyped(); + if (typedNode) { + typedNode->propagatePrecision(newPrecision); + typedNode = selectionNode->getFalseBlock()->getAsTyped(); + if (typedNode) + typedNode->propagatePrecision(newPrecision); + } + + return; + } +} + +TIntermTyped* TIntermediate::promoteConstantUnion(TBasicType promoteTo, TIntermConstantUnion* node) const +{ + const TConstUnionArray& rightUnionArray = node->getConstArray(); + int size = node->getType().computeNumComponents(); + + TConstUnionArray leftUnionArray(size); + + for (int i=0; i < size; i++) { + +#define PROMOTE(Set, CType, Get) leftUnionArray[i].Set(static_cast(rightUnionArray[i].Get())) +#define PROMOTE_TO_BOOL(Get) leftUnionArray[i].setBConst(rightUnionArray[i].Get() != 0) + +#ifdef GLSLANG_WEB +#define TO_ALL(Get) \ + switch (promoteTo) { \ + case EbtFloat: PROMOTE(setDConst, double, Get); break; \ + case EbtInt: PROMOTE(setIConst, int, Get); break; \ + case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \ + case EbtBool: PROMOTE_TO_BOOL(Get); break; \ + default: return node; \ + } +#else +#define TO_ALL(Get) \ + switch (promoteTo) { \ + case EbtFloat16: PROMOTE(setDConst, double, Get); break; \ + case EbtFloat: PROMOTE(setDConst, double, Get); break; \ + case EbtDouble: PROMOTE(setDConst, double, Get); break; \ + case EbtInt8: PROMOTE(setI8Const, char, Get); break; \ + case EbtInt16: PROMOTE(setI16Const, short, Get); break; \ + case EbtInt: PROMOTE(setIConst, int, Get); break; \ + case EbtInt64: PROMOTE(setI64Const, long long, Get); break; \ + case EbtUint8: PROMOTE(setU8Const, unsigned char, Get); break; \ + case EbtUint16: PROMOTE(setU16Const, unsigned short, Get); break; \ + case EbtUint: PROMOTE(setUConst, unsigned int, Get); break; \ + case EbtUint64: PROMOTE(setU64Const, 
unsigned long long, Get); break; \ + case EbtBool: PROMOTE_TO_BOOL(Get); break; \ + default: return node; \ + } +#endif + + switch (node->getType().getBasicType()) { + case EbtFloat: TO_ALL(getDConst); break; + case EbtInt: TO_ALL(getIConst); break; + case EbtUint: TO_ALL(getUConst); break; + case EbtBool: TO_ALL(getBConst); break; +#ifndef GLSLANG_WEB + case EbtFloat16: TO_ALL(getDConst); break; + case EbtDouble: TO_ALL(getDConst); break; + case EbtInt8: TO_ALL(getI8Const); break; + case EbtInt16: TO_ALL(getI16Const); break; + case EbtInt64: TO_ALL(getI64Const); break; + case EbtUint8: TO_ALL(getU8Const); break; + case EbtUint16: TO_ALL(getU16Const); break; + case EbtUint64: TO_ALL(getU64Const); break; +#endif + default: return node; + } + } + + const TType& t = node->getType(); + + return addConstantUnion(leftUnionArray, TType(promoteTo, t.getQualifier().storage, t.getVectorSize(), t.getMatrixCols(), t.getMatrixRows()), + node->getLoc()); +} + +void TIntermAggregate::setPragmaTable(const TPragmaTable& pTable) +{ + assert(pragmaTable == nullptr); + pragmaTable = new TPragmaTable; + *pragmaTable = pTable; +} + +// If either node is a specialization constant, while the other is +// a constant (or specialization constant), the result is still +// a specialization constant. +bool TIntermediate::specConstantPropagates(const TIntermTyped& node1, const TIntermTyped& node2) +{ + return (node1.getType().getQualifier().isSpecConstant() && node2.getType().getQualifier().isConstant()) || + (node2.getType().getQualifier().isSpecConstant() && node1.getType().getQualifier().isConstant()); +} + +struct TextureUpgradeAndSamplerRemovalTransform : public TIntermTraverser { + void visitSymbol(TIntermSymbol* symbol) override { + if (symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isTexture()) { + symbol->getWritableType().getSampler().setCombined(true); + } + } + bool visitAggregate(TVisit, TIntermAggregate* ag) override { + using namespace std; + TIntermSequence& seq = ag->getSequence(); + TQualifierList& qual = ag->getQualifierList(); + + // qual and seq are indexed using the same indices, so we have to modify both in lock-step + assert(seq.size() == qual.size() || qual.empty()); + + size_t write = 0; + for (size_t i = 0; i < seq.size(); ++i) { + TIntermSymbol* symbol = seq[i]->getAsSymbolNode(); + if (symbol && symbol->getBasicType() == EbtSampler && symbol->getType().getSampler().isPureSampler()) { + // remove pure sampler variables + continue; + } + + TIntermNode* result = seq[i]; + + // replace constructors with sampler/textures + TIntermAggregate *constructor = seq[i]->getAsAggregate(); + if (constructor && constructor->getOp() == EOpConstructTextureSampler) { + if (!constructor->getSequence().empty()) + result = constructor->getSequence()[0]; + } + + // write new node & qualifier + seq[write] = result; + if (!qual.empty()) + qual[write] = qual[i]; + write++; + } + + seq.resize(write); + if (!qual.empty()) + qual.resize(write); + + return true; + } +}; + +void TIntermediate::performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root) +{ + TextureUpgradeAndSamplerRemovalTransform transform; + root->traverse(&transform); +} + +const char* TIntermediate::getResourceName(TResourceType res) +{ + switch (res) { + case EResSampler: return "shift-sampler-binding"; + case EResTexture: return "shift-texture-binding"; + case EResImage: return "shift-image-binding"; + case EResUbo: return "shift-UBO-binding"; + case EResSsbo: return "shift-ssbo-binding"; + case EResUav: return 
"shift-uav-binding"; + default: + assert(0); // internal error: should only be called with valid resource types. + return nullptr; + } +} + + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/LiveTraverser.h b/src/third_party/glslang/glslang/MachineIndependent/LiveTraverser.h new file mode 100644 index 0000000..9b39b59 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/LiveTraverser.h @@ -0,0 +1,168 @@ +// +// Copyright (C) 2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#pragma once + +#include "../Include/Common.h" +#include "reflection.h" +#include "localintermediate.h" + +#include "gl_types.h" + +#include +#include + +namespace glslang { + +// +// The traverser: mostly pass through, except +// - processing function-call nodes to push live functions onto the stack of functions to process +// - processing selection nodes to trim semantically dead code +// +// This is in the glslang namespace directly so it can be a friend of TReflection. +// This can be derived from to implement reflection database traversers or +// binding mappers: anything that wants to traverse the live subset of the tree. +// + +class TLiveTraverser : public TIntermTraverser { +public: + TLiveTraverser(const TIntermediate& i, bool traverseAll = false, + bool preVisit = true, bool inVisit = false, bool postVisit = false) : + TIntermTraverser(preVisit, inVisit, postVisit), + intermediate(i), traverseAll(traverseAll) + { } + + // + // Given a function name, find its subroot in the tree, and push it onto the stack of + // functions left to process. 
+    //
+    void pushFunction(const TString& name)
+    {
+        TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
+        for (unsigned int f = 0; f < globals.size(); ++f) {
+            TIntermAggregate* candidate = globals[f]->getAsAggregate();
+            if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
+                destinations.push_back(candidate);
+                break;
+            }
+        }
+    }
+
+    void pushGlobalReference(const TString& name)
+    {
+        TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
+        for (unsigned int f = 0; f < globals.size(); ++f) {
+            TIntermAggregate* candidate = globals[f]->getAsAggregate();
+            if (candidate && candidate->getOp() == EOpSequence &&
+                candidate->getSequence().size() == 1 &&
+                candidate->getSequence()[0]->getAsBinaryNode()) {
+                TIntermBinary* binary = candidate->getSequence()[0]->getAsBinaryNode();
+                TIntermSymbol* symbol = binary->getLeft()->getAsSymbolNode();
+                if (symbol && symbol->getQualifier().storage == EvqGlobal &&
+                    symbol->getName() == name) {
+                    destinations.push_back(candidate);
+                    break;
+                }
+            }
+        }
+    }
+
+    typedef std::list<TIntermAggregate*> TDestinationStack;
+    TDestinationStack destinations;
+
+protected:
+    // To catch which function calls are not dead, and hence which functions must be visited.
+    virtual bool visitAggregate(TVisit, TIntermAggregate* node)
+    {
+        if (!traverseAll)
+            if (node->getOp() == EOpFunctionCall)
+                addFunctionCall(node);
+
+        return true; // traverse this subtree
+    }
+
+    // To prune semantically dead paths.
+    virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
+    {
+        if (traverseAll)
+            return true; // traverse all code
+
+        TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
+        if (constant) {
+            // cull the path that is dead
+            if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
+                node->getTrueBlock()->traverse(this);
+            if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
+                node->getFalseBlock()->traverse(this);
+
+            return false; // don't traverse any more, we did it all above
+        } else
+            return true; // traverse the whole subtree
+    }
+
+    // Track live functions as well as uniforms, so that we don't visit dead functions
+    // and only visit each function once.
+    void addFunctionCall(TIntermAggregate* call)
+    {
+        // just use the map to ensure we process each function at most once
+        if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
+            liveFunctions.insert(call->getName());
+            pushFunction(call->getName());
+        }
+    }
+
+    void addGlobalReference(const TString& name)
+    {
+        // just use the map to ensure we process each global at most once
+        if (liveGlobals.find(name) == liveGlobals.end()) {
+            liveGlobals.insert(name);
+            pushGlobalReference(name);
+        }
+    }
+
+    const TIntermediate& intermediate;
+    typedef std::unordered_set<TString> TLiveFunctions;
+    TLiveFunctions liveFunctions;
+    typedef std::unordered_set<TString> TLiveGlobals;
+    TLiveGlobals liveGlobals;
+    bool traverseAll;
+
+private:
+    // prevent copy & copy construct
+    TLiveTraverser(TLiveTraverser&);
+    TLiveTraverser& operator=(TLiveTraverser&);
+};
+
+} // namespace glslang
diff --git a/src/third_party/glslang/glslang/MachineIndependent/ParseContextBase.cpp b/src/third_party/glslang/glslang/MachineIndependent/ParseContextBase.cpp
new file mode 100644
index 0000000..b464009
--- /dev/null
+++ b/src/third_party/glslang/glslang/MachineIndependent/ParseContextBase.cpp
@@ -0,0 +1,641 @@
+//
+// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
+// Copyright (C) 2016 Google, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+
+// Implement the TParseContextBase class.
+
+#include <cstdarg>
+
+#include "ParseHelper.h"
+
+extern int yyparse(glslang::TParseContext*);
+
+namespace glslang {
+
+//
+// Used to output syntax, parsing, and semantic errors.
+//
+
+void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReason,
+                                      const char* szToken,
+                                      const char* szExtraInfoFormat,
+                                      TPrefixType prefix, va_list args)
+{
+    const int maxSize = MaxTokenLength + 200;
+    char szExtraInfo[maxSize];
+
+    safe_vsprintf(szExtraInfo, maxSize, szExtraInfoFormat, args);
+
+    infoSink.info.prefix(prefix);
+    infoSink.info.location(loc);
+    infoSink.info << "'" << szToken << "' : " << szReason << " " << szExtraInfo << "\n";
+
+    if (prefix == EPrefixError) {
+        ++numErrors;
+    }
+}
+
+#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
+
+void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                     const char* szExtraInfoFormat, ...)
+{
+    if (messages & EShMsgOnlyPreprocessor)
+        return;
+    va_list args;
+    va_start(args, szExtraInfoFormat);
+    outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
+    va_end(args);
+
+    if ((messages & EShMsgCascadingErrors) == 0)
+        currentScanner->setEndOfInput();
+}
+
+void C_DECL TParseContextBase::warn(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                    const char* szExtraInfoFormat, ...)
+{
+    if (suppressWarnings())
+        return;
+    va_list args;
+    va_start(args, szExtraInfoFormat);
+    outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
+    va_end(args);
+}
+
+void C_DECL TParseContextBase::ppError(const TSourceLoc& loc, const char* szReason, const char* szToken,
+                                       const char* szExtraInfoFormat, ...)
+{ + va_list args; + va_start(args, szExtraInfoFormat); + outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args); + va_end(args); + + if ((messages & EShMsgCascadingErrors) == 0) + currentScanner->setEndOfInput(); +} + +void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) +{ + va_list args; + va_start(args, szExtraInfoFormat); + outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args); + va_end(args); +} + +#endif + +// +// Both test and if necessary, spit out an error, to see if the node is really +// an l-value that can be operated on this way. +// +// Returns true if there was an error. +// +bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + TIntermBinary* binaryNode = node->getAsBinaryNode(); + + if (binaryNode) { + switch(binaryNode->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: // fall through + case EOpIndexDirectStruct: // fall through + case EOpVectorSwizzle: + case EOpMatrixSwizzle: + return lValueErrorCheck(loc, op, binaryNode->getLeft()); + default: + break; + } + error(loc, " l-value required", op, "", ""); + + return true; + } + + const char* symbol = nullptr; + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (symNode != nullptr) + symbol = symNode->getName().c_str(); + + const char* message = nullptr; + switch (node->getQualifier().storage) { + case EvqConst: message = "can't modify a const"; break; + case EvqConstReadOnly: message = "can't modify a const"; break; + case EvqUniform: message = "can't modify a uniform"; break; +#ifndef GLSLANG_WEB + case EvqBuffer: + if (node->getQualifier().isReadOnly()) + message = "can't modify a readonly buffer"; + if (node->getQualifier().isShaderRecord()) + message = "can't modify a shaderrecordnv qualified buffer"; + break; + case EvqHitAttr: + if (language != EShLangIntersect) + message = "cannot modify hitAttributeNV in this stage"; + break; +#endif + + default: + // + // Type that can't be written to? + // + switch (node->getBasicType()) { + case EbtSampler: + message = "can't modify a sampler"; + break; + case EbtVoid: + message = "can't modify void"; + break; +#ifndef GLSLANG_WEB + case EbtAtomicUint: + message = "can't modify an atomic_uint"; + break; + case EbtAccStruct: + message = "can't modify accelerationStructureNV"; + break; + case EbtRayQuery: + message = "can't modify rayQueryEXT"; + break; +#endif + default: + break; + } + } + + if (message == nullptr && binaryNode == nullptr && symNode == nullptr) { + error(loc, " l-value required", op, "", ""); + + return true; + } + + // + // Everything else is okay, no error. + // + if (message == nullptr) + return false; + + // + // If we get here, we have an error and a message. + // + if (symNode) + error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message); + else + error(loc, " l-value required", op, "(%s)", message); + + return true; +} + +// Test for and give an error if the node can't be read from. +void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + if (! 
node) + return; + + TIntermBinary* binaryNode = node->getAsBinaryNode(); + if (binaryNode) { + switch(binaryNode->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + case EOpVectorSwizzle: + case EOpMatrixSwizzle: + rValueErrorCheck(loc, op, binaryNode->getLeft()); + default: + break; + } + + return; + } + + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (symNode && symNode->getQualifier().isWriteOnly()) + error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str()); +} + +// Add 'symbol' to the list of deferred linkage symbols, which +// are later processed in finish(), at which point the symbol +// must still be valid. +// It is okay if the symbol's type will be subsequently edited; +// the modifications will be tracked. +// Order is preserved, to avoid creating novel forward references. +void TParseContextBase::trackLinkage(TSymbol& symbol) +{ + if (!parsingBuiltins) + linkageSymbols.push_back(&symbol); +} + +// Ensure index is in bounds, correct if necessary. +// Give an error if not. +void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index) +{ + const auto sizeIsSpecializationExpression = [&type]() { + return type.containsSpecializationSize() && + type.getArraySizes()->getOuterNode() != nullptr && + type.getArraySizes()->getOuterNode()->getAsSymbolNode() == nullptr; }; + + if (index < 0) { + error(loc, "", "[", "index out of range '%d'", index); + index = 0; + } else if (type.isArray()) { + if (type.isSizedArray() && !sizeIsSpecializationExpression() && + index >= type.getOuterArraySize()) { + error(loc, "", "[", "array index out of range '%d'", index); + index = type.getOuterArraySize() - 1; + } + } else if (type.isVector()) { + if (index >= type.getVectorSize()) { + error(loc, "", "[", "vector index out of range '%d'", index); + index = type.getVectorSize() - 1; + } + } else if (type.isMatrix()) { + if (index >= type.getMatrixCols()) { + error(loc, "", "[", "matrix index out of range '%d'", index); + index = type.getMatrixCols() - 1; + } + } +} + +// Make a shared symbol have a non-shared version that can be edited by the current +// compile, such that editing its type will not change the shared version and will +// effect all nodes already sharing it (non-shallow type), +// or adopting its full type after being edited (shallow type). +void TParseContextBase::makeEditable(TSymbol*& symbol) +{ + // copyUp() does a deep copy of the type. + symbol = symbolTable.copyUp(symbol); + + // Save it (deferred, so it can be edited first) in the AST for linker use. + if (symbol) + trackLinkage(*symbol); +} + +// Return a writable version of the variable 'name'. +// +// Return nullptr if 'name' is not found. This should mean +// something is seriously wrong (e.g., compiler asking self for +// built-in that doesn't exist). +TVariable* TParseContextBase::getEditableVariable(const char* name) +{ + bool builtIn; + TSymbol* symbol = symbolTable.find(name, &builtIn); + + assert(symbol != nullptr); + if (symbol == nullptr) + return nullptr; + + if (builtIn) + makeEditable(symbol); + + return symbol->getAsVariable(); +} + +// Select the best matching function for 'call' from 'candidateList'. +// +// Assumptions +// +// There is no exact match, so a selection algorithm needs to run. That is, the +// language-specific handler should check for exact match first, to +// decide what to do, before calling this selector. 
+//
+// Input
+//
+// * list of candidate signatures to select from
+// * the call
+// * a predicate function convertible(from, to) that says whether or not type
+// 'from' can implicitly convert to type 'to' (it includes the case of what
+// the calling language would consider a matching type with no conversion
+// needed)
+// * a predicate function better(from1, from2, to1, to2) that says whether or
+// not a conversion from <-> to2 is considered better than a conversion
+// from <-> to1 (both in and out directions need testing, as declared by the
+// formal parameter)
+//
+// Output
+//
+// * best matching candidate (or none, if no viable candidates found)
+// * whether there was a tie for the best match (ambiguous overload selection,
+// caller's choice for how to report)
+//
+const TFunction* TParseContextBase::selectFunction(
+ const TVector<const TFunction*> candidateList,
+ const TFunction& call,
+ std::function<bool(const TType& from, const TType& to, TOperator op, int arg)> convertible,
+ std::function<bool(const TType& from, const TType& to1, const TType& to2)> better,
+ /* output */ bool& tie)
+{
+//
+// Operation
+//
+// 1. Prune the input list of candidates down to a list of viable candidates,
+// where each viable candidate has
+//
+// * at least as many parameters as there are calling arguments, with any
+// remaining parameters being optional or having default values
+// * each parameter is true under convertible(A, B), where A is the calling
+// type for in and B is the formal type, and in addition, for out B is the
+// calling type and A is the formal type
+//
+// 2. If there are no viable candidates, return with no match.
+//
+// 3. If there is only one viable candidate, it is the best match.
+//
+// 4. If there are multiple viable candidates, select the first viable candidate
+// as the incumbent. Compare the incumbent to the next viable candidate, and if
+// that candidate is better (bullets below), make it the incumbent. Repeat, with
+// a linear walk through the viable candidate list. The final incumbent will be
+// returned as the best match. A viable candidate is better than the incumbent if
+//
+// * it has a function argument with a better(...) conversion than the incumbent,
+// for all directions needed by in and out
+// * the incumbent has no argument with a better(...) conversion then the
+// candidate, for either in or out (as needed)
+//
+// 5. Check for ambiguity by comparing the best match against all other viable
+// candidates. If any other viable candidate has a function argument with a
+// better(...) conversion than the best candidate (for either in or out
+// directions), return that there was a tie for best.
+//
+
+ tie = false;
+
+ // 1. prune to viable...
+ TVector<const TFunction*> viableCandidates;
+ for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+ const TFunction& candidate = *(*it);
+
+ // to even be a potential match, number of arguments must be >= the number of
+ // fixed (non-default) parameters, and <= the total (including parameter with defaults).
+ if (call.getParamCount() < candidate.getFixedParamCount() ||
+ call.getParamCount() > candidate.getParamCount())
+ continue;
+
+ // see if arguments are convertible
+ bool viable = true;
+
+ // The call can have fewer parameters than the candidate, if some have defaults.
+ const int paramCount = std::min(call.getParamCount(), candidate.getParamCount());
+ for (int param = 0; param < paramCount; ++param) {
+ if (candidate[param].type->getQualifier().isParamInput()) {
+ if (!
convertible(*call[param].type, *candidate[param].type, candidate.getBuiltInOp(), param)) { + viable = false; + break; + } + } + if (candidate[param].type->getQualifier().isParamOutput()) { + if (! convertible(*candidate[param].type, *call[param].type, candidate.getBuiltInOp(), param)) { + viable = false; + break; + } + } + } + + if (viable) + viableCandidates.push_back(&candidate); + } + + // 2. none viable... + if (viableCandidates.size() == 0) + return nullptr; + + // 3. only one viable... + if (viableCandidates.size() == 1) + return viableCandidates.front(); + + // 4. find best... + const auto betterParam = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool { + // is call -> can2 better than call -> can1 for any parameter + bool hasBetterParam = false; + for (int param = 0; param < call.getParamCount(); ++param) { + if (better(*call[param].type, *can1[param].type, *can2[param].type)) { + hasBetterParam = true; + break; + } + } + return hasBetterParam; + }; + + const auto equivalentParams = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool { + // is call -> can2 equivalent to call -> can1 for all the call parameters? + for (int param = 0; param < call.getParamCount(); ++param) { + if (better(*call[param].type, *can1[param].type, *can2[param].type) || + better(*call[param].type, *can2[param].type, *can1[param].type)) + return false; + } + return true; + }; + + const TFunction* incumbent = viableCandidates.front(); + for (auto it = viableCandidates.begin() + 1; it != viableCandidates.end(); ++it) { + const TFunction& candidate = *(*it); + if (betterParam(*incumbent, candidate) && ! betterParam(candidate, *incumbent)) + incumbent = &candidate; + } + + // 5. ambiguity... + for (auto it = viableCandidates.begin(); it != viableCandidates.end(); ++it) { + if (incumbent == *it) + continue; + const TFunction& candidate = *(*it); + + // In the case of default parameters, it may have an identical initial set, which is + // also ambiguous + if (betterParam(*incumbent, candidate) || equivalentParams(*incumbent, candidate)) + tie = true; + } + + return incumbent; +} + +// +// Look at a '.' field selector string and change it into numerical selectors +// for a vector or scalar. +// +// Always return some form of swizzle, so the result is always usable. +// +void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TString& compString, int vecSize, + TSwizzleSelectors& selector) +{ + // Too long? + if (compString.size() > MaxSwizzleSelectors) + error(loc, "vector swizzle too long", compString.c_str(), ""); + + // Use this to test that all swizzle characters are from the same swizzle-namespace-set + enum { + exyzw, + ergba, + estpq, + } fieldSet[MaxSwizzleSelectors]; + + // Decode the swizzle string. 
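+ // Each selector character maps to a component index 0..3; fieldSet[i] records
+ // which naming set (xyzw, rgba, or stpq) the character came from so that
+ // mixed-set selections can be rejected in the checks below.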
+ int size = std::min(MaxSwizzleSelectors, (int)compString.size()); + for (int i = 0; i < size; ++i) { + switch (compString[i]) { + case 'x': + selector.push_back(0); + fieldSet[i] = exyzw; + break; + case 'r': + selector.push_back(0); + fieldSet[i] = ergba; + break; + case 's': + selector.push_back(0); + fieldSet[i] = estpq; + break; + + case 'y': + selector.push_back(1); + fieldSet[i] = exyzw; + break; + case 'g': + selector.push_back(1); + fieldSet[i] = ergba; + break; + case 't': + selector.push_back(1); + fieldSet[i] = estpq; + break; + + case 'z': + selector.push_back(2); + fieldSet[i] = exyzw; + break; + case 'b': + selector.push_back(2); + fieldSet[i] = ergba; + break; + case 'p': + selector.push_back(2); + fieldSet[i] = estpq; + break; + + case 'w': + selector.push_back(3); + fieldSet[i] = exyzw; + break; + case 'a': + selector.push_back(3); + fieldSet[i] = ergba; + break; + case 'q': + selector.push_back(3); + fieldSet[i] = estpq; + break; + + default: + error(loc, "unknown swizzle selection", compString.c_str(), ""); + break; + } + } + + // Additional error checking. + for (int i = 0; i < selector.size(); ++i) { + if (selector[i] >= vecSize) { + error(loc, "vector swizzle selection out of range", compString.c_str(), ""); + selector.resize(i); + break; + } + + if (i > 0 && fieldSet[i] != fieldSet[i-1]) { + error(loc, "vector swizzle selectors not from the same set", compString.c_str(), ""); + selector.resize(i); + break; + } + } + + // Ensure it is valid. + if (selector.size() == 0) + selector.push_back(0); +} + +#ifdef ENABLE_HLSL +// +// Make the passed-in variable information become a member of the +// global uniform block. If this doesn't exist yet, make it. +// +void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList) +{ + // Make the global block, if not yet made. + if (globalUniformBlock == nullptr) { + TQualifier blockQualifier; + blockQualifier.clear(); + blockQualifier.storage = EvqUniform; + TType blockType(new TTypeList, *NewPoolTString(getGlobalUniformBlockName()), blockQualifier); + setUniformBlockDefaults(blockType); + globalUniformBlock = new TVariable(NewPoolTString(""), blockType, true); + firstNewMember = 0; + } + + // Update with binding and set + globalUniformBlock->getWritableType().getQualifier().layoutBinding = globalUniformBinding; + globalUniformBlock->getWritableType().getQualifier().layoutSet = globalUniformSet; + + // Add the requested member as a member to the global block. + TType* type = new TType; + type->shallowCopy(memberType); + type->setFieldName(memberName); + if (typeList) + type->setStruct(typeList); + TTypeLoc typeLoc = {type, loc}; + globalUniformBlock->getType().getWritableStruct()->push_back(typeLoc); + + // Insert into the symbol table. + if (firstNewMember == 0) { + // This is the first request; we need a normal symbol table insert + if (symbolTable.insert(*globalUniformBlock)) + trackLinkage(*globalUniformBlock); + else + error(loc, "failed to insert the global constant buffer", "uniform", ""); + } else { + // This is a follow-on request; we need to amend the first insert + symbolTable.amend(*globalUniformBlock, firstNewMember); + } + + ++firstNewMember; +} +#endif + +void TParseContextBase::finish() +{ + if (parsingBuiltins) + return; + + // Transfer the linkage symbols to AST nodes, preserving order. 
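+ // Each tracked symbol becomes a node in a linkage-only aggregate, so objects
+ // that were declared but never referenced by executable code remain visible
+ // to later linking and reflection.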
+ TIntermAggregate* linkage = new TIntermAggregate; + for (auto i = linkageSymbols.begin(); i != linkageSymbols.end(); ++i) + intermediate.addSymbolLinkageNode(linkage, **i); + intermediate.addSymbolLinkageNodes(linkage, getLanguage(), symbolTable); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/ParseHelper.cpp b/src/third_party/glslang/glslang/MachineIndependent/ParseHelper.cpp new file mode 100644 index 0000000..86a5a37 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/ParseHelper.cpp @@ -0,0 +1,8626 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2015 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// Copyright (C) 2017, 2019 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "ParseHelper.h" +#include "Scan.h" + +#include "../OSDependent/osinclude.h" +#include + +#include "preprocessor/PpContext.h" + +extern int yyparse(glslang::TParseContext*); + +namespace glslang { + +TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, + int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, + TInfoSink& infoSink, bool forwardCompatible, EShMessages messages, + const TString* entryPoint) : + TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, + infoSink, forwardCompatible, messages, entryPoint), + inMain(false), + blockName(nullptr), + limits(resources.limits) +#ifndef GLSLANG_WEB + , + atomicUintOffsets(nullptr), anyIndexLimits(false) +#endif +{ + // decide whether precision qualifiers should be ignored or respected + if (isEsProfile() || spvVersion.vulkan > 0) { + precisionManager.respectPrecisionQualifiers(); + if (! 
parsingBuiltins && language == EShLangFragment && !isEsProfile() && spvVersion.vulkan > 0) + precisionManager.warnAboutDefaults(); + } + + setPrecisionDefaults(); + + globalUniformDefaults.clear(); + globalUniformDefaults.layoutMatrix = ElmColumnMajor; + globalUniformDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd140 : ElpShared; + + globalBufferDefaults.clear(); + globalBufferDefaults.layoutMatrix = ElmColumnMajor; + globalBufferDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd430 : ElpShared; + + // use storage buffer on SPIR-V 1.3 and up + if (spvVersion.spv >= EShTargetSpv_1_3) + intermediate.setUseStorageBuffer(); + + globalInputDefaults.clear(); + globalOutputDefaults.clear(); + +#ifndef GLSLANG_WEB + // "Shaders in the transform + // feedback capturing mode have an initial global default of + // layout(xfb_buffer = 0) out;" + if (language == EShLangVertex || + language == EShLangTessControl || + language == EShLangTessEvaluation || + language == EShLangGeometry) + globalOutputDefaults.layoutXfbBuffer = 0; + + if (language == EShLangGeometry) + globalOutputDefaults.layoutStream = 0; +#endif + + if (entryPoint != nullptr && entryPoint->size() > 0 && *entryPoint != "main") + infoSink.info.message(EPrefixError, "Source entry point must be \"main\""); +} + +TParseContext::~TParseContext() +{ +#ifndef GLSLANG_WEB + delete [] atomicUintOffsets; +#endif +} + +// Set up all default precisions as needed by the current environment. +// Intended just as a TParseContext constructor helper. +void TParseContext::setPrecisionDefaults() +{ + // Set all precision defaults to EpqNone, which is correct for all types + // when not obeying precision qualifiers, and correct for types that don't + // have defaults (thus getting an error on use) when obeying precision + // qualifiers. + + for (int type = 0; type < EbtNumTypes; ++type) + defaultPrecision[type] = EpqNone; + + for (int type = 0; type < maxSamplerIndex; ++type) + defaultSamplerPrecision[type] = EpqNone; + + // replace with real precision defaults for those that have them + if (obeyPrecisionQualifiers()) { + if (isEsProfile()) { + // Most don't have defaults, a few default to lowp. + TSampler sampler; + sampler.set(EbtFloat, Esd2D); + defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; + sampler.set(EbtFloat, EsdCube); + defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; + sampler.set(EbtFloat, Esd2D); + sampler.setExternal(true); + defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; + } + + // If we are parsing built-in computational variables/functions, it is meaningful to record + // whether the built-in has no precision qualifier, as that ambiguity + // is used to resolve the precision from the supplied arguments/operands instead. + // So, we don't actually want to replace EpqNone with a default precision for built-ins. + if (! parsingBuiltins) { + if (isEsProfile() && language == EShLangFragment) { + defaultPrecision[EbtInt] = EpqMedium; + defaultPrecision[EbtUint] = EpqMedium; + } else { + defaultPrecision[EbtInt] = EpqHigh; + defaultPrecision[EbtUint] = EpqHigh; + defaultPrecision[EbtFloat] = EpqHigh; + } + + if (!isEsProfile()) { + // Non-ES profile + // All sampler precisions default to highp. 
+ for (int type = 0; type < maxSamplerIndex; ++type) + defaultSamplerPrecision[type] = EpqHigh; + } + } + + defaultPrecision[EbtSampler] = EpqLow; + defaultPrecision[EbtAtomicUint] = EpqHigh; + } +} + +void TParseContext::setLimits(const TBuiltInResource& r) +{ + resources = r; + intermediate.setLimits(r); + +#ifndef GLSLANG_WEB + anyIndexLimits = ! limits.generalAttributeMatrixVectorIndexing || + ! limits.generalConstantMatrixVectorIndexing || + ! limits.generalSamplerIndexing || + ! limits.generalUniformIndexing || + ! limits.generalVariableIndexing || + ! limits.generalVaryingIndexing; + + + // "Each binding point tracks its own current default offset for + // inheritance of subsequent variables using the same binding. The initial state of compilation is that all + // binding points have an offset of 0." + atomicUintOffsets = new int[resources.maxAtomicCounterBindings]; + for (int b = 0; b < resources.maxAtomicCounterBindings; ++b) + atomicUintOffsets[b] = 0; +#endif +} + +// +// Parse an array of strings using yyparse, going through the +// preprocessor to tokenize the shader strings, then through +// the GLSL scanner. +// +// Returns true for successful acceptance of the shader, false if any errors. +// +bool TParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError) +{ + currentScanner = &input; + ppContext.setInput(input, versionWillBeError); + yyparse(this); + + finish(); + + return numErrors == 0; +} + +// This is called from bison when it has a parse (syntax) error +// Note though that to stop cascading errors, we set EOF, which +// will usually cause a syntax error, so be more accurate that +// compilation is terminating. +void TParseContext::parserError(const char* s) +{ + if (! getScanner()->atEndOfInput() || numErrors == 0) + error(getCurrentLoc(), "", "", s, ""); + else + error(getCurrentLoc(), "compilation terminated", "", ""); +} + +void TParseContext::handlePragma(const TSourceLoc& loc, const TVector& tokens) +{ +#ifndef GLSLANG_WEB + if (pragmaCallback) + pragmaCallback(loc.line, tokens); + + if (tokens.size() == 0) + return; + + if (tokens[0].compare("optimize") == 0) { + if (tokens.size() != 4) { + error(loc, "optimize pragma syntax is incorrect", "#pragma", ""); + return; + } + + if (tokens[1].compare("(") != 0) { + error(loc, "\"(\" expected after 'optimize' keyword", "#pragma", ""); + return; + } + + if (tokens[2].compare("on") == 0) + contextPragma.optimize = true; + else if (tokens[2].compare("off") == 0) + contextPragma.optimize = false; + else { + if(relaxedErrors()) + // If an implementation does not recognize the tokens following #pragma, then it will ignore that pragma. + warn(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", ""); + return; + } + + if (tokens[3].compare(")") != 0) { + error(loc, "\")\" expected to end 'optimize' pragma", "#pragma", ""); + return; + } + } else if (tokens[0].compare("debug") == 0) { + if (tokens.size() != 4) { + error(loc, "debug pragma syntax is incorrect", "#pragma", ""); + return; + } + + if (tokens[1].compare("(") != 0) { + error(loc, "\"(\" expected after 'debug' keyword", "#pragma", ""); + return; + } + + if (tokens[2].compare("on") == 0) + contextPragma.debug = true; + else if (tokens[2].compare("off") == 0) + contextPragma.debug = false; + else { + if(relaxedErrors()) + // If an implementation does not recognize the tokens following #pragma, then it will ignore that pragma. 
+ warn(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", ""); + return; + } + + if (tokens[3].compare(")") != 0) { + error(loc, "\")\" expected to end 'debug' pragma", "#pragma", ""); + return; + } + } else if (spvVersion.spv > 0 && tokens[0].compare("use_storage_buffer") == 0) { + if (tokens.size() != 1) + error(loc, "extra tokens", "#pragma", ""); + intermediate.setUseStorageBuffer(); + } else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) { + if (tokens.size() != 1) + error(loc, "extra tokens", "#pragma", ""); + intermediate.setUseVulkanMemoryModel(); + } else if (spvVersion.spv > 0 && tokens[0].compare("use_variable_pointers") == 0) { + if (tokens.size() != 1) + error(loc, "extra tokens", "#pragma", ""); + if (spvVersion.spv < glslang::EShTargetSpv_1_3) + error(loc, "requires SPIR-V 1.3", "#pragma use_variable_pointers", ""); + intermediate.setUseVariablePointers(); + } else if (tokens[0].compare("once") == 0) { + warn(loc, "not implemented", "#pragma once", ""); + } else if (tokens[0].compare("glslang_binary_double_output") == 0) + intermediate.setBinaryDoubleOutput(); +#endif +} + +// +// Handle seeing a variable identifier in the grammar. +// +TIntermTyped* TParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string) +{ + TIntermTyped* node = nullptr; + + // Error check for requiring specific extensions present. + if (symbol && symbol->getNumExtensions()) + requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str()); + +#ifndef GLSLANG_WEB + if (symbol && symbol->isReadOnly()) { + // All shared things containing an unsized array must be copied up + // on first use, so that all future references will share its array structure, + // so that editing the implicit size will effect all nodes consuming it, + // and so that editing the implicit size won't change the shared one. + // + // If this is a variable or a block, check it and all it contains, but if this + // is a member of an anonymous block, check the whole block, as the whole block + // will need to be copied up if it contains an unsized array. + // + // This check is being done before the block-name check further down, so guard + // for that too. + if (!symbol->getType().isUnusableName()) { + if (symbol->getType().containsUnsizedArray() || + (symbol->getAsAnonMember() && + symbol->getAsAnonMember()->getAnonContainer().getType().containsUnsizedArray())) + makeEditable(symbol); + } + } +#endif + + const TVariable* variable; + const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr; + if (anon) { + // It was a member of an anonymous container. + + // Create a subtree for its dereference. + variable = anon->getAnonContainer().getAsVariable(); + TIntermTyped* container = intermediate.addSymbol(*variable, loc); + TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc); + node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc); + + node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type); + if (node->getType().hiddenMember()) + error(loc, "member of nameless block was not redeclared", string->c_str(), ""); + } else { + // Not a member of an anonymous container. + + // The symbol table search was done in the lexical phase. + // See if it was a variable. + variable = symbol ? 
symbol->getAsVariable() : nullptr; + if (variable) { + if (variable->getType().isUnusableName()) { + error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), ""); + variable = nullptr; + } + } else { + if (symbol) + error(loc, "variable name expected", string->c_str(), ""); + } + + // Recovery, if it wasn't found or was not a variable. + if (! variable) + variable = new TVariable(string, TType(EbtVoid)); + + if (variable->getType().getQualifier().isFrontEndConstant()) + node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc); + else + node = intermediate.addSymbol(*variable, loc); + } + + if (variable->getType().getQualifier().isIo()) + intermediate.addIoAccessed(*string); + + if (variable->getType().isReference() && + variable->getType().getQualifier().bufferReferenceNeedsVulkanMemoryModel()) { + intermediate.setUseVulkanMemoryModel(); + } + + return node; +} + +// +// Handle seeing a base[index] dereference in the grammar. +// +TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index) +{ + int indexValue = 0; + if (index->getQualifier().isFrontEndConstant()) + indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst(); + + // basic type checks... + variableCheck(base); + + if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat() && + ! base->isReference()) { + if (base->getAsSymbolNode()) + error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), ""); + else + error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", ""); + + // Insert dummy error-recovery result + return intermediate.addConstantUnion(0.0, EbtFloat, loc); + } + + if (!base->isArray() && base->isVector()) { + if (base->getType().contains16BitFloat()) + requireFloat16Arithmetic(loc, "[", "does not operate on types containing float16"); + if (base->getType().contains16BitInt()) + requireInt16Arithmetic(loc, "[", "does not operate on types containing (u)int16"); + if (base->getType().contains8BitInt()) + requireInt8Arithmetic(loc, "[", "does not operate on types containing (u)int8"); + } + + // check for constant folding + if (base->getType().getQualifier().isFrontEndConstant() && index->getQualifier().isFrontEndConstant()) { + // both base and index are front-end constants + checkIndex(loc, base->getType(), indexValue); + return intermediate.foldDereference(base, indexValue, loc); + } + + // at least one of base and index is not a front-end constant variable... + TIntermTyped* result = nullptr; + +#ifndef GLSLANG_WEB + if (base->isReference() && ! 
base->isArray()) { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing"); + if (base->getType().getReferentType()->containsUnsizedArray()) { + error(loc, "cannot index reference to buffer containing an unsized array", "", ""); + result = nullptr; + } else { + result = intermediate.addBinaryMath(EOpAdd, base, index, loc); + if (result != nullptr) + result->setType(base->getType()); + } + if (result == nullptr) { + error(loc, "cannot index buffer reference", "", ""); + result = intermediate.addConstantUnion(0.0, EbtFloat, loc); + } + return result; + } + if (base->getAsSymbolNode() && isIoResizeArray(base->getType())) + handleIoResizeArrayAccess(loc, base); +#endif + + if (index->getQualifier().isFrontEndConstant()) + checkIndex(loc, base->getType(), indexValue); + + if (index->getQualifier().isFrontEndConstant()) { +#ifndef GLSLANG_WEB + if (base->getType().isUnsizedArray()) { + base->getWritableType().updateImplicitArraySize(indexValue + 1); + // For 2D per-view builtin arrays, update the inner dimension size in parent type + if (base->getQualifier().isPerView() && base->getQualifier().builtIn != EbvNone) { + TIntermBinary* binaryNode = base->getAsBinaryNode(); + if (binaryNode) { + TType& leftType = binaryNode->getLeft()->getWritableType(); + TArraySizes& arraySizes = *leftType.getArraySizes(); + assert(arraySizes.getNumDims() == 2); + arraySizes.setDimSize(1, std::max(arraySizes.getDimSize(1), indexValue + 1)); + } + } + } else +#endif + checkIndex(loc, base->getType(), indexValue); + result = intermediate.addIndex(EOpIndexDirect, base, index, loc); + } else { +#ifndef GLSLANG_WEB + if (base->getType().isUnsizedArray()) { + // we have a variable index into an unsized array, which is okay, + // depending on the situation + if (base->getAsSymbolNode() && isIoResizeArray(base->getType())) + error(loc, "", "[", "array must be sized by a redeclaration or layout qualifier before being indexed with a variable"); + else { + // it is okay for a run-time sized array + checkRuntimeSizable(loc, *base); + } + base->getWritableType().setArrayVariablyIndexed(); + } +#endif + if (base->getBasicType() == EbtBlock) { + if (base->getQualifier().storage == EvqBuffer) + requireProfile(base->getLoc(), ~EEsProfile, "variable indexing buffer block array"); + else if (base->getQualifier().storage == EvqUniform) + profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, + "variable indexing uniform block array"); + else { + // input/output blocks either don't exist or can't be variably indexed + } + } else if (language == EShLangFragment && base->getQualifier().isPipeOutput()) + requireProfile(base->getLoc(), ~EEsProfile, "variable indexing fragment shader output array"); + else if (base->getBasicType() == EbtSampler && version >= 130) { + const char* explanation = "variable indexing sampler array"; + requireProfile(base->getLoc(), EEsProfile | ECoreProfile | ECompatibilityProfile, explanation); + profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, explanation); + profileRequires(base->getLoc(), ECoreProfile | ECompatibilityProfile, 400, nullptr, explanation); + } + + result = intermediate.addIndex(EOpIndexIndirect, base, index, loc); + } + + // Insert valid dereferenced result type + TType newType(base->getType(), 0); + if (base->getType().getQualifier().isConstant() && index->getQualifier().isConstant()) { + newType.getQualifier().storage = EvqConst; + // If base or index is a specialization constant, the result 
should also be a specialization constant. + if (base->getType().getQualifier().isSpecConstant() || index->getQualifier().isSpecConstant()) { + newType.getQualifier().makeSpecConstant(); + } + } else { + newType.getQualifier().storage = EvqTemporary; + newType.getQualifier().specConstant = false; + } + result->setType(newType); + +#ifndef GLSLANG_WEB + inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier()); + + // Propagate nonuniform + if (base->getQualifier().isNonUniform() || index->getQualifier().isNonUniform()) + result->getWritableType().getQualifier().nonUniform = true; + + if (anyIndexLimits) + handleIndexLimits(loc, base, index); +#endif + + return result; +} + +#ifndef GLSLANG_WEB + +// for ES 2.0 (version 100) limitations for almost all index operations except vertex-shader uniforms +void TParseContext::handleIndexLimits(const TSourceLoc& /*loc*/, TIntermTyped* base, TIntermTyped* index) +{ + if ((! limits.generalSamplerIndexing && base->getBasicType() == EbtSampler) || + (! limits.generalUniformIndexing && base->getQualifier().isUniformOrBuffer() && language != EShLangVertex) || + (! limits.generalAttributeMatrixVectorIndexing && base->getQualifier().isPipeInput() && language == EShLangVertex && (base->getType().isMatrix() || base->getType().isVector())) || + (! limits.generalConstantMatrixVectorIndexing && base->getAsConstantUnion()) || + (! limits.generalVariableIndexing && ! base->getType().getQualifier().isUniformOrBuffer() && + ! base->getType().getQualifier().isPipeInput() && + ! base->getType().getQualifier().isPipeOutput() && + ! base->getType().getQualifier().isConstant()) || + (! limits.generalVaryingIndexing && (base->getType().getQualifier().isPipeInput() || + base->getType().getQualifier().isPipeOutput()))) { + // it's too early to know what the inductive variables are, save it for post processing + needsIndexLimitationChecking.push_back(index); + } +} + +// Make a shared symbol have a non-shared version that can be edited by the current +// compile, such that editing its type will not change the shared version and will +// effect all nodes sharing it. +void TParseContext::makeEditable(TSymbol*& symbol) +{ + TParseContextBase::makeEditable(symbol); + + // See if it's tied to IO resizing + if (isIoResizeArray(symbol->getType())) + ioArraySymbolResizeList.push_back(symbol); +} + +// Return true if this is a geometry shader input array or tessellation control output array +// or mesh shader output array. +bool TParseContext::isIoResizeArray(const TType& type) const +{ + return type.isArray() && + ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) || + (language == EShLangTessControl && type.getQualifier().storage == EvqVaryingOut && + ! type.getQualifier().patch) || + (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn && + type.getQualifier().pervertexNV) || + (language == EShLangMeshNV && type.getQualifier().storage == EvqVaryingOut && + !type.getQualifier().perTaskNV)); +} + +// If an array is not isIoResizeArray() but is an io array, make sure it has the right size +void TParseContext::fixIoArraySize(const TSourceLoc& loc, TType& type) +{ + if (! type.isArray() || type.getQualifier().patch || symbolTable.atBuiltInLevel()) + return; + + assert(! 
isIoResizeArray(type)); + + if (type.getQualifier().storage != EvqVaryingIn || type.getQualifier().patch) + return; + + if (language == EShLangTessControl || language == EShLangTessEvaluation) { + if (type.getOuterArraySize() != resources.maxPatchVertices) { + if (type.isSizedArray()) + error(loc, "tessellation input array size must be gl_MaxPatchVertices or implicitly sized", "[]", ""); + type.changeOuterArraySize(resources.maxPatchVertices); + } + } +} + +// Issue any errors if the non-array object is missing arrayness WRT +// shader I/O that has array requirements. +// All arrayness checking is handled in array paths, this is for +void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (! type.isArray() && ! symbolTable.atBuiltInLevel()) { + if (type.getQualifier().isArrayedIo(language) && !type.getQualifier().layoutPassthrough) + error(loc, "type must be an array:", type.getStorageQualifierString(), identifier.c_str()); + } +} + +// Handle a dereference of a geometry shader input array or tessellation control output array. +// See ioArraySymbolResizeList comment in ParseHelper.h. +// +void TParseContext::handleIoResizeArrayAccess(const TSourceLoc& /*loc*/, TIntermTyped* base) +{ + TIntermSymbol* symbolNode = base->getAsSymbolNode(); + assert(symbolNode); + if (! symbolNode) + return; + + // fix array size, if it can be fixed and needs to be fixed (will allow variable indexing) + if (symbolNode->getType().isUnsizedArray()) { + int newSize = getIoArrayImplicitSize(symbolNode->getType().getQualifier()); + if (newSize > 0) + symbolNode->getWritableType().changeOuterArraySize(newSize); + } +} + +// If there has been an input primitive declaration (geometry shader) or an output +// number of vertices declaration(tessellation shader), make sure all input array types +// match it in size. Types come either from nodes in the AST or symbols in the +// symbol table. +// +// Types without an array size will be given one. +// Types already having a size that is wrong will get an error. +// +void TParseContext::checkIoArraysConsistency(const TSourceLoc &loc, bool tailOnly) +{ + int requiredSize = 0; + TString featureString; + size_t listSize = ioArraySymbolResizeList.size(); + size_t i = 0; + + // If tailOnly = true, only check the last array symbol in the list. + if (tailOnly) { + i = listSize - 1; + } + for (bool firstIteration = true; i < listSize; ++i) { + TType &type = ioArraySymbolResizeList[i]->getWritableType(); + + // As I/O array sizes don't change, fetch requiredSize only once, + // except for mesh shaders which could have different I/O array sizes based on type qualifiers. + if (firstIteration || (language == EShLangMeshNV)) { + requiredSize = getIoArrayImplicitSize(type.getQualifier(), &featureString); + if (requiredSize == 0) + break; + firstIteration = false; + } + + checkIoArrayConsistency(loc, requiredSize, featureString.c_str(), type, + ioArraySymbolResizeList[i]->getName()); + } +} + +int TParseContext::getIoArrayImplicitSize(const TQualifier &qualifier, TString *featureString) const +{ + int expectedSize = 0; + TString str = "unknown"; + unsigned int maxVertices = intermediate.getVertices() != TQualifier::layoutNotSet ? 
intermediate.getVertices() : 0; + + if (language == EShLangGeometry) { + expectedSize = TQualifier::mapGeometryToSize(intermediate.getInputPrimitive()); + str = TQualifier::getGeometryString(intermediate.getInputPrimitive()); + } + else if (language == EShLangTessControl) { + expectedSize = maxVertices; + str = "vertices"; + } else if (language == EShLangFragment) { + // Number of vertices for Fragment shader is always three. + expectedSize = 3; + str = "vertices"; + } else if (language == EShLangMeshNV) { + unsigned int maxPrimitives = + intermediate.getPrimitives() != TQualifier::layoutNotSet ? intermediate.getPrimitives() : 0; + if (qualifier.builtIn == EbvPrimitiveIndicesNV) { + expectedSize = maxPrimitives * TQualifier::mapGeometryToSize(intermediate.getOutputPrimitive()); + str = "max_primitives*"; + str += TQualifier::getGeometryString(intermediate.getOutputPrimitive()); + } + else if (qualifier.isPerPrimitive()) { + expectedSize = maxPrimitives; + str = "max_primitives"; + } + else { + expectedSize = maxVertices; + str = "max_vertices"; + } + } + if (featureString) + *featureString = str; + return expectedSize; +} + +void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredSize, const char* feature, TType& type, const TString& name) +{ + if (type.isUnsizedArray()) + type.changeOuterArraySize(requiredSize); + else if (type.getOuterArraySize() != requiredSize) { + if (language == EShLangGeometry) + error(loc, "inconsistent input primitive for array size of", feature, name.c_str()); + else if (language == EShLangTessControl) + error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str()); + else if (language == EShLangFragment) { + if (type.getOuterArraySize() > requiredSize) + error(loc, " cannot be greater than 3 for pervertexNV", feature, name.c_str()); + } + else if (language == EShLangMeshNV) + error(loc, "inconsistent output array size of", feature, name.c_str()); + else + assert(0); + } +} + +#endif // GLSLANG_WEB + +// Handle seeing a binary node with a math operation. +// Returns nullptr if not semantically allowed. +TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right) +{ + rValueErrorCheck(loc, str, left->getAsTyped()); + rValueErrorCheck(loc, str, right->getAsTyped()); + + bool allowed = true; + switch (op) { + // TODO: Bring more source language-specific checks up from intermediate.cpp + // to the specific parse helpers for that source language. + case EOpLessThan: + case EOpGreaterThan: + case EOpLessThanEqual: + case EOpGreaterThanEqual: + if (! left->isScalar() || ! right->isScalar()) + allowed = false; + break; + default: + break; + } + + if (((left->getType().contains16BitFloat() || right->getType().contains16BitFloat()) && !float16Arithmetic()) || + ((left->getType().contains16BitInt() || right->getType().contains16BitInt()) && !int16Arithmetic()) || + ((left->getType().contains8BitInt() || right->getType().contains8BitInt()) && !int8Arithmetic())) { + allowed = false; + } + + TIntermTyped* result = nullptr; + if (allowed) { + if ((left->isReference() || right->isReference())) + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference math"); + result = intermediate.addBinaryMath(op, left, right, loc); + } + + if (result == nullptr) + binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString()); + + return result; +} + +// Handle seeing a unary node with a math operation. 
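+// On failure an error is issued and the original child node is returned so
+// parsing can continue.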
+TIntermTyped* TParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* childNode) +{ + rValueErrorCheck(loc, str, childNode); + + bool allowed = true; + if ((childNode->getType().contains16BitFloat() && !float16Arithmetic()) || + (childNode->getType().contains16BitInt() && !int16Arithmetic()) || + (childNode->getType().contains8BitInt() && !int8Arithmetic())) { + allowed = false; + } + + TIntermTyped* result = nullptr; + if (allowed) + result = intermediate.addUnaryMath(op, childNode, loc); + + if (result) + return result; + else + unaryOpError(loc, str, childNode->getCompleteString()); + + return childNode; +} + +// +// Handle seeing a base.field dereference in the grammar. +// +TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field) +{ + variableCheck(base); + + // + // .length() can't be resolved until we later see the function-calling syntax. + // Save away the name in the AST for now. Processing is completed in + // handleLengthMethod(). + // + if (field == "length") { + if (base->isArray()) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, ".length"); + profileRequires(loc, EEsProfile, 300, nullptr, ".length"); + } else if (base->isVector() || base->isMatrix()) { + const char* feature = ".length() on vectors and matrices"; + requireProfile(loc, ~EEsProfile, feature); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, feature); + } else if (!base->getType().isCoopMat()) { + error(loc, "does not operate on this type:", field.c_str(), base->getType().getCompleteString().c_str()); + + return base; + } + + return intermediate.addMethod(base, TType(EbtInt), &field, loc); + } + + // It's not .length() if we get to here. + + if (base->isArray()) { + error(loc, "cannot apply to an array:", ".", field.c_str()); + + return base; + } + + if (base->getType().isCoopMat()) { + error(loc, "cannot apply to a cooperative matrix type:", ".", field.c_str()); + return base; + } + + // It's neither an array nor .length() if we get here, + // leaving swizzles and struct/block dereferences. + + TIntermTyped* result = base; + if ((base->isVector() || base->isScalar()) && + (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) { + result = handleDotSwizzle(loc, base, field); + } else if (base->isStruct() || base->isReference()) { + const TTypeList* fields = base->isReference() ? 
+ base->getType().getReferentType()->getStruct() : + base->getType().getStruct(); + bool fieldFound = false; + int member; + for (member = 0; member < (int)fields->size(); ++member) { + if ((*fields)[member].type->getFieldName() == field) { + fieldFound = true; + break; + } + } + if (fieldFound) { + if (base->getType().getQualifier().isFrontEndConstant()) + result = intermediate.foldDereference(base, member, loc); + else { + blockMemberExtensionCheck(loc, base, member, field); + TIntermTyped* index = intermediate.addConstantUnion(member, loc); + result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc); + result->setType(*(*fields)[member].type); + if ((*fields)[member].type->getQualifier().isIo()) + intermediate.addIoAccessed(field); + } + inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier()); + } else + error(loc, "no such field in structure", field.c_str(), ""); + } else + error(loc, "does not apply to this type:", field.c_str(), base->getType().getCompleteString().c_str()); + + // Propagate noContraction up the dereference chain + if (base->getQualifier().isNoContraction()) + result->getWritableType().getQualifier().setNoContraction(); + + // Propagate nonuniform + if (base->getQualifier().isNonUniform()) + result->getWritableType().getQualifier().nonUniform = true; + + return result; +} + +// +// Handle seeing a base.swizzle, a subset of base.identifier in the grammar. +// +TIntermTyped* TParseContext::handleDotSwizzle(const TSourceLoc& loc, TIntermTyped* base, const TString& field) +{ + TIntermTyped* result = base; + if (base->isScalar()) { + const char* dotFeature = "scalar swizzle"; + requireProfile(loc, ~EEsProfile, dotFeature); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature); + } + + TSwizzleSelectors selectors; + parseSwizzleSelector(loc, field, base->getVectorSize(), selectors); + + if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitFloat()) + requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16"); + if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt()) + requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16"); + if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt()) + requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8"); + + if (base->isScalar()) { + if (selectors.size() == 1) + return result; + else { + TType type(base->getBasicType(), EvqTemporary, selectors.size()); + // Swizzle operations propagate specialization-constantness + if (base->getQualifier().isSpecConstant()) + type.getQualifier().makeSpecConstant(); + return addConstructor(loc, base, type); + } + } + + if (base->getType().getQualifier().isFrontEndConstant()) + result = intermediate.foldSwizzle(base, selectors, loc); + else { + if (selectors.size() == 1) { + TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc); + result = intermediate.addIndex(EOpIndexDirect, base, index, loc); + result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision)); + } else { + TIntermTyped* index = intermediate.addSwizzle(selectors, loc); + result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc); + result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size())); + } + // Swizzle operations propagate specialization-constantness + if (base->getType().getQualifier().isSpecConstant()) 
+ result->getWritableType().getQualifier().makeSpecConstant(); + } + + return result; +} + +void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName) +{ + // a block that needs extension checking is either 'base', or if arrayed, + // one level removed to the left + const TIntermSymbol* baseSymbol = nullptr; + if (base->getAsBinaryNode() == nullptr) + baseSymbol = base->getAsSymbolNode(); + else + baseSymbol = base->getAsBinaryNode()->getLeft()->getAsSymbolNode(); + if (baseSymbol == nullptr) + return; + const TSymbol* symbol = symbolTable.find(baseSymbol->getName()); + if (symbol == nullptr) + return; + const TVariable* variable = symbol->getAsVariable(); + if (variable == nullptr) + return; + if (!variable->hasMemberExtensions()) + return; + + // We now have a variable that is the base of a dot reference + // with members that need extension checking. + if (variable->getNumMemberExtensions(member) > 0) + requireExtensions(loc, variable->getNumMemberExtensions(member), variable->getMemberExtensions(member), memberName.c_str()); +} + +// +// Handle seeing a function declarator in the grammar. This is the precursor +// to recognizing a function prototype or function definition. +// +TFunction* TParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype) +{ + // ES can't declare prototypes inside functions + if (! symbolTable.atGlobalLevel()) + requireProfile(loc, ~EEsProfile, "local function declaration"); + + // + // Multiple declarations of the same function name are allowed. + // + // If this is a definition, the definition production code will check for redefinitions + // (we don't know at this point if it's a definition or not). + // + // Redeclarations (full signature match) are allowed. But, return types and parameter qualifiers must also match. + // - except ES 100, which only allows a single prototype + // + // ES 100 does not allow redefining, but does allow overloading of built-in functions. + // ES 300 does not allow redefining or overloading of built-in functions. + // + bool builtIn; + TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn); + if (symbol && symbol->getAsFunction() && builtIn) + requireProfile(loc, ~EEsProfile, "redefinition of built-in function"); + const TFunction* prevDec = symbol ? symbol->getAsFunction() : 0; + if (prevDec) { + if (prevDec->isPrototyped() && prototype) + profileRequires(loc, EEsProfile, 300, nullptr, "multiple prototypes for same function"); + if (prevDec->getType() != function.getType()) + error(loc, "overloaded functions must have the same return type", function.getName().c_str(), ""); + for (int i = 0; i < prevDec->getParamCount(); ++i) { + if ((*prevDec)[i].type->getQualifier().storage != function[i].type->getQualifier().storage) + error(loc, "overloaded functions must have the same parameter storage qualifiers for argument", function[i].type->getStorageQualifierString(), "%d", i+1); + + if ((*prevDec)[i].type->getQualifier().precision != function[i].type->getQualifier().precision) + error(loc, "overloaded functions must have the same parameter precision qualifiers for argument", function[i].type->getPrecisionQualifierString(), "%d", i+1); + } + } + + arrayObjectCheck(loc, function.getType(), "array in function return type"); + + if (prototype) { + // All built-in functions are defined, even though they don't have a body. + // Count their prototype as a definition instead. 
+ if (symbolTable.atBuiltInLevel()) + function.setDefined(); + else { + if (prevDec && ! builtIn) + symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const + function.setPrototyped(); + } + } + + // This insert won't actually insert it if it's a duplicate signature, but it will still check for + // other forms of name collisions. + if (! symbolTable.insert(function)) + error(loc, "function name is redeclaration of existing name", function.getName().c_str(), ""); + + // + // If this is a redeclaration, it could also be a definition, + // in which case, we need to use the parameter names from this one, and not the one that's + // being redeclared. So, pass back this declaration, not the one in the symbol table. + // + return &function; +} + +// +// Handle seeing the function prototype in front of a function definition in the grammar. +// The body is handled after this function returns. +// +TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function) +{ + currentCaller = function.getMangledName(); + TSymbol* symbol = symbolTable.find(function.getMangledName()); + TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr; + + if (! prevDec) + error(loc, "can't find function", function.getName().c_str(), ""); + // Note: 'prevDec' could be 'function' if this is the first time we've seen function + // as it would have just been put in the symbol table. Otherwise, we're looking up + // an earlier occurrence. + + if (prevDec && prevDec->isDefined()) { + // Then this function already has a body. + error(loc, "function already has a body", function.getName().c_str(), ""); + } + if (prevDec && ! prevDec->isDefined()) { + prevDec->setDefined(); + + // Remember the return type for later checking for RETURN statements. + currentFunctionType = &(prevDec->getType()); + } else + currentFunctionType = new TType(EbtVoid); + functionReturnsValue = false; + + // Check for entry point + if (function.getName().compare(intermediate.getEntryPointName().c_str()) == 0) { + intermediate.setEntryPointMangledName(function.getMangledName().c_str()); + intermediate.incrementEntryPointCount(); + inMain = true; + } else + inMain = false; + + // + // Raise error message if main function takes any parameters or returns anything other than void + // + if (inMain) { + if (function.getParamCount() > 0) + error(loc, "function cannot take any parameter(s)", function.getName().c_str(), ""); + if (function.getType().getBasicType() != EbtVoid) + error(loc, "", function.getType().getBasicTypeString().c_str(), "entry point cannot return a value"); + } + + // + // New symbol table scope for body of function plus its arguments + // + symbolTable.push(); + + // + // Insert parameters into the symbol table. + // If the parameter has no name, it's not an error, just don't insert it + // (could be used for unused args). + // + // Also, accumulate the list of parameters into the HIL, so lower level code + // knows where to find parameters. + // + TIntermAggregate* paramNodes = new TIntermAggregate; + for (int i = 0; i < function.getParamCount(); i++) { + TParameter& param = function[i]; + if (param.name != nullptr) { + TVariable *variable = new TVariable(param.name, *param.type); + + // Insert the parameters with name in the symbol table. + if (! symbolTable.insert(*variable)) + error(loc, "redefinition", variable->getName().c_str(), ""); + else { + // Transfer ownership of name pointer to symbol table. 
+ param.name = nullptr; + + // Add the parameter to the HIL + paramNodes = intermediate.growAggregate(paramNodes, + intermediate.addSymbol(*variable, loc), + loc); + } + } else + paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc); + } + intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc); + loopNestingLevel = 0; + statementNestingLevel = 0; + controlFlowNestingLevel = 0; + postEntryPointReturn = false; + + return paramNodes; +} + +// +// Handle seeing function call syntax in the grammar, which could be any of +// - .length() method +// - constructor +// - a call to a built-in function mapped to an operator +// - a call to a built-in function that will remain a function call (e.g., texturing) +// - user function +// - subroutine call (not implemented yet) +// +TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments) +{ + TIntermTyped* result = nullptr; + + if (function->getBuiltInOp() == EOpArrayLength) + result = handleLengthMethod(loc, function, arguments); + else if (function->getBuiltInOp() != EOpNull) { + // + // Then this should be a constructor. + // Don't go through the symbol table for constructors. + // Their parameters will be verified algorithmically. + // + TType type(EbtVoid); // use this to get the type back + if (! constructorError(loc, arguments, *function, function->getBuiltInOp(), type)) { + // + // It's a constructor, of type 'type'. + // + result = addConstructor(loc, arguments, type); + if (result == nullptr) + error(loc, "cannot construct with these arguments", type.getCompleteString().c_str(), ""); + } + } else { + // + // Find it in the symbol table. + // + const TFunction* fnCandidate; + bool builtIn; + fnCandidate = findFunction(loc, *function, builtIn); + if (fnCandidate) { + // This is a declared function that might map to + // - a built-in operator, + // - a built-in function not mapped to an operator, or + // - a user function. + + // Error check for a function requiring specific extensions present. + if (builtIn && fnCandidate->getNumExtensions()) + requireExtensions(loc, fnCandidate->getNumExtensions(), fnCandidate->getExtensions(), fnCandidate->getName().c_str()); + + if (builtIn && fnCandidate->getType().contains16BitFloat()) + requireFloat16Arithmetic(loc, "built-in function", "float16 types can only be in uniform block or buffer storage"); + if (builtIn && fnCandidate->getType().contains16BitInt()) + requireInt16Arithmetic(loc, "built-in function", "(u)int16 types can only be in uniform block or buffer storage"); + if (builtIn && fnCandidate->getType().contains8BitInt()) + requireInt8Arithmetic(loc, "built-in function", "(u)int8 types can only be in uniform block or buffer storage"); + + if (arguments != nullptr) { + // Make sure qualifications work for these arguments. + TIntermAggregate* aggregate = arguments->getAsAggregate(); + for (int i = 0; i < fnCandidate->getParamCount(); ++i) { + // At this early point there is a slight ambiguity between whether an aggregate 'arguments' + // is the single argument itself or its children are the arguments. Only one argument + // means take 'arguments' itself as the one argument. + TIntermNode* arg = fnCandidate->getParamCount() == 1 ? arguments : (aggregate ? 
aggregate->getSequence()[i] : arguments); + TQualifier& formalQualifier = (*fnCandidate)[i].type->getQualifier(); + if (formalQualifier.isParamOutput()) { + if (lValueErrorCheck(arguments->getLoc(), "assign", arg->getAsTyped())) + error(arguments->getLoc(), "Non-L-value cannot be passed for 'out' or 'inout' parameters.", "out", ""); + } + const TType& argType = arg->getAsTyped()->getType(); + const TQualifier& argQualifier = argType.getQualifier(); + if (argQualifier.isMemory() && (argType.containsOpaque() || argType.isReference())) { + const char* message = "argument cannot drop memory qualifier when passed to formal parameter"; +#ifndef GLSLANG_WEB + if (argQualifier.volatil && ! formalQualifier.volatil) + error(arguments->getLoc(), message, "volatile", ""); + if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "coherent", ""); + if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "devicecoherent", ""); + if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "queuefamilycoherent", ""); + if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "workgroupcoherent", ""); + if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) + error(arguments->getLoc(), message, "subgroupcoherent", ""); + if (argQualifier.readonly && ! formalQualifier.readonly) + error(arguments->getLoc(), message, "readonly", ""); + if (argQualifier.writeonly && ! formalQualifier.writeonly) + error(arguments->getLoc(), message, "writeonly", ""); + // Don't check 'restrict', it is different than the rest: + // "...but only restrict can be taken away from a calling argument, by a formal parameter that + // lacks the restrict qualifier..." +#endif + } + if (!builtIn && argQualifier.getFormat() != formalQualifier.getFormat()) { + // we have mismatched formats, which should only be allowed if writeonly + // and at least one format is unknown + if (!formalQualifier.isWriteOnly() || (formalQualifier.getFormat() != ElfNone && + argQualifier.getFormat() != ElfNone)) + error(arguments->getLoc(), "image formats must match", "format", ""); + } + if (builtIn && arg->getAsTyped()->getType().contains16BitFloat()) + requireFloat16Arithmetic(arguments->getLoc(), "built-in function", "float16 types can only be in uniform block or buffer storage"); + if (builtIn && arg->getAsTyped()->getType().contains16BitInt()) + requireInt16Arithmetic(arguments->getLoc(), "built-in function", "(u)int16 types can only be in uniform block or buffer storage"); + if (builtIn && arg->getAsTyped()->getType().contains8BitInt()) + requireInt8Arithmetic(arguments->getLoc(), "built-in function", "(u)int8 types can only be in uniform block or buffer storage"); + + // TODO 4.5 functionality: A shader will fail to compile + // if the value passed to the memargument of an atomic memory function does not correspond to a buffer or + // shared variable. 
It is acceptable to pass an element of an array or a single component of a vector to the + // memargument of an atomic memory function, as long as the underlying array or vector is a buffer or + // shared variable. + } + + // Convert 'in' arguments + addInputArgumentConversions(*fnCandidate, arguments); // arguments may be modified if it's just a single argument node + } + + if (builtIn && fnCandidate->getBuiltInOp() != EOpNull) { + // A function call mapped to a built-in operation. + result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate); + } else { + // This is a function call not mapped to built-in operator. + // It could still be a built-in function, but only if PureOperatorBuiltins == false. + result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc); + TIntermAggregate* call = result->getAsAggregate(); + call->setName(fnCandidate->getMangledName()); + + // this is how we know whether the given function is a built-in function or a user-defined function + // if builtIn == false, it's a userDefined -> could be an overloaded built-in function also + // if builtIn == true, it's definitely a built-in function with EOpNull + if (! builtIn) { + call->setUserDefined(); + if (symbolTable.atGlobalLevel()) { + requireProfile(loc, ~EEsProfile, "calling user function from global scope"); + intermediate.addToCallGraph(infoSink, "main(", fnCandidate->getMangledName()); + } else + intermediate.addToCallGraph(infoSink, currentCaller, fnCandidate->getMangledName()); + } + +#ifndef GLSLANG_WEB + if (builtIn) + nonOpBuiltInCheck(loc, *fnCandidate, *call); + else +#endif + userFunctionCallCheck(loc, *call); + } + + // Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore. + // Built-ins with a single argument aren't called with an aggregate, but they also don't have an output. + // Also, build the qualifier list for user function calls, which are always called with an aggregate. + if (result->getAsAggregate()) { + TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList(); + for (int i = 0; i < fnCandidate->getParamCount(); ++i) { + TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage; + qualifierList.push_back(qual); + } + result = addOutputArgumentConversions(*fnCandidate, *result->getAsAggregate()); + } + + if (result->getAsTyped()->getType().isCoopMat() && + !result->getAsTyped()->getType().isParameterized()) { + assert(fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAdd); + + result->setType(result->getAsAggregate()->getSequence()[2]->getAsTyped()->getType()); + } + } + } + + // generic error recovery + // TODO: simplification: localize all the error recoveries that look like this, and taking type into account to reduce cascades + if (result == nullptr) + result = intermediate.addConstantUnion(0.0, EbtFloat, loc); + + return result; +} + +TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNode* arguments, + const TFunction& function) +{ + checkLocation(loc, function.getBuiltInOp()); + TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(), + function.getParamCount() == 1, + arguments, function.getType()); + if (result != nullptr && obeyPrecisionQualifiers()) + computeBuiltinPrecisions(*result, function); + + if (result == nullptr) { + if (arguments == nullptr) + error(loc, " wrong operand type", "Internal Error", + "built in unary operator function. 
Type: %s", ""); + else + error(arguments->getLoc(), " wrong operand type", "Internal Error", + "built in unary operator function. Type: %s", + static_cast(arguments)->getCompleteString().c_str()); + } else if (result->getAsOperator()) + builtInOpCheck(loc, function, *result->getAsOperator()); + + return result; +} + +// "The operation of a built-in function can have a different precision +// qualification than the precision qualification of the resulting value. +// These two precision qualifications are established as follows. +// +// The precision qualification of the operation of a built-in function is +// based on the precision qualification of its input arguments and formal +// parameters: When a formal parameter specifies a precision qualifier, +// that is used, otherwise, the precision qualification of the calling +// argument is used. The highest precision of these will be the precision +// qualification of the operation of the built-in function. Generally, +// this is applied across all arguments to a built-in function, with the +// exceptions being: +// - bitfieldExtract and bitfieldInsert ignore the 'offset' and 'bits' +// arguments. +// - interpolateAt* functions only look at the 'interpolant' argument. +// +// The precision qualification of the result of a built-in function is +// determined in one of the following ways: +// +// - For the texture sampling, image load, and image store functions, +// the precision of the return type matches the precision of the +// sampler type +// +// Otherwise: +// +// - For prototypes that do not specify a resulting precision qualifier, +// the precision will be the same as the precision of the operation. +// +// - For prototypes that do specify a resulting precision qualifier, +// the specified precision qualifier is the precision qualification of +// the result." +// +void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction& function) +{ + TPrecisionQualifier operationPrecision = EpqNone; + TPrecisionQualifier resultPrecision = EpqNone; + + TIntermOperator* opNode = node.getAsOperator(); + if (opNode == nullptr) + return; + + if (TIntermUnary* unaryNode = node.getAsUnaryNode()) { + operationPrecision = std::max(function[0].type->getQualifier().precision, + unaryNode->getOperand()->getType().getQualifier().precision); + if (function.getType().getBasicType() != EbtBool) + resultPrecision = function.getType().getQualifier().precision == EpqNone ? 
+ operationPrecision : + function.getType().getQualifier().precision; + } else if (TIntermAggregate* agg = node.getAsAggregate()) { + TIntermSequence& sequence = agg->getSequence(); + unsigned int numArgs = (unsigned int)sequence.size(); + switch (agg->getOp()) { + case EOpBitfieldExtract: + numArgs = 1; + break; + case EOpBitfieldInsert: + numArgs = 2; + break; + case EOpInterpolateAtCentroid: + case EOpInterpolateAtOffset: + case EOpInterpolateAtSample: + numArgs = 1; + break; + case EOpDebugPrintf: + numArgs = 0; + break; + default: + break; + } + // find the maximum precision from the arguments and parameters + for (unsigned int arg = 0; arg < numArgs; ++arg) { + operationPrecision = std::max(operationPrecision, sequence[arg]->getAsTyped()->getQualifier().precision); + operationPrecision = std::max(operationPrecision, function[arg].type->getQualifier().precision); + } + // compute the result precision + if (agg->isSampling() || + agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore || + agg->getOp() == EOpImageLoadLod || agg->getOp() == EOpImageStoreLod) + resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision; + else if (function.getType().getBasicType() != EbtBool) + resultPrecision = function.getType().getQualifier().precision == EpqNone ? + operationPrecision : + function.getType().getQualifier().precision; + } + + // Propagate precision through this node and its children. That algorithm stops + // when a precision is found, so start by clearing this subroot precision + opNode->getQualifier().precision = EpqNone; + if (operationPrecision != EpqNone) { + opNode->propagatePrecision(operationPrecision); + opNode->setOperationPrecision(operationPrecision); + } + // Now, set the result precision, which might not match + opNode->getQualifier().precision = resultPrecision; +} + +TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value) +{ +#ifndef GLSLANG_WEB + storage16BitAssignmentCheck(loc, value->getType(), "return"); +#endif + + functionReturnsValue = true; + TIntermBranch* branch = nullptr; + if (currentFunctionType->getBasicType() == EbtVoid) { + error(loc, "void function cannot return a value", "return", ""); + branch = intermediate.addBranch(EOpReturn, loc); + } else if (*currentFunctionType != value->getType()) { + TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value); + if (converted) { + if (*currentFunctionType != converted->getType()) + error(loc, "cannot convert return value to function return type", "return", ""); + if (version < 420) + warn(loc, "type conversion on return values was not explicitly allowed until version 420", + "return", ""); + branch = intermediate.addBranch(EOpReturn, converted, loc); + } else { + error(loc, "type does not match, or is not convertible to, the function's return type", "return", ""); + branch = intermediate.addBranch(EOpReturn, value, loc); + } + } else + branch = intermediate.addBranch(EOpReturn, value, loc); + + branch->updatePrecision(currentFunctionType->getQualifier().precision); + return branch; +} + +// See if the operation is being done in an illegal location. +void TParseContext::checkLocation(const TSourceLoc& loc, TOperator op) +{ +#ifndef GLSLANG_WEB + switch (op) { + case EOpBarrier: + if (language == EShLangTessControl) { + if (controlFlowNestingLevel > 0) + error(loc, "tessellation control barrier() cannot be placed within flow control", "", ""); + if (! 
inMain) + error(loc, "tessellation control barrier() must be in main()", "", ""); + else if (postEntryPointReturn) + error(loc, "tessellation control barrier() cannot be placed after a return from main()", "", ""); + } + break; + case EOpBeginInvocationInterlock: + if (language != EShLangFragment) + error(loc, "beginInvocationInterlockARB() must be in a fragment shader", "", ""); + if (! inMain) + error(loc, "beginInvocationInterlockARB() must be in main()", "", ""); + else if (postEntryPointReturn) + error(loc, "beginInvocationInterlockARB() cannot be placed after a return from main()", "", ""); + if (controlFlowNestingLevel > 0) + error(loc, "beginInvocationInterlockARB() cannot be placed within flow control", "", ""); + + if (beginInvocationInterlockCount > 0) + error(loc, "beginInvocationInterlockARB() must only be called once", "", ""); + if (endInvocationInterlockCount > 0) + error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", ""); + + beginInvocationInterlockCount++; + + // default to pixel_interlock_ordered + if (intermediate.getInterlockOrdering() == EioNone) + intermediate.setInterlockOrdering(EioPixelInterlockOrdered); + break; + case EOpEndInvocationInterlock: + if (language != EShLangFragment) + error(loc, "endInvocationInterlockARB() must be in a fragment shader", "", ""); + if (! inMain) + error(loc, "endInvocationInterlockARB() must be in main()", "", ""); + else if (postEntryPointReturn) + error(loc, "endInvocationInterlockARB() cannot be placed after a return from main()", "", ""); + if (controlFlowNestingLevel > 0) + error(loc, "endInvocationInterlockARB() cannot be placed within flow control", "", ""); + + if (endInvocationInterlockCount > 0) + error(loc, "endInvocationInterlockARB() must only be called once", "", ""); + if (beginInvocationInterlockCount == 0) + error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", ""); + + endInvocationInterlockCount++; + break; + default: + break; + } +#endif +} + +// Finish processing object.length(). This started earlier in handleDotDereference(), where +// the ".length" part was recognized and semantically checked, and finished here where the +// function syntax "()" is recognized. +// +// Return resulting tree node. +TIntermTyped* TParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode) +{ + int length = 0; + + if (function->getParamCount() > 0) + error(loc, "method does not accept any arguments", function->getName().c_str(), ""); + else { + const TType& type = intermNode->getAsTyped()->getType(); + if (type.isArray()) { + if (type.isUnsizedArray()) { +#ifndef GLSLANG_WEB + if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) { + // We could be between a layout declaration that gives a built-in io array implicit size and + // a user redeclaration of that array, meaning we have to substitute its implicit size here + // without actually redeclaring the array. (It is an error to use a member before the + // redeclaration, but not an error to use the array name itself.) 
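+ // For example (illustrative): in a tessellation control shader, "gl_in.length()" is
+ // accepted at this point even before gl_in has been redeclared with an explicit size;
+ // the implicit size from the layout declaration is substituted below instead.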
+ const TString& name = intermNode->getAsSymbolNode()->getName(); + if (name == "gl_in" || name == "gl_out" || name == "gl_MeshVerticesNV" || + name == "gl_MeshPrimitivesNV") { + length = getIoArrayImplicitSize(type.getQualifier()); + } + } +#endif + if (length == 0) { +#ifndef GLSLANG_WEB + if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) + error(loc, "", function->getName().c_str(), "array must first be sized by a redeclaration or layout qualifier"); + else if (isRuntimeLength(*intermNode->getAsTyped())) { + // Create a unary op and let the back end handle it + return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt)); + } else +#endif + error(loc, "", function->getName().c_str(), "array must be declared with a size before using this method"); + } + } else if (type.getOuterArrayNode()) { + // If the array's outer size is specified by an intermediate node, it means the array's length + // was specified by a specialization constant. In such a case, we should return the node of the + // specialization constants to represent the length. + return type.getOuterArrayNode(); + } else + length = type.getOuterArraySize(); + } else if (type.isMatrix()) + length = type.getMatrixCols(); + else if (type.isVector()) + length = type.getVectorSize(); + else if (type.isCoopMat()) + return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt)); + else { + // we should not get here, because earlier semantic checking should have prevented this path + error(loc, ".length()", "unexpected use of .length()", ""); + } + } + + if (length == 0) + length = 1; + + return intermediate.addConstantUnion(length, loc); +} + +// +// Add any needed implicit conversions for function-call arguments to input parameters. +// +void TParseContext::addInputArgumentConversions(const TFunction& function, TIntermNode*& arguments) const +{ +#ifndef GLSLANG_WEB + TIntermAggregate* aggregate = arguments->getAsAggregate(); + + // Process each argument's conversion + for (int i = 0; i < function.getParamCount(); ++i) { + // At this early point there is a slight ambiguity between whether an aggregate 'arguments' + // is the single argument itself or its children are the arguments. Only one argument + // means take 'arguments' itself as the one argument. + TIntermTyped* arg = function.getParamCount() == 1 ? arguments->getAsTyped() : (aggregate ? aggregate->getSequence()[i]->getAsTyped() : arguments->getAsTyped()); + if (*function[i].type != arg->getType()) { + if (function[i].type->getQualifier().isParamInput() && + !function[i].type->isCoopMat()) { + // In-qualified arguments just need an extra node added above the argument to + // convert to the correct type. + arg = intermediate.addConversion(EOpFunctionCall, *function[i].type, arg); + if (arg) { + if (function.getParamCount() == 1) + arguments = arg; + else { + if (aggregate) + aggregate->getSequence()[i] = arg; + else + arguments = arg; + } + } + } + } + } +#endif +} + +// +// Add any needed implicit output conversions for function-call arguments. This +// can require a new tree topology, complicated further by whether the function +// has a return value. +// +// Returns a node of a subtree that evaluates to the return value of the function. 
+// +TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& function, TIntermAggregate& intermNode) const +{ +#ifdef GLSLANG_WEB + return &intermNode; +#else + TIntermSequence& arguments = intermNode.getSequence(); + + // Will there be any output conversions? + bool outputConversions = false; + for (int i = 0; i < function.getParamCount(); ++i) { + if (*function[i].type != arguments[i]->getAsTyped()->getType() && function[i].type->getQualifier().isParamOutput()) { + outputConversions = true; + break; + } + } + + if (! outputConversions) + return &intermNode; + + // Setup for the new tree, if needed: + // + // Output conversions need a different tree topology. + // Out-qualified arguments need a temporary of the correct type, with the call + // followed by an assignment of the temporary to the original argument: + // void: function(arg, ...) -> ( function(tempArg, ...), arg = tempArg, ...) + // ret = function(arg, ...) -> ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet) + // Where the "tempArg" type needs no conversion as an argument, but will convert on assignment. + TIntermTyped* conversionTree = nullptr; + TVariable* tempRet = nullptr; + if (intermNode.getBasicType() != EbtVoid) { + // do the "tempRet = function(...), " bit from above + tempRet = makeInternalVariable("tempReturn", intermNode.getType()); + TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc()); + conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, intermNode.getLoc()); + } else + conversionTree = &intermNode; + + conversionTree = intermediate.makeAggregate(conversionTree); + + // Process each argument's conversion + for (int i = 0; i < function.getParamCount(); ++i) { + if (*function[i].type != arguments[i]->getAsTyped()->getType()) { + if (function[i].type->getQualifier().isParamOutput()) { + // Out-qualified arguments need to use the topology set up above. + // do the " ...(tempArg, ...), arg = tempArg" bit from above + TType paramType; + paramType.shallowCopy(*function[i].type); + if (arguments[i]->getAsTyped()->getType().isParameterized() && + !paramType.isParameterized()) { + paramType.shallowCopy(arguments[i]->getAsTyped()->getType()); + paramType.copyTypeParameters(*arguments[i]->getAsTyped()->getType().getTypeParameters()); + } + TVariable* tempArg = makeInternalVariable("tempArg", paramType); + tempArg->getWritableType().getQualifier().makeTemporary(); + TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, intermNode.getLoc()); + TIntermTyped* tempAssign = intermediate.addAssign(EOpAssign, arguments[i]->getAsTyped(), tempArgNode, arguments[i]->getLoc()); + conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc()); + // replace the argument with another node for the same tempArg variable + arguments[i] = intermediate.addSymbol(*tempArg, intermNode.getLoc()); + } + } + } + + // Finalize the tree topology (see bigger comment above). 
+ if (tempRet) { + // do the "..., tempRet" bit from above + TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc()); + conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, intermNode.getLoc()); + } + conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), intermNode.getLoc()); + + return conversionTree; +#endif +} + +TIntermTyped* TParseContext::addAssign(const TSourceLoc& loc, TOperator op, TIntermTyped* left, TIntermTyped* right) +{ + if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference()) + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "+= and -= on a buffer reference"); + + return intermediate.addAssign(op, left, right, loc); +} + +void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode) +{ + const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence(); + + //const int gl_SemanticsRelaxed = 0x0; + const int gl_SemanticsAcquire = 0x2; + const int gl_SemanticsRelease = 0x4; + const int gl_SemanticsAcquireRelease = 0x8; + const int gl_SemanticsMakeAvailable = 0x2000; + const int gl_SemanticsMakeVisible = 0x4000; + const int gl_SemanticsVolatile = 0x8000; + + //const int gl_StorageSemanticsNone = 0x0; + const int gl_StorageSemanticsBuffer = 0x40; + const int gl_StorageSemanticsShared = 0x100; + const int gl_StorageSemanticsImage = 0x800; + const int gl_StorageSemanticsOutput = 0x1000; + + + unsigned int semantics = 0, storageClassSemantics = 0; + unsigned int semantics2 = 0, storageClassSemantics2 = 0; + + const TIntermTyped* arg0 = (*argp)[0]->getAsTyped(); + const bool isMS = arg0->getBasicType() == EbtSampler && arg0->getType().getSampler().isMultiSample(); + + // Grab the semantics and storage class semantics from the operands, based on opcode + switch (callNode.getOp()) { + case EOpAtomicAdd: + case EOpAtomicMin: + case EOpAtomicMax: + case EOpAtomicAnd: + case EOpAtomicOr: + case EOpAtomicXor: + case EOpAtomicExchange: + case EOpAtomicStore: + storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpAtomicLoad: + storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpAtomicCompSwap: + storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst(); + storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + + case EOpImageAtomicAdd: + case EOpImageAtomicMin: + case EOpImageAtomicMax: + case EOpImageAtomicAnd: + case EOpImageAtomicOr: + case EOpImageAtomicXor: + case EOpImageAtomicExchange: + case EOpImageAtomicStore: + storageClassSemantics = (*argp)[isMS ? 5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpImageAtomicLoad: + storageClassSemantics = (*argp)[isMS ? 4 : 3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[isMS ? 
5 : 4]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpImageAtomicCompSwap: + storageClassSemantics = (*argp)[isMS ? 6 : 5]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[isMS ? 7 : 6]->getAsConstantUnion()->getConstArray()[0].getIConst(); + storageClassSemantics2 = (*argp)[isMS ? 8 : 7]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics2 = (*argp)[isMS ? 9 : 8]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + + case EOpBarrier: + storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + case EOpMemoryBarrier: + storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst(); + semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); + break; + default: + break; + } + + if ((semantics & gl_SemanticsAcquire) && + (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) { + error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsRelease) && + (callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) { + error(loc, "gl_SemanticsRelease must not be used with (image) atomic load", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsAcquireRelease) && + (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore || + callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) { + error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store", + fnCandidate.getName().c_str(), ""); + } + if (((semantics | semantics2) & ~(gl_SemanticsAcquire | + gl_SemanticsRelease | + gl_SemanticsAcquireRelease | + gl_SemanticsMakeAvailable | + gl_SemanticsMakeVisible | + gl_SemanticsVolatile))) { + error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), ""); + } + if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer | + gl_StorageSemanticsShared | + gl_StorageSemanticsImage | + gl_StorageSemanticsOutput))) { + error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), ""); + } + + if (callNode.getOp() == EOpMemoryBarrier) { + if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or " + "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); + } + } else { + if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) { + if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or " + "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); + } + } + if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) { + if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or " + "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); + } + } + } + if (callNode.getOp() == EOpMemoryBarrier) { + if (storageClassSemantics == 0) { + error(loc, "Storage class semantics must not be 
zero", fnCandidate.getName().c_str(), ""); + } + } + if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) { + error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), ""); + } + if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) && + (semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsMakeAvailable) && + !(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { + error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsMakeVisible) && + !(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) { + error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease", + fnCandidate.getName().c_str(), ""); + } + if ((semantics & gl_SemanticsVolatile) && + (callNode.getOp() == EOpMemoryBarrier || callNode.getOp() == EOpBarrier)) { + error(loc, "gl_SemanticsVolatile must not be used with memoryBarrier or controlBarrier", + fnCandidate.getName().c_str(), ""); + } + if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) && + ((semantics ^ semantics2) & gl_SemanticsVolatile)) { + error(loc, "semEqual and semUnequal must either both include gl_SemanticsVolatile or neither", + fnCandidate.getName().c_str(), ""); + } +} + +// +// Do additional checking of built-in function calls that is not caught +// by normal semantic checks on argument type, extension tagging, etc. +// +// Assumes there has been a semantically correct match to a built-in function prototype. +// +void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode) +{ + // Set up convenience accessors to the argument(s). There is almost always + // multiple arguments for the cases below, but when there might be one, + // check the unaryArg first. + const TIntermSequence* argp = nullptr; // confusing to use [] syntax on a pointer, so this is to help get a reference + const TIntermTyped* unaryArg = nullptr; + const TIntermTyped* arg0 = nullptr; + if (callNode.getAsAggregate()) { + argp = &callNode.getAsAggregate()->getSequence(); + if (argp->size() > 0) + arg0 = (*argp)[0]->getAsTyped(); + } else { + assert(callNode.getAsUnaryNode()); + unaryArg = callNode.getAsUnaryNode()->getOperand(); + arg0 = unaryArg; + } + + TString featureString; + const char* feature = nullptr; + switch (callNode.getOp()) { +#ifndef GLSLANG_WEB + case EOpTextureGather: + case EOpTextureGatherOffset: + case EOpTextureGatherOffsets: + { + // Figure out which variants are allowed by what extensions, + // and what arguments must be constant for which situations. + + featureString = fnCandidate.getName(); + featureString += "(...)"; + feature = featureString.c_str(); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + int compArg = -1; // track which argument, if any, is the constant component argument + switch (callNode.getOp()) { + case EOpTextureGather: + // More than two arguments needs gpu_shader5, and rectangular or shadow needs gpu_shader5, + // otherwise, need GL_ARB_texture_gather. 
+ if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) { + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 2; + } else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + break; + case EOpTextureGatherOffset: + // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument + if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3) + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion()) + profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, + "non-constant offset argument"); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + break; + case EOpTextureGatherOffsets: + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + // check for constant offsets + if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion()) + error(loc, "must be a compile-time constant:", feature, "offsets argument"); + break; + default: + break; + } + + if (compArg > 0 && compArg < fnCandidate.getParamCount()) { + if ((*argp)[compArg]->getAsConstantUnion()) { + int value = (*argp)[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (value < 0 || value > 3) + error(loc, "must be 0, 1, 2, or 3:", feature, "component argument"); + } else + error(loc, "must be a compile-time constant:", feature, "component argument"); + } + + bool bias = false; + if (callNode.getOp() == EOpTextureGather) + bias = fnCandidate.getParamCount() > 3; + else if (callNode.getOp() == EOpTextureGatherOffset || + callNode.getOp() == EOpTextureGatherOffsets) + bias = fnCandidate.getParamCount() > 4; + + if (bias) { + featureString = fnCandidate.getName(); + featureString += "with bias argument"; + feature = featureString.c_str(); + profileRequires(loc, ~EEsProfile, 450, nullptr, feature); + requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature); + } + break; + } + case EOpSparseTextureGather: + case EOpSparseTextureGatherOffset: + case EOpSparseTextureGatherOffsets: + { + bool bias = false; + if (callNode.getOp() == EOpSparseTextureGather) + bias = fnCandidate.getParamCount() > 4; + else if (callNode.getOp() == EOpSparseTextureGatherOffset || + callNode.getOp() == EOpSparseTextureGatherOffsets) + bias = fnCandidate.getParamCount() > 5; + + if (bias) { + featureString = fnCandidate.getName(); + featureString += "with bias argument"; + feature = featureString.c_str(); + profileRequires(loc, ~EEsProfile, 450, nullptr, feature); + requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature); + } + + break; + } + + case EOpSparseTextureGatherLod: + case EOpSparseTextureGatherLodOffset: + case EOpSparseTextureGatherLodOffsets: + { + requireExtensions(loc, 1, &E_GL_ARB_sparse_texture2, fnCandidate.getName().c_str()); + break; + } + + case EOpSwizzleInvocations: + { + if (! 
(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "offset", ""); + else { + unsigned offset[4] = {}; + offset[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); + offset[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst(); + offset[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst(); + offset[3] = (*argp)[1]->getAsConstantUnion()->getConstArray()[3].getUConst(); + if (offset[0] > 3 || offset[1] > 3 || offset[2] > 3 || offset[3] > 3) + error(loc, "components must be in the range [0, 3]", "offset", ""); + } + + break; + } + + case EOpSwizzleInvocationsMasked: + { + if (! (*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "mask", ""); + else { + unsigned mask[3] = {}; + mask[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); + mask[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst(); + mask[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst(); + if (mask[0] > 31 || mask[1] > 31 || mask[2] > 31) + error(loc, "components must be in the range [0, 31]", "mask", ""); + } + + break; + } +#endif + + case EOpTextureOffset: + case EOpTextureFetchOffset: + case EOpTextureProjOffset: + case EOpTextureLodOffset: + case EOpTextureProjLodOffset: + case EOpTextureGradOffset: + case EOpTextureProjGradOffset: + { + // Handle texture-offset limits checking + // Pick which argument has to hold constant offsets + int arg = -1; + switch (callNode.getOp()) { + case EOpTextureOffset: arg = 2; break; + case EOpTextureFetchOffset: arg = (arg0->getType().getSampler().isRect()) ? 2 : 3; break; + case EOpTextureProjOffset: arg = 2; break; + case EOpTextureLodOffset: arg = 3; break; + case EOpTextureProjLodOffset: arg = 3; break; + case EOpTextureGradOffset: arg = 4; break; + case EOpTextureProjGradOffset: arg = 4; break; + default: + assert(0); + break; + } + + if (arg > 0) { + +#ifndef GLSLANG_WEB + bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 && + arg0->getType().getSampler().shadow; + if (f16ShadowCompare) + ++arg; +#endif + if (! 
(*argp)[arg]->getAsTyped()->getQualifier().isConstant()) + error(loc, "argument must be compile-time constant", "texel offset", ""); + else if ((*argp)[arg]->getAsConstantUnion()) { + const TType& type = (*argp)[arg]->getAsTyped()->getType(); + for (int c = 0; c < type.getVectorSize(); ++c) { + int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst(); + if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset) + error(loc, "value is out of range:", "texel offset", + "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]"); + } + } + } + + break; + } + +#ifndef GLSLANG_WEB + case EOpTrace: + if (!(*argp)[10]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "payload number", ""); + break; + case EOpExecuteCallable: + if (!(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "callable data number", ""); + break; + + case EOpRayQueryGetIntersectionType: + case EOpRayQueryGetIntersectionT: + case EOpRayQueryGetIntersectionInstanceCustomIndex: + case EOpRayQueryGetIntersectionInstanceId: + case EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: + case EOpRayQueryGetIntersectionGeometryIndex: + case EOpRayQueryGetIntersectionPrimitiveIndex: + case EOpRayQueryGetIntersectionBarycentrics: + case EOpRayQueryGetIntersectionFrontFace: + case EOpRayQueryGetIntersectionObjectRayDirection: + case EOpRayQueryGetIntersectionObjectRayOrigin: + case EOpRayQueryGetIntersectionObjectToWorld: + case EOpRayQueryGetIntersectionWorldToObject: + if (!(*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "committed", ""); + break; + + case EOpTextureQuerySamples: + case EOpImageQuerySamples: + // GL_ARB_shader_texture_image_samples + profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples"); + break; + + case EOpImageAtomicAdd: + case EOpImageAtomicMin: + case EOpImageAtomicMax: + case EOpImageAtomicAnd: + case EOpImageAtomicOr: + case EOpImageAtomicXor: + case EOpImageAtomicExchange: + case EOpImageAtomicCompSwap: + case EOpImageAtomicLoad: + case EOpImageAtomicStore: + { + // Make sure the image types have the correct layout() format and correct argument types + const TType& imageType = arg0->getType(); + if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { + if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) + error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); + } else { + bool isImageAtomicOnFloatAllowed = ((fnCandidate.getName().compare(0, 14, "imageAtomicAdd") == 0) || + (fnCandidate.getName().compare(0, 15, "imageAtomicLoad") == 0) || + (fnCandidate.getName().compare(0, 16, "imageAtomicStore") == 0) || + (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") == 0)); + if (imageType.getSampler().type == EbtFloat && isImageAtomicOnFloatAllowed && + (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0)) // imageAtomicExchange doesn't require GL_EXT_shader_atomic_float + requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str()); + if (!isImageAtomicOnFloatAllowed) + error(loc, "only supported on integer images", fnCandidate.getName().c_str(), ""); + else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile()) + error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), 
""); + } + + const size_t maxArgs = imageType.getSampler().isMultiSample() ? 5 : 4; + if (argp->size() > maxArgs) { + requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); + memorySemanticsCheck(loc, fnCandidate, callNode); + } + + break; + } + + case EOpAtomicAdd: + case EOpAtomicMin: + case EOpAtomicMax: + case EOpAtomicAnd: + case EOpAtomicOr: + case EOpAtomicXor: + case EOpAtomicExchange: + case EOpAtomicCompSwap: + case EOpAtomicLoad: + case EOpAtomicStore: + { + if (argp->size() > 3) { + requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); + memorySemanticsCheck(loc, fnCandidate, callNode); + if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange || + callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpAtomicStore) && + (arg0->getType().isFloatingDomain())) { + requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str()); + } + } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) { + const char* const extensions[2] = { E_GL_NV_shader_atomic_int64, + E_GL_EXT_shader_atomic_int64 }; + requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str()); + } else if ((callNode.getOp() == EOpAtomicAdd || callNode.getOp() == EOpAtomicExchange) && + (arg0->getType().isFloatingDomain())) { + requireExtensions(loc, 1, &E_GL_EXT_shader_atomic_float, fnCandidate.getName().c_str()); + } + break; + } + + case EOpInterpolateAtCentroid: + case EOpInterpolateAtSample: + case EOpInterpolateAtOffset: + case EOpInterpolateAtVertex: + // Make sure the first argument is an interpolant, or an array element of an interpolant + if (arg0->getType().getQualifier().storage != EvqVaryingIn) { + // It might still be an array element. + // + // We could check more, but the semantics of the first argument are already met; the + // only way to turn an array into a float/vec* is array dereference and swizzle. + // + // ES and desktop 4.3 and earlier: swizzles may not be used + // desktop 4.4 and later: swizzles may be used + bool swizzleOkay = (!isEsProfile()) && (version >= 440); + const TIntermTyped* base = TIntermediate::findLValueBase(arg0, swizzleOkay); + if (base == nullptr || base->getType().getQualifier().storage != EvqVaryingIn) + error(loc, "first argument must be an interpolant, or interpolant-array element", fnCandidate.getName().c_str(), ""); + } + + if (callNode.getOp() == EOpInterpolateAtVertex) { + if (!arg0->getType().getQualifier().isExplicitInterpolation()) + error(loc, "argument must be qualified as __explicitInterpAMD in", "interpolant", ""); + else { + if (! (*argp)[1]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "vertex index", ""); + else { + unsigned vertexIdx = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); + if (vertexIdx > 2) + error(loc, "must be in the range [0, 2]", "vertex index", ""); + } + } + } + break; + + case EOpEmitStreamVertex: + case EOpEndStreamPrimitive: + intermediate.setMultiStream(); + break; + + case EOpSubgroupClusteredAdd: + case EOpSubgroupClusteredMul: + case EOpSubgroupClusteredMin: + case EOpSubgroupClusteredMax: + case EOpSubgroupClusteredAnd: + case EOpSubgroupClusteredOr: + case EOpSubgroupClusteredXor: + // The as used in the subgroupClustered() operations must be: + // - An integral constant expression. + // - At least 1. + // - A power of 2. 
+ if ((*argp)[1]->getAsConstantUnion() == nullptr) + error(loc, "argument must be compile-time constant", "cluster size", ""); + else { + int size = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (size < 1) + error(loc, "argument must be at least 1", "cluster size", ""); + else if (!IsPow2(size)) + error(loc, "argument must be a power of 2", "cluster size", ""); + } + break; + + case EOpSubgroupBroadcast: + case EOpSubgroupQuadBroadcast: + if (spvVersion.spv < EShTargetSpv_1_5) { + // must be an integral constant expression. + if ((*argp)[1]->getAsConstantUnion() == nullptr) + error(loc, "argument must be compile-time constant", "id", ""); + } + break; + + case EOpBarrier: + case EOpMemoryBarrier: + if (argp->size() > 0) { + requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); + memorySemanticsCheck(loc, fnCandidate, callNode); + } + break; + + case EOpMix: + if (profile == EEsProfile && version < 310) { + // Look for specific signatures + if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) { + requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, "specific signature of builtin mix"); + } + } + + if (profile != EEsProfile && version < 450) { + if ((*argp)[0]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[0]->getAsTyped()->getBasicType() != EbtDouble && + (*argp)[1]->getAsTyped()->getBasicType() != EbtFloat && + (*argp)[1]->getAsTyped()->getBasicType() != EbtDouble && + (*argp)[2]->getAsTyped()->getBasicType() == EbtBool) { + requireExtensions(loc, 1, &E_GL_EXT_shader_integer_mix, fnCandidate.getName().c_str()); + } + } + + break; +#endif + + default: + break; + } + + // Texture operations on texture objects (aside from texelFetch on a + // textureBuffer) require EXT_samplerless_texture_functions. + switch (callNode.getOp()) { + case EOpTextureQuerySize: + case EOpTextureQueryLevels: + case EOpTextureQuerySamples: + case EOpTextureFetch: + case EOpTextureFetchOffset: + { + const TSampler& sampler = fnCandidate[0].type->getSampler(); + + const bool isTexture = sampler.isTexture() && !sampler.isCombined(); + const bool isBuffer = sampler.isBuffer(); + const bool isFetch = callNode.getOp() == EOpTextureFetch || callNode.getOp() == EOpTextureFetchOffset; + + if (isTexture && (!isBuffer || !isFetch)) + requireExtensions(loc, 1, &E_GL_EXT_samplerless_texture_functions, fnCandidate.getName().c_str()); + + break; + } + + default: + break; + } + + if (callNode.isSubgroup()) { + // these require SPIR-V 1.3 + if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_3) + error(loc, "requires SPIR-V 1.3", "subgroup op", ""); + + // Check that if extended types are being used that the correct extensions are enabled. 
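+ // For example (illustrative): calling subgroupAdd() on a float16_t value is only accepted
+ // when GL_EXT_shader_subgroup_extended_types_float16 is enabled; the switch below applies
+ // the analogous checks for the 8-, 16-, and 64-bit integer types.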
+ if (arg0 != nullptr) { + const TType& type = arg0->getType(); + switch (type.getBasicType()) { + default: + break; + case EbtInt8: + case EbtUint8: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int8, type.getCompleteString().c_str()); + break; + case EbtInt16: + case EbtUint16: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int16, type.getCompleteString().c_str()); + break; + case EbtInt64: + case EbtUint64: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int64, type.getCompleteString().c_str()); + break; + case EbtFloat16: + requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_float16, type.getCompleteString().c_str()); + break; + } + } + } +} + +#ifndef GLSLANG_WEB + +extern bool PureOperatorBuiltins; + +// Deprecated! Use PureOperatorBuiltins == true instead, in which case this +// functionality is handled in builtInOpCheck() instead of here. +// +// Do additional checking of built-in function calls that were not mapped +// to built-in operations (e.g., texturing functions). +// +// Assumes there has been a semantically correct match to a built-in function. +// +void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode) +{ + // Further maintenance of this function is deprecated, because the "correct" + // future-oriented design is to not have to do string compares on function names. + + // If PureOperatorBuiltins == true, then all built-ins should be mapped + // to a TOperator, and this function would then never get called. + + assert(PureOperatorBuiltins == false); + + // built-in texturing functions get their return value precision from the precision of the sampler + if (fnCandidate.getType().getQualifier().precision == EpqNone && + fnCandidate.getParamCount() > 0 && fnCandidate[0].type->getBasicType() == EbtSampler) + callNode.getQualifier().precision = callNode.getSequence()[0]->getAsTyped()->getQualifier().precision; + + if (fnCandidate.getName().compare(0, 7, "texture") == 0) { + if (fnCandidate.getName().compare(0, 13, "textureGather") == 0) { + TString featureString = fnCandidate.getName() + "(...)"; + const char* feature = featureString.c_str(); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + + int compArg = -1; // track which argument, if any, is the constant component argument + if (fnCandidate.getName().compare("textureGatherOffset") == 0) { + // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument + if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3) + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2; + if (! callNode.getSequence()[offsetArg]->getAsConstantUnion()) + profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, + "non-constant offset argument"); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + } else if (fnCandidate.getName().compare("textureGatherOffsets") == 0) { + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 3; + // check for constant offsets + int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2; + if (! 
callNode.getSequence()[offsetArg]->getAsConstantUnion()) + error(loc, "must be a compile-time constant:", feature, "offsets argument"); + } else if (fnCandidate.getName().compare("textureGather") == 0) { + // More than two arguments needs gpu_shader5, and rectangular or shadow needs gpu_shader5, + // otherwise, need GL_ARB_texture_gather. + if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) { + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); + if (! fnCandidate[0].type->getSampler().shadow) + compArg = 2; + } else + profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); + } + + if (compArg > 0 && compArg < fnCandidate.getParamCount()) { + if (callNode.getSequence()[compArg]->getAsConstantUnion()) { + int value = callNode.getSequence()[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (value < 0 || value > 3) + error(loc, "must be 0, 1, 2, or 3:", feature, "component argument"); + } else + error(loc, "must be a compile-time constant:", feature, "component argument"); + } + } else { + // this is only for functions not starting "textureGather"... + if (fnCandidate.getName().find("Offset") != TString::npos) { + + // Handle texture-offset limits checking + int arg = -1; + if (fnCandidate.getName().compare("textureOffset") == 0) + arg = 2; + else if (fnCandidate.getName().compare("texelFetchOffset") == 0) + arg = 3; + else if (fnCandidate.getName().compare("textureProjOffset") == 0) + arg = 2; + else if (fnCandidate.getName().compare("textureLodOffset") == 0) + arg = 3; + else if (fnCandidate.getName().compare("textureProjLodOffset") == 0) + arg = 3; + else if (fnCandidate.getName().compare("textureGradOffset") == 0) + arg = 4; + else if (fnCandidate.getName().compare("textureProjGradOffset") == 0) + arg = 4; + + if (arg > 0) { + if (! 
callNode.getSequence()[arg]->getAsConstantUnion()) + error(loc, "argument must be compile-time constant", "texel offset", ""); + else { + const TType& type = callNode.getSequence()[arg]->getAsTyped()->getType(); + for (int c = 0; c < type.getVectorSize(); ++c) { + int offset = callNode.getSequence()[arg]->getAsConstantUnion()->getConstArray()[c].getIConst(); + if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset) + error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]"); + } + } + } + } + } + } + + // GL_ARB_shader_texture_image_samples + if (fnCandidate.getName().compare(0, 14, "textureSamples") == 0 || fnCandidate.getName().compare(0, 12, "imageSamples") == 0) + profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples"); + + if (fnCandidate.getName().compare(0, 11, "imageAtomic") == 0) { + const TType& imageType = callNode.getSequence()[0]->getAsTyped()->getType(); + if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { + if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) + error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); + } else { + if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0) + error(loc, "only supported on integer images", fnCandidate.getName().c_str(), ""); + else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile()) + error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), ""); + } + } +} + +#endif + +// +// Do any extra checking for a user function call. +// +void TParseContext::userFunctionCallCheck(const TSourceLoc& loc, TIntermAggregate& callNode) +{ + TIntermSequence& arguments = callNode.getSequence(); + + for (int i = 0; i < (int)arguments.size(); ++i) + samplerConstructorLocationCheck(loc, "call argument", arguments[i]); +} + +// +// Emit an error if this is a sampler constructor +// +void TParseContext::samplerConstructorLocationCheck(const TSourceLoc& loc, const char* token, TIntermNode* node) +{ + if (node->getAsOperator() && node->getAsOperator()->getOp() == EOpConstructTextureSampler) + error(loc, "sampler constructor must appear at point of use", token, ""); +} + +// +// Handle seeing a built-in constructor in a grammar production. +// +TFunction* TParseContext::handleConstructorCall(const TSourceLoc& loc, const TPublicType& publicType) +{ + TType type(publicType); + type.getQualifier().precision = EpqNone; + + if (type.isArray()) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed constructor"); + profileRequires(loc, EEsProfile, 300, nullptr, "arrayed constructor"); + } + + TOperator op = intermediate.mapTypeToConstructorOp(type); + + if (op == EOpNull) { + error(loc, "cannot construct this type", type.getBasicString(), ""); + op = EOpConstructFloat; + TType errorType(EbtFloat); + type.shallowCopy(errorType); + } + + TString empty(""); + + return new TFunction(&empty, type, op); +} + +// Handle seeing a precision qualifier in the grammar. +void TParseContext::handlePrecisionQualifier(const TSourceLoc& /*loc*/, TQualifier& qualifier, TPrecisionQualifier precision) +{ + if (obeyPrecisionQualifiers()) + qualifier.precision = precision; +} + +// Check for messages to give on seeing a precision qualifier used in a +// declaration in the grammar. 
+void TParseContext::checkPrecisionQualifier(const TSourceLoc& loc, TPrecisionQualifier) +{ + if (precisionManager.shouldWarnAboutDefaults()) { + warn(loc, "all default precisions are highp; use precision statements to quiet warning, e.g.:\n" + " \"precision mediump int; precision highp float;\"", "", ""); + precisionManager.defaultWarningGiven(); + } +} + +// +// Same error message for all places assignments don't work. +// +void TParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right) +{ + error(loc, "", op, "cannot convert from '%s' to '%s'", + right.c_str(), left.c_str()); +} + +// +// Same error message for all places unary operations don't work. +// +void TParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand) +{ + error(loc, " wrong operand type", op, + "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)", + op, operand.c_str()); +} + +// +// Same error message for all binary operations don't work. +// +void TParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right) +{ + error(loc, " wrong operand types:", op, + "no operation '%s' exists that takes a left-hand operand of type '%s' and " + "a right operand of type '%s' (or there is no acceptable conversion)", + op, left.c_str(), right.c_str()); +} + +// +// A basic type of EbtVoid is a key that the name string was seen in the source, but +// it was not found as a variable in the symbol table. If so, give the error +// message and insert a dummy variable in the symbol table to prevent future errors. +// +void TParseContext::variableCheck(TIntermTyped*& nodePtr) +{ + TIntermSymbol* symbol = nodePtr->getAsSymbolNode(); + if (! symbol) + return; + + if (symbol->getType().getBasicType() == EbtVoid) { + const char *extraInfoFormat = ""; + if (spvVersion.vulkan != 0 && symbol->getName() == "gl_VertexID") { + extraInfoFormat = "(Did you mean gl_VertexIndex?)"; + } else if (spvVersion.vulkan != 0 && symbol->getName() == "gl_InstanceID") { + extraInfoFormat = "(Did you mean gl_InstanceIndex?)"; + } + error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), extraInfoFormat); + + // Add to symbol table to prevent future error messages on the same name + if (symbol->getName().size() > 0) { + TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat)); + symbolTable.insert(*fakeVariable); + + // substitute a symbol node for this new variable + nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc()); + } + } else { + switch (symbol->getQualifier().storage) { + case EvqPointCoord: + profileRequires(symbol->getLoc(), ENoProfile, 120, nullptr, "gl_PointCoord"); + break; + default: break; // some compilers want this + } + } +} + +// +// Both test and if necessary, spit out an error, to see if the node is really +// an l-value that can be operated on this way. +// +// Returns true if there was an error. +// +bool TParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + TIntermBinary* binaryNode = node->getAsBinaryNode(); + + if (binaryNode) { + bool errorReturn = false; + + switch(binaryNode->getOp()) { +#ifndef GLSLANG_WEB + case EOpIndexDirect: + case EOpIndexIndirect: + // ... tessellation control shader ... + // If a per-vertex output variable is used as an l-value, it is a + // compile-time or link-time error if the expression indicating the + // vertex index is not the identifier gl_InvocationID. 
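+ // For example (illustrative), in a tessellation control shader:
+ //     gl_out[gl_InvocationID].gl_Position = p;   // allowed
+ //     gl_out[0].gl_Position = p;                 // error: index is not gl_InvocationID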
+ if (language == EShLangTessControl) { + const TType& leftType = binaryNode->getLeft()->getType(); + if (leftType.getQualifier().storage == EvqVaryingOut && ! leftType.getQualifier().patch && binaryNode->getLeft()->getAsSymbolNode()) { + // we have a per-vertex output + const TIntermSymbol* rightSymbol = binaryNode->getRight()->getAsSymbolNode(); + if (! rightSymbol || rightSymbol->getQualifier().builtIn != EbvInvocationId) + error(loc, "tessellation-control per-vertex output l-value must be indexed with gl_InvocationID", "[]", ""); + } + } + break; // left node is checked by base class +#endif + case EOpVectorSwizzle: + errorReturn = lValueErrorCheck(loc, op, binaryNode->getLeft()); + if (!errorReturn) { + int offset[4] = {0,0,0,0}; + + TIntermTyped* rightNode = binaryNode->getRight(); + TIntermAggregate *aggrNode = rightNode->getAsAggregate(); + + for (TIntermSequence::iterator p = aggrNode->getSequence().begin(); + p != aggrNode->getSequence().end(); p++) { + int value = (*p)->getAsTyped()->getAsConstantUnion()->getConstArray()[0].getIConst(); + offset[value]++; + if (offset[value] > 1) { + error(loc, " l-value of swizzle cannot have duplicate components", op, "", ""); + + return true; + } + } + } + + return errorReturn; + default: + break; + } + + if (errorReturn) { + error(loc, " l-value required", op, "", ""); + return true; + } + } + + if (binaryNode && binaryNode->getOp() == EOpIndexDirectStruct && binaryNode->getLeft()->isReference()) + return false; + + // Let the base class check errors + if (TParseContextBase::lValueErrorCheck(loc, op, node)) + return true; + + const char* symbol = nullptr; + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (symNode != nullptr) + symbol = symNode->getName().c_str(); + + const char* message = nullptr; + switch (node->getQualifier().storage) { + case EvqVaryingIn: message = "can't modify shader input"; break; + case EvqInstanceId: message = "can't modify gl_InstanceID"; break; + case EvqVertexId: message = "can't modify gl_VertexID"; break; + case EvqFace: message = "can't modify gl_FrontFace"; break; + case EvqFragCoord: message = "can't modify gl_FragCoord"; break; + case EvqPointCoord: message = "can't modify gl_PointCoord"; break; + case EvqFragDepth: + intermediate.setDepthReplacing(); + // "In addition, it is an error to statically write to gl_FragDepth in the fragment shader." + if (isEsProfile() && intermediate.getEarlyFragmentTests()) + message = "can't modify gl_FragDepth if using early_fragment_tests"; + break; + + default: + break; + } + + if (message == nullptr && binaryNode == nullptr && symNode == nullptr) { + error(loc, " l-value required", op, "", ""); + + return true; + } + + // + // Everything else is okay, no error. + // + if (message == nullptr) + return false; + + // + // If we get here, we have an error and a message. + // + if (symNode) + error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message); + else + error(loc, " l-value required", op, "(%s)", message); + + return true; +} + +// Test for and give an error if the node can't be read from. 
+void TParseContext::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) +{ + // Let the base class check errors + TParseContextBase::rValueErrorCheck(loc, op, node); + + TIntermSymbol* symNode = node->getAsSymbolNode(); + if (!(symNode && symNode->getQualifier().isWriteOnly())) // base class checks + if (symNode && symNode->getQualifier().isExplicitInterpolation()) + error(loc, "can't read from explicitly-interpolated object: ", op, symNode->getName().c_str()); +} + +// +// Both test, and if necessary spit out an error, to see if the node is really +// a constant. +// +void TParseContext::constantValueCheck(TIntermTyped* node, const char* token) +{ + if (! node->getQualifier().isConstant()) + error(node->getLoc(), "constant expression required", token, ""); +} + +// +// Both test, and if necessary spit out an error, to see if the node is really +// an integer. +// +void TParseContext::integerCheck(const TIntermTyped* node, const char* token) +{ + if ((node->getBasicType() == EbtInt || node->getBasicType() == EbtUint) && node->isScalar()) + return; + + error(node->getLoc(), "scalar integer expression required", token, ""); +} + +// +// Both test, and if necessary spit out an error, to see if we are currently +// globally scoped. +// +void TParseContext::globalCheck(const TSourceLoc& loc, const char* token) +{ + if (! symbolTable.atGlobalLevel()) + error(loc, "not allowed in nested scope", token, ""); +} + +// +// Reserved errors for GLSL. +// +void TParseContext::reservedErrorCheck(const TSourceLoc& loc, const TString& identifier) +{ + // "Identifiers starting with "gl_" are reserved for use by OpenGL, and may not be + // declared in a shader; this results in a compile-time error." + if (! symbolTable.atBuiltInLevel()) { + if (builtInName(identifier)) + error(loc, "identifiers starting with \"gl_\" are reserved", identifier.c_str(), ""); + + // "__" are not supposed to be an error. ES 300 (and desktop) added the clarification: + // "In addition, all identifiers containing two consecutive underscores (__) are + // reserved; using such a name does not itself result in an error, but may result + // in undefined behavior." + // however, before that, ES tests required an error. + if (identifier.find("__") != TString::npos) { + if (isEsProfile() && version < 300) + error(loc, "identifiers containing consecutive underscores (\"__\") are reserved, and an error if version < 300", identifier.c_str(), ""); + else + warn(loc, "identifiers containing consecutive underscores (\"__\") are reserved", identifier.c_str(), ""); + } + } +} + +// +// Reserved errors for the preprocessor. +// +void TParseContext::reservedPpErrorCheck(const TSourceLoc& loc, const char* identifier, const char* op) +{ + // "__" are not supposed to be an error. ES 300 (and desktop) added the clarification: + // "All macro names containing two consecutive underscores ( __ ) are reserved; + // defining such a name does not itself result in an error, but may result in + // undefined behavior. All macro names prefixed with "GL_" ("GL" followed by a + // single underscore) are also reserved, and defining such a name results in a + // compile-time error." + // however, before that, ES tests required an error. 
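+    // Editorial note (illustrative, not part of upstream glslang): the checks below
+    // reject, for example,
+    //     #define GL_FOO 1    // compile-time error: names starting with "GL_" are reserved
+    //     #define A__B 1      // consecutive underscores: error on ES before 300, otherwise a warning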
+ if (strncmp(identifier, "GL_", 3) == 0) + ppError(loc, "names beginning with \"GL_\" can't be (un)defined:", op, identifier); + else if (strncmp(identifier, "defined", 8) == 0) + if (relaxedErrors()) + ppWarn(loc, "\"defined\" is (un)defined:", op, identifier); + else + ppError(loc, "\"defined\" can't be (un)defined:", op, identifier); + else if (strstr(identifier, "__") != 0) { + if (isEsProfile() && version >= 300 && + (strcmp(identifier, "__LINE__") == 0 || + strcmp(identifier, "__FILE__") == 0 || + strcmp(identifier, "__VERSION__") == 0)) + ppError(loc, "predefined names can't be (un)defined:", op, identifier); + else { + if (isEsProfile() && version < 300 && !relaxedErrors()) + ppError(loc, "names containing consecutive underscores are reserved, and an error if version < 300:", op, identifier); + else + ppWarn(loc, "names containing consecutive underscores are reserved:", op, identifier); + } + } +} + +// +// See if this version/profile allows use of the line-continuation character '\'. +// +// Returns true if a line continuation should be done. +// +bool TParseContext::lineContinuationCheck(const TSourceLoc& loc, bool endOfComment) +{ +#ifdef GLSLANG_WEB + return true; +#endif + + const char* message = "line continuation"; + + bool lineContinuationAllowed = (isEsProfile() && version >= 300) || + (!isEsProfile() && (version >= 420 || extensionTurnedOn(E_GL_ARB_shading_language_420pack))); + + if (endOfComment) { + if (lineContinuationAllowed) + warn(loc, "used at end of comment; the following line is still part of the comment", message, ""); + else + warn(loc, "used at end of comment, but this version does not provide line continuation", message, ""); + + return lineContinuationAllowed; + } + + if (relaxedErrors()) { + if (! lineContinuationAllowed) + warn(loc, "not allowed in this version", message, ""); + return true; + } else { + profileRequires(loc, EEsProfile, 300, nullptr, message); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, message); + } + + return lineContinuationAllowed; +} + +bool TParseContext::builtInName(const TString& identifier) +{ + return identifier.compare(0, 3, "gl_") == 0; +} + +// +// Make sure there is enough data and not too many arguments provided to the +// constructor to build something of the type of the constructor. Also returns +// the type of the constructor. +// +// Part of establishing type is establishing specialization-constness. +// We don't yet know "top down" whether type is a specialization constant, +// but a const constructor can becomes a specialization constant if any of +// its children are, subject to KHR_vulkan_glsl rules: +// +// - int(), uint(), and bool() constructors for type conversions +// from any of the following types to any of the following types: +// * int +// * uint +// * bool +// - vector versions of the above conversion constructors +// +// Returns true if there was an error in construction. +// +bool TParseContext::constructorError(const TSourceLoc& loc, TIntermNode* node, TFunction& function, TOperator op, TType& type) +{ + // See if the constructor does not establish the main type, only requalifies + // it, in which case the type comes from the argument instead of from the + // constructor function. 
+ switch (op) { +#ifndef GLSLANG_WEB + case EOpConstructNonuniform: + if (node != nullptr && node->getAsTyped() != nullptr) { + type.shallowCopy(node->getAsTyped()->getType()); + type.getQualifier().makeTemporary(); + type.getQualifier().nonUniform = true; + } + break; +#endif + default: + type.shallowCopy(function.getType()); + break; + } + + // See if it's a matrix + bool constructingMatrix = false; + switch (op) { + case EOpConstructTextureSampler: + return constructorTextureSamplerError(loc, function); + case EOpConstructMat2x2: + case EOpConstructMat2x3: + case EOpConstructMat2x4: + case EOpConstructMat3x2: + case EOpConstructMat3x3: + case EOpConstructMat3x4: + case EOpConstructMat4x2: + case EOpConstructMat4x3: + case EOpConstructMat4x4: +#ifndef GLSLANG_WEB + case EOpConstructDMat2x2: + case EOpConstructDMat2x3: + case EOpConstructDMat2x4: + case EOpConstructDMat3x2: + case EOpConstructDMat3x3: + case EOpConstructDMat3x4: + case EOpConstructDMat4x2: + case EOpConstructDMat4x3: + case EOpConstructDMat4x4: + case EOpConstructF16Mat2x2: + case EOpConstructF16Mat2x3: + case EOpConstructF16Mat2x4: + case EOpConstructF16Mat3x2: + case EOpConstructF16Mat3x3: + case EOpConstructF16Mat3x4: + case EOpConstructF16Mat4x2: + case EOpConstructF16Mat4x3: + case EOpConstructF16Mat4x4: +#endif + constructingMatrix = true; + break; + default: + break; + } + + // + // Walk the arguments for first-pass checks and collection of information. + // + + int size = 0; + bool constType = true; + bool specConstType = false; // value is only valid if constType is true + bool full = false; + bool overFull = false; + bool matrixInMatrix = false; + bool arrayArg = false; + bool floatArgument = false; + for (int arg = 0; arg < function.getParamCount(); ++arg) { + if (function[arg].type->isArray()) { + if (function[arg].type->isUnsizedArray()) { + // Can't construct from an unsized array. + error(loc, "array argument must be sized", "constructor", ""); + return true; + } + arrayArg = true; + } + if (constructingMatrix && function[arg].type->isMatrix()) + matrixInMatrix = true; + + // 'full' will go to true when enough args have been seen. If we loop + // again, there is an extra argument. + if (full) { + // For vectors and matrices, it's okay to have too many components + // available, but not okay to have unused arguments. + overFull = true; + } + + size += function[arg].type->computeNumComponents(); + if (op != EOpConstructStruct && ! type.isArray() && size >= type.computeNumComponents()) + full = true; + + if (! 
function[arg].type->getQualifier().isConstant()) + constType = false; + if (function[arg].type->getQualifier().isSpecConstant()) + specConstType = true; + if (function[arg].type->isFloatingDomain()) + floatArgument = true; + if (type.isStruct()) { + if (function[arg].type->contains16BitFloat()) { + requireFloat16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type"); + } + if (function[arg].type->contains16BitInt()) { + requireInt16Arithmetic(loc, "constructor", "can't construct structure containing 16-bit type"); + } + if (function[arg].type->contains8BitInt()) { + requireInt8Arithmetic(loc, "constructor", "can't construct structure containing 8-bit type"); + } + } + } + if (op == EOpConstructNonuniform) + constType = false; + +#ifndef GLSLANG_WEB + switch (op) { + case EOpConstructFloat16: + case EOpConstructF16Vec2: + case EOpConstructF16Vec3: + case EOpConstructF16Vec4: + if (type.isArray()) + requireFloat16Arithmetic(loc, "constructor", "16-bit arrays not supported"); + if (type.isVector() && function.getParamCount() != 1) + requireFloat16Arithmetic(loc, "constructor", "16-bit vectors only take vector types"); + break; + case EOpConstructUint16: + case EOpConstructU16Vec2: + case EOpConstructU16Vec3: + case EOpConstructU16Vec4: + case EOpConstructInt16: + case EOpConstructI16Vec2: + case EOpConstructI16Vec3: + case EOpConstructI16Vec4: + if (type.isArray()) + requireInt16Arithmetic(loc, "constructor", "16-bit arrays not supported"); + if (type.isVector() && function.getParamCount() != 1) + requireInt16Arithmetic(loc, "constructor", "16-bit vectors only take vector types"); + break; + case EOpConstructUint8: + case EOpConstructU8Vec2: + case EOpConstructU8Vec3: + case EOpConstructU8Vec4: + case EOpConstructInt8: + case EOpConstructI8Vec2: + case EOpConstructI8Vec3: + case EOpConstructI8Vec4: + if (type.isArray()) + requireInt8Arithmetic(loc, "constructor", "8-bit arrays not supported"); + if (type.isVector() && function.getParamCount() != 1) + requireInt8Arithmetic(loc, "constructor", "8-bit vectors only take vector types"); + break; + default: + break; + } +#endif + + // inherit constness from children + if (constType) { + bool makeSpecConst; + // Finish pinning down spec-const semantics + if (specConstType) { + switch (op) { + case EOpConstructInt8: + case EOpConstructInt: + case EOpConstructUint: + case EOpConstructBool: + case EOpConstructBVec2: + case EOpConstructBVec3: + case EOpConstructBVec4: + case EOpConstructIVec2: + case EOpConstructIVec3: + case EOpConstructIVec4: + case EOpConstructUVec2: + case EOpConstructUVec3: + case EOpConstructUVec4: +#ifndef GLSLANG_WEB + case EOpConstructUint8: + case EOpConstructInt16: + case EOpConstructUint16: + case EOpConstructInt64: + case EOpConstructUint64: + case EOpConstructI8Vec2: + case EOpConstructI8Vec3: + case EOpConstructI8Vec4: + case EOpConstructU8Vec2: + case EOpConstructU8Vec3: + case EOpConstructU8Vec4: + case EOpConstructI16Vec2: + case EOpConstructI16Vec3: + case EOpConstructI16Vec4: + case EOpConstructU16Vec2: + case EOpConstructU16Vec3: + case EOpConstructU16Vec4: + case EOpConstructI64Vec2: + case EOpConstructI64Vec3: + case EOpConstructI64Vec4: + case EOpConstructU64Vec2: + case EOpConstructU64Vec3: + case EOpConstructU64Vec4: +#endif + // This was the list of valid ones, if they aren't converting from float + // and aren't making an array. + makeSpecConst = ! floatArgument && ! 
type.isArray(); + break; + default: + // anything else wasn't white-listed in the spec as a conversion + makeSpecConst = false; + break; + } + } else + makeSpecConst = false; + + if (makeSpecConst) + type.getQualifier().makeSpecConstant(); + else if (specConstType) + type.getQualifier().makeTemporary(); + else + type.getQualifier().storage = EvqConst; + } + + if (type.isArray()) { + if (function.getParamCount() == 0) { + error(loc, "array constructor must have at least one argument", "constructor", ""); + return true; + } + + if (type.isUnsizedArray()) { + // auto adapt the constructor type to the number of arguments + type.changeOuterArraySize(function.getParamCount()); + } else if (type.getOuterArraySize() != function.getParamCount()) { + error(loc, "array constructor needs one argument per array element", "constructor", ""); + return true; + } + + if (type.isArrayOfArrays()) { + // Types have to match, but we're still making the type. + // Finish making the type, and the comparison is done later + // when checking for conversion. + TArraySizes& arraySizes = *type.getArraySizes(); + + // At least the dimensionalities have to match. + if (! function[0].type->isArray() || + arraySizes.getNumDims() != function[0].type->getArraySizes()->getNumDims() + 1) { + error(loc, "array constructor argument not correct type to construct array element", "constructor", ""); + return true; + } + + if (arraySizes.isInnerUnsized()) { + // "Arrays of arrays ..., and the size for any dimension is optional" + // That means we need to adopt (from the first argument) the other array sizes into the type. + for (int d = 1; d < arraySizes.getNumDims(); ++d) { + if (arraySizes.getDimSize(d) == UnsizedArraySize) { + arraySizes.setDimSize(d, function[0].type->getArraySizes()->getDimSize(d - 1)); + } + } + } + } + } + + if (arrayArg && op != EOpConstructStruct && ! type.isArrayOfArrays()) { + error(loc, "constructing non-array constituent from array argument", "constructor", ""); + return true; + } + + if (matrixInMatrix && ! type.isArray()) { + profileRequires(loc, ENoProfile, 120, nullptr, "constructing matrix from matrix"); + + // "If a matrix argument is given to a matrix constructor, + // it is a compile-time error to have any other arguments." + if (function.getParamCount() != 1) + error(loc, "matrix constructed from matrix can only have one argument", "constructor", ""); + return false; + } + + if (overFull) { + error(loc, "too many arguments", "constructor", ""); + return true; + } + + if (op == EOpConstructStruct && ! 
type.isArray() && (int)type.getStruct()->size() != function.getParamCount()) { + error(loc, "Number of constructor parameters does not match the number of structure fields", "constructor", ""); + return true; + } + + if ((op != EOpConstructStruct && size != 1 && size < type.computeNumComponents()) || + (op == EOpConstructStruct && size < type.computeNumComponents())) { + error(loc, "not enough data provided for construction", "constructor", ""); + return true; + } + + if (type.isCoopMat() && function.getParamCount() != 1) { + error(loc, "wrong number of arguments", "constructor", ""); + return true; + } + if (type.isCoopMat() && + !(function[0].type->isScalar() || function[0].type->isCoopMat())) { + error(loc, "Cooperative matrix constructor argument must be scalar or cooperative matrix", "constructor", ""); + return true; + } + + TIntermTyped* typed = node->getAsTyped(); + if (typed == nullptr) { + error(loc, "constructor argument does not have a type", "constructor", ""); + return true; + } + if (op != EOpConstructStruct && op != EOpConstructNonuniform && typed->getBasicType() == EbtSampler) { + error(loc, "cannot convert a sampler", "constructor", ""); + return true; + } + if (op != EOpConstructStruct && typed->isAtomic()) { + error(loc, "cannot convert an atomic_uint", "constructor", ""); + return true; + } + if (typed->getBasicType() == EbtVoid) { + error(loc, "cannot convert a void", "constructor", ""); + return true; + } + + return false; +} + +// Verify all the correct semantics for constructing a combined texture/sampler. +// Return true if the semantics are incorrect. +bool TParseContext::constructorTextureSamplerError(const TSourceLoc& loc, const TFunction& function) +{ + TString constructorName = function.getType().getBasicTypeString(); // TODO: performance: should not be making copy; interface needs to change + const char* token = constructorName.c_str(); + + // exactly two arguments needed + if (function.getParamCount() != 2) { + error(loc, "sampler-constructor requires two arguments", token, ""); + return true; + } + + // For now, not allowing arrayed constructors, the rest of this function + // is set up to allow them, if this test is removed: + if (function.getType().isArray()) { + error(loc, "sampler-constructor cannot make an array of samplers", token, ""); + return true; + } + + // first argument + // * the constructor's first argument must be a texture type + // * the dimensionality (1D, 2D, 3D, Cube, Rect, Buffer, MS, and Array) + // of the texture type must match that of the constructed sampler type + // (that is, the suffixes of the type of the first argument and the + // type of the constructor will be spelled the same way) + if (function[0].type->getBasicType() != EbtSampler || + ! 
function[0].type->getSampler().isTexture() || + function[0].type->isArray()) { + error(loc, "sampler-constructor first argument must be a scalar *texture* type", token, ""); + return true; + } + // simulate the first argument's impact on the result type, so it can be compared with the encapsulated operator!=() + TSampler texture = function.getType().getSampler(); + texture.setCombined(false); + texture.shadow = false; + if (texture != function[0].type->getSampler()) { + error(loc, "sampler-constructor first argument must be a *texture* type" + " matching the dimensionality and sampled type of the constructor", token, ""); + return true; + } + + // second argument + // * the constructor's second argument must be a scalar of type + // *sampler* or *samplerShadow* + if ( function[1].type->getBasicType() != EbtSampler || + ! function[1].type->getSampler().isPureSampler() || + function[1].type->isArray()) { + error(loc, "sampler-constructor second argument must be a scalar sampler or samplerShadow", token, ""); + return true; + } + + return false; +} + +// Checks to see if a void variable has been declared and raise an error message for such a case +// +// returns true in case of an error +// +bool TParseContext::voidErrorCheck(const TSourceLoc& loc, const TString& identifier, const TBasicType basicType) +{ + if (basicType == EbtVoid) { + error(loc, "illegal use of type 'void'", identifier.c_str(), ""); + return true; + } + + return false; +} + +// Checks to see if the node (for the expression) contains a scalar boolean expression or not +void TParseContext::boolCheck(const TSourceLoc& loc, const TIntermTyped* type) +{ + if (type->getBasicType() != EbtBool || type->isArray() || type->isMatrix() || type->isVector()) + error(loc, "boolean expression expected", "", ""); +} + +// This function checks to see if the node (for the expression) contains a scalar boolean expression or not +void TParseContext::boolCheck(const TSourceLoc& loc, const TPublicType& pType) +{ + if (pType.basicType != EbtBool || pType.arraySizes || pType.matrixCols > 1 || (pType.vectorSize > 1)) + error(loc, "boolean expression expected", "", ""); +} + +void TParseContext::samplerCheck(const TSourceLoc& loc, const TType& type, const TString& identifier, TIntermTyped* /*initializer*/) +{ + // Check that the appropriate extension is enabled if external sampler is used. + // There are two extensions. The correct one must be used based on GLSL version. + if (type.getBasicType() == EbtSampler && type.getSampler().isExternal()) { + if (version < 300) { + requireExtensions(loc, 1, &E_GL_OES_EGL_image_external, "samplerExternalOES"); + } else { + requireExtensions(loc, 1, &E_GL_OES_EGL_image_external_essl3, "samplerExternalOES"); + } + } + if (type.getSampler().isYuv()) { + requireExtensions(loc, 1, &E_GL_EXT_YUV_target, "__samplerExternal2DY2YEXT"); + } + + if (type.getQualifier().storage == EvqUniform) + return; + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtSampler)) + error(loc, "non-uniform struct contains a sampler or image:", type.getBasicTypeString().c_str(), identifier.c_str()); + else if (type.getBasicType() == EbtSampler && type.getQualifier().storage != EvqUniform) { + // non-uniform sampler + // not yet: okay if it has an initializer + // if (! 
initializer) + error(loc, "sampler/image types can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str()); + } +} + +#ifndef GLSLANG_WEB + +void TParseContext::atomicUintCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (type.getQualifier().storage == EvqUniform) + return; + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAtomicUint)) + error(loc, "non-uniform struct contains an atomic_uint:", type.getBasicTypeString().c_str(), identifier.c_str()); + else if (type.getBasicType() == EbtAtomicUint && type.getQualifier().storage != EvqUniform) + error(loc, "atomic_uints can only be used in uniform variables or function parameters:", type.getBasicTypeString().c_str(), identifier.c_str()); +} + +void TParseContext::accStructCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (type.getQualifier().storage == EvqUniform) + return; + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtAccStruct)) + error(loc, "non-uniform struct contains an accelerationStructureNV:", type.getBasicTypeString().c_str(), identifier.c_str()); + else if (type.getBasicType() == EbtAccStruct && type.getQualifier().storage != EvqUniform) + error(loc, "accelerationStructureNV can only be used in uniform variables or function parameters:", + type.getBasicTypeString().c_str(), identifier.c_str()); + +} + +#endif // GLSLANG_WEB + +void TParseContext::transparentOpaqueCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) +{ + if (parsingBuiltins) + return; + + if (type.getQualifier().storage != EvqUniform) + return; + + if (type.containsNonOpaque()) { + // Vulkan doesn't allow transparent uniforms outside of blocks + if (spvVersion.vulkan > 0) + vulkanRemoved(loc, "non-opaque uniforms outside a block"); + // OpenGL wants locations on these (unless they are getting automapped) + if (spvVersion.openGl > 0 && !type.getQualifier().hasLocation() && !intermediate.getAutoMapLocations()) + error(loc, "non-opaque uniform variables need a layout(location=L)", identifier.c_str(), ""); + } +} + +// +// Qualifier checks knowing the qualifier and that it is a member of a struct/block. +// +void TParseContext::memberQualifierCheck(glslang::TPublicType& publicType) +{ + globalQualifierFixCheck(publicType.loc, publicType.qualifier); + checkNoShaderLayouts(publicType.loc, publicType.shaderQualifiers); + if (publicType.qualifier.isNonUniform()) { + error(publicType.loc, "not allowed on block or structure members", "nonuniformEXT", ""); + publicType.qualifier.nonUniform = false; + } +} + +// +// Check/fix just a full qualifier (no variables or types yet, but qualifier is complete) at global level. 
+// +void TParseContext::globalQualifierFixCheck(const TSourceLoc& loc, TQualifier& qualifier) +{ + bool nonuniformOkay = false; + + // move from parameter/unknown qualifiers to pipeline in/out qualifiers + switch (qualifier.storage) { + case EvqIn: + profileRequires(loc, ENoProfile, 130, nullptr, "in for stage inputs"); + profileRequires(loc, EEsProfile, 300, nullptr, "in for stage inputs"); + qualifier.storage = EvqVaryingIn; + nonuniformOkay = true; + break; + case EvqOut: + profileRequires(loc, ENoProfile, 130, nullptr, "out for stage outputs"); + profileRequires(loc, EEsProfile, 300, nullptr, "out for stage outputs"); + qualifier.storage = EvqVaryingOut; + break; + case EvqInOut: + qualifier.storage = EvqVaryingIn; + error(loc, "cannot use 'inout' at global scope", "", ""); + break; + case EvqGlobal: + case EvqTemporary: + nonuniformOkay = true; + break; + default: + break; + } + + if (!nonuniformOkay && qualifier.isNonUniform()) + error(loc, "for non-parameter, can only apply to 'in' or no storage qualifier", "nonuniformEXT", ""); + + invariantCheck(loc, qualifier); +} + +// +// Check a full qualifier and type (no variable yet) at global level. +// +void TParseContext::globalQualifierTypeCheck(const TSourceLoc& loc, const TQualifier& qualifier, const TPublicType& publicType) +{ + if (! symbolTable.atGlobalLevel()) + return; + + if (!(publicType.userDef && publicType.userDef->isReference())) { + if (qualifier.isMemoryQualifierImageAndSSBOOnly() && ! publicType.isImage() && publicType.qualifier.storage != EvqBuffer) { + error(loc, "memory qualifiers cannot be used on this type", "", ""); + } else if (qualifier.isMemory() && (publicType.basicType != EbtSampler) && !publicType.qualifier.isUniformOrBuffer()) { + error(loc, "memory qualifiers cannot be used on this type", "", ""); + } + } + + if (qualifier.storage == EvqBuffer && + publicType.basicType != EbtBlock && + !qualifier.hasBufferReference()) + error(loc, "buffers can be declared only as blocks", "buffer", ""); + + if (qualifier.storage != EvqVaryingIn && publicType.basicType == EbtDouble && + extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit) && language == EShLangVertex && + version < 400) { + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 410, E_GL_ARB_gpu_shader_fp64, "vertex-shader `double` type"); + } + if (qualifier.storage != EvqVaryingIn && qualifier.storage != EvqVaryingOut) + return; + + if (publicType.shaderQualifiers.hasBlendEquation()) + error(loc, "can only be applied to a standalone 'out'", "blend equation", ""); + + // now, knowing it is a shader in/out, do all the in/out semantic checks + + if (publicType.basicType == EbtBool && !parsingBuiltins) { + error(loc, "cannot be bool", GetStorageQualifierString(qualifier.storage), ""); + return; + } + + if (isTypeInt(publicType.basicType) || publicType.basicType == EbtDouble) + profileRequires(loc, EEsProfile, 300, nullptr, "shader input/output"); + + if (!qualifier.flat && !qualifier.isExplicitInterpolation() && !qualifier.isPervertexNV()) { + if (isTypeInt(publicType.basicType) || + publicType.basicType == EbtDouble || + (publicType.userDef && ( publicType.userDef->containsBasicType(EbtInt) + || publicType.userDef->containsBasicType(EbtUint) + || publicType.userDef->contains16BitInt() + || publicType.userDef->contains8BitInt() + || publicType.userDef->contains64BitInt() + || publicType.userDef->containsDouble()))) { + if (qualifier.storage == EvqVaryingIn && language == EShLangFragment) + error(loc, "must be qualified as flat", 
TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage)); + else if (qualifier.storage == EvqVaryingOut && language == EShLangVertex && version == 300) + error(loc, "must be qualified as flat", TType::getBasicString(publicType.basicType), GetStorageQualifierString(qualifier.storage)); + } + } + + if (qualifier.isPatch() && qualifier.isInterpolation()) + error(loc, "cannot use interpolation qualifiers with patch", "patch", ""); + + if (qualifier.isTaskMemory() && publicType.basicType != EbtBlock) + error(loc, "taskNV variables can be declared only as blocks", "taskNV", ""); + + if (qualifier.storage == EvqVaryingIn) { + switch (language) { + case EShLangVertex: + if (publicType.basicType == EbtStruct) { + error(loc, "cannot be a structure or array", GetStorageQualifierString(qualifier.storage), ""); + return; + } + if (publicType.arraySizes) { + requireProfile(loc, ~EEsProfile, "vertex input arrays"); + profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays"); + } + if (publicType.basicType == EbtDouble) + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_vertex_attrib_64bit, "vertex-shader `double` type input"); + if (qualifier.isAuxiliary() || qualifier.isInterpolation() || qualifier.isMemory() || qualifier.invariant) + error(loc, "vertex input cannot be further qualified", "", ""); + break; + case EShLangFragment: + if (publicType.userDef) { + profileRequires(loc, EEsProfile, 300, nullptr, "fragment-shader struct input"); + profileRequires(loc, ~EEsProfile, 150, nullptr, "fragment-shader struct input"); + if (publicType.userDef->containsStructure()) + requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing structure"); + if (publicType.userDef->containsArray()) + requireProfile(loc, ~EEsProfile, "fragment-shader struct input containing an array"); + } + break; + case EShLangCompute: + if (! 
symbolTable.atBuiltInLevel()) + error(loc, "global storage input qualifier cannot be used in a compute shader", "in", ""); + break; +#ifndef GLSLANG_WEB + case EShLangTessControl: + if (qualifier.patch) + error(loc, "can only use on output in tessellation-control shader", "patch", ""); + break; +#endif + default: + break; + } + } else { + // qualifier.storage == EvqVaryingOut + switch (language) { + case EShLangVertex: + if (publicType.userDef) { + profileRequires(loc, EEsProfile, 300, nullptr, "vertex-shader struct output"); + profileRequires(loc, ~EEsProfile, 150, nullptr, "vertex-shader struct output"); + if (publicType.userDef->containsStructure()) + requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing structure"); + if (publicType.userDef->containsArray()) + requireProfile(loc, ~EEsProfile, "vertex-shader struct output containing an array"); + } + + break; + case EShLangFragment: + profileRequires(loc, EEsProfile, 300, nullptr, "fragment shader output"); + if (publicType.basicType == EbtStruct) { + error(loc, "cannot be a structure", GetStorageQualifierString(qualifier.storage), ""); + return; + } + if (publicType.matrixRows > 0) { + error(loc, "cannot be a matrix", GetStorageQualifierString(qualifier.storage), ""); + return; + } + if (qualifier.isAuxiliary()) + error(loc, "can't use auxiliary qualifier on a fragment output", "centroid/sample/patch", ""); + if (qualifier.isInterpolation()) + error(loc, "can't use interpolation qualifier on a fragment output", "flat/smooth/noperspective", ""); + if (publicType.basicType == EbtDouble || publicType.basicType == EbtInt64 || publicType.basicType == EbtUint64) + error(loc, "cannot contain a double, int64, or uint64", GetStorageQualifierString(qualifier.storage), ""); + break; + + case EShLangCompute: + error(loc, "global storage output qualifier cannot be used in a compute shader", "out", ""); + break; +#ifndef GLSLANG_WEB + case EShLangTessEvaluation: + if (qualifier.patch) + error(loc, "can only use on input in tessellation-evaluation shader", "patch", ""); + break; +#endif + default: + break; + } + } +} + +// +// Merge characteristics of the 'src' qualifier into the 'dst'. +// If there is duplication, issue error messages, unless 'force' +// is specified, which means to just override default settings. +// +// Also, when force is false, it will be assumed that 'src' follows +// 'dst', for the purpose of error checking order for versions +// that require specific orderings of qualifiers. +// +void TParseContext::mergeQualifiers(const TSourceLoc& loc, TQualifier& dst, const TQualifier& src, bool force) +{ + // Multiple auxiliary qualifiers (mostly done later by 'individual qualifiers') + if (src.isAuxiliary() && dst.isAuxiliary()) + error(loc, "can only have one auxiliary qualifier (centroid, patch, and sample)", "", ""); + + // Multiple interpolation qualifiers (mostly done later by 'individual qualifiers') + if (src.isInterpolation() && dst.isInterpolation()) + error(loc, "can only have one interpolation qualifier (flat, smooth, noperspective, __explicitInterpAMD)", "", ""); + + // Ordering + if (! force && ((!isEsProfile() && version < 420) || + (isEsProfile() && version < 310)) + && ! 
extensionTurnedOn(E_GL_ARB_shading_language_420pack)) { + // non-function parameters + if (src.isNoContraction() && (dst.invariant || dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "precise qualifier must appear first", "", ""); + if (src.invariant && (dst.isInterpolation() || dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "invariant qualifier must appear before interpolation, storage, and precision qualifiers ", "", ""); + else if (src.isInterpolation() && (dst.isAuxiliary() || dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "interpolation qualifiers must appear before storage and precision qualifiers", "", ""); + else if (src.isAuxiliary() && (dst.storage != EvqTemporary || dst.precision != EpqNone)) + error(loc, "Auxiliary qualifiers (centroid, patch, and sample) must appear before storage and precision qualifiers", "", ""); + else if (src.storage != EvqTemporary && (dst.precision != EpqNone)) + error(loc, "precision qualifier must appear as last qualifier", "", ""); + + // function parameters + if (src.isNoContraction() && (dst.storage == EvqConst || dst.storage == EvqIn || dst.storage == EvqOut)) + error(loc, "precise qualifier must appear first", "", ""); + if (src.storage == EvqConst && (dst.storage == EvqIn || dst.storage == EvqOut)) + error(loc, "in/out must appear before const", "", ""); + } + + // Storage qualification + if (dst.storage == EvqTemporary || dst.storage == EvqGlobal) + dst.storage = src.storage; + else if ((dst.storage == EvqIn && src.storage == EvqOut) || + (dst.storage == EvqOut && src.storage == EvqIn)) + dst.storage = EvqInOut; + else if ((dst.storage == EvqIn && src.storage == EvqConst) || + (dst.storage == EvqConst && src.storage == EvqIn)) + dst.storage = EvqConstReadOnly; + else if (src.storage != EvqTemporary && + src.storage != EvqGlobal) + error(loc, "too many storage qualifiers", GetStorageQualifierString(src.storage), ""); + + // Precision qualifiers + if (! 
force && src.precision != EpqNone && dst.precision != EpqNone) + error(loc, "only one precision qualifier allowed", GetPrecisionQualifierString(src.precision), ""); + if (dst.precision == EpqNone || (force && src.precision != EpqNone)) + dst.precision = src.precision; + +#ifndef GLSLANG_WEB + if (!force && ((src.coherent && (dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.devicecoherent && (dst.coherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.queuefamilycoherent && (dst.coherent || dst.devicecoherent || dst.workgroupcoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.workgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.subgroupcoherent || dst.shadercallcoherent)) || + (src.subgroupcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.shadercallcoherent)) || + (src.shadercallcoherent && (dst.coherent || dst.devicecoherent || dst.queuefamilycoherent || dst.workgroupcoherent || dst.subgroupcoherent)))) { + error(loc, "only one coherent/devicecoherent/queuefamilycoherent/workgroupcoherent/subgroupcoherent/shadercallcoherent qualifier allowed", + GetPrecisionQualifierString(src.precision), ""); + } +#endif + // Layout qualifiers + mergeObjectLayoutQualifiers(dst, src, false); + + // individual qualifiers + bool repeated = false; + #define MERGE_SINGLETON(field) repeated |= dst.field && src.field; dst.field |= src.field; + MERGE_SINGLETON(invariant); + MERGE_SINGLETON(centroid); + MERGE_SINGLETON(smooth); + MERGE_SINGLETON(flat); + MERGE_SINGLETON(specConstant); +#ifndef GLSLANG_WEB + MERGE_SINGLETON(noContraction); + MERGE_SINGLETON(nopersp); + MERGE_SINGLETON(explicitInterp); + MERGE_SINGLETON(perPrimitiveNV); + MERGE_SINGLETON(perViewNV); + MERGE_SINGLETON(perTaskNV); + MERGE_SINGLETON(patch); + MERGE_SINGLETON(sample); + MERGE_SINGLETON(coherent); + MERGE_SINGLETON(devicecoherent); + MERGE_SINGLETON(queuefamilycoherent); + MERGE_SINGLETON(workgroupcoherent); + MERGE_SINGLETON(subgroupcoherent); + MERGE_SINGLETON(shadercallcoherent); + MERGE_SINGLETON(nonprivate); + MERGE_SINGLETON(volatil); + MERGE_SINGLETON(restrict); + MERGE_SINGLETON(readonly); + MERGE_SINGLETON(writeonly); + MERGE_SINGLETON(nonUniform); +#endif + + if (repeated) + error(loc, "replicated qualifiers", "", ""); +} + +void TParseContext::setDefaultPrecision(const TSourceLoc& loc, TPublicType& publicType, TPrecisionQualifier qualifier) +{ + TBasicType basicType = publicType.basicType; + + if (basicType == EbtSampler) { + defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)] = qualifier; + + return; // all is well + } + + if (basicType == EbtInt || basicType == EbtFloat) { + if (publicType.isScalar()) { + defaultPrecision[basicType] = qualifier; + if (basicType == EbtInt) { + defaultPrecision[EbtUint] = qualifier; + precisionManager.explicitIntDefaultSeen(); + } else + precisionManager.explicitFloatDefaultSeen(); + + return; // all is well + } + } + + if (basicType == EbtAtomicUint) { + if (qualifier != EpqHigh) + error(loc, "can only apply highp to atomic_uint", "precision", ""); + + return; + } + + error(loc, "cannot apply precision statement to this type; use 'float', 'int' or a sampler type", TType::getBasicString(basicType), ""); +} + +// used to flatten the sampler type space into a single dimension +// correlates with the declaration of 
defaultSamplerPrecision[] +int TParseContext::computeSamplerTypeIndex(TSampler& sampler) +{ + int arrayIndex = sampler.arrayed ? 1 : 0; + int shadowIndex = sampler.shadow ? 1 : 0; + int externalIndex = sampler.isExternal() ? 1 : 0; + int imageIndex = sampler.isImageClass() ? 1 : 0; + int msIndex = sampler.isMultiSample() ? 1 : 0; + + int flattened = EsdNumDims * (EbtNumTypes * (2 * (2 * (2 * (2 * arrayIndex + msIndex) + imageIndex) + shadowIndex) + + externalIndex) + sampler.type) + sampler.dim; + assert(flattened < maxSamplerIndex); + + return flattened; +} + +TPrecisionQualifier TParseContext::getDefaultPrecision(TPublicType& publicType) +{ + if (publicType.basicType == EbtSampler) + return defaultSamplerPrecision[computeSamplerTypeIndex(publicType.sampler)]; + else + return defaultPrecision[publicType.basicType]; +} + +void TParseContext::precisionQualifierCheck(const TSourceLoc& loc, TBasicType baseType, TQualifier& qualifier) +{ + // Built-in symbols are allowed some ambiguous precisions, to be pinned down + // later by context. + if (! obeyPrecisionQualifiers() || parsingBuiltins) + return; + +#ifndef GLSLANG_WEB + if (baseType == EbtAtomicUint && qualifier.precision != EpqNone && qualifier.precision != EpqHigh) + error(loc, "atomic counters can only be highp", "atomic_uint", ""); +#endif + + if (baseType == EbtFloat || baseType == EbtUint || baseType == EbtInt || baseType == EbtSampler || baseType == EbtAtomicUint) { + if (qualifier.precision == EpqNone) { + if (relaxedErrors()) + warn(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), "substituting 'mediump'"); + else + error(loc, "type requires declaration of default precision qualifier", TType::getBasicString(baseType), ""); + qualifier.precision = EpqMedium; + defaultPrecision[baseType] = EpqMedium; + } + } else if (qualifier.precision != EpqNone) + error(loc, "type cannot have precision qualifier", TType::getBasicString(baseType), ""); +} + +void TParseContext::parameterTypeCheck(const TSourceLoc& loc, TStorageQualifier qualifier, const TType& type) +{ + if ((qualifier == EvqOut || qualifier == EvqInOut) && type.isOpaque()) + error(loc, "samplers and atomic_uints cannot be output parameters", type.getBasicTypeString().c_str(), ""); + if (!parsingBuiltins && type.contains16BitFloat()) + requireFloat16Arithmetic(loc, type.getBasicTypeString().c_str(), "float16 types can only be in uniform block or buffer storage"); + if (!parsingBuiltins && type.contains16BitInt()) + requireInt16Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int16 types can only be in uniform block or buffer storage"); + if (!parsingBuiltins && type.contains8BitInt()) + requireInt8Arithmetic(loc, type.getBasicTypeString().c_str(), "(u)int8 types can only be in uniform block or buffer storage"); +} + +bool TParseContext::containsFieldWithBasicType(const TType& type, TBasicType basicType) +{ + if (type.getBasicType() == basicType) + return true; + + if (type.getBasicType() == EbtStruct) { + const TTypeList& structure = *type.getStruct(); + for (unsigned int i = 0; i < structure.size(); ++i) { + if (containsFieldWithBasicType(*structure[i].type, basicType)) + return true; + } + } + + return false; +} + +// +// Do size checking for an array type's size. 
+// +void TParseContext::arraySizeCheck(const TSourceLoc& loc, TIntermTyped* expr, TArraySize& sizePair, const char *sizeType) +{ + bool isConst = false; + sizePair.node = nullptr; + + int size = 1; + + TIntermConstantUnion* constant = expr->getAsConstantUnion(); + if (constant) { + // handle true (non-specialization) constant + size = constant->getConstArray()[0].getIConst(); + isConst = true; + } else { + // see if it's a specialization constant instead + if (expr->getQualifier().isSpecConstant()) { + isConst = true; + sizePair.node = expr; + TIntermSymbol* symbol = expr->getAsSymbolNode(); + if (symbol && symbol->getConstArray().size() > 0) + size = symbol->getConstArray()[0].getIConst(); + } else if (expr->getAsUnaryNode() && + expr->getAsUnaryNode()->getOp() == glslang::EOpArrayLength && + expr->getAsUnaryNode()->getOperand()->getType().isCoopMat()) { + isConst = true; + size = 1; + sizePair.node = expr->getAsUnaryNode(); + } + } + + sizePair.size = size; + + if (! isConst || (expr->getBasicType() != EbtInt && expr->getBasicType() != EbtUint)) { + error(loc, sizeType, "", "must be a constant integer expression"); + return; + } + + if (size <= 0) { + error(loc, sizeType, "", "must be a positive integer"); + return; + } +} + +// +// See if this qualifier can be an array. +// +// Returns true if there is an error. +// +bool TParseContext::arrayQualifierError(const TSourceLoc& loc, const TQualifier& qualifier) +{ + if (qualifier.storage == EvqConst) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "const array"); + profileRequires(loc, EEsProfile, 300, nullptr, "const array"); + } + + if (qualifier.storage == EvqVaryingIn && language == EShLangVertex) { + requireProfile(loc, ~EEsProfile, "vertex input arrays"); + profileRequires(loc, ENoProfile, 150, nullptr, "vertex input arrays"); + } + + return false; +} + +// +// See if this qualifier and type combination can be an array. +// Assumes arrayQualifierError() was also called to catch the type-invariant tests. +// +// Returns true if there is an error. 
+// +bool TParseContext::arrayError(const TSourceLoc& loc, const TType& type) +{ + if (type.getQualifier().storage == EvqVaryingOut && language == EShLangVertex) { + if (type.isArrayOfArrays()) + requireProfile(loc, ~EEsProfile, "vertex-shader array-of-array output"); + else if (type.isStruct()) + requireProfile(loc, ~EEsProfile, "vertex-shader array-of-struct output"); + } + if (type.getQualifier().storage == EvqVaryingIn && language == EShLangFragment) { + if (type.isArrayOfArrays()) + requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array input"); + else if (type.isStruct()) + requireProfile(loc, ~EEsProfile, "fragment-shader array-of-struct input"); + } + if (type.getQualifier().storage == EvqVaryingOut && language == EShLangFragment) { + if (type.isArrayOfArrays()) + requireProfile(loc, ~EEsProfile, "fragment-shader array-of-array output"); + } + + return false; +} + +// +// Require array to be completely sized +// +void TParseContext::arraySizeRequiredCheck(const TSourceLoc& loc, const TArraySizes& arraySizes) +{ + if (!parsingBuiltins && arraySizes.hasUnsized()) + error(loc, "array size required", "", ""); +} + +void TParseContext::structArrayCheck(const TSourceLoc& /*loc*/, const TType& type) +{ + const TTypeList& structure = *type.getStruct(); + for (int m = 0; m < (int)structure.size(); ++m) { + const TType& member = *structure[m].type; + if (member.isArray()) + arraySizeRequiredCheck(structure[m].loc, *member.getArraySizes()); + } +} + +void TParseContext::arraySizesCheck(const TSourceLoc& loc, const TQualifier& qualifier, TArraySizes* arraySizes, + const TIntermTyped* initializer, bool lastMember) +{ + assert(arraySizes); + + // always allow special built-in ins/outs sized to topologies + if (parsingBuiltins) + return; + + // initializer must be a sized array, in which case + // allow the initializer to set any unknown array sizes + if (initializer != nullptr) { + if (initializer->getType().isUnsizedArray()) + error(loc, "array initializer must be sized", "[]", ""); + return; + } + + // No environment allows any non-outer-dimension to be implicitly sized + if (arraySizes->isInnerUnsized()) { + error(loc, "only outermost dimension of an array of arrays can be implicitly sized", "[]", ""); + arraySizes->clearInnerUnsized(); + } + + if (arraySizes->isInnerSpecialization() && + (qualifier.storage != EvqTemporary && qualifier.storage != EvqGlobal && qualifier.storage != EvqShared && qualifier.storage != EvqConst)) + error(loc, "only outermost dimension of an array of arrays can be a specialization constant", "[]", ""); + +#ifndef GLSLANG_WEB + + // desktop always allows outer-dimension-unsized variable arrays, + if (!isEsProfile()) + return; + + // for ES, if size isn't coming from an initializer, it has to be explicitly declared now, + // with very few exceptions + + // implicitly-sized io exceptions: + switch (language) { + case EShLangGeometry: + if (qualifier.storage == EvqVaryingIn) + if ((isEsProfile() && version >= 320) || + extensionsTurnedOn(Num_AEP_geometry_shader, AEP_geometry_shader)) + return; + break; + case EShLangTessControl: + if ( qualifier.storage == EvqVaryingIn || + (qualifier.storage == EvqVaryingOut && ! qualifier.isPatch())) + if ((isEsProfile() && version >= 320) || + extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader)) + return; + break; + case EShLangTessEvaluation: + if ((qualifier.storage == EvqVaryingIn && ! 
qualifier.isPatch()) || + qualifier.storage == EvqVaryingOut) + if ((isEsProfile() && version >= 320) || + extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader)) + return; + break; + case EShLangMeshNV: + if (qualifier.storage == EvqVaryingOut) + if ((isEsProfile() && version >= 320) || + extensionTurnedOn(E_GL_NV_mesh_shader)) + return; + break; + default: + break; + } + +#endif + + // last member of ssbo block exception: + if (qualifier.storage == EvqBuffer && lastMember) + return; + + arraySizeRequiredCheck(loc, *arraySizes); +} + +void TParseContext::arrayOfArrayVersionCheck(const TSourceLoc& loc, const TArraySizes* sizes) +{ + if (sizes == nullptr || sizes->getNumDims() == 1) + return; + + const char* feature = "arrays of arrays"; + + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, nullptr, feature); +} + +// +// Do all the semantic checking for declaring or redeclaring an array, with and +// without a size, and make the right changes to the symbol table. +// +void TParseContext::declareArray(const TSourceLoc& loc, const TString& identifier, const TType& type, TSymbol*& symbol) +{ + if (symbol == nullptr) { + bool currentScope; + symbol = symbolTable.find(identifier, nullptr, &currentScope); + + if (symbol && builtInName(identifier) && ! symbolTable.atBuiltInLevel()) { + // bad shader (errors already reported) trying to redeclare a built-in name as an array + symbol = nullptr; + return; + } + if (symbol == nullptr || ! currentScope) { + // + // Successfully process a new definition. + // (Redeclarations have to take place at the same scope; otherwise they are hiding declarations) + // + symbol = new TVariable(&identifier, type); + symbolTable.insert(*symbol); + if (symbolTable.atGlobalLevel()) + trackLinkage(*symbol); + +#ifndef GLSLANG_WEB + if (! symbolTable.atBuiltInLevel()) { + if (isIoResizeArray(type)) { + ioArraySymbolResizeList.push_back(symbol); + checkIoArraysConsistency(loc, true); + } else + fixIoArraySize(loc, symbol->getWritableType()); + } +#endif + + return; + } + if (symbol->getAsAnonMember()) { + error(loc, "cannot redeclare a user-block member array", identifier.c_str(), ""); + symbol = nullptr; + return; + } + } + + // + // Process a redeclaration. + // + + if (symbol == nullptr) { + error(loc, "array variable name expected", identifier.c_str(), ""); + return; + } + + // redeclareBuiltinVariable() should have already done the copyUp() + TType& existingType = symbol->getWritableType(); + + if (! existingType.isArray()) { + error(loc, "redeclaring non-array as array", identifier.c_str(), ""); + return; + } + + if (! existingType.sameElementType(type)) { + error(loc, "redeclaration of array with a different element type", identifier.c_str(), ""); + return; + } + + if (! existingType.sameInnerArrayness(type)) { + error(loc, "redeclaration of array with a different array dimensions or sizes", identifier.c_str(), ""); + return; + } + +#ifndef GLSLANG_WEB + if (existingType.isSizedArray()) { + // be more lenient for input arrays to geometry shaders and tessellation control outputs, where the redeclaration is the same size + if (! 
(isIoResizeArray(type) && existingType.getOuterArraySize() == type.getOuterArraySize())) + error(loc, "redeclaration of array with size", identifier.c_str(), ""); + return; + } + + arrayLimitCheck(loc, identifier, type.getOuterArraySize()); + + existingType.updateArraySizes(type); + + if (isIoResizeArray(type)) + checkIoArraysConsistency(loc); +#endif +} + +#ifndef GLSLANG_WEB + +// Policy and error check for needing a runtime sized array. +void TParseContext::checkRuntimeSizable(const TSourceLoc& loc, const TIntermTyped& base) +{ + // runtime length implies runtime sizeable, so no problem + if (isRuntimeLength(base)) + return; + + // Check for last member of a bufferreference type, which is runtime sizeable + // but doesn't support runtime length + if (base.getType().getQualifier().storage == EvqBuffer) { + const TIntermBinary* binary = base.getAsBinaryNode(); + if (binary != nullptr && + binary->getOp() == EOpIndexDirectStruct && + binary->getLeft()->isReference()) { + + const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + const int memberCount = (int)binary->getLeft()->getType().getReferentType()->getStruct()->size(); + if (index == memberCount - 1) + return; + } + } + + // check for additional things allowed by GL_EXT_nonuniform_qualifier + if (base.getBasicType() == EbtSampler || base.getBasicType() == EbtAccStruct || base.getBasicType() == EbtRayQuery || + (base.getBasicType() == EbtBlock && base.getType().getQualifier().isUniformOrBuffer())) + requireExtensions(loc, 1, &E_GL_EXT_nonuniform_qualifier, "variable index"); + else + error(loc, "", "[", "array must be redeclared with a size before being indexed with a variable"); +} + +// Policy decision for whether a run-time .length() is allowed. +bool TParseContext::isRuntimeLength(const TIntermTyped& base) const +{ + if (base.getType().getQualifier().storage == EvqBuffer) { + // in a buffer block + const TIntermBinary* binary = base.getAsBinaryNode(); + if (binary != nullptr && binary->getOp() == EOpIndexDirectStruct) { + // is it the last member? + const int index = binary->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + + if (binary->getLeft()->isReference()) + return false; + + const int memberCount = (int)binary->getLeft()->getType().getStruct()->size(); + if (index == memberCount - 1) + return true; + } + } + + return false; +} + +// Check if mesh perviewNV attributes have a view dimension +// and resize it to gl_MaxMeshViewCountNV when implicitly sized. +void TParseContext::checkAndResizeMeshViewDim(const TSourceLoc& loc, TType& type, bool isBlockMember) +{ + // see if member is a per-view attribute + if (!type.getQualifier().isPerView()) + return; + + if ((isBlockMember && type.isArray()) || (!isBlockMember && type.isArrayOfArrays())) { + // since we don't have the maxMeshViewCountNV set during parsing builtins, we hardcode the value. + int maxViewCount = parsingBuiltins ? 4 : resources.maxMeshViewCountNV; + // For block members, outermost array dimension is the view dimension. + // For non-block members, outermost array dimension is the vertex/primitive dimension + // and 2nd outermost is the view dimension. + int viewDim = isBlockMember ? 
0 : 1; + int viewDimSize = type.getArraySizes()->getDimSize(viewDim); + + if (viewDimSize != UnsizedArraySize && viewDimSize != maxViewCount) + error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", ""); + else if (viewDimSize == UnsizedArraySize) + type.getArraySizes()->setDimSize(viewDim, maxViewCount); + } + else { + error(loc, "requires a view array dimension", "perviewNV", ""); + } +} + +#endif // GLSLANG_WEB + +// Returns true if the first argument to the #line directive is the line number for the next line. +// +// Desktop, pre-version 3.30: "After processing this directive +// (including its new-line), the implementation will behave as if it is compiling at line number line+1 and +// source string number source-string-number." +// +// Desktop, version 3.30 and later, and ES: "After processing this directive +// (including its new-line), the implementation will behave as if it is compiling at line number line and +// source string number source-string-number. +bool TParseContext::lineDirectiveShouldSetNextLine() const +{ + return isEsProfile() || version >= 330; +} + +// +// Enforce non-initializer type/qualifier rules. +// +void TParseContext::nonInitConstCheck(const TSourceLoc& loc, TString& identifier, TType& type) +{ + // + // Make the qualifier make sense, given that there is not an initializer. + // + if (type.getQualifier().storage == EvqConst || + type.getQualifier().storage == EvqConstReadOnly) { + type.getQualifier().makeTemporary(); + error(loc, "variables with qualifier 'const' must be initialized", identifier.c_str(), ""); + } +} + +// +// See if the identifier is a built-in symbol that can be redeclared, and if so, +// copy the symbol table's read-only built-in variable to the current +// global level, where it can be modified based on the passed in type. +// +// Returns nullptr if no redeclaration took place; meaning a normal declaration still +// needs to occur for it, not necessarily an error. +// +// Returns a redeclared and type-modified variable if a redeclarated occurred. +// +TSymbol* TParseContext::redeclareBuiltinVariable(const TSourceLoc& loc, const TString& identifier, + const TQualifier& qualifier, const TShaderQualifiers& publicType) +{ +#ifndef GLSLANG_WEB + if (! builtInName(identifier) || symbolTable.atBuiltInLevel() || ! symbolTable.atGlobalLevel()) + return nullptr; + + bool nonEsRedecls = (!isEsProfile() && (version >= 130 || identifier == "gl_TexCoord")); + bool esRedecls = (isEsProfile() && + (version >= 320 || extensionsTurnedOn(Num_AEP_shader_io_blocks, AEP_shader_io_blocks))); + if (! esRedecls && ! nonEsRedecls) + return nullptr; + + // Special case when using GL_ARB_separate_shader_objects + bool ssoPre150 = false; // means the only reason this variable is redeclared is due to this combination + if (!isEsProfile() && version <= 140 && extensionTurnedOn(E_GL_ARB_separate_shader_objects)) { + if (identifier == "gl_Position" || + identifier == "gl_PointSize" || + identifier == "gl_ClipVertex" || + identifier == "gl_FogFragCoord") + ssoPre150 = true; + } + + // Potentially redeclaring a built-in variable... 
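+    // Editorial note (illustrative, not part of upstream glslang): a typical redeclaration
+    // handled below is
+    //     layout(origin_upper_left) in vec4 gl_FragCoord;
+    // which must appear before gl_FragCoord is first used in the shader.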
+ + if (ssoPre150 || + (identifier == "gl_FragDepth" && ((nonEsRedecls && version >= 420) || esRedecls)) || + (identifier == "gl_FragCoord" && ((nonEsRedecls && version >= 150) || esRedecls)) || + identifier == "gl_ClipDistance" || + identifier == "gl_CullDistance" || + identifier == "gl_FrontColor" || + identifier == "gl_BackColor" || + identifier == "gl_FrontSecondaryColor" || + identifier == "gl_BackSecondaryColor" || + identifier == "gl_SecondaryColor" || + (identifier == "gl_Color" && language == EShLangFragment) || + (identifier == "gl_FragStencilRefARB" && (nonEsRedecls && version >= 140) + && language == EShLangFragment) || + identifier == "gl_SampleMask" || + identifier == "gl_Layer" || + identifier == "gl_PrimitiveIndicesNV" || + identifier == "gl_TexCoord") { + + // Find the existing symbol, if any. + bool builtIn; + TSymbol* symbol = symbolTable.find(identifier, &builtIn); + + // If the symbol was not found, this must be a version/profile/stage + // that doesn't have it. + if (! symbol) + return nullptr; + + // If it wasn't at a built-in level, then it's already been redeclared; + // that is, this is a redeclaration of a redeclaration; reuse that initial + // redeclaration. Otherwise, make the new one. + if (builtIn) + makeEditable(symbol); + + // Now, modify the type of the copy, as per the type of the current redeclaration. + + TQualifier& symbolQualifier = symbol->getWritableType().getQualifier(); + if (ssoPre150) { + if (intermediate.inIoAccessed(identifier)) + error(loc, "cannot redeclare after use", identifier.c_str(), ""); + if (qualifier.hasLayout()) + error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str()); + if (qualifier.isMemory() || qualifier.isAuxiliary() || (language == EShLangVertex && qualifier.storage != EvqVaryingOut) || + (language == EShLangFragment && qualifier.storage != EvqVaryingIn)) + error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str()); + if (! 
qualifier.smooth) + error(loc, "cannot change interpolation qualification of", "redeclaration", symbol->getName().c_str()); + } else if (identifier == "gl_FrontColor" || + identifier == "gl_BackColor" || + identifier == "gl_FrontSecondaryColor" || + identifier == "gl_BackSecondaryColor" || + identifier == "gl_SecondaryColor" || + identifier == "gl_Color") { + symbolQualifier.flat = qualifier.flat; + symbolQualifier.smooth = qualifier.smooth; + symbolQualifier.nopersp = qualifier.nopersp; + if (qualifier.hasLayout()) + error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str()); + if (qualifier.isMemory() || qualifier.isAuxiliary() || symbol->getType().getQualifier().storage != qualifier.storage) + error(loc, "cannot change storage, memory, or auxiliary qualification of", "redeclaration", symbol->getName().c_str()); + } else if (identifier == "gl_TexCoord" || + identifier == "gl_ClipDistance" || + identifier == "gl_CullDistance") { + if (qualifier.hasLayout() || qualifier.isMemory() || qualifier.isAuxiliary() || + qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat || + symbolQualifier.storage != qualifier.storage) + error(loc, "cannot change qualification of", "redeclaration", symbol->getName().c_str()); + } else if (identifier == "gl_FragCoord") { + if (intermediate.inIoAccessed("gl_FragCoord")) + error(loc, "cannot redeclare after use", "gl_FragCoord", ""); + if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat || + qualifier.isMemory() || qualifier.isAuxiliary()) + error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str()); + if (qualifier.storage != EvqVaryingIn) + error(loc, "cannot change input storage qualification of", "redeclaration", symbol->getName().c_str()); + if (! builtIn && (publicType.pixelCenterInteger != intermediate.getPixelCenterInteger() || + publicType.originUpperLeft != intermediate.getOriginUpperLeft())) + error(loc, "cannot redeclare with different qualification:", "redeclaration", symbol->getName().c_str()); + if (publicType.pixelCenterInteger) + intermediate.setPixelCenterInteger(); + if (publicType.originUpperLeft) + intermediate.setOriginUpperLeft(); + } else if (identifier == "gl_FragDepth") { + if (qualifier.nopersp != symbolQualifier.nopersp || qualifier.flat != symbolQualifier.flat || + qualifier.isMemory() || qualifier.isAuxiliary()) + error(loc, "can only change layout qualification of", "redeclaration", symbol->getName().c_str()); + if (qualifier.storage != EvqVaryingOut) + error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str()); + if (publicType.layoutDepth != EldNone) { + if (intermediate.inIoAccessed("gl_FragDepth")) + error(loc, "cannot redeclare after use", "gl_FragDepth", ""); + if (! 
intermediate.setDepth(publicType.layoutDepth)) + error(loc, "all redeclarations must use the same depth layout on", "redeclaration", symbol->getName().c_str()); + } + } + else if ( + identifier == "gl_PrimitiveIndicesNV" || + identifier == "gl_FragStencilRefARB") { + if (qualifier.hasLayout()) + error(loc, "cannot apply layout qualifier to", "redeclaration", symbol->getName().c_str()); + if (qualifier.storage != EvqVaryingOut) + error(loc, "cannot change output storage qualification of", "redeclaration", symbol->getName().c_str()); + } + else if (identifier == "gl_SampleMask") { + if (!publicType.layoutOverrideCoverage) { + error(loc, "redeclaration only allowed for override_coverage layout", "redeclaration", symbol->getName().c_str()); + } + intermediate.setLayoutOverrideCoverage(); + } + else if (identifier == "gl_Layer") { + if (!qualifier.layoutViewportRelative && qualifier.layoutSecondaryViewportRelativeOffset == -2048) + error(loc, "redeclaration only allowed for viewport_relative or secondary_view_offset layout", "redeclaration", symbol->getName().c_str()); + symbolQualifier.layoutViewportRelative = qualifier.layoutViewportRelative; + symbolQualifier.layoutSecondaryViewportRelativeOffset = qualifier.layoutSecondaryViewportRelativeOffset; + } + + // TODO: semantics quality: separate smooth from nothing declared, then use IsInterpolation for several tests above + + return symbol; + } +#endif + + return nullptr; +} + +// +// Either redeclare the requested block, or give an error message why it can't be done. +// +// TODO: functionality: explicitly sizing members of redeclared blocks is not giving them an explicit size +void TParseContext::redeclareBuiltinBlock(const TSourceLoc& loc, TTypeList& newTypeList, const TString& blockName, + const TString* instanceName, TArraySizes* arraySizes) +{ +#ifndef GLSLANG_WEB + const char* feature = "built-in block redeclaration"; + profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature); + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature); + + if (blockName != "gl_PerVertex" && blockName != "gl_PerFragment" && + blockName != "gl_MeshPerVertexNV" && blockName != "gl_MeshPerPrimitiveNV") { + error(loc, "cannot redeclare block: ", "block declaration", blockName.c_str()); + return; + } + + // Redeclaring a built-in block... + + if (instanceName && ! builtInName(*instanceName)) { + error(loc, "cannot redeclare a built-in block with a user name", instanceName->c_str(), ""); + return; + } + + // Blocks with instance names are easy to find, lookup the instance name, + // Anonymous blocks need to be found via a member. + bool builtIn; + TSymbol* block; + if (instanceName) + block = symbolTable.find(*instanceName, &builtIn); + else + block = symbolTable.find(newTypeList.front().type->getFieldName(), &builtIn); + + // If the block was not found, this must be a version/profile/stage + // that doesn't have it, or the instance name is wrong. + const char* errorName = instanceName ? instanceName->c_str() : newTypeList.front().type->getFieldName().c_str(); + if (! block) { + error(loc, "no declaration found for redeclaration", errorName, ""); + return; + } + // Built-in blocks cannot be redeclared more than once, which if happened, + // we'd be finding the already redeclared one here, rather than the built in. + if (! 
builtIn) { + error(loc, "can only redeclare a built-in block once, and before any use", blockName.c_str(), ""); + return; + } + + // Copy the block to make a writable version, to insert into the block table after editing. + block = symbolTable.copyUpDeferredInsert(block); + + if (block->getType().getBasicType() != EbtBlock) { + error(loc, "cannot redeclare a non block as a block", errorName, ""); + return; + } + + // Fix XFB stuff up, it applies to the order of the redeclaration, not + // the order of the original members. + if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) { + if (!currentBlockQualifier.hasXfbBuffer()) + currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer; + if (!currentBlockQualifier.hasStream()) + currentBlockQualifier.layoutStream = globalOutputDefaults.layoutStream; + fixXfbOffsets(currentBlockQualifier, newTypeList); + } + + // Edit and error check the container against the redeclaration + // - remove unused members + // - ensure remaining qualifiers/types match + + TType& type = block->getWritableType(); + + // if gl_PerVertex is redeclared for the purpose of passing through "gl_Position" + // for passthrough purpose, the redeclared block should have the same qualifers as + // the current one + if (currentBlockQualifier.layoutPassthrough) { + type.getQualifier().layoutPassthrough = currentBlockQualifier.layoutPassthrough; + type.getQualifier().storage = currentBlockQualifier.storage; + type.getQualifier().layoutStream = currentBlockQualifier.layoutStream; + type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer; + } + + TTypeList::iterator member = type.getWritableStruct()->begin(); + size_t numOriginalMembersFound = 0; + while (member != type.getStruct()->end()) { + // look for match + bool found = false; + TTypeList::const_iterator newMember; + TSourceLoc memberLoc; + memberLoc.init(); + for (newMember = newTypeList.begin(); newMember != newTypeList.end(); ++newMember) { + if (member->type->getFieldName() == newMember->type->getFieldName()) { + found = true; + memberLoc = newMember->loc; + break; + } + } + + if (found) { + ++numOriginalMembersFound; + // - ensure match between redeclared members' types + // - check for things that can't be changed + // - update things that can be changed + TType& oldType = *member->type; + const TType& newType = *newMember->type; + if (! newType.sameElementType(oldType)) + error(memberLoc, "cannot redeclare block member with a different type", member->type->getFieldName().c_str(), ""); + if (oldType.isArray() != newType.isArray()) + error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! oldType.getQualifier().isPerView() && ! oldType.sameArrayness(newType) && oldType.isSizedArray()) + error(memberLoc, "cannot change array size of redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! oldType.getQualifier().isPerView() && newType.isArray()) + arrayLimitCheck(loc, member->type->getFieldName(), newType.getOuterArraySize()); + if (oldType.getQualifier().isPerView() && ! newType.getQualifier().isPerView()) + error(memberLoc, "missing perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! 
oldType.getQualifier().isPerView() && newType.getQualifier().isPerView()) + error(memberLoc, "cannot add perviewNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + else if (newType.getQualifier().isPerView()) { + if (oldType.getArraySizes()->getNumDims() != newType.getArraySizes()->getNumDims()) + error(memberLoc, "cannot change arrayness of redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! newType.isUnsizedArray() && newType.getOuterArraySize() != resources.maxMeshViewCountNV) + error(loc, "mesh view output array size must be gl_MaxMeshViewCountNV or implicitly sized", "[]", ""); + else if (newType.getArraySizes()->getNumDims() == 2) { + int innerDimSize = newType.getArraySizes()->getDimSize(1); + arrayLimitCheck(memberLoc, member->type->getFieldName(), innerDimSize); + oldType.getArraySizes()->setDimSize(1, innerDimSize); + } + } + if (oldType.getQualifier().isPerPrimitive() && ! newType.getQualifier().isPerPrimitive()) + error(memberLoc, "missing perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + else if (! oldType.getQualifier().isPerPrimitive() && newType.getQualifier().isPerPrimitive()) + error(memberLoc, "cannot add perprimitiveNV qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().isMemory()) + error(memberLoc, "cannot add memory qualifier to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().hasNonXfbLayout()) + error(memberLoc, "cannot add non-XFB layout to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().patch) + error(memberLoc, "cannot add patch to redeclared block member", member->type->getFieldName().c_str(), ""); + if (newType.getQualifier().hasXfbBuffer() && + newType.getQualifier().layoutXfbBuffer != currentBlockQualifier.layoutXfbBuffer) + error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", ""); + if (newType.getQualifier().hasStream() && + newType.getQualifier().layoutStream != currentBlockQualifier.layoutStream) + error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_stream", ""); + oldType.getQualifier().centroid = newType.getQualifier().centroid; + oldType.getQualifier().sample = newType.getQualifier().sample; + oldType.getQualifier().invariant = newType.getQualifier().invariant; + oldType.getQualifier().noContraction = newType.getQualifier().noContraction; + oldType.getQualifier().smooth = newType.getQualifier().smooth; + oldType.getQualifier().flat = newType.getQualifier().flat; + oldType.getQualifier().nopersp = newType.getQualifier().nopersp; + oldType.getQualifier().layoutXfbOffset = newType.getQualifier().layoutXfbOffset; + oldType.getQualifier().layoutXfbBuffer = newType.getQualifier().layoutXfbBuffer; + oldType.getQualifier().layoutXfbStride = newType.getQualifier().layoutXfbStride; + if (oldType.getQualifier().layoutXfbOffset != TQualifier::layoutXfbBufferEnd) { + // If any member has an xfb_offset, then the block's xfb_buffer inherits the current xfb_buffer, + // and for xfb processing, the member needs it as well, along with xfb_stride. 
+ type.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer; + oldType.getQualifier().layoutXfbBuffer = currentBlockQualifier.layoutXfbBuffer; + } + if (oldType.isUnsizedArray() && newType.isSizedArray()) + oldType.changeOuterArraySize(newType.getOuterArraySize()); + + // check and process the member's type, which will include managing xfb information + layoutTypeCheck(loc, oldType); + + // go to next member + ++member; + } else { + // For missing members of anonymous blocks that have been redeclared, + // hide the original (shared) declaration. + // Instance-named blocks can just have the member removed. + if (instanceName) + member = type.getWritableStruct()->erase(member); + else { + member->type->hideMember(); + ++member; + } + } + } + + if (spvVersion.vulkan > 0) { + // ...then streams apply to built-in blocks, instead of them being only on stream 0 + type.getQualifier().layoutStream = currentBlockQualifier.layoutStream; + } + + if (numOriginalMembersFound < newTypeList.size()) + error(loc, "block redeclaration has extra members", blockName.c_str(), ""); + if (type.isArray() != (arraySizes != nullptr) || + (type.isArray() && arraySizes != nullptr && type.getArraySizes()->getNumDims() != arraySizes->getNumDims())) + error(loc, "cannot change arrayness of redeclared block", blockName.c_str(), ""); + else if (type.isArray()) { + // At this point, we know both are arrays and both have the same number of dimensions. + + // It is okay for a built-in block redeclaration to be unsized, and keep the size of the + // original block declaration. + if (!arraySizes->isSized() && type.isSizedArray()) + arraySizes->changeOuterSize(type.getOuterArraySize()); + + // And, okay to be giving a size to the array, by the redeclaration + if (!type.isSizedArray() && arraySizes->isSized()) + type.changeOuterArraySize(arraySizes->getOuterSize()); + + // Now, they must match in all dimensions. + if (type.isSizedArray() && *type.getArraySizes() != *arraySizes) + error(loc, "cannot change array size of redeclared block", blockName.c_str(), ""); + } + + symbolTable.insert(*block); + + // Check for general layout qualifier errors + layoutObjectCheck(loc, *block); + + // Tracking for implicit sizing of array + if (isIoResizeArray(block->getType())) { + ioArraySymbolResizeList.push_back(block); + checkIoArraysConsistency(loc, true); + } else if (block->getType().isArray()) + fixIoArraySize(loc, block->getWritableType()); + + // Save it in the AST for linker use. 
+ trackLinkage(*block); +#endif // GLSLANG_WEB +} + +void TParseContext::paramCheckFixStorage(const TSourceLoc& loc, const TStorageQualifier& qualifier, TType& type) +{ + switch (qualifier) { + case EvqConst: + case EvqConstReadOnly: + type.getQualifier().storage = EvqConstReadOnly; + break; + case EvqIn: + case EvqOut: + case EvqInOut: + type.getQualifier().storage = qualifier; + break; + case EvqGlobal: + case EvqTemporary: + type.getQualifier().storage = EvqIn; + break; + default: + type.getQualifier().storage = EvqIn; + error(loc, "storage qualifier not allowed on function parameter", GetStorageQualifierString(qualifier), ""); + break; + } +} + +void TParseContext::paramCheckFix(const TSourceLoc& loc, const TQualifier& qualifier, TType& type) +{ +#ifndef GLSLANG_WEB + if (qualifier.isMemory()) { + type.getQualifier().volatil = qualifier.volatil; + type.getQualifier().coherent = qualifier.coherent; + type.getQualifier().devicecoherent = qualifier.devicecoherent ; + type.getQualifier().queuefamilycoherent = qualifier.queuefamilycoherent; + type.getQualifier().workgroupcoherent = qualifier.workgroupcoherent; + type.getQualifier().subgroupcoherent = qualifier.subgroupcoherent; + type.getQualifier().shadercallcoherent = qualifier.shadercallcoherent; + type.getQualifier().nonprivate = qualifier.nonprivate; + type.getQualifier().readonly = qualifier.readonly; + type.getQualifier().writeonly = qualifier.writeonly; + type.getQualifier().restrict = qualifier.restrict; + } +#endif + + if (qualifier.isAuxiliary() || + qualifier.isInterpolation()) + error(loc, "cannot use auxiliary or interpolation qualifiers on a function parameter", "", ""); + if (qualifier.hasLayout()) + error(loc, "cannot use layout qualifiers on a function parameter", "", ""); + if (qualifier.invariant) + error(loc, "cannot use invariant qualifier on a function parameter", "", ""); + if (qualifier.isNoContraction()) { + if (qualifier.isParamOutput()) + type.getQualifier().setNoContraction(); + else + warn(loc, "qualifier has no effect on non-output parameters", "precise", ""); + } + if (qualifier.isNonUniform()) + type.getQualifier().nonUniform = qualifier.nonUniform; + + paramCheckFixStorage(loc, qualifier.storage, type); +} + +void TParseContext::nestedBlockCheck(const TSourceLoc& loc) +{ + if (structNestingLevel > 0) + error(loc, "cannot nest a block definition inside a structure or block", "", ""); + ++structNestingLevel; +} + +void TParseContext::nestedStructCheck(const TSourceLoc& loc) +{ + if (structNestingLevel > 0) + error(loc, "cannot nest a structure definition inside a structure or block", "", ""); + ++structNestingLevel; +} + +void TParseContext::arrayObjectCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ + // Some versions don't allow comparing arrays or structures containing arrays + if (type.containsArray()) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, op); + profileRequires(loc, EEsProfile, 300, nullptr, op); + } +} + +void TParseContext::opaqueCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ + if (containsFieldWithBasicType(type, EbtSampler)) + error(loc, "can't use with samplers or structs containing samplers", op, ""); +} + +void TParseContext::referenceCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ +#ifndef GLSLANG_WEB + if (containsFieldWithBasicType(type, EbtReference)) + error(loc, "can't use with reference types", op, ""); +#endif +} + +void TParseContext::storage16BitAssignmentCheck(const TSourceLoc& loc, const TType& type, 
const char* op) +{ +#ifndef GLSLANG_WEB + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtFloat16)) + requireFloat16Arithmetic(loc, op, "can't use with structs containing float16"); + + if (type.isArray() && type.getBasicType() == EbtFloat16) + requireFloat16Arithmetic(loc, op, "can't use with arrays containing float16"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt16)) + requireInt16Arithmetic(loc, op, "can't use with structs containing int16"); + + if (type.isArray() && type.getBasicType() == EbtInt16) + requireInt16Arithmetic(loc, op, "can't use with arrays containing int16"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint16)) + requireInt16Arithmetic(loc, op, "can't use with structs containing uint16"); + + if (type.isArray() && type.getBasicType() == EbtUint16) + requireInt16Arithmetic(loc, op, "can't use with arrays containing uint16"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtInt8)) + requireInt8Arithmetic(loc, op, "can't use with structs containing int8"); + + if (type.isArray() && type.getBasicType() == EbtInt8) + requireInt8Arithmetic(loc, op, "can't use with arrays containing int8"); + + if (type.getBasicType() == EbtStruct && containsFieldWithBasicType(type, EbtUint8)) + requireInt8Arithmetic(loc, op, "can't use with structs containing uint8"); + + if (type.isArray() && type.getBasicType() == EbtUint8) + requireInt8Arithmetic(loc, op, "can't use with arrays containing uint8"); +#endif +} + +void TParseContext::specializationCheck(const TSourceLoc& loc, const TType& type, const char* op) +{ + if (type.containsSpecializationSize()) + error(loc, "can't use with types containing arrays sized with a specialization constant", op, ""); +} + +void TParseContext::structTypeCheck(const TSourceLoc& /*loc*/, TPublicType& publicType) +{ + const TTypeList& typeList = *publicType.userDef->getStruct(); + + // fix and check for member storage qualifiers and types that don't belong within a structure + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + if (memberQualifier.isAuxiliary() || + memberQualifier.isInterpolation() || + (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal)) + error(memberLoc, "cannot use storage or interpolation qualifiers on structure members", typeList[member].type->getFieldName().c_str(), ""); + if (memberQualifier.isMemory()) + error(memberLoc, "cannot use memory qualifiers on structure members", typeList[member].type->getFieldName().c_str(), ""); + if (memberQualifier.hasLayout()) { + error(memberLoc, "cannot use layout qualifiers on structure members", typeList[member].type->getFieldName().c_str(), ""); + memberQualifier.clearLayout(); + } + if (memberQualifier.invariant) + error(memberLoc, "cannot use invariant qualifier on structure members", typeList[member].type->getFieldName().c_str(), ""); + } +} + +// +// See if this loop satisfies the limitations for ES 2.0 (version 100) for loops in Appendex A: +// +// "The loop index has type int or float. 
+// +// "The for statement has the form: +// for ( init-declaration ; condition ; expression ) +// init-declaration has the form: type-specifier identifier = constant-expression +// condition has the form: loop-index relational_operator constant-expression +// where relational_operator is one of: > >= < <= == or != +// expression [sic] has one of the following forms: +// loop-index++ +// loop-index-- +// loop-index += constant-expression +// loop-index -= constant-expression +// +// The body is handled in an AST traversal. +// +void TParseContext::inductiveLoopCheck(const TSourceLoc& loc, TIntermNode* init, TIntermLoop* loop) +{ +#ifndef GLSLANG_WEB + // loop index init must exist and be a declaration, which shows up in the AST as an aggregate of size 1 of the declaration + bool badInit = false; + if (! init || ! init->getAsAggregate() || init->getAsAggregate()->getSequence().size() != 1) + badInit = true; + TIntermBinary* binaryInit = 0; + if (! badInit) { + // get the declaration assignment + binaryInit = init->getAsAggregate()->getSequence()[0]->getAsBinaryNode(); + if (! binaryInit) + badInit = true; + } + if (badInit) { + error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", ""); + return; + } + + // loop index must be type int or float + if (! binaryInit->getType().isScalar() || (binaryInit->getBasicType() != EbtInt && binaryInit->getBasicType() != EbtFloat)) { + error(loc, "inductive loop requires a scalar 'int' or 'float' loop index", "limitations", ""); + return; + } + + // init is the form "loop-index = constant" + if (binaryInit->getOp() != EOpAssign || ! binaryInit->getLeft()->getAsSymbolNode() || ! binaryInit->getRight()->getAsConstantUnion()) { + error(loc, "inductive-loop init-declaration requires the form \"type-specifier loop-index = constant-expression\"", "limitations", ""); + return; + } + + // get the unique id of the loop index + int loopIndex = binaryInit->getLeft()->getAsSymbolNode()->getId(); + inductiveLoopIds.insert(loopIndex); + + // condition's form must be "loop-index relational-operator constant-expression" + bool badCond = ! loop->getTest(); + if (! badCond) { + TIntermBinary* binaryCond = loop->getTest()->getAsBinaryNode(); + badCond = ! binaryCond; + if (! badCond) { + switch (binaryCond->getOp()) { + case EOpGreaterThan: + case EOpGreaterThanEqual: + case EOpLessThan: + case EOpLessThanEqual: + case EOpEqual: + case EOpNotEqual: + break; + default: + badCond = true; + } + } + if (binaryCond && (! binaryCond->getLeft()->getAsSymbolNode() || + binaryCond->getLeft()->getAsSymbolNode()->getId() != loopIndex || + ! binaryCond->getRight()->getAsConstantUnion())) + badCond = true; + } + if (badCond) { + error(loc, "inductive-loop condition requires the form \"loop-index constant-expression\"", "limitations", ""); + return; + } + + // loop-index++ + // loop-index-- + // loop-index += constant-expression + // loop-index -= constant-expression + bool badTerminal = ! loop->getTerminal(); + if (! badTerminal) { + TIntermUnary* unaryTerminal = loop->getTerminal()->getAsUnaryNode(); + TIntermBinary* binaryTerminal = loop->getTerminal()->getAsBinaryNode(); + if (unaryTerminal || binaryTerminal) { + switch(loop->getTerminal()->getAsOperator()->getOp()) { + case EOpPostDecrement: + case EOpPostIncrement: + case EOpAddAssign: + case EOpSubAssign: + break; + default: + badTerminal = true; + } + } else + badTerminal = true; + if (binaryTerminal && (! 
binaryTerminal->getLeft()->getAsSymbolNode() || + binaryTerminal->getLeft()->getAsSymbolNode()->getId() != loopIndex || + ! binaryTerminal->getRight()->getAsConstantUnion())) + badTerminal = true; + if (unaryTerminal && (! unaryTerminal->getOperand()->getAsSymbolNode() || + unaryTerminal->getOperand()->getAsSymbolNode()->getId() != loopIndex)) + badTerminal = true; + } + if (badTerminal) { + error(loc, "inductive-loop termination requires the form \"loop-index++, loop-index--, loop-index += constant-expression, or loop-index -= constant-expression\"", "limitations", ""); + return; + } + + // the body + inductiveLoopBodyCheck(loop->getBody(), loopIndex, symbolTable); +#endif +} + +#ifndef GLSLANG_WEB +// Do limit checks for built-in arrays. +void TParseContext::arrayLimitCheck(const TSourceLoc& loc, const TString& identifier, int size) +{ + if (identifier.compare("gl_TexCoord") == 0) + limitCheck(loc, size, "gl_MaxTextureCoords", "gl_TexCoord array size"); + else if (identifier.compare("gl_ClipDistance") == 0) + limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistance array size"); + else if (identifier.compare("gl_CullDistance") == 0) + limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistance array size"); + else if (identifier.compare("gl_ClipDistancePerViewNV") == 0) + limitCheck(loc, size, "gl_MaxClipDistances", "gl_ClipDistancePerViewNV array size"); + else if (identifier.compare("gl_CullDistancePerViewNV") == 0) + limitCheck(loc, size, "gl_MaxCullDistances", "gl_CullDistancePerViewNV array size"); +} +#endif // GLSLANG_WEB + +// See if the provided value is less than or equal to the symbol indicated by limit, +// which should be a constant in the symbol table. +void TParseContext::limitCheck(const TSourceLoc& loc, int value, const char* limit, const char* feature) +{ + TSymbol* symbol = symbolTable.find(limit); + assert(symbol->getAsVariable()); + const TConstUnionArray& constArray = symbol->getAsVariable()->getConstArray(); + assert(! constArray.empty()); + if (value > constArray[0].getIConst()) + error(loc, "must be less than or equal to", feature, "%s (%d)", limit, constArray[0].getIConst()); +} + +#ifndef GLSLANG_WEB + +// +// Do any additional error checking, etc., once we know the parsing is done. +// +void TParseContext::finish() +{ + TParseContextBase::finish(); + + if (parsingBuiltins) + return; + + // Check on array indexes for ES 2.0 (version 100) limitations. + for (size_t i = 0; i < needsIndexLimitationChecking.size(); ++i) + constantIndexExpressionCheck(needsIndexLimitationChecking[i]); + + // Check for stages that are enabled by extension. + // Can't do this at the beginning, it is chicken and egg to add a stage by + // extension. + // Stage-specific features were correctly tested for already, this is just + // about the stage itself. 
+ switch (language) { + case EShLangGeometry: + if (isEsProfile() && version == 310) + requireExtensions(getCurrentLoc(), Num_AEP_geometry_shader, AEP_geometry_shader, "geometry shaders"); + break; + case EShLangTessControl: + case EShLangTessEvaluation: + if (isEsProfile() && version == 310) + requireExtensions(getCurrentLoc(), Num_AEP_tessellation_shader, AEP_tessellation_shader, "tessellation shaders"); + else if (!isEsProfile() && version < 400) + requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_tessellation_shader, "tessellation shaders"); + break; + case EShLangCompute: + if (!isEsProfile() && version < 430) + requireExtensions(getCurrentLoc(), 1, &E_GL_ARB_compute_shader, "compute shaders"); + break; + case EShLangTaskNV: + requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "task shaders"); + break; + case EShLangMeshNV: + requireExtensions(getCurrentLoc(), 1, &E_GL_NV_mesh_shader, "mesh shaders"); + break; + default: + break; + } + + // Set default outputs for GL_NV_geometry_shader_passthrough + if (language == EShLangGeometry && extensionTurnedOn(E_SPV_NV_geometry_shader_passthrough)) { + if (intermediate.getOutputPrimitive() == ElgNone) { + switch (intermediate.getInputPrimitive()) { + case ElgPoints: intermediate.setOutputPrimitive(ElgPoints); break; + case ElgLines: intermediate.setOutputPrimitive(ElgLineStrip); break; + case ElgTriangles: intermediate.setOutputPrimitive(ElgTriangleStrip); break; + default: break; + } + } + if (intermediate.getVertices() == TQualifier::layoutNotSet) { + switch (intermediate.getInputPrimitive()) { + case ElgPoints: intermediate.setVertices(1); break; + case ElgLines: intermediate.setVertices(2); break; + case ElgTriangles: intermediate.setVertices(3); break; + default: break; + } + } + } +} +#endif // GLSLANG_WEB + +// +// Layout qualifier stuff. +// + +// Put the id's layout qualification into the public type, for qualifiers not having a number set. +// This is before we know any type information for error checking. 
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id) +{ + std::transform(id.begin(), id.end(), id.begin(), ::tolower); + + if (id == TQualifier::getLayoutMatrixString(ElmColumnMajor)) { + publicType.qualifier.layoutMatrix = ElmColumnMajor; + return; + } + if (id == TQualifier::getLayoutMatrixString(ElmRowMajor)) { + publicType.qualifier.layoutMatrix = ElmRowMajor; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpPacked)) { + if (spvVersion.spv != 0) + spvRemoved(loc, "packed"); + publicType.qualifier.layoutPacking = ElpPacked; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpShared)) { + if (spvVersion.spv != 0) + spvRemoved(loc, "shared"); + publicType.qualifier.layoutPacking = ElpShared; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpStd140)) { + publicType.qualifier.layoutPacking = ElpStd140; + return; + } +#ifndef GLSLANG_WEB + if (id == TQualifier::getLayoutPackingString(ElpStd430)) { + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "std430"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "std430"); + profileRequires(loc, EEsProfile, 310, nullptr, "std430"); + publicType.qualifier.layoutPacking = ElpStd430; + return; + } + if (id == TQualifier::getLayoutPackingString(ElpScalar)) { + requireVulkan(loc, "scalar"); + requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "scalar block layout"); + publicType.qualifier.layoutPacking = ElpScalar; + return; + } + // TODO: compile-time performance: may need to stop doing linear searches + for (TLayoutFormat format = (TLayoutFormat)(ElfNone + 1); format < ElfCount; format = (TLayoutFormat)(format + 1)) { + if (id == TQualifier::getLayoutFormatString(format)) { + if ((format > ElfEsFloatGuard && format < ElfFloatGuard) || + (format > ElfEsIntGuard && format < ElfIntGuard) || + (format > ElfEsUintGuard && format < ElfCount)) + requireProfile(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, "image load-store format"); + profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "image load store"); + profileRequires(loc, EEsProfile, 310, E_GL_ARB_shader_image_load_store, "image load store"); + publicType.qualifier.layoutFormat = format; + return; + } + } + if (id == "push_constant") { + requireVulkan(loc, "push_constant"); + publicType.qualifier.layoutPushConstant = true; + return; + } + if (id == "buffer_reference") { + requireVulkan(loc, "buffer_reference"); + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference"); + publicType.qualifier.layoutBufferReference = true; + intermediate.setUseStorageBuffer(); + intermediate.setUsePhysicalStorageBuffer(); + return; + } + if (language == EShLangGeometry || language == EShLangTessEvaluation || language == EShLangMeshNV) { + if (id == TQualifier::getGeometryString(ElgTriangles)) { + publicType.shaderQualifiers.geometry = ElgTriangles; + return; + } + if (language == EShLangGeometry || language == EShLangMeshNV) { + if (id == TQualifier::getGeometryString(ElgPoints)) { + publicType.shaderQualifiers.geometry = ElgPoints; + return; + } + if (id == TQualifier::getGeometryString(ElgLines)) { + publicType.shaderQualifiers.geometry = ElgLines; + return; + } + if (language == EShLangGeometry) { + if (id == TQualifier::getGeometryString(ElgLineStrip)) { + publicType.shaderQualifiers.geometry = ElgLineStrip; + return; + } + if (id == 
TQualifier::getGeometryString(ElgLinesAdjacency)) { + publicType.shaderQualifiers.geometry = ElgLinesAdjacency; + return; + } + if (id == TQualifier::getGeometryString(ElgTrianglesAdjacency)) { + publicType.shaderQualifiers.geometry = ElgTrianglesAdjacency; + return; + } + if (id == TQualifier::getGeometryString(ElgTriangleStrip)) { + publicType.shaderQualifiers.geometry = ElgTriangleStrip; + return; + } + if (id == "passthrough") { + requireExtensions(loc, 1, &E_SPV_NV_geometry_shader_passthrough, "geometry shader passthrough"); + publicType.qualifier.layoutPassthrough = true; + intermediate.setGeoPassthroughEXT(); + return; + } + } + } else { + assert(language == EShLangTessEvaluation); + + // input primitive + if (id == TQualifier::getGeometryString(ElgTriangles)) { + publicType.shaderQualifiers.geometry = ElgTriangles; + return; + } + if (id == TQualifier::getGeometryString(ElgQuads)) { + publicType.shaderQualifiers.geometry = ElgQuads; + return; + } + if (id == TQualifier::getGeometryString(ElgIsolines)) { + publicType.shaderQualifiers.geometry = ElgIsolines; + return; + } + + // vertex spacing + if (id == TQualifier::getVertexSpacingString(EvsEqual)) { + publicType.shaderQualifiers.spacing = EvsEqual; + return; + } + if (id == TQualifier::getVertexSpacingString(EvsFractionalEven)) { + publicType.shaderQualifiers.spacing = EvsFractionalEven; + return; + } + if (id == TQualifier::getVertexSpacingString(EvsFractionalOdd)) { + publicType.shaderQualifiers.spacing = EvsFractionalOdd; + return; + } + + // triangle order + if (id == TQualifier::getVertexOrderString(EvoCw)) { + publicType.shaderQualifiers.order = EvoCw; + return; + } + if (id == TQualifier::getVertexOrderString(EvoCcw)) { + publicType.shaderQualifiers.order = EvoCcw; + return; + } + + // point mode + if (id == "point_mode") { + publicType.shaderQualifiers.pointMode = true; + return; + } + } + } + if (language == EShLangFragment) { + if (id == "origin_upper_left") { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "origin_upper_left"); + publicType.shaderQualifiers.originUpperLeft = true; + return; + } + if (id == "pixel_center_integer") { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "pixel_center_integer"); + publicType.shaderQualifiers.pixelCenterInteger = true; + return; + } + if (id == "early_fragment_tests") { + profileRequires(loc, ENoProfile | ECoreProfile | ECompatibilityProfile, 420, E_GL_ARB_shader_image_load_store, "early_fragment_tests"); + profileRequires(loc, EEsProfile, 310, nullptr, "early_fragment_tests"); + publicType.shaderQualifiers.earlyFragmentTests = true; + return; + } + if (id == "post_depth_coverage") { + requireExtensions(loc, Num_post_depth_coverageEXTs, post_depth_coverageEXTs, "post depth coverage"); + if (extensionTurnedOn(E_GL_ARB_post_depth_coverage)) { + publicType.shaderQualifiers.earlyFragmentTests = true; + } + publicType.shaderQualifiers.postDepthCoverage = true; + return; + } + for (TLayoutDepth depth = (TLayoutDepth)(EldNone + 1); depth < EldCount; depth = (TLayoutDepth)(depth+1)) { + if (id == TQualifier::getLayoutDepthString(depth)) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "depth layout qualifier"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, nullptr, "depth layout qualifier"); + publicType.shaderQualifiers.layoutDepth = depth; + return; + } + } + for (TInterlockOrdering order = (TInterlockOrdering)(EioNone + 1); order < EioCount; order = (TInterlockOrdering)(order+1)) { + if (id == 
TQualifier::getInterlockOrderingString(order)) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "fragment shader interlock layout qualifier"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 450, nullptr, "fragment shader interlock layout qualifier"); + requireExtensions(loc, 1, &E_GL_ARB_fragment_shader_interlock, TQualifier::getInterlockOrderingString(order)); + if (order == EioShadingRateInterlockOrdered || order == EioShadingRateInterlockUnordered) + requireExtensions(loc, 1, &E_GL_NV_shading_rate_image, TQualifier::getInterlockOrderingString(order)); + publicType.shaderQualifiers.interlockOrdering = order; + return; + } + } + if (id.compare(0, 13, "blend_support") == 0) { + bool found = false; + for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) { + if (id == TQualifier::getBlendEquationString(be)) { + profileRequires(loc, EEsProfile, 320, E_GL_KHR_blend_equation_advanced, "blend equation"); + profileRequires(loc, ~EEsProfile, 0, E_GL_KHR_blend_equation_advanced, "blend equation"); + intermediate.addBlendEquation(be); + publicType.shaderQualifiers.blendEquation = true; + found = true; + break; + } + } + if (! found) + error(loc, "unknown blend equation", "blend_support", ""); + return; + } + if (id == "override_coverage") { + requireExtensions(loc, 1, &E_GL_NV_sample_mask_override_coverage, "sample mask override coverage"); + publicType.shaderQualifiers.layoutOverrideCoverage = true; + return; + } + } + if (language == EShLangVertex || + language == EShLangTessControl || + language == EShLangTessEvaluation || + language == EShLangGeometry ) { + if (id == "viewport_relative") { + requireExtensions(loc, 1, &E_GL_NV_viewport_array2, "view port array2"); + publicType.qualifier.layoutViewportRelative = true; + return; + } + } else { + if (language == EShLangRayGen || language == EShLangIntersect || + language == EShLangAnyHit || language == EShLangClosestHit || + language == EShLangMiss || language == EShLangCallable) { + if (id == "shaderrecordnv" || id == "shaderrecordext") { + if (id == "shaderrecordnv") { + requireExtensions(loc, 1, &E_GL_NV_ray_tracing, "shader record NV"); + } else { + requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "shader record EXT"); + } + publicType.qualifier.layoutShaderRecord = true; + return; + } + + } + } + if (language == EShLangCompute) { + if (id.compare(0, 17, "derivative_group_") == 0) { + requireExtensions(loc, 1, &E_GL_NV_compute_shader_derivatives, "compute shader derivatives"); + if (id == "derivative_group_quadsnv") { + publicType.shaderQualifiers.layoutDerivativeGroupQuads = true; + return; + } else if (id == "derivative_group_linearnv") { + publicType.shaderQualifiers.layoutDerivativeGroupLinear = true; + return; + } + } + } + + if (id == "primitive_culling") { + requireExtensions(loc, 1, &E_GL_EXT_ray_flags_primitive_culling, "primitive culling"); + publicType.shaderQualifiers.layoutPrimitiveCulling = true; + return; + } +#endif + + error(loc, "unrecognized layout identifier, or qualifier requires assignment (e.g., binding = 4)", id.c_str(), ""); +} + +// Put the id's layout qualifier value into the public type, for qualifiers having a number set. +// This is before we know any type information for error checking. 
+void TParseContext::setLayoutQualifier(const TSourceLoc& loc, TPublicType& publicType, TString& id, const TIntermTyped* node) +{ + const char* feature = "layout-id value"; + const char* nonLiteralFeature = "non-literal layout-id value"; + + integerCheck(node, feature); + const TIntermConstantUnion* constUnion = node->getAsConstantUnion(); + int value; + bool nonLiteral = false; + if (constUnion) { + value = constUnion->getConstArray()[0].getIConst(); + if (! constUnion->isLiteral()) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, nonLiteralFeature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, nonLiteralFeature); + } + } else { + // grammar should have give out the error message + value = 0; + nonLiteral = true; + } + + if (value < 0) { + error(loc, "cannot be negative", feature, ""); + return; + } + + std::transform(id.begin(), id.end(), id.begin(), ::tolower); + + if (id == "offset") { + // "offset" can be for either + // - uniform offsets + // - atomic_uint offsets + const char* feature = "offset"; + if (spvVersion.spv == 0) { + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, feature); + const char* exts[2] = { E_GL_ARB_enhanced_layouts, E_GL_ARB_shader_atomic_counters }; + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 420, 2, exts, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + } + publicType.qualifier.layoutOffset = value; + publicType.qualifier.explicitOffset = true; + if (nonLiteral) + error(loc, "needs a literal integer", "offset", ""); + return; + } else if (id == "align") { + const char* feature = "uniform buffer-member align"; + if (spvVersion.spv == 0) { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature); + } + // "The specified alignment must be a power of 2, or a compile-time error results." + if (! 
IsPow2(value)) + error(loc, "must be a power of 2", "align", ""); + else + publicType.qualifier.layoutAlign = value; + if (nonLiteral) + error(loc, "needs a literal integer", "align", ""); + return; + } else if (id == "location") { + profileRequires(loc, EEsProfile, 300, nullptr, "location"); + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + // GL_ARB_explicit_uniform_location requires 330 or GL_ARB_explicit_attrib_location we do not need to add it here + profileRequires(loc, ~EEsProfile, 330, 2, exts, "location"); + if ((unsigned int)value >= TQualifier::layoutLocationEnd) + error(loc, "location is too large", id.c_str(), ""); + else + publicType.qualifier.layoutLocation = value; + if (nonLiteral) + error(loc, "needs a literal integer", "location", ""); + return; + } else if (id == "set") { + if ((unsigned int)value >= TQualifier::layoutSetEnd) + error(loc, "set is too large", id.c_str(), ""); + else + publicType.qualifier.layoutSet = value; + if (value != 0) + requireVulkan(loc, "descriptor set"); + if (nonLiteral) + error(loc, "needs a literal integer", "set", ""); + return; + } else if (id == "binding") { +#ifndef GLSLANG_WEB + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, "binding"); + profileRequires(loc, EEsProfile, 310, nullptr, "binding"); +#endif + if ((unsigned int)value >= TQualifier::layoutBindingEnd) + error(loc, "binding is too large", id.c_str(), ""); + else + publicType.qualifier.layoutBinding = value; + if (nonLiteral) + error(loc, "needs a literal integer", "binding", ""); + return; + } + if (id == "constant_id") { + requireSpv(loc, "constant_id"); + if (value >= (int)TQualifier::layoutSpecConstantIdEnd) { + error(loc, "specialization-constant id is too large", id.c_str(), ""); + } else { + publicType.qualifier.layoutSpecConstantId = value; + publicType.qualifier.specConstant = true; + if (! intermediate.addUsedConstantId(value)) + error(loc, "specialization-constant id already used", id.c_str(), ""); + } + if (nonLiteral) + error(loc, "needs a literal integer", "constant_id", ""); + return; + } +#ifndef GLSLANG_WEB + if (id == "component") { + requireProfile(loc, ECoreProfile | ECompatibilityProfile, "component"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "component"); + if ((unsigned)value >= TQualifier::layoutComponentEnd) + error(loc, "component is too large", id.c_str(), ""); + else + publicType.qualifier.layoutComponent = value; + if (nonLiteral) + error(loc, "needs a literal integer", "component", ""); + return; + } + if (id.compare(0, 4, "xfb_") == 0) { + // "Any shader making any static use (after preprocessing) of any of these + // *xfb_* qualifiers will cause the shader to be in a transform feedback + // capturing mode and hence responsible for describing the transform feedback + // setup." + intermediate.setXfbMode(); + const char* feature = "transform feedback qualifier"; + requireStage(loc, (EShLanguageMask)(EShLangVertexMask | EShLangGeometryMask | EShLangTessControlMask | EShLangTessEvaluationMask), feature); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, feature); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature); + if (id == "xfb_buffer") { + // "It is a compile-time error to specify an *xfb_buffer* that is greater than + // the implementation-dependent constant gl_MaxTransformFeedbackBuffers." 
+ if (value >= resources.maxTransformFeedbackBuffers) + error(loc, "buffer is too large:", id.c_str(), "gl_MaxTransformFeedbackBuffers is %d", resources.maxTransformFeedbackBuffers); + if (value >= (int)TQualifier::layoutXfbBufferEnd) + error(loc, "buffer is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbBufferEnd-1); + else + publicType.qualifier.layoutXfbBuffer = value; + if (nonLiteral) + error(loc, "needs a literal integer", "xfb_buffer", ""); + return; + } else if (id == "xfb_offset") { + if (value >= (int)TQualifier::layoutXfbOffsetEnd) + error(loc, "offset is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbOffsetEnd-1); + else + publicType.qualifier.layoutXfbOffset = value; + if (nonLiteral) + error(loc, "needs a literal integer", "xfb_offset", ""); + return; + } else if (id == "xfb_stride") { + // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the + // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." + if (value > 4 * resources.maxTransformFeedbackInterleavedComponents) { + error(loc, "1/4 stride is too large:", id.c_str(), "gl_MaxTransformFeedbackInterleavedComponents is %d", + resources.maxTransformFeedbackInterleavedComponents); + } + if (value >= (int)TQualifier::layoutXfbStrideEnd) + error(loc, "stride is too large:", id.c_str(), "internal max is %d", TQualifier::layoutXfbStrideEnd-1); + else + publicType.qualifier.layoutXfbStride = value; + if (nonLiteral) + error(loc, "needs a literal integer", "xfb_stride", ""); + return; + } + } + if (id == "input_attachment_index") { + requireVulkan(loc, "input_attachment_index"); + if (value >= (int)TQualifier::layoutAttachmentEnd) + error(loc, "attachment index is too large", id.c_str(), ""); + else + publicType.qualifier.layoutAttachment = value; + if (nonLiteral) + error(loc, "needs a literal integer", "input_attachment_index", ""); + return; + } + if (id == "num_views") { + requireExtensions(loc, Num_OVR_multiview_EXTs, OVR_multiview_EXTs, "num_views"); + publicType.shaderQualifiers.numViews = value; + if (nonLiteral) + error(loc, "needs a literal integer", "num_views", ""); + return; + } + if (language == EShLangVertex || + language == EShLangTessControl || + language == EShLangTessEvaluation || + language == EShLangGeometry) { + if (id == "secondary_view_offset") { + requireExtensions(loc, 1, &E_GL_NV_stereo_view_rendering, "stereo view rendering"); + publicType.qualifier.layoutSecondaryViewportRelativeOffset = value; + if (nonLiteral) + error(loc, "needs a literal integer", "secondary_view_offset", ""); + return; + } + } + + if (id == "buffer_reference_align") { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference, "buffer_reference_align"); + if (! 
IsPow2(value)) + error(loc, "must be a power of 2", "buffer_reference_align", ""); + else + publicType.qualifier.layoutBufferReferenceAlign = (unsigned int)std::log2(value); + if (nonLiteral) + error(loc, "needs a literal integer", "buffer_reference_align", ""); + return; + } +#endif + + switch (language) { +#ifndef GLSLANG_WEB + case EShLangTessControl: + if (id == "vertices") { + if (value == 0) + error(loc, "must be greater than 0", "vertices", ""); + else + publicType.shaderQualifiers.vertices = value; + if (nonLiteral) + error(loc, "needs a literal integer", "vertices", ""); + return; + } + break; + + case EShLangGeometry: + if (id == "invocations") { + profileRequires(loc, ECompatibilityProfile | ECoreProfile, 400, nullptr, "invocations"); + if (value == 0) + error(loc, "must be at least 1", "invocations", ""); + else + publicType.shaderQualifiers.invocations = value; + if (nonLiteral) + error(loc, "needs a literal integer", "invocations", ""); + return; + } + if (id == "max_vertices") { + publicType.shaderQualifiers.vertices = value; + if (value > resources.maxGeometryOutputVertices) + error(loc, "too large, must be less than gl_MaxGeometryOutputVertices", "max_vertices", ""); + if (nonLiteral) + error(loc, "needs a literal integer", "max_vertices", ""); + return; + } + if (id == "stream") { + requireProfile(loc, ~EEsProfile, "selecting output stream"); + publicType.qualifier.layoutStream = value; + if (value > 0) + intermediate.setMultiStream(); + if (nonLiteral) + error(loc, "needs a literal integer", "stream", ""); + return; + } + break; + + case EShLangFragment: + if (id == "index") { + requireProfile(loc, ECompatibilityProfile | ECoreProfile | EEsProfile, "index layout qualifier on fragment output"); + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + profileRequires(loc, ECompatibilityProfile | ECoreProfile, 330, 2, exts, "index layout qualifier on fragment output"); + profileRequires(loc, EEsProfile ,310, E_GL_EXT_blend_func_extended, "index layout qualifier on fragment output"); + // "It is also a compile-time error if a fragment shader sets a layout index to less than 0 or greater than 1." 
+ if (value < 0 || value > 1) { + value = 0; + error(loc, "value must be 0 or 1", "index", ""); + } + + publicType.qualifier.layoutIndex = value; + if (nonLiteral) + error(loc, "needs a literal integer", "index", ""); + return; + } + break; + + case EShLangMeshNV: + if (id == "max_vertices") { + requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_vertices"); + publicType.shaderQualifiers.vertices = value; + if (value > resources.maxMeshOutputVerticesNV) + error(loc, "too large, must be less than gl_MaxMeshOutputVerticesNV", "max_vertices", ""); + if (nonLiteral) + error(loc, "needs a literal integer", "max_vertices", ""); + return; + } + if (id == "max_primitives") { + requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "max_primitives"); + publicType.shaderQualifiers.primitives = value; + if (value > resources.maxMeshOutputPrimitivesNV) + error(loc, "too large, must be less than gl_MaxMeshOutputPrimitivesNV", "max_primitives", ""); + if (nonLiteral) + error(loc, "needs a literal integer", "max_primitives", ""); + return; + } + // Fall through + + case EShLangTaskNV: + // Fall through +#endif + case EShLangCompute: + if (id.compare(0, 11, "local_size_") == 0) { +#ifndef GLSLANG_WEB + if (language == EShLangMeshNV || language == EShLangTaskNV) { + requireExtensions(loc, 1, &E_GL_NV_mesh_shader, "gl_WorkGroupSize"); + } else { + profileRequires(loc, EEsProfile, 310, 0, "gl_WorkGroupSize"); + profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_compute_shader, "gl_WorkGroupSize"); + } +#endif + if (nonLiteral) + error(loc, "needs a literal integer", "local_size", ""); + if (id.size() == 12 && value == 0) { + error(loc, "must be at least 1", id.c_str(), ""); + return; + } + if (id == "local_size_x") { + publicType.shaderQualifiers.localSize[0] = value; + publicType.shaderQualifiers.localSizeNotDefault[0] = true; + return; + } + if (id == "local_size_y") { + publicType.shaderQualifiers.localSize[1] = value; + publicType.shaderQualifiers.localSizeNotDefault[1] = true; + return; + } + if (id == "local_size_z") { + publicType.shaderQualifiers.localSize[2] = value; + publicType.shaderQualifiers.localSizeNotDefault[2] = true; + return; + } + if (spvVersion.spv != 0) { + if (id == "local_size_x_id") { + publicType.shaderQualifiers.localSizeSpecId[0] = value; + return; + } + if (id == "local_size_y_id") { + publicType.shaderQualifiers.localSizeSpecId[1] = value; + return; + } + if (id == "local_size_z_id") { + publicType.shaderQualifiers.localSizeSpecId[2] = value; + return; + } + } + } + break; + + default: + break; + } + + error(loc, "there is no such layout identifier for this stage taking an assigned value", id.c_str(), ""); +} + +// Merge any layout qualifier information from src into dst, leaving everything else in dst alone +// +// "More than one layout qualifier may appear in a single declaration. +// Additionally, the same layout-qualifier-name can occur multiple times +// within a layout qualifier or across multiple layout qualifiers in the +// same declaration. When the same layout-qualifier-name occurs +// multiple times, in a single declaration, the last occurrence overrides +// the former occurrence(s). Further, if such a layout-qualifier-name +// will effect subsequent declarations or other observable behavior, it +// is only the last occurrence that will have any effect, behaving as if +// the earlier occurrence(s) within the declaration are not present. +// This is also true for overriding layout-qualifier-names, where one +// overrides the other (e.g., row_major vs. 
column_major); only the last +// occurrence has any effect." +void TParseContext::mergeObjectLayoutQualifiers(TQualifier& dst, const TQualifier& src, bool inheritOnly) +{ + if (src.hasMatrix()) + dst.layoutMatrix = src.layoutMatrix; + if (src.hasPacking()) + dst.layoutPacking = src.layoutPacking; + +#ifndef GLSLANG_WEB + if (src.hasStream()) + dst.layoutStream = src.layoutStream; + if (src.hasFormat()) + dst.layoutFormat = src.layoutFormat; + if (src.hasXfbBuffer()) + dst.layoutXfbBuffer = src.layoutXfbBuffer; + if (src.hasBufferReferenceAlign()) + dst.layoutBufferReferenceAlign = src.layoutBufferReferenceAlign; +#endif + + if (src.hasAlign()) + dst.layoutAlign = src.layoutAlign; + + if (! inheritOnly) { + if (src.hasLocation()) + dst.layoutLocation = src.layoutLocation; + if (src.hasOffset()) + dst.layoutOffset = src.layoutOffset; + if (src.hasSet()) + dst.layoutSet = src.layoutSet; + if (src.layoutBinding != TQualifier::layoutBindingEnd) + dst.layoutBinding = src.layoutBinding; + + if (src.hasSpecConstantId()) + dst.layoutSpecConstantId = src.layoutSpecConstantId; + +#ifndef GLSLANG_WEB + if (src.hasComponent()) + dst.layoutComponent = src.layoutComponent; + if (src.hasIndex()) + dst.layoutIndex = src.layoutIndex; + if (src.hasXfbStride()) + dst.layoutXfbStride = src.layoutXfbStride; + if (src.hasXfbOffset()) + dst.layoutXfbOffset = src.layoutXfbOffset; + if (src.hasAttachment()) + dst.layoutAttachment = src.layoutAttachment; + if (src.layoutPushConstant) + dst.layoutPushConstant = true; + + if (src.layoutBufferReference) + dst.layoutBufferReference = true; + + if (src.layoutPassthrough) + dst.layoutPassthrough = true; + if (src.layoutViewportRelative) + dst.layoutViewportRelative = true; + if (src.layoutSecondaryViewportRelativeOffset != -2048) + dst.layoutSecondaryViewportRelativeOffset = src.layoutSecondaryViewportRelativeOffset; + if (src.layoutShaderRecord) + dst.layoutShaderRecord = true; + if (src.pervertexNV) + dst.pervertexNV = true; +#endif + } +} + +// Do error layout error checking given a full variable/block declaration. 
+void TParseContext::layoutObjectCheck(const TSourceLoc& loc, const TSymbol& symbol) +{ + const TType& type = symbol.getType(); + const TQualifier& qualifier = type.getQualifier(); + + // first, cross check WRT to just the type + layoutTypeCheck(loc, type); + + // now, any remaining error checking based on the object itself + + if (qualifier.hasAnyLocation()) { + switch (qualifier.storage) { + case EvqUniform: + case EvqBuffer: + if (symbol.getAsVariable() == nullptr) + error(loc, "can only be used on variable declaration", "location", ""); + break; + default: + break; + } + } + + // user-variable location check, which are required for SPIR-V in/out: + // - variables have it directly, + // - blocks have it on each member (already enforced), so check first one + if (spvVersion.spv > 0 && !parsingBuiltins && qualifier.builtIn == EbvNone && + !qualifier.hasLocation() && !intermediate.getAutoMapLocations()) { + + switch (qualifier.storage) { + case EvqVaryingIn: + case EvqVaryingOut: + if (!type.getQualifier().isTaskMemory() && + (type.getBasicType() != EbtBlock || + (!(*type.getStruct())[0].type->getQualifier().hasLocation() && + (*type.getStruct())[0].type->getQualifier().builtIn == EbvNone))) + error(loc, "SPIR-V requires location for user input/output", "location", ""); + break; + default: + break; + } + } + + // Check packing and matrix + if (qualifier.hasUniformLayout()) { + switch (qualifier.storage) { + case EvqUniform: + case EvqBuffer: + if (type.getBasicType() != EbtBlock) { + if (qualifier.hasMatrix()) + error(loc, "cannot specify matrix layout on a variable declaration", "layout", ""); + if (qualifier.hasPacking()) + error(loc, "cannot specify packing on a variable declaration", "layout", ""); + // "The offset qualifier can only be used on block members of blocks..." + if (qualifier.hasOffset() && !type.isAtomic()) + error(loc, "cannot specify on a variable declaration", "offset", ""); + // "The align qualifier can only be used on blocks or block members..." + if (qualifier.hasAlign()) + error(loc, "cannot specify on a variable declaration", "align", ""); + if (qualifier.isPushConstant()) + error(loc, "can only specify on a uniform block", "push_constant", ""); + if (qualifier.isShaderRecord()) + error(loc, "can only specify on a buffer block", "shaderRecordNV", ""); + } + break; + default: + // these were already filtered by layoutTypeCheck() (or its callees) + break; + } + } +} + +// "For some blocks declared as arrays, the location can only be applied at the block level: +// When a block is declared as an array where additional locations are needed for each member +// for each block array element, it is a compile-time error to specify locations on the block +// members. That is, when locations would be under specified by applying them on block members, +// they are not allowed on block members. For arrayed interfaces (those generally having an +// extra level of arrayness due to interface expansion), the outer array is stripped before +// applying this rule." +void TParseContext::layoutMemberLocationArrayCheck(const TSourceLoc& loc, bool memberWithLocation, + TArraySizes* arraySizes) +{ + if (memberWithLocation && arraySizes != nullptr) { + if (arraySizes->getNumDims() > (currentBlockQualifier.isArrayedIo(language) ? 1 : 0)) + error(loc, "cannot use in a block array where new locations are needed for each block element", + "location", ""); + } +} + +// Do layout error checking with respect to a type. 
+void TParseContext::layoutTypeCheck(const TSourceLoc& loc, const TType& type) +{ + const TQualifier& qualifier = type.getQualifier(); + + // first, intra-layout qualifier-only error checking + layoutQualifierCheck(loc, qualifier); + + // now, error checking combining type and qualifier + + if (qualifier.hasAnyLocation()) { + if (qualifier.hasLocation()) { + if (qualifier.storage == EvqVaryingOut && language == EShLangFragment) { + if (qualifier.layoutLocation >= (unsigned int)resources.maxDrawBuffers) + error(loc, "too large for fragment output", "location", ""); + } + } + if (qualifier.hasComponent()) { + // "It is a compile-time error if this sequence of components gets larger than 3." + if (qualifier.layoutComponent + type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1) > 4) + error(loc, "type overflows the available 4 components", "component", ""); + + // "It is a compile-time error to apply the component qualifier to a matrix, a structure, a block, or an array containing any of these." + if (type.isMatrix() || type.getBasicType() == EbtBlock || type.getBasicType() == EbtStruct) + error(loc, "cannot apply to a matrix, structure, or block", "component", ""); + + // " It is a compile-time error to use component 1 or 3 as the beginning of a double or dvec2." + if (type.getBasicType() == EbtDouble) + if (qualifier.layoutComponent & 1) + error(loc, "doubles cannot start on an odd-numbered component", "component", ""); + } + + switch (qualifier.storage) { + case EvqVaryingIn: + case EvqVaryingOut: + if (type.getBasicType() == EbtBlock) + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, "location qualifier on in/out block"); + if (type.getQualifier().isTaskMemory()) + error(loc, "cannot apply to taskNV in/out blocks", "location", ""); + break; + case EvqUniform: + case EvqBuffer: + if (type.getBasicType() == EbtBlock) + error(loc, "cannot apply to uniform or buffer block", "location", ""); + break; +#ifndef GLSLANG_WEB + case EvqPayload: + case EvqPayloadIn: + case EvqHitAttr: + case EvqCallableData: + case EvqCallableDataIn: + break; +#endif + default: + error(loc, "can only apply to uniform, buffer, in, or out storage qualifiers", "location", ""); + break; + } + + bool typeCollision; + int repeated = intermediate.addUsedLocation(qualifier, type, typeCollision); + if (repeated >= 0 && ! typeCollision) + error(loc, "overlapping use of location", "location", "%d", repeated); + // "fragment-shader outputs ... if two variables are placed within the same + // location, they must have the same underlying type (floating-point or integer)" + if (typeCollision && language == EShLangFragment && qualifier.isPipeOutput()) + error(loc, "fragment outputs sharing the same location must be the same basic type", "location", "%d", repeated); + } + +#ifndef GLSLANG_WEB + if (qualifier.hasXfbOffset() && qualifier.hasXfbBuffer()) { + int repeated = intermediate.addXfbBufferOffset(type); + if (repeated >= 0) + error(loc, "overlapping offsets at", "xfb_offset", "offset %d in buffer %d", repeated, qualifier.layoutXfbBuffer); + if (type.isUnsizedArray()) + error(loc, "unsized array", "xfb_offset", "in buffer %d", qualifier.layoutXfbBuffer); + + // "The offset must be a multiple of the size of the first component of the first + // qualified variable or block member, or a compile-time error results. Further, if applied to an aggregate + // containing a double or 64-bit integer, the offset must also be a multiple of 8..." 
+ if ((type.containsBasicType(EbtDouble) || type.containsBasicType(EbtInt64) || type.containsBasicType(EbtUint64)) && + ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 8)) + error(loc, "type contains double or 64-bit integer; xfb_offset must be a multiple of 8", "xfb_offset", ""); + else if ((type.containsBasicType(EbtBool) || type.containsBasicType(EbtFloat) || + type.containsBasicType(EbtInt) || type.containsBasicType(EbtUint)) && + ! IsMultipleOfPow2(qualifier.layoutXfbOffset, 4)) + error(loc, "must be a multiple of size of first component", "xfb_offset", ""); + // ..., if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2..." + else if ((type.contains16BitFloat() || type.containsBasicType(EbtInt16) || type.containsBasicType(EbtUint16)) && + !IsMultipleOfPow2(qualifier.layoutXfbOffset, 2)) + error(loc, "type contains half float or 16-bit integer; xfb_offset must be a multiple of 2", "xfb_offset", ""); + } + if (qualifier.hasXfbStride() && qualifier.hasXfbBuffer()) { + if (! intermediate.setXfbBufferStride(qualifier.layoutXfbBuffer, qualifier.layoutXfbStride)) + error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer); + } +#endif + + if (qualifier.hasBinding()) { + // Binding checking, from the spec: + // + // "If the binding point for any uniform or shader storage block instance is less than zero, or greater than or + // equal to the implementation-dependent maximum number of uniform buffer bindings, a compile-time + // error will occur. When the binding identifier is used with a uniform or shader storage block instanced as + // an array of size N, all elements of the array from binding through binding + N - 1 must be within this + // range." + // + if (! type.isOpaque() && type.getBasicType() != EbtBlock) + error(loc, "requires block, or sampler/image, or atomic-counter type", "binding", ""); + if (type.getBasicType() == EbtSampler) { + int lastBinding = qualifier.layoutBinding; + if (type.isArray()) { + if (spvVersion.vulkan > 0) + lastBinding += 1; + else { + if (type.isSizedArray()) + lastBinding += type.getCumulativeArraySize(); + else { + lastBinding += 1; +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0) + warn(loc, "assuming binding count of one for compile-time checking of binding numbers for unsized array", "[]", ""); +#endif + } + } + } +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0 && lastBinding >= resources.maxCombinedTextureImageUnits) + error(loc, "sampler binding not less than gl_MaxCombinedTextureImageUnits", "binding", type.isArray() ? 
"(using array)" : ""); +#endif + } + if (type.isAtomic()) { + if (qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) { + error(loc, "atomic_uint binding is too large; see gl_MaxAtomicCounterBindings", "binding", ""); + return; + } + } + } else if (!intermediate.getAutoMapBindings()) { + // some types require bindings + + // atomic_uint + if (type.isAtomic()) + error(loc, "layout(binding=X) is required", "atomic_uint", ""); + + // SPIR-V + if (spvVersion.spv > 0) { + if (qualifier.isUniformOrBuffer()) { + if (type.getBasicType() == EbtBlock && !qualifier.isPushConstant() && + !qualifier.isShaderRecord() && + !qualifier.hasAttachment() && + !qualifier.hasBufferReference()) + error(loc, "uniform/buffer blocks require layout(binding=X)", "binding", ""); + else if (spvVersion.vulkan > 0 && type.getBasicType() == EbtSampler) + error(loc, "sampler/texture/image requires layout(binding=X)", "binding", ""); + } + } + } + + // some things can't have arrays of arrays + if (type.isArrayOfArrays()) { + if (spvVersion.vulkan > 0) { + if (type.isOpaque() || (type.getQualifier().isUniformOrBuffer() && type.getBasicType() == EbtBlock)) + warn(loc, "Generating SPIR-V array-of-arrays, but Vulkan only supports single array level for this resource", "[][]", ""); + } + } + + // "The offset qualifier can only be used on block members of blocks..." + if (qualifier.hasOffset()) { + if (type.getBasicType() == EbtBlock) + error(loc, "only applies to block members, not blocks", "offset", ""); + } + + // Image format + if (qualifier.hasFormat()) { + if (! type.isImage()) + error(loc, "only apply to images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + else { + if (type.getSampler().type == EbtFloat && qualifier.getFormat() > ElfFloatGuard) + error(loc, "does not apply to floating point images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + if (type.getSampler().type == EbtInt && (qualifier.getFormat() < ElfFloatGuard || qualifier.getFormat() > ElfIntGuard)) + error(loc, "does not apply to signed integer images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + if (type.getSampler().type == EbtUint && qualifier.getFormat() < ElfIntGuard) + error(loc, "does not apply to unsigned integer images", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + + if (isEsProfile()) { + // "Except for image variables qualified with the format qualifiers r32f, r32i, and r32ui, image variables must + // specify either memory qualifier readonly or the memory qualifier writeonly." + if (! (qualifier.getFormat() == ElfR32f || qualifier.getFormat() == ElfR32i || qualifier.getFormat() == ElfR32ui)) { + if (! qualifier.isReadOnly() && ! qualifier.isWriteOnly()) + error(loc, "format requires readonly or writeonly memory qualifier", TQualifier::getLayoutFormatString(qualifier.getFormat()), ""); + } + } + } + } else if (type.isImage() && ! 
qualifier.isWriteOnly()) { + const char *explanation = "image variables not declared 'writeonly' and without a format layout qualifier"; + requireProfile(loc, ECoreProfile | ECompatibilityProfile, explanation); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 0, E_GL_EXT_shader_image_load_formatted, explanation); + } + + if (qualifier.isPushConstant() && type.getBasicType() != EbtBlock) + error(loc, "can only be used with a block", "push_constant", ""); + + if (qualifier.hasBufferReference() && type.getBasicType() != EbtBlock) + error(loc, "can only be used with a block", "buffer_reference", ""); + + if (qualifier.isShaderRecord() && type.getBasicType() != EbtBlock) + error(loc, "can only be used with a block", "shaderRecordNV", ""); + + // input attachment + if (type.isSubpass()) { + if (! qualifier.hasAttachment()) + error(loc, "requires an input_attachment_index layout qualifier", "subpass", ""); + } else { + if (qualifier.hasAttachment()) + error(loc, "can only be used with a subpass", "input_attachment_index", ""); + } + + // specialization-constant id + if (qualifier.hasSpecConstantId()) { + if (type.getQualifier().storage != EvqConst) + error(loc, "can only be applied to 'const'-qualified scalar", "constant_id", ""); + if (! type.isScalar()) + error(loc, "can only be applied to a scalar", "constant_id", ""); + switch (type.getBasicType()) + { + case EbtInt8: + case EbtUint8: + case EbtInt16: + case EbtUint16: + case EbtInt: + case EbtUint: + case EbtInt64: + case EbtUint64: + case EbtBool: + case EbtFloat: + case EbtDouble: + case EbtFloat16: + break; + default: + error(loc, "cannot be applied to this type", "constant_id", ""); + break; + } + } +} + +// Do layout error checking that can be done within a layout qualifier proper, not needing to know +// if there are blocks, atomic counters, variables, etc. +void TParseContext::layoutQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier) +{ + if (qualifier.storage == EvqShared && qualifier.hasLayout()) + error(loc, "cannot apply layout qualifiers to a shared variable", "shared", ""); + + // "It is a compile-time error to use *component* without also specifying the location qualifier (order does not matter)." + if (qualifier.hasComponent() && ! qualifier.hasLocation()) + error(loc, "must specify 'location' to use 'component'", "component", ""); + + if (qualifier.hasAnyLocation()) { + + // "As with input layout qualifiers, all shaders except compute shaders + // allow *location* layout qualifiers on output variable declarations, + // output block declarations, and output block member declarations." 
+ + switch (qualifier.storage) { +#ifndef GLSLANG_WEB + case EvqVaryingIn: + { + const char* feature = "location qualifier on input"; + if (isEsProfile() && version < 310) + requireStage(loc, EShLangVertex, feature); + else + requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature); + if (language == EShLangVertex) { + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + profileRequires(loc, ~EEsProfile, 330, 2, exts, feature); + profileRequires(loc, EEsProfile, 300, nullptr, feature); + } else { + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + } + break; + } + case EvqVaryingOut: + { + const char* feature = "location qualifier on output"; + if (isEsProfile() && version < 310) + requireStage(loc, EShLangFragment, feature); + else + requireStage(loc, (EShLanguageMask)~EShLangComputeMask, feature); + if (language == EShLangFragment) { + const char* exts[2] = { E_GL_ARB_separate_shader_objects, E_GL_ARB_explicit_attrib_location }; + profileRequires(loc, ~EEsProfile, 330, 2, exts, feature); + profileRequires(loc, EEsProfile, 300, nullptr, feature); + } else { + profileRequires(loc, ~EEsProfile, 410, E_GL_ARB_separate_shader_objects, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + } + break; + } +#endif + case EvqUniform: + case EvqBuffer: + { + const char* feature = "location qualifier on uniform or buffer"; + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile | ENoProfile, feature); + profileRequires(loc, ~EEsProfile, 330, E_GL_ARB_explicit_attrib_location, feature); + profileRequires(loc, ~EEsProfile, 430, E_GL_ARB_explicit_uniform_location, feature); + profileRequires(loc, EEsProfile, 310, nullptr, feature); + break; + } + default: + break; + } + if (qualifier.hasIndex()) { + if (qualifier.storage != EvqVaryingOut) + error(loc, "can only be used on an output", "index", ""); + if (! qualifier.hasLocation()) + error(loc, "can only be used with an explicit location", "index", ""); + } + } + + if (qualifier.hasBinding()) { + if (! qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) + error(loc, "requires uniform or buffer storage qualifier", "binding", ""); + } + if (qualifier.hasStream()) { + if (!qualifier.isPipeOutput()) + error(loc, "can only be used on an output", "stream", ""); + } + if (qualifier.hasXfb()) { + if (!qualifier.isPipeOutput()) + error(loc, "can only be used on an output", "xfb layout qualifier", ""); + } + if (qualifier.hasUniformLayout()) { + if (! 
qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) { + if (qualifier.hasMatrix() || qualifier.hasPacking()) + error(loc, "matrix or packing qualifiers can only be used on a uniform or buffer", "layout", ""); + if (qualifier.hasOffset() || qualifier.hasAlign()) + error(loc, "offset/align can only be used on a uniform or buffer", "layout", ""); + } + } + if (qualifier.isPushConstant()) { + if (qualifier.storage != EvqUniform) + error(loc, "can only be used with a uniform", "push_constant", ""); + if (qualifier.hasSet()) + error(loc, "cannot be used with push_constant", "set", ""); + } + if (qualifier.hasBufferReference()) { + if (qualifier.storage != EvqBuffer) + error(loc, "can only be used with buffer", "buffer_reference", ""); + } + if (qualifier.isShaderRecord()) { + if (qualifier.storage != EvqBuffer) + error(loc, "can only be used with a buffer", "shaderRecordNV", ""); + if (qualifier.hasBinding()) + error(loc, "cannot be used with shaderRecordNV", "binding", ""); + if (qualifier.hasSet()) + error(loc, "cannot be used with shaderRecordNV", "set", ""); + + } + if (qualifier.storage == EvqHitAttr && qualifier.hasLayout()) { + error(loc, "cannot apply layout qualifiers to hitAttributeNV variable", "hitAttributeNV", ""); + } +} + +// For places that can't have shader-level layout qualifiers +void TParseContext::checkNoShaderLayouts(const TSourceLoc& loc, const TShaderQualifiers& shaderQualifiers) +{ +#ifndef GLSLANG_WEB + const char* message = "can only apply to a standalone qualifier"; + + if (shaderQualifiers.geometry != ElgNone) + error(loc, message, TQualifier::getGeometryString(shaderQualifiers.geometry), ""); + if (shaderQualifiers.spacing != EvsNone) + error(loc, message, TQualifier::getVertexSpacingString(shaderQualifiers.spacing), ""); + if (shaderQualifiers.order != EvoNone) + error(loc, message, TQualifier::getVertexOrderString(shaderQualifiers.order), ""); + if (shaderQualifiers.pointMode) + error(loc, message, "point_mode", ""); + if (shaderQualifiers.invocations != TQualifier::layoutNotSet) + error(loc, message, "invocations", ""); + for (int i = 0; i < 3; ++i) { + if (shaderQualifiers.localSize[i] > 1) + error(loc, message, "local_size", ""); + if (shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) + error(loc, message, "local_size id", ""); + } + if (shaderQualifiers.vertices != TQualifier::layoutNotSet) { + if (language == EShLangGeometry || language == EShLangMeshNV) + error(loc, message, "max_vertices", ""); + else if (language == EShLangTessControl) + error(loc, message, "vertices", ""); + else + assert(0); + } + if (shaderQualifiers.earlyFragmentTests) + error(loc, message, "early_fragment_tests", ""); + if (shaderQualifiers.postDepthCoverage) + error(loc, message, "post_depth_coverage", ""); + if (shaderQualifiers.primitives != TQualifier::layoutNotSet) { + if (language == EShLangMeshNV) + error(loc, message, "max_primitives", ""); + else + assert(0); + } + if (shaderQualifiers.hasBlendEquation()) + error(loc, message, "blend equation", ""); + if (shaderQualifiers.numViews != TQualifier::layoutNotSet) + error(loc, message, "num_views", ""); + if (shaderQualifiers.interlockOrdering != EioNone) + error(loc, message, TQualifier::getInterlockOrderingString(shaderQualifiers.interlockOrdering), ""); + if (shaderQualifiers.layoutPrimitiveCulling) + error(loc, "can only be applied as standalone", "primitive_culling", ""); +#endif +} + +// Correct and/or advance an object's offset layout qualifier. 
+void TParseContext::fixOffset(const TSourceLoc& loc, TSymbol& symbol) +{ + const TQualifier& qualifier = symbol.getType().getQualifier(); +#ifndef GLSLANG_WEB + if (symbol.getType().isAtomic()) { + if (qualifier.hasBinding() && (int)qualifier.layoutBinding < resources.maxAtomicCounterBindings) { + + // Set the offset + int offset; + if (qualifier.hasOffset()) + offset = qualifier.layoutOffset; + else + offset = atomicUintOffsets[qualifier.layoutBinding]; + + if (offset % 4 != 0) + error(loc, "atomic counters offset should align based on 4:", "offset", "%d", offset); + + symbol.getWritableType().getQualifier().layoutOffset = offset; + + // Check for overlap + int numOffsets = 4; + if (symbol.getType().isArray()) { + if (symbol.getType().isSizedArray() && !symbol.getType().getArraySizes()->isInnerUnsized()) + numOffsets *= symbol.getType().getCumulativeArraySize(); + else { + // "It is a compile-time error to declare an unsized array of atomic_uint." + error(loc, "array must be explicitly sized", "atomic_uint", ""); + } + } + int repeated = intermediate.addUsedOffsets(qualifier.layoutBinding, offset, numOffsets); + if (repeated >= 0) + error(loc, "atomic counters sharing the same offset:", "offset", "%d", repeated); + + // Bump the default offset + atomicUintOffsets[qualifier.layoutBinding] = offset + numOffsets; + } + } +#endif +} + +// +// Look up a function name in the symbol table, and make sure it is a function. +// +// Return the function symbol if found, otherwise nullptr. +// +const TFunction* TParseContext::findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn) +{ + if (symbolTable.isFunctionNameVariable(call.getName())) { + error(loc, "can't use function syntax on variable", call.getName().c_str(), ""); + return nullptr; + } + +#ifdef GLSLANG_WEB + return findFunctionExact(loc, call, builtIn); +#endif + + const TFunction* function = nullptr; + + // debugPrintfEXT has var args and is in the symbol table as "debugPrintfEXT()", + // mangled to "debugPrintfEXT(" + if (call.getName() == "debugPrintfEXT") { + TSymbol* symbol = symbolTable.find("debugPrintfEXT(", &builtIn); + if (symbol) + return symbol->getAsFunction(); + } + + bool explicitTypesEnabled = extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32) || + extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64); + + if (isEsProfile()) + function = (extensionTurnedOn(E_GL_EXT_shader_implicit_conversions) && version >= 310) ? + findFunction120(loc, call, builtIn) : findFunctionExact(loc, call, builtIn); + else if (version < 120) + function = findFunctionExact(loc, call, builtIn); + else if (version < 400) + function = extensionTurnedOn(E_GL_ARB_gpu_shader_fp64) ? findFunction400(loc, call, builtIn) : findFunction120(loc, call, builtIn); + else if (explicitTypesEnabled) + function = findFunctionExplicitTypes(loc, call, builtIn); + else + function = findFunction400(loc, call, builtIn); + + return function; +} + +// Function finding algorithm for ES and desktop 110. 
+const TFunction* TParseContext::findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol == nullptr) {
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+ return nullptr;
+ }
+
+ return symbol->getAsFunction();
+}
+
+// Function finding algorithm for desktop versions 120 through 330.
+const TFunction* TParseContext::findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // exact match not found, look through a list of overloaded functions of the same name
+
+ // "If no exact match is found, then [implicit conversions] will be applied to find a match. Mismatched types
+ // on input parameters (in or inout or default) must have a conversion from the calling argument type to the
+ // formal parameter type. Mismatched types on output parameters (out or inout) must have a conversion
+ // from the formal parameter type to the calling argument type. When argument conversions are used to find
+ // a match, it is a semantic error if there are multiple ways to apply these conversions to make the call match
+ // more than one function."
+
+ const TFunction* candidate = nullptr;
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
+ const TFunction& function = *(*it);
+
+ // to even be a potential match, number of arguments has to match
+ if (call.getParamCount() != function.getParamCount())
+ continue;
+
+ bool possibleMatch = true;
+ for (int i = 0; i < function.getParamCount(); ++i) {
+ // same types is easy
+ if (*function[i].type == *call[i].type)
+ continue;
+
+ // We have a mismatch in type, see if it is implicitly convertible
+
+ if (function[i].type->isArray() || call[i].type->isArray() ||
+ ! function[i].type->sameElementShape(*call[i].type))
+ possibleMatch = false;
+ else {
+ // do direction-specific checks for conversion of basic type
+ if (function[i].type->getQualifier().isParamInput()) {
+ if (! intermediate.canImplicitlyPromote(call[i].type->getBasicType(), function[i].type->getBasicType()))
+ possibleMatch = false;
+ }
+ if (function[i].type->getQualifier().isParamOutput()) {
+ if (! intermediate.canImplicitlyPromote(function[i].type->getBasicType(), call[i].type->getBasicType()))
+ possibleMatch = false;
+ }
+ }
+ if (! possibleMatch)
+ break;
+ }
+ if (possibleMatch) {
+ if (candidate) {
+ // our second match, meaning ambiguity
+ error(loc, "ambiguous function signature match: multiple signatures match under implicit type conversion", call.getName().c_str(), "");
+ } else
+ candidate = &function;
+ }
+ }
+
+ if (candidate == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+
+ return candidate;
+}
+
+// Function finding algorithm for desktop version 400 and above.
+//
+// "When function calls are resolved, an exact type match for all the arguments
+// is sought. If an exact match is found, all other functions are ignored, and
+// the exact match is used. If no exact match is found, then the implicit
+// conversions in section 4.1.10 Implicit Conversions will be applied to find
+// a match. Mismatched types on input parameters (in or inout or default) must
+// have a conversion from the calling argument type to the formal parameter type.
+// Mismatched types on output parameters (out or inout) must have a conversion
+// from the formal parameter type to the calling argument type.
+//
+// "If implicit conversions can be used to find more than one matching function,
+// a single best-matching function is sought. To determine a best match, the
+// conversions between calling argument and formal parameter types are compared
+// for each function argument and pair of matching functions. After these
+// comparisons are performed, each pair of matching functions are compared.
+// A function declaration A is considered a better match than function
+// declaration B if
+//
+// * for at least one function argument, the conversion for that argument in A
+// is better than the corresponding conversion in B; and
+// * there is no function argument for which the conversion in B is better than
+// the corresponding conversion in A.
+//
+// "If a single function declaration is considered a better match than every
+// other matching function declaration, it will be used. Otherwise, a
+// compile-time semantic error for an ambiguous overloaded function call occurs.
+//
+// "To determine whether the conversion for a single argument in one match is
+// better than that for another match, the following rules are applied, in order:
+//
+// 1. An exact match is better than a match involving any implicit conversion.
+// 2. A match involving an implicit conversion from float to double is better
+// than a match involving any other implicit conversion.
+// 3. A match involving an implicit conversion from either int or uint to float
+// is better than a match involving an implicit conversion from either int
+// or uint to double.
+//
+// "If none of the rules above apply to a particular pair of conversions, neither
+// conversion is considered better than the other."
+//
+const TFunction* TParseContext::findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // no exact match, use the generic selector, parameterized by the GLSL rules
+
+ // create list of candidates to send
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ // can 'from' convert to 'to'?
+ const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+ if (from == to)
+ return true;
+ if (from.coopMatParameterOK(to))
+ return true;
+ // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+ if (builtIn && from.isArray() && to.isUnsizedArray()) {
+ TType fromElementType(from, 0);
+ TType toElementType(to, 0);
+ if (fromElementType == toElementType)
+ return true;
+ }
+ if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+ return false;
+ if (from.isCoopMat() && to.isCoopMat())
+ return from.sameCoopMatBaseType(to);
+ return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+ };
+
+ // Is 'to2' a better conversion than 'to1'?
+ // Ties should not be considered as better.
+ // Assumes 'convertible' already said true.
+ const auto better = [](const TType& from, const TType& to1, const TType& to2) -> bool {
+ // 1. exact match
+ if (from == to2)
+ return from != to1;
+ if (from == to1)
+ return false;
+
+ // 2. float -> double is better
+ if (from.getBasicType() == EbtFloat) {
+ if (to2.getBasicType() == EbtDouble && to1.getBasicType() != EbtDouble)
+ return true;
+ }
+
+ // 3. -> float is better than -> double
+ return to2.getBasicType() == EbtFloat && to1.getBasicType() == EbtDouble;
+ };
+
+ // for ambiguity reporting
+ bool tie = false;
+
+ // send to the generic selector
+ const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie);
+
+ if (bestMatch == nullptr)
+ error(loc, "no matching overloaded function found", call.getName().c_str(), "");
+ else if (tie)
+ error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), "");
+
+ return bestMatch;
+}
+
+// "To determine whether the conversion for a single argument in one match
+// is better than that for another match, the conversion is assigned of the
+// three ranks ordered from best to worst:
+// 1. Exact match: no conversion.
+// 2. Promotion: integral or floating-point promotion.
+// 3. Conversion: integral conversion, floating-point conversion,
+// floating-integral conversion.
+// A conversion C1 is better than a conversion C2 if the rank of C1 is
+// better than the rank of C2."
+const TFunction* TParseContext::findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn)
+{
+ // first, look for an exact match
+ TSymbol* symbol = symbolTable.find(call.getMangledName(), &builtIn);
+ if (symbol)
+ return symbol->getAsFunction();
+
+ // no exact match, use the generic selector, parameterized by the GLSL rules
+
+ // create list of candidates to send
+ TVector<const TFunction*> candidateList;
+ symbolTable.findFunctionNameList(call.getMangledName(), candidateList, builtIn);
+
+ // can 'from' convert to 'to'?
+ const auto convertible = [this,builtIn](const TType& from, const TType& to, TOperator, int) -> bool {
+ if (from == to)
+ return true;
+ if (from.coopMatParameterOK(to))
+ return true;
+ // Allow a sized array to be passed through an unsized array parameter, for coopMatLoad/Store functions
+ if (builtIn && from.isArray() && to.isUnsizedArray()) {
+ TType fromElementType(from, 0);
+ TType toElementType(to, 0);
+ if (fromElementType == toElementType)
+ return true;
+ }
+ if (from.isArray() || to.isArray() || ! from.sameElementShape(to))
+ return false;
+ if (from.isCoopMat() && to.isCoopMat())
+ return from.sameCoopMatBaseType(to);
+ return intermediate.canImplicitlyPromote(from.getBasicType(), to.getBasicType());
+ };
+
+ // Is 'to2' a better conversion than 'to1'?
+ // Ties should not be considered as better.
+ // Assumes 'convertible' already said true.
+ const auto better = [this](const TType& from, const TType& to1, const TType& to2) -> bool {
+ // 1. exact match
+ if (from == to2)
+ return from != to1;
+ if (from == to1)
+ return false;
+
+ // 2. Promotion (integral, floating-point) is better
+ TBasicType from_type = from.getBasicType();
+ TBasicType to1_type = to1.getBasicType();
+ TBasicType to2_type = to2.getBasicType();
+ bool isPromotion1 = (intermediate.isIntegralPromotion(from_type, to1_type) ||
+ intermediate.isFPPromotion(from_type, to1_type));
+ bool isPromotion2 = (intermediate.isIntegralPromotion(from_type, to2_type) ||
+ intermediate.isFPPromotion(from_type, to2_type));
+ if (isPromotion2)
+ return !isPromotion1;
+ if (isPromotion1)
+ return false;
+
+ // 3. 
Conversion (integral, floating-point , floating-integral) + bool isConversion1 = (intermediate.isIntegralConversion(from_type, to1_type) || + intermediate.isFPConversion(from_type, to1_type) || + intermediate.isFPIntegralConversion(from_type, to1_type)); + bool isConversion2 = (intermediate.isIntegralConversion(from_type, to2_type) || + intermediate.isFPConversion(from_type, to2_type) || + intermediate.isFPIntegralConversion(from_type, to2_type)); + + return isConversion2 && !isConversion1; + }; + + // for ambiguity reporting + bool tie = false; + + // send to the generic selector + const TFunction* bestMatch = selectFunction(candidateList, call, convertible, better, tie); + + if (bestMatch == nullptr) + error(loc, "no matching overloaded function found", call.getName().c_str(), ""); + else if (tie) + error(loc, "ambiguous best function under implicit type conversion", call.getName().c_str(), ""); + + return bestMatch; +} + +// When a declaration includes a type, but not a variable name, it can be used +// to establish defaults. +void TParseContext::declareTypeDefaults(const TSourceLoc& loc, const TPublicType& publicType) +{ +#ifndef GLSLANG_WEB + if (publicType.basicType == EbtAtomicUint && publicType.qualifier.hasBinding()) { + if (publicType.qualifier.layoutBinding >= (unsigned int)resources.maxAtomicCounterBindings) { + error(loc, "atomic_uint binding is too large", "binding", ""); + return; + } + + if(publicType.qualifier.hasOffset()) { + atomicUintOffsets[publicType.qualifier.layoutBinding] = publicType.qualifier.layoutOffset; + } + return; + } + + if (publicType.qualifier.hasLayout() && !publicType.qualifier.hasBufferReference()) + warn(loc, "useless application of layout qualifier", "layout", ""); +#endif +} + +// +// Do everything necessary to handle a variable (non-block) declaration. +// Either redeclaring a variable, or making a new one, updating the symbol +// table, and all error checking. +// +// Returns a subtree node that computes an initializer, if needed. +// Returns nullptr if there is no code to execute for initialization. +// +// 'publicType' is the type part of the declaration (to the left) +// 'arraySizes' is the arrayness tagged on the identifier (to the right) +// +TIntermNode* TParseContext::declareVariable(const TSourceLoc& loc, TString& identifier, const TPublicType& publicType, + TArraySizes* arraySizes, TIntermTyped* initializer) +{ + // Make a fresh type that combines the characteristics from the individual + // identifier syntax and the declaration-type syntax. 
+ TType type(publicType); + type.transferArraySizes(arraySizes); + type.copyArrayInnerSizes(publicType.arraySizes); + arrayOfArrayVersionCheck(loc, type.getArraySizes()); + + if (initializer) { + if (type.getBasicType() == EbtRayQuery) { + error(loc, "ray queries can only be initialized by using the rayQueryInitializeEXT intrinsic:", "=", identifier.c_str()); + } + } + + if (type.isCoopMat()) { + intermediate.setUseVulkanMemoryModel(); + intermediate.setUseStorageBuffer(); + + if (!publicType.typeParameters || publicType.typeParameters->getNumDims() != 4) { + error(loc, "expected four type parameters", identifier.c_str(), ""); + } + if (publicType.typeParameters) { + if (isTypeFloat(publicType.basicType) && + publicType.typeParameters->getDimSize(0) != 16 && + publicType.typeParameters->getDimSize(0) != 32 && + publicType.typeParameters->getDimSize(0) != 64) { + error(loc, "expected 16, 32, or 64 bits for first type parameter", identifier.c_str(), ""); + } + if (isTypeInt(publicType.basicType) && + publicType.typeParameters->getDimSize(0) != 8 && + publicType.typeParameters->getDimSize(0) != 32) { + error(loc, "expected 8 or 32 bits for first type parameter", identifier.c_str(), ""); + } + } + + } else { + if (publicType.typeParameters && publicType.typeParameters->getNumDims() != 0) { + error(loc, "unexpected type parameters", identifier.c_str(), ""); + } + } + + if (voidErrorCheck(loc, identifier, type.getBasicType())) + return nullptr; + + if (initializer) + rValueErrorCheck(loc, "initializer", initializer); + else + nonInitConstCheck(loc, identifier, type); + + samplerCheck(loc, type, identifier, initializer); + transparentOpaqueCheck(loc, type, identifier); +#ifndef GLSLANG_WEB + atomicUintCheck(loc, type, identifier); + accStructCheck(loc, type, identifier); + checkAndResizeMeshViewDim(loc, type, /*isBlockMember*/ false); +#endif + if (type.getQualifier().storage == EvqConst && type.containsReference()) { + error(loc, "variables with reference type can't have qualifier 'const'", "qualifier", ""); + } + + if (type.getQualifier().storage != EvqUniform && type.getQualifier().storage != EvqBuffer) { + if (type.contains16BitFloat()) + requireFloat16Arithmetic(loc, "qualifier", "float16 types can only be in uniform block or buffer storage"); + if (type.contains16BitInt()) + requireInt16Arithmetic(loc, "qualifier", "(u)int16 types can only be in uniform block or buffer storage"); + if (type.contains8BitInt()) + requireInt8Arithmetic(loc, "qualifier", "(u)int8 types can only be in uniform block or buffer storage"); + } + + if (type.getQualifier().storage == EvqShared && type.containsCoopMat()) + error(loc, "qualifier", "Cooperative matrix types must not be used in shared memory", ""); + + if (identifier != "gl_FragCoord" && (publicType.shaderQualifiers.originUpperLeft || publicType.shaderQualifiers.pixelCenterInteger)) + error(loc, "can only apply origin_upper_left and pixel_center_origin to gl_FragCoord", "layout qualifier", ""); + if (identifier != "gl_FragDepth" && publicType.shaderQualifiers.getDepth() != EldNone) + error(loc, "can only apply depth layout to gl_FragDepth", "layout qualifier", ""); + + // Check for redeclaration of built-ins and/or attempting to declare a reserved name + TSymbol* symbol = redeclareBuiltinVariable(loc, identifier, type.getQualifier(), publicType.shaderQualifiers); + if (symbol == nullptr) + reservedErrorCheck(loc, identifier); + + inheritGlobalDefaults(type.getQualifier()); + + // Declare the variable + if (type.isArray()) { + // Check that implicit sizing 
is only where allowed. + arraySizesCheck(loc, type.getQualifier(), type.getArraySizes(), initializer, false); + + if (! arrayQualifierError(loc, type.getQualifier()) && ! arrayError(loc, type)) + declareArray(loc, identifier, type, symbol); + + if (initializer) { + profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "initializer"); + profileRequires(loc, EEsProfile, 300, nullptr, "initializer"); + } + } else { + // non-array case + if (symbol == nullptr) + symbol = declareNonArray(loc, identifier, type); + else if (type != symbol->getType()) + error(loc, "cannot change the type of", "redeclaration", symbol->getName().c_str()); + } + + if (symbol == nullptr) + return nullptr; + + // Deal with initializer + TIntermNode* initNode = nullptr; + if (symbol != nullptr && initializer) { + TVariable* variable = symbol->getAsVariable(); + if (! variable) { + error(loc, "initializer requires a variable, not a member", identifier.c_str(), ""); + return nullptr; + } + initNode = executeInitializer(loc, initializer, variable); + } + + // look for errors in layout qualifier use + layoutObjectCheck(loc, *symbol); + + // fix up + fixOffset(loc, *symbol); + + return initNode; +} + +// Pick up global defaults from the provide global defaults into dst. +void TParseContext::inheritGlobalDefaults(TQualifier& dst) const +{ +#ifndef GLSLANG_WEB + if (dst.storage == EvqVaryingOut) { + if (! dst.hasStream() && language == EShLangGeometry) + dst.layoutStream = globalOutputDefaults.layoutStream; + if (! dst.hasXfbBuffer()) + dst.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer; + } +#endif +} + +// +// Make an internal-only variable whose name is for debug purposes only +// and won't be searched for. Callers will only use the return value to use +// the variable, not the name to look it up. It is okay if the name +// is the same as other names; there won't be any conflict. +// +TVariable* TParseContext::makeInternalVariable(const char* name, const TType& type) const +{ + TString* nameString = NewPoolTString(name); + TVariable* variable = new TVariable(nameString, type); + symbolTable.makeInternalVariable(*variable); + + return variable; +} + +// +// Declare a non-array variable, the main point being there is no redeclaration +// for resizing allowed. +// +// Return the successfully declared variable. +// +TVariable* TParseContext::declareNonArray(const TSourceLoc& loc, const TString& identifier, const TType& type) +{ + // make a new variable + TVariable* variable = new TVariable(&identifier, type); + +#ifndef GLSLANG_WEB + ioArrayCheck(loc, type, identifier); +#endif + + // add variable to symbol table + if (symbolTable.insert(*variable)) { + if (symbolTable.atGlobalLevel()) + trackLinkage(*variable); + return variable; + } + + error(loc, "redefinition", variable->getName().c_str(), ""); + return nullptr; +} + +// +// Handle all types of initializers from the grammar. +// +// Returning nullptr just means there is no code to execute to handle the +// initializer, which will, for example, be the case for constant initializers. +// +TIntermNode* TParseContext::executeInitializer(const TSourceLoc& loc, TIntermTyped* initializer, TVariable* variable) +{ + // + // Identifier must be of type constant, a global, or a temporary, and + // starting at version 120, desktop allows uniforms to have initializers. + // + TStorageQualifier qualifier = variable->getType().getQualifier().storage; + if (! 
(qualifier == EvqTemporary || qualifier == EvqGlobal || qualifier == EvqConst || + (qualifier == EvqUniform && !isEsProfile() && version >= 120))) { + error(loc, " cannot initialize this type of qualifier ", variable->getType().getStorageQualifierString(), ""); + return nullptr; + } + arrayObjectCheck(loc, variable->getType(), "array initializer"); + + // + // If the initializer was from braces { ... }, we convert the whole subtree to a + // constructor-style subtree, allowing the rest of the code to operate + // identically for both kinds of initializers. + // + // Type can't be deduced from the initializer list, so a skeletal type to + // follow has to be passed in. Constness and specialization-constness + // should be deduced bottom up, not dictated by the skeletal type. + // + TType skeletalType; + skeletalType.shallowCopy(variable->getType()); + skeletalType.getQualifier().makeTemporary(); +#ifndef GLSLANG_WEB + initializer = convertInitializerList(loc, skeletalType, initializer); +#endif + if (! initializer) { + // error recovery; don't leave const without constant values + if (qualifier == EvqConst) + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + + // Fix outer arrayness if variable is unsized, getting size from the initializer + if (initializer->getType().isSizedArray() && variable->getType().isUnsizedArray()) + variable->getWritableType().changeOuterArraySize(initializer->getType().getOuterArraySize()); + + // Inner arrayness can also get set by an initializer + if (initializer->getType().isArrayOfArrays() && variable->getType().isArrayOfArrays() && + initializer->getType().getArraySizes()->getNumDims() == + variable->getType().getArraySizes()->getNumDims()) { + // adopt unsized sizes from the initializer's sizes + for (int d = 1; d < variable->getType().getArraySizes()->getNumDims(); ++d) { + if (variable->getType().getArraySizes()->getDimSize(d) == UnsizedArraySize) { + variable->getWritableType().getArraySizes()->setDimSize(d, + initializer->getType().getArraySizes()->getDimSize(d)); + } + } + } + + // Uniforms require a compile-time constant initializer + if (qualifier == EvqUniform && ! initializer->getType().getQualifier().isFrontEndConstant()) { + error(loc, "uniform initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str()); + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + // Global consts require a constant initializer (specialization constant is okay) + if (qualifier == EvqConst && symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) { + error(loc, "global const initializers must be constant", "=", "'%s'", variable->getType().getCompleteString().c_str()); + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + + // Const variables require a constant initializer, depending on version + if (qualifier == EvqConst) { + if (! initializer->getType().getQualifier().isConstant()) { + const char* initFeature = "non-constant initializer"; + requireProfile(loc, ~EEsProfile, initFeature); + profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + variable->getWritableType().getQualifier().storage = EvqConstReadOnly; + qualifier = EvqConstReadOnly; + } + } else { + // Non-const global variables in ES need a const initializer. + // + // "In declarations of global variables with no storage qualifier or with a const + // qualifier any initializer must be a constant expression." 
+ if (symbolTable.atGlobalLevel() && ! initializer->getType().getQualifier().isConstant()) { + const char* initFeature = "non-constant global initializer (needs GL_EXT_shader_non_constant_global_initializers)"; + if (isEsProfile()) { + if (relaxedErrors() && ! extensionTurnedOn(E_GL_EXT_shader_non_constant_global_initializers)) + warn(loc, "not allowed in this version", initFeature, ""); + else + profileRequires(loc, EEsProfile, 0, E_GL_EXT_shader_non_constant_global_initializers, initFeature); + } + } + } + + if (qualifier == EvqConst || qualifier == EvqUniform) { + // Compile-time tagging of the variable with its constant value... + + initializer = intermediate.addConversion(EOpAssign, variable->getType(), initializer); + if (! initializer || ! initializer->getType().getQualifier().isConstant() || variable->getType() != initializer->getType()) { + error(loc, "non-matching or non-convertible constant type for const initializer", + variable->getType().getStorageQualifierString(), ""); + variable->getWritableType().getQualifier().makeTemporary(); + return nullptr; + } + + // We either have a folded constant in getAsConstantUnion, or we have to use + // the initializer's subtree in the AST to represent the computation of a + // specialization constant. + assert(initializer->getAsConstantUnion() || initializer->getType().getQualifier().isSpecConstant()); + if (initializer->getAsConstantUnion()) + variable->setConstArray(initializer->getAsConstantUnion()->getConstArray()); + else { + // It's a specialization constant. + variable->getWritableType().getQualifier().makeSpecConstant(); + + // Keep the subtree that computes the specialization constant with the variable. + // Later, a symbol node will adopt the subtree from the variable. + variable->setConstSubtree(initializer); + } + } else { + // normal assigning of a value to a variable... + specializationCheck(loc, initializer->getType(), "initializer"); + TIntermSymbol* intermSymbol = intermediate.addSymbol(*variable, loc); + TIntermTyped* initNode = intermediate.addAssign(EOpAssign, intermSymbol, initializer, loc); + if (! initNode) + assignError(loc, "=", intermSymbol->getCompleteString(), initializer->getCompleteString()); + + return initNode; + } + + return nullptr; +} + +// +// Reprocess any initializer-list (the "{ ... }" syntax) parts of the +// initializer. +// +// Need to hierarchically assign correct types and implicit +// conversions. Will do this mimicking the same process used for +// creating a constructor-style initializer, ensuring we get the +// same form. However, it has to in parallel walk the 'type' +// passed in, as type cannot be deduced from an initializer list. +// +TIntermTyped* TParseContext::convertInitializerList(const TSourceLoc& loc, const TType& type, TIntermTyped* initializer) +{ + // Will operate recursively. Once a subtree is found that is constructor style, + // everything below it is already good: Only the "top part" of the initializer + // can be an initializer list, where "top part" can extend for several (or all) levels. + + // see if we have bottomed out in the tree within the initializer-list part + TIntermAggregate* initList = initializer->getAsAggregate(); + if (! initList || initList->getOp() != EOpNull) + return initializer; + + // Of the initializer-list set of nodes, need to process bottom up, + // so recurse deep, then process on the way up. + + // Go down the tree here... 
+ if (type.isArray()) { + // The type's array might be unsized, which could be okay, so base sizes on the size of the aggregate. + // Later on, initializer execution code will deal with array size logic. + TType arrayType; + arrayType.shallowCopy(type); // sharing struct stuff is fine + arrayType.copyArraySizes(*type.getArraySizes()); // but get a fresh copy of the array information, to edit below + + // edit array sizes to fill in unsized dimensions + arrayType.changeOuterArraySize((int)initList->getSequence().size()); + TIntermTyped* firstInit = initList->getSequence()[0]->getAsTyped(); + if (arrayType.isArrayOfArrays() && firstInit->getType().isArray() && + arrayType.getArraySizes()->getNumDims() == firstInit->getType().getArraySizes()->getNumDims() + 1) { + for (int d = 1; d < arrayType.getArraySizes()->getNumDims(); ++d) { + if (arrayType.getArraySizes()->getDimSize(d) == UnsizedArraySize) + arrayType.getArraySizes()->setDimSize(d, firstInit->getType().getArraySizes()->getDimSize(d - 1)); + } + } + + TType elementType(arrayType, 0); // dereferenced type + for (size_t i = 0; i < initList->getSequence().size(); ++i) { + initList->getSequence()[i] = convertInitializerList(loc, elementType, initList->getSequence()[i]->getAsTyped()); + if (initList->getSequence()[i] == nullptr) + return nullptr; + } + + return addConstructor(loc, initList, arrayType); + } else if (type.isStruct()) { + if (type.getStruct()->size() != initList->getSequence().size()) { + error(loc, "wrong number of structure members", "initializer list", ""); + return nullptr; + } + for (size_t i = 0; i < type.getStruct()->size(); ++i) { + initList->getSequence()[i] = convertInitializerList(loc, *(*type.getStruct())[i].type, initList->getSequence()[i]->getAsTyped()); + if (initList->getSequence()[i] == nullptr) + return nullptr; + } + } else if (type.isMatrix()) { + if (type.getMatrixCols() != (int)initList->getSequence().size()) { + error(loc, "wrong number of matrix columns:", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + TType vectorType(type, 0); // dereferenced type + for (int i = 0; i < type.getMatrixCols(); ++i) { + initList->getSequence()[i] = convertInitializerList(loc, vectorType, initList->getSequence()[i]->getAsTyped()); + if (initList->getSequence()[i] == nullptr) + return nullptr; + } + } else if (type.isVector()) { + if (type.getVectorSize() != (int)initList->getSequence().size()) { + error(loc, "wrong vector size (or rows in a matrix column):", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + } else { + error(loc, "unexpected initializer-list type:", "initializer list", type.getCompleteString().c_str()); + return nullptr; + } + + // Now that the subtree is processed, process this node as if the + // initializer list is a set of arguments to a constructor. + TIntermNode* emulatedConstructorArguments; + if (initList->getSequence().size() == 1) + emulatedConstructorArguments = initList->getSequence()[0]; + else + emulatedConstructorArguments = initList; + return addConstructor(loc, emulatedConstructorArguments, type); +} + +// +// Test for the correctness of the parameters passed to various constructor functions +// and also convert them to the right data type, if allowed and required. +// +// 'node' is what to construct from. +// 'type' is what type to construct. +// +// Returns nullptr for an error or the constructed node (aggregate or typed) for no error. 
+// +TIntermTyped* TParseContext::addConstructor(const TSourceLoc& loc, TIntermNode* node, const TType& type) +{ + if (node == nullptr || node->getAsTyped() == nullptr) + return nullptr; + rValueErrorCheck(loc, "constructor", node->getAsTyped()); + + TIntermAggregate* aggrNode = node->getAsAggregate(); + TOperator op = intermediate.mapTypeToConstructorOp(type); + + // Combined texture-sampler constructors are completely semantic checked + // in constructorTextureSamplerError() + if (op == EOpConstructTextureSampler) { + if (aggrNode->getSequence()[1]->getAsTyped()->getType().getSampler().shadow) { + // Transfer depth into the texture (SPIR-V image) type, as a hint + // for tools to know this texture/image is a depth image. + aggrNode->getSequence()[0]->getAsTyped()->getWritableType().getSampler().shadow = true; + } + return intermediate.setAggregateOperator(aggrNode, op, type, loc); + } + + TTypeList::const_iterator memberTypes; + if (op == EOpConstructStruct) + memberTypes = type.getStruct()->begin(); + + TType elementType; + if (type.isArray()) { + TType dereferenced(type, 0); + elementType.shallowCopy(dereferenced); + } else + elementType.shallowCopy(type); + + bool singleArg; + if (aggrNode) { + if (aggrNode->getOp() != EOpNull) + singleArg = true; + else + singleArg = false; + } else + singleArg = true; + + TIntermTyped *newNode; + if (singleArg) { + // If structure constructor or array constructor is being called + // for only one parameter inside the structure, we need to call constructAggregate function once. + if (type.isArray()) + newNode = constructAggregate(node, elementType, 1, node->getLoc()); + else if (op == EOpConstructStruct) + newNode = constructAggregate(node, *(*memberTypes).type, 1, node->getLoc()); + else + newNode = constructBuiltIn(type, op, node->getAsTyped(), node->getLoc(), false); + + if (newNode && (type.isArray() || op == EOpConstructStruct)) + newNode = intermediate.setAggregateOperator(newNode, EOpConstructStruct, type, loc); + + return newNode; + } + + // + // Handle list of arguments. + // + TIntermSequence &sequenceVector = aggrNode->getSequence(); // Stores the information about the parameter to the constructor + // if the structure constructor contains more than one parameter, then construct + // each parameter + + int paramCount = 0; // keeps track of the constructor parameter number being checked + + // for each parameter to the constructor call, check to see if the right type is passed or convert them + // to the right type if possible (and allowed). + // for structure constructors, just check if the right type is passed, no conversion is allowed. + for (TIntermSequence::iterator p = sequenceVector.begin(); + p != sequenceVector.end(); p++, paramCount++) { + if (type.isArray()) + newNode = constructAggregate(*p, elementType, paramCount+1, node->getLoc()); + else if (op == EOpConstructStruct) + newNode = constructAggregate(*p, *(memberTypes[paramCount]).type, paramCount+1, node->getLoc()); + else + newNode = constructBuiltIn(type, op, (*p)->getAsTyped(), node->getLoc(), true); + + if (newNode) + *p = newNode; + else + return nullptr; + } + + return intermediate.setAggregateOperator(aggrNode, op, type, loc); +} + +// Function for constructor implementation. Calls addUnaryMath with appropriate EOp value +// for the parameter to the constructor (passed to this function). Essentially, it converts +// the parameter types correctly. If a constructor expects an int (like ivec2) and is passed a +// float, then float is converted to int. 
+// +// Returns nullptr for an error or the constructed node. +// +TIntermTyped* TParseContext::constructBuiltIn(const TType& type, TOperator op, TIntermTyped* node, const TSourceLoc& loc, + bool subset) +{ + // If we are changing a matrix in both domain of basic type and to a non matrix, + // do the shape change first (by default, below, basic type is changed before shape). + // This avoids requesting a matrix of a new type that is going to be discarded anyway. + // TODO: This could be generalized to more type combinations, but that would require + // more extensive testing and full algorithm rework. For now, the need to do two changes makes + // the recursive call work, and avoids the most egregious case of creating integer matrices. + if (node->getType().isMatrix() && (type.isScalar() || type.isVector()) && + type.isFloatingDomain() != node->getType().isFloatingDomain()) { + TType transitionType(node->getBasicType(), glslang::EvqTemporary, type.getVectorSize(), 0, 0, node->isVector()); + TOperator transitionOp = intermediate.mapTypeToConstructorOp(transitionType); + node = constructBuiltIn(transitionType, transitionOp, node, loc, false); + } + + TIntermTyped* newNode; + TOperator basicOp; + + // + // First, convert types as needed. + // + switch (op) { + case EOpConstructVec2: + case EOpConstructVec3: + case EOpConstructVec4: + case EOpConstructMat2x2: + case EOpConstructMat2x3: + case EOpConstructMat2x4: + case EOpConstructMat3x2: + case EOpConstructMat3x3: + case EOpConstructMat3x4: + case EOpConstructMat4x2: + case EOpConstructMat4x3: + case EOpConstructMat4x4: + case EOpConstructFloat: + basicOp = EOpConstructFloat; + break; + + case EOpConstructIVec2: + case EOpConstructIVec3: + case EOpConstructIVec4: + case EOpConstructInt: + basicOp = EOpConstructInt; + break; + + case EOpConstructUVec2: + if (node->getType().getBasicType() == EbtReference) { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference_uvec2, "reference conversion to uvec2"); + TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUvec2, true, node, + type); + return newNode; + } + case EOpConstructUVec3: + case EOpConstructUVec4: + case EOpConstructUint: + basicOp = EOpConstructUint; + break; + + case EOpConstructBVec2: + case EOpConstructBVec3: + case EOpConstructBVec4: + case EOpConstructBool: + basicOp = EOpConstructBool; + break; + +#ifndef GLSLANG_WEB + + case EOpConstructDVec2: + case EOpConstructDVec3: + case EOpConstructDVec4: + case EOpConstructDMat2x2: + case EOpConstructDMat2x3: + case EOpConstructDMat2x4: + case EOpConstructDMat3x2: + case EOpConstructDMat3x3: + case EOpConstructDMat3x4: + case EOpConstructDMat4x2: + case EOpConstructDMat4x3: + case EOpConstructDMat4x4: + case EOpConstructDouble: + basicOp = EOpConstructDouble; + break; + + case EOpConstructF16Vec2: + case EOpConstructF16Vec3: + case EOpConstructF16Vec4: + case EOpConstructF16Mat2x2: + case EOpConstructF16Mat2x3: + case EOpConstructF16Mat2x4: + case EOpConstructF16Mat3x2: + case EOpConstructF16Mat3x3: + case EOpConstructF16Mat3x4: + case EOpConstructF16Mat4x2: + case EOpConstructF16Mat4x3: + case EOpConstructF16Mat4x4: + case EOpConstructFloat16: + basicOp = EOpConstructFloat16; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticFloat16Enabled()) { + TType tempType(EbtFloat, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator 
aggregateOp; + if (op == EOpConstructFloat16) + aggregateOp = EOpConstructFloat; + else + aggregateOp = (TOperator)(EOpConstructVec2 + op - EOpConstructF16Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtFloat16, newNode); + return newNode; + } + break; + + case EOpConstructI8Vec2: + case EOpConstructI8Vec3: + case EOpConstructI8Vec4: + case EOpConstructInt8: + basicOp = EOpConstructInt8; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt8Enabled()) { + TType tempType(EbtInt, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructInt8) + aggregateOp = EOpConstructInt; + else + aggregateOp = (TOperator)(EOpConstructIVec2 + op - EOpConstructI8Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtInt8, newNode); + return newNode; + } + break; + + case EOpConstructU8Vec2: + case EOpConstructU8Vec3: + case EOpConstructU8Vec4: + case EOpConstructUint8: + basicOp = EOpConstructUint8; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt8Enabled()) { + TType tempType(EbtUint, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructUint8) + aggregateOp = EOpConstructUint; + else + aggregateOp = (TOperator)(EOpConstructUVec2 + op - EOpConstructU8Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtUint8, newNode); + return newNode; + } + break; + + case EOpConstructI16Vec2: + case EOpConstructI16Vec3: + case EOpConstructI16Vec4: + case EOpConstructInt16: + basicOp = EOpConstructInt16; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt16Enabled()) { + TType tempType(EbtInt, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructInt16) + aggregateOp = EOpConstructInt; + else + aggregateOp = (TOperator)(EOpConstructIVec2 + op - EOpConstructI16Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtInt16, newNode); + return newNode; + } + break; + + case EOpConstructU16Vec2: + case EOpConstructU16Vec3: + case EOpConstructU16Vec4: + case EOpConstructUint16: + basicOp = EOpConstructUint16; + // 8/16-bit storage extensions don't support constructing composites of 8/16-bit types, + // so construct a 32-bit type and convert + if (!intermediate.getArithemeticInt16Enabled()) { + TType tempType(EbtUint, EvqTemporary, type.getVectorSize()); + newNode = node; + if (tempType != newNode->getType()) { + TOperator aggregateOp; + if (op == EOpConstructUint16) + aggregateOp = EOpConstructUint; + else + aggregateOp = (TOperator)(EOpConstructUVec2 + op - EOpConstructU16Vec2); + newNode = intermediate.setAggregateOperator(newNode, aggregateOp, tempType, node->getLoc()); + } + newNode = intermediate.addConversion(EbtUint16, newNode); + 
return newNode; + } + break; + + case EOpConstructI64Vec2: + case EOpConstructI64Vec3: + case EOpConstructI64Vec4: + case EOpConstructInt64: + basicOp = EOpConstructInt64; + break; + + case EOpConstructUint64: + if (type.isScalar() && node->getType().isReference()) { + TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvPtrToUint64, true, node, type); + return newNode; + } + // fall through + case EOpConstructU64Vec2: + case EOpConstructU64Vec3: + case EOpConstructU64Vec4: + basicOp = EOpConstructUint64; + break; + + case EOpConstructNonuniform: + // Make a nonuniform copy of node + newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpCopyObject, true, node, type); + return newNode; + + case EOpConstructReference: + // construct reference from reference + if (node->getType().isReference()) { + newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConstructReference, true, node, type); + return newNode; + // construct reference from uint64 + } else if (node->getType().isScalar() && node->getType().getBasicType() == EbtUint64) { + TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUint64ToPtr, true, node, + type); + return newNode; + // construct reference from uvec2 + } else if (node->getType().isVector() && node->getType().getBasicType() == EbtUint && + node->getVectorSize() == 2) { + requireExtensions(loc, 1, &E_GL_EXT_buffer_reference_uvec2, "uvec2 conversion to reference"); + TIntermTyped* newNode = intermediate.addBuiltInFunctionCall(node->getLoc(), EOpConvUvec2ToPtr, true, node, + type); + return newNode; + } else { + return nullptr; + } + + case EOpConstructCooperativeMatrix: + if (!node->getType().isCoopMat()) { + if (type.getBasicType() != node->getType().getBasicType()) { + node = intermediate.addConversion(type.getBasicType(), node); + if (node == nullptr) + return nullptr; + } + node = intermediate.setAggregateOperator(node, EOpConstructCooperativeMatrix, type, node->getLoc()); + } else { + TOperator op = EOpNull; + switch (type.getBasicType()) { + default: + assert(0); + break; + case EbtInt: + switch (node->getType().getBasicType()) { + case EbtFloat: op = EOpConvFloatToInt; break; + case EbtFloat16: op = EOpConvFloat16ToInt; break; + case EbtUint8: op = EOpConvUint8ToInt; break; + case EbtInt8: op = EOpConvInt8ToInt; break; + case EbtUint: op = EOpConvUintToInt; break; + default: assert(0); + } + break; + case EbtUint: + switch (node->getType().getBasicType()) { + case EbtFloat: op = EOpConvFloatToUint; break; + case EbtFloat16: op = EOpConvFloat16ToUint; break; + case EbtUint8: op = EOpConvUint8ToUint; break; + case EbtInt8: op = EOpConvInt8ToUint; break; + case EbtInt: op = EOpConvIntToUint; break; + case EbtUint: op = EOpConvUintToInt8; break; + default: assert(0); + } + break; + case EbtInt8: + switch (node->getType().getBasicType()) { + case EbtFloat: op = EOpConvFloatToInt8; break; + case EbtFloat16: op = EOpConvFloat16ToInt8; break; + case EbtUint8: op = EOpConvUint8ToInt8; break; + case EbtInt: op = EOpConvIntToInt8; break; + case EbtUint: op = EOpConvUintToInt8; break; + default: assert(0); + } + break; + case EbtUint8: + switch (node->getType().getBasicType()) { + case EbtFloat: op = EOpConvFloatToUint8; break; + case EbtFloat16: op = EOpConvFloat16ToUint8; break; + case EbtInt8: op = EOpConvInt8ToUint8; break; + case EbtInt: op = EOpConvIntToUint8; break; + case EbtUint: op = EOpConvUintToUint8; break; + default: assert(0); + } + break; + case EbtFloat: + switch 
(node->getType().getBasicType()) { + case EbtFloat16: op = EOpConvFloat16ToFloat; break; + case EbtInt8: op = EOpConvInt8ToFloat; break; + case EbtUint8: op = EOpConvUint8ToFloat; break; + case EbtInt: op = EOpConvIntToFloat; break; + case EbtUint: op = EOpConvUintToFloat; break; + default: assert(0); + } + break; + case EbtFloat16: + switch (node->getType().getBasicType()) { + case EbtFloat: op = EOpConvFloatToFloat16; break; + case EbtInt8: op = EOpConvInt8ToFloat16; break; + case EbtUint8: op = EOpConvUint8ToFloat16; break; + case EbtInt: op = EOpConvIntToFloat16; break; + case EbtUint: op = EOpConvUintToFloat16; break; + default: assert(0); + } + break; + } + + node = intermediate.addUnaryNode(op, node, node->getLoc(), type); + // If it's a (non-specialization) constant, it must be folded. + if (node->getAsUnaryNode()->getOperand()->getAsConstantUnion()) + return node->getAsUnaryNode()->getOperand()->getAsConstantUnion()->fold(op, node->getType()); + } + + return node; + +#endif // GLSLANG_WEB + + default: + error(loc, "unsupported construction", "", ""); + + return nullptr; + } + newNode = intermediate.addUnaryMath(basicOp, node, node->getLoc()); + if (newNode == nullptr) { + error(loc, "can't convert", "constructor", ""); + return nullptr; + } + + // + // Now, if there still isn't an operation to do the construction, and we need one, add one. + // + + // Otherwise, skip out early. + if (subset || (newNode != node && newNode->getType() == type)) + return newNode; + + // setAggregateOperator will insert a new node for the constructor, as needed. + return intermediate.setAggregateOperator(newNode, op, type, loc); +} + +// This function tests for the type of the parameters to the structure or array constructor. Raises +// an error message if the expected type does not match the parameter passed to the constructor. +// +// Returns nullptr for an error or the input node itself if the expected and the given parameter types match. +// +TIntermTyped* TParseContext::constructAggregate(TIntermNode* node, const TType& type, int paramCount, const TSourceLoc& loc) +{ + TIntermTyped* converted = intermediate.addConversion(EOpConstructStruct, type, node->getAsTyped()); + if (! converted || converted->getType() != type) { + error(loc, "", "constructor", "cannot convert parameter %d from '%s' to '%s'", paramCount, + node->getAsTyped()->getType().getCompleteString().c_str(), type.getCompleteString().c_str()); + + return nullptr; + } + + return converted; +} + +// If a memory qualifier is present in 'to', also make it present in 'from'. +void TParseContext::inheritMemoryQualifiers(const TQualifier& from, TQualifier& to) +{ +#ifndef GLSLANG_WEB + if (from.isReadOnly()) + to.readonly = from.readonly; + if (from.isWriteOnly()) + to.writeonly = from.writeonly; + if (from.coherent) + to.coherent = from.coherent; + if (from.volatil) + to.volatil = from.volatil; + if (from.restrict) + to.restrict = from.restrict; +#endif +} + +// +// Do everything needed to add an interface block. 
+// +void TParseContext::declareBlock(const TSourceLoc& loc, TTypeList& typeList, const TString* instanceName, + TArraySizes* arraySizes) +{ + blockStageIoCheck(loc, currentBlockQualifier); + blockQualifierCheck(loc, currentBlockQualifier, instanceName != nullptr); + if (arraySizes != nullptr) { + arraySizesCheck(loc, currentBlockQualifier, arraySizes, nullptr, false); + arrayOfArrayVersionCheck(loc, arraySizes); + if (arraySizes->getNumDims() > 1) + requireProfile(loc, ~EEsProfile, "array-of-array of block"); + } + + // Inherit and check member storage qualifiers WRT to the block-level qualifier. + for (unsigned int member = 0; member < typeList.size(); ++member) { + TType& memberType = *typeList[member].type; + TQualifier& memberQualifier = memberType.getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + globalQualifierFixCheck(memberLoc, memberQualifier); + if (memberQualifier.storage != EvqTemporary && memberQualifier.storage != EvqGlobal && memberQualifier.storage != currentBlockQualifier.storage) + error(memberLoc, "member storage qualifier cannot contradict block storage qualifier", memberType.getFieldName().c_str(), ""); + memberQualifier.storage = currentBlockQualifier.storage; +#ifndef GLSLANG_WEB + inheritMemoryQualifiers(currentBlockQualifier, memberQualifier); + if (currentBlockQualifier.perPrimitiveNV) + memberQualifier.perPrimitiveNV = currentBlockQualifier.perPrimitiveNV; + if (currentBlockQualifier.perViewNV) + memberQualifier.perViewNV = currentBlockQualifier.perViewNV; + if (currentBlockQualifier.perTaskNV) + memberQualifier.perTaskNV = currentBlockQualifier.perTaskNV; +#endif + if ((currentBlockQualifier.storage == EvqUniform || currentBlockQualifier.storage == EvqBuffer) && (memberQualifier.isInterpolation() || memberQualifier.isAuxiliary())) + error(memberLoc, "member of uniform or buffer block cannot have an auxiliary or interpolation qualifier", memberType.getFieldName().c_str(), ""); + if (memberType.isArray()) + arraySizesCheck(memberLoc, currentBlockQualifier, memberType.getArraySizes(), nullptr, member == typeList.size() - 1); + if (memberQualifier.hasOffset()) { + if (spvVersion.spv == 0) { + profileRequires(memberLoc, ~EEsProfile, 440, E_GL_ARB_enhanced_layouts, "\"offset\" on block member"); + profileRequires(memberLoc, EEsProfile, 300, E_GL_ARB_enhanced_layouts, "\"offset\" on block member"); + } + } + + if (memberType.containsOpaque()) + error(memberLoc, "member of block cannot be or contain a sampler, image, or atomic_uint type", typeList[member].type->getFieldName().c_str(), ""); + + if (memberType.containsCoopMat()) + error(memberLoc, "member of block cannot be or contain a cooperative matrix type", typeList[member].type->getFieldName().c_str(), ""); + } + + // This might be a redeclaration of a built-in block. If so, redeclareBuiltinBlock() will + // do all the rest. + if (! symbolTable.atBuiltInLevel() && builtInName(*blockName)) { + redeclareBuiltinBlock(loc, typeList, *blockName, instanceName, arraySizes); + return; + } + + // Not a redeclaration of a built-in; check that all names are user names. 
+ reservedErrorCheck(loc, *blockName); + if (instanceName) + reservedErrorCheck(loc, *instanceName); + for (unsigned int member = 0; member < typeList.size(); ++member) + reservedErrorCheck(typeList[member].loc, typeList[member].type->getFieldName()); + + // Make default block qualification, and adjust the member qualifications + + TQualifier defaultQualification; + switch (currentBlockQualifier.storage) { + case EvqUniform: defaultQualification = globalUniformDefaults; break; + case EvqBuffer: defaultQualification = globalBufferDefaults; break; + case EvqVaryingIn: defaultQualification = globalInputDefaults; break; + case EvqVaryingOut: defaultQualification = globalOutputDefaults; break; + default: defaultQualification.clear(); break; + } + + // Special case for "push_constant uniform", which has a default of std430, + // contrary to normal uniform defaults, and can't have a default tracked for it. + if ((currentBlockQualifier.isPushConstant() && !currentBlockQualifier.hasPacking()) || + (currentBlockQualifier.isShaderRecord() && !currentBlockQualifier.hasPacking())) + currentBlockQualifier.layoutPacking = ElpStd430; + + // Special case for "taskNV in/out", which has a default of std430, + if (currentBlockQualifier.isTaskMemory() && !currentBlockQualifier.hasPacking()) + currentBlockQualifier.layoutPacking = ElpStd430; + + // fix and check for member layout qualifiers + + mergeObjectLayoutQualifiers(defaultQualification, currentBlockQualifier, true); + + // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts." + if (currentBlockQualifier.hasAlign()) { + if (defaultQualification.layoutPacking != ElpStd140 && + defaultQualification.layoutPacking != ElpStd430 && + defaultQualification.layoutPacking != ElpScalar) { + error(loc, "can only be used with std140, std430, or scalar layout packing", "align", ""); + defaultQualification.layoutAlign = -1; + } + } + + bool memberWithLocation = false; + bool memberWithoutLocation = false; + bool memberWithPerViewQualifier = false; + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; +#ifndef GLSLANG_WEB + if (memberQualifier.hasStream()) { + if (defaultQualification.layoutStream != memberQualifier.layoutStream) + error(memberLoc, "member cannot contradict block", "stream", ""); + } + + // "This includes a block's inheritance of the + // current global default buffer, a block member's inheritance of the block's + // buffer, and the requirement that any *xfb_buffer* declared on a block + // member must match the buffer inherited from the block." 
+ if (memberQualifier.hasXfbBuffer()) { + if (defaultQualification.layoutXfbBuffer != memberQualifier.layoutXfbBuffer) + error(memberLoc, "member cannot contradict block (or what block inherited from global)", "xfb_buffer", ""); + } +#endif + + if (memberQualifier.hasPacking()) + error(memberLoc, "member of block cannot have a packing layout qualifier", typeList[member].type->getFieldName().c_str(), ""); + if (memberQualifier.hasLocation()) { + const char* feature = "location on block member"; + switch (currentBlockQualifier.storage) { +#ifndef GLSLANG_WEB + case EvqVaryingIn: + case EvqVaryingOut: + requireProfile(memberLoc, ECoreProfile | ECompatibilityProfile | EEsProfile, feature); + profileRequires(memberLoc, ECoreProfile | ECompatibilityProfile, 440, E_GL_ARB_enhanced_layouts, feature); + profileRequires(memberLoc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, feature); + memberWithLocation = true; + break; +#endif + default: + error(memberLoc, "can only use in an in/out block", feature, ""); + break; + } + } else + memberWithoutLocation = true; + + // "The offset qualifier can only be used on block members of blocks declared with std140 or std430 layouts." + // "The align qualifier can only be used on blocks or block members, and only for blocks declared with std140 or std430 layouts." + if (memberQualifier.hasAlign() || memberQualifier.hasOffset()) { + if (defaultQualification.layoutPacking != ElpStd140 && + defaultQualification.layoutPacking != ElpStd430 && + defaultQualification.layoutPacking != ElpScalar) + error(memberLoc, "can only be used with std140, std430, or scalar layout packing", "offset/align", ""); + } + + if (memberQualifier.isPerView()) { + memberWithPerViewQualifier = true; + } + + TQualifier newMemberQualification = defaultQualification; + mergeQualifiers(memberLoc, newMemberQualification, memberQualifier, false); + memberQualifier = newMemberQualification; + } + + layoutMemberLocationArrayCheck(loc, memberWithLocation, arraySizes); + +#ifndef GLSLANG_WEB + // Ensure that the block has an XfbBuffer assigned. This is needed + // because if the block has a XfbOffset assigned, then it is + // assumed that it has implicitly assigned the current global + // XfbBuffer, and because it's members need to be assigned a + // XfbOffset if they lack it. 
+ if (currentBlockQualifier.storage == EvqVaryingOut && globalOutputDefaults.hasXfbBuffer()) { + if (!currentBlockQualifier.hasXfbBuffer() && currentBlockQualifier.hasXfbOffset()) + currentBlockQualifier.layoutXfbBuffer = globalOutputDefaults.layoutXfbBuffer; + } +#endif + + // Process the members + fixBlockLocations(loc, currentBlockQualifier, typeList, memberWithLocation, memberWithoutLocation); + fixXfbOffsets(currentBlockQualifier, typeList); + fixBlockUniformOffsets(currentBlockQualifier, typeList); + fixBlockUniformLayoutMatrix(currentBlockQualifier, &typeList, nullptr); + fixBlockUniformLayoutPacking(currentBlockQualifier, &typeList, nullptr); + for (unsigned int member = 0; member < typeList.size(); ++member) + layoutTypeCheck(typeList[member].loc, *typeList[member].type); + +#ifndef GLSLANG_WEB + if (memberWithPerViewQualifier) { + for (unsigned int member = 0; member < typeList.size(); ++member) { + checkAndResizeMeshViewDim(typeList[member].loc, *typeList[member].type, /*isBlockMember*/ true); + } + } +#endif + + // reverse merge, so that currentBlockQualifier now has all layout information + // (can't use defaultQualification directly, it's missing other non-layout-default-class qualifiers) + mergeObjectLayoutQualifiers(currentBlockQualifier, defaultQualification, true); + + // + // Build and add the interface block as a new type named 'blockName' + // + + TType blockType(&typeList, *blockName, currentBlockQualifier); + if (arraySizes != nullptr) + blockType.transferArraySizes(arraySizes); + +#ifndef GLSLANG_WEB + if (arraySizes == nullptr) + ioArrayCheck(loc, blockType, instanceName ? *instanceName : *blockName); + if (currentBlockQualifier.hasBufferReference()) { + + if (currentBlockQualifier.storage != EvqBuffer) + error(loc, "can only be used with buffer", "buffer_reference", ""); + + // Create the block reference type. If it was forward-declared, detect that + // as a referent struct type with no members. Replace the referent type with + // blockType. + TType blockNameType(EbtReference, blockType, *blockName); + TVariable* blockNameVar = new TVariable(blockName, blockNameType, true); + if (! symbolTable.insert(*blockNameVar)) { + TSymbol* existingName = symbolTable.find(*blockName); + if (existingName->getType().isReference() && + existingName->getType().getReferentType()->getStruct() && + existingName->getType().getReferentType()->getStruct()->size() == 0 && + existingName->getType().getQualifier().storage == blockType.getQualifier().storage) { + existingName->getType().getReferentType()->deepCopy(blockType); + } else { + error(loc, "block name cannot be redefined", blockName->c_str(), ""); + } + } + if (!instanceName) { + return; + } + } else +#endif + { + // + // Don't make a user-defined type out of block name; that will cause an error + // if the same block name gets reused in a different interface. + // + // "Block names have no other use within a shader + // beyond interface matching; it is a compile-time error to use a block name at global scope for anything + // other than as a block name (e.g., use of a block name for a global variable name or function name is + // currently reserved)." + // + // Use the symbol table to prevent normal reuse of the block's name, as a variable entry, + // whose type is EbtBlock, but without all the structure; that will come from the type + // the instances point to. + // + TType blockNameType(EbtBlock, blockType.getQualifier().storage); + TVariable* blockNameVar = new TVariable(blockName, blockNameType); + if (! 
symbolTable.insert(*blockNameVar)) { + TSymbol* existingName = symbolTable.find(*blockName); + if (existingName->getType().getBasicType() == EbtBlock) { + if (existingName->getType().getQualifier().storage == blockType.getQualifier().storage) { + error(loc, "Cannot reuse block name within the same interface:", blockName->c_str(), blockType.getStorageQualifierString()); + return; + } + } else { + error(loc, "block name cannot redefine a non-block name", blockName->c_str(), ""); + return; + } + } + } + + // Add the variable, as anonymous or named instanceName. + // Make an anonymous variable if no name was provided. + if (! instanceName) + instanceName = NewPoolTString(""); + + TVariable& variable = *new TVariable(instanceName, blockType); + if (! symbolTable.insert(variable)) { + if (*instanceName == "") + error(loc, "nameless block contains a member that already has a name at global scope", blockName->c_str(), ""); + else + error(loc, "block instance name redefinition", variable.getName().c_str(), ""); + + return; + } + + // Check for general layout qualifier errors + layoutObjectCheck(loc, variable); + +#ifndef GLSLANG_WEB + // fix up + if (isIoResizeArray(blockType)) { + ioArraySymbolResizeList.push_back(&variable); + checkIoArraysConsistency(loc, true); + } else + fixIoArraySize(loc, variable.getWritableType()); +#endif + + // Save it in the AST for linker use. + trackLinkage(variable); +} + +// Do all block-declaration checking regarding the combination of in/out/uniform/buffer +// with a particular stage. +void TParseContext::blockStageIoCheck(const TSourceLoc& loc, const TQualifier& qualifier) +{ + const char *extsrt[2] = { E_GL_NV_ray_tracing, E_GL_EXT_ray_tracing }; + switch (qualifier.storage) { + case EvqUniform: + profileRequires(loc, EEsProfile, 300, nullptr, "uniform block"); + profileRequires(loc, ENoProfile, 140, E_GL_ARB_uniform_buffer_object, "uniform block"); + if (currentBlockQualifier.layoutPacking == ElpStd430 && ! currentBlockQualifier.isPushConstant()) + requireExtensions(loc, 1, &E_GL_EXT_scalar_block_layout, "std430 requires the buffer storage qualifier"); + break; + case EvqBuffer: + requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "buffer block"); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_shader_storage_buffer_object, "buffer block"); + profileRequires(loc, EEsProfile, 310, nullptr, "buffer block"); + break; + case EvqVaryingIn: + profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "input block"); + // It is a compile-time error to have an input block in a vertex shader or an output block in a fragment shader + // "Compute shaders do not permit user-defined input variables..." + requireStage(loc, (EShLanguageMask)(EShLangTessControlMask|EShLangTessEvaluationMask|EShLangGeometryMask| + EShLangFragmentMask|EShLangMeshNVMask), "input block"); + if (language == EShLangFragment) { + profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "fragment input block"); + } else if (language == EShLangMeshNV && ! 
qualifier.isTaskMemory()) { + error(loc, "input blocks cannot be used in a mesh shader", "out", ""); + } + break; + case EvqVaryingOut: + profileRequires(loc, ~EEsProfile, 150, E_GL_ARB_separate_shader_objects, "output block"); + requireStage(loc, (EShLanguageMask)(EShLangVertexMask|EShLangTessControlMask|EShLangTessEvaluationMask| + EShLangGeometryMask|EShLangMeshNVMask|EShLangTaskNVMask), "output block"); + // ES 310 can have a block before shader_io is turned on, so skip this test for built-ins + if (language == EShLangVertex && ! parsingBuiltins) { + profileRequires(loc, EEsProfile, 320, Num_AEP_shader_io_blocks, AEP_shader_io_blocks, "vertex output block"); + } else if (language == EShLangMeshNV && qualifier.isTaskMemory()) { + error(loc, "can only use on input blocks in mesh shader", "taskNV", ""); + } else if (language == EShLangTaskNV && ! qualifier.isTaskMemory()) { + error(loc, "output blocks cannot be used in a task shader", "out", ""); + } + break; +#ifndef GLSLANG_WEB + case EvqPayload: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadNV block"); + requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask), + "rayPayloadNV block"); + break; + case EvqPayloadIn: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "rayPayloadInNV block"); + requireStage(loc, (EShLanguageMask)(EShLangAnyHitMask | EShLangClosestHitMask | EShLangMissMask), + "rayPayloadInNV block"); + break; + case EvqHitAttr: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "hitAttributeNV block"); + requireStage(loc, (EShLanguageMask)(EShLangIntersectMask | EShLangAnyHitMask | EShLangClosestHitMask), "hitAttributeNV block"); + break; + case EvqCallableData: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataNV block"); + requireStage(loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), + "callableDataNV block"); + break; + case EvqCallableDataIn: + profileRequires(loc, ~EEsProfile, 460, 2, extsrt, "callableDataInNV block"); + requireStage(loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV block"); + break; +#endif + default: + error(loc, "only uniform, buffer, in, or out blocks are supported", blockName->c_str(), ""); + break; + } +} + +// Do all block-declaration checking regarding its qualifiers. +void TParseContext::blockQualifierCheck(const TSourceLoc& loc, const TQualifier& qualifier, bool /*instanceName*/) +{ + // The 4.5 specification says: + // + // interface-block : + // layout-qualifieropt interface-qualifier block-name { member-list } instance-nameopt ; + // + // interface-qualifier : + // in + // out + // patch in + // patch out + // uniform + // buffer + // + // Note however memory qualifiers aren't included, yet the specification also says + // + // "...memory qualifiers may also be used in the declaration of shader storage blocks..." 
+ + if (qualifier.isInterpolation()) + error(loc, "cannot use interpolation qualifiers on an interface block", "flat/smooth/noperspective", ""); + if (qualifier.centroid) + error(loc, "cannot use centroid qualifier on an interface block", "centroid", ""); + if (qualifier.isSample()) + error(loc, "cannot use sample qualifier on an interface block", "sample", ""); + if (qualifier.invariant) + error(loc, "cannot use invariant qualifier on an interface block", "invariant", ""); + if (qualifier.isPushConstant()) + intermediate.addPushConstantCount(); + if (qualifier.isShaderRecord()) + intermediate.addShaderRecordCount(); + if (qualifier.isTaskMemory()) + intermediate.addTaskNVCount(); +} + +// +// "For a block, this process applies to the entire block, or until the first member +// is reached that has a location layout qualifier. When a block member is declared with a location +// qualifier, its location comes from that qualifier: The member's location qualifier overrides the block-level +// declaration. Subsequent members are again assigned consecutive locations, based on the newest location, +// until the next member declared with a location qualifier. The values used for locations do not have to be +// declared in increasing order." +void TParseContext::fixBlockLocations(const TSourceLoc& loc, TQualifier& qualifier, TTypeList& typeList, bool memberWithLocation, bool memberWithoutLocation) +{ + // "If a block has no block-level location layout qualifier, it is required that either all or none of its members + // have a location layout qualifier, or a compile-time error results." + if (! qualifier.hasLocation() && memberWithLocation && memberWithoutLocation) + error(loc, "either the block needs a location, or all members need a location, or no members have a location", "location", ""); + else { + if (memberWithLocation) { + // remove any block-level location and make it per *every* member + int nextLocation = 0; // by the rule above, initial value is not relevant + if (qualifier.hasAnyLocation()) { + nextLocation = qualifier.layoutLocation; + qualifier.layoutLocation = TQualifier::layoutLocationEnd; + if (qualifier.hasComponent()) { + // "It is a compile-time error to apply the *component* qualifier to a ... block" + error(loc, "cannot apply to a block", "component", ""); + } + if (qualifier.hasIndex()) { + error(loc, "cannot apply to a block", "index", ""); + } + } + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + if (! memberQualifier.hasLocation()) { + if (nextLocation >= (int)TQualifier::layoutLocationEnd) + error(memberLoc, "location is too large", "location", ""); + memberQualifier.layoutLocation = nextLocation; + memberQualifier.layoutComponent = TQualifier::layoutComponentEnd; + } + nextLocation = memberQualifier.layoutLocation + intermediate.computeTypeLocationSize( + *typeList[member].type, language); + } + } + } +} + +void TParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList) +{ +#ifndef GLSLANG_WEB + // "If a block is qualified with xfb_offset, all its + // members are assigned transform feedback buffer offsets. If a block is not qualified with xfb_offset, any + // members of that block not qualified with an xfb_offset will not be assigned transform feedback buffer + // offsets." + + if (! qualifier.hasXfbBuffer() || ! 
qualifier.hasXfbOffset()) + return; + + int nextOffset = qualifier.layoutXfbOffset; + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + bool contains64BitType = false; + bool contains32BitType = false; + bool contains16BitType = false; + int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType); + // see if we need to auto-assign an offset to this member + if (! memberQualifier.hasXfbOffset()) { + // "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8" + if (contains64BitType) + RoundToPow2(nextOffset, 8); + else if (contains32BitType) + RoundToPow2(nextOffset, 4); + else if (contains16BitType) + RoundToPow2(nextOffset, 2); + memberQualifier.layoutXfbOffset = nextOffset; + } else + nextOffset = memberQualifier.layoutXfbOffset; + nextOffset += memberSize; + } + + // The above gave all block members an offset, so we can take it off the block now, + // which will avoid double counting the offset usage. + qualifier.layoutXfbOffset = TQualifier::layoutXfbOffsetEnd; +#endif +} + +// Calculate and save the offset of each block member, using the recursively +// defined block offset rules and the user-provided offset and align. +// +// Also, compute and save the total size of the block. For the block's size, arrayness +// is not taken into account, as each element is backed by a separate buffer. +// +void TParseContext::fixBlockUniformOffsets(TQualifier& qualifier, TTypeList& typeList) +{ + if (!qualifier.isUniformOrBuffer() && !qualifier.isTaskMemory()) + return; + if (qualifier.layoutPacking != ElpStd140 && qualifier.layoutPacking != ElpStd430 && qualifier.layoutPacking != ElpScalar) + return; + + int offset = 0; + int memberSize; + for (unsigned int member = 0; member < typeList.size(); ++member) { + TQualifier& memberQualifier = typeList[member].type->getQualifier(); + const TSourceLoc& memberLoc = typeList[member].loc; + + // "When align is applied to an array, it effects only the start of the array, not the array's internal stride." + + // modify just the children's view of matrix layout, if there is one for this member + TLayoutMatrix subMatrixLayout = typeList[member].type->getQualifier().layoutMatrix; + int dummyStride; + int memberAlignment = intermediate.getMemberAlignment(*typeList[member].type, memberSize, dummyStride, qualifier.layoutPacking, + subMatrixLayout != ElmNone ? subMatrixLayout == ElmRowMajor : qualifier.layoutMatrix == ElmRowMajor); + if (memberQualifier.hasOffset()) { + // "The specified offset must be a multiple + // of the base alignment of the type of the block member it qualifies, or a compile-time error results." + if (! IsMultipleOfPow2(memberQualifier.layoutOffset, memberAlignment)) + error(memberLoc, "must be a multiple of the member's alignment", "offset", ""); + + // GLSL: "It is a compile-time error to specify an offset that is smaller than the offset of the previous + // member in the block or that lies within the previous member of the block" + if (spvVersion.spv == 0) { + if (memberQualifier.layoutOffset < offset) + error(memberLoc, "cannot lie in previous members", "offset", ""); + + // "The offset qualifier forces the qualified member to start at or after the specified + // integral-constant expression, which will be its byte offset from the beginning of the buffer. 
+ // "The actual offset of a member is computed as + // follows: If offset was declared, start with that offset, otherwise start with the next available offset." + offset = std::max(offset, memberQualifier.layoutOffset); + } else { + // TODO: Vulkan: "It is a compile-time error to have any offset, explicit or assigned, + // that lies within another member of the block." + + offset = memberQualifier.layoutOffset; + } + } + + // "The actual alignment of a member will be the greater of the specified align alignment and the standard + // (e.g., std140) base alignment for the member's type." + if (memberQualifier.hasAlign()) + memberAlignment = std::max(memberAlignment, memberQualifier.layoutAlign); + + // "If the resulting offset is not a multiple of the actual alignment, + // increase it to the first offset that is a multiple of + // the actual alignment." + RoundToPow2(offset, memberAlignment); + typeList[member].type->getQualifier().layoutOffset = offset; + offset += memberSize; + } +} + +// +// Spread LayoutMatrix to uniform block member, if a uniform block member is a struct, +// we need spread LayoutMatrix to this struct member too. and keep this rule for recursive. +// +void TParseContext::fixBlockUniformLayoutMatrix(TQualifier& qualifier, TTypeList* originTypeList, + TTypeList* tmpTypeList) +{ + assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size()); + for (unsigned int member = 0; member < originTypeList->size(); ++member) { + if (qualifier.layoutPacking != ElpNone) { + if (tmpTypeList == nullptr) { + if (((*originTypeList)[member].type->isMatrix() || + (*originTypeList)[member].type->getBasicType() == EbtStruct) && + (*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) { + (*originTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix; + } + } else { + if (((*tmpTypeList)[member].type->isMatrix() || + (*tmpTypeList)[member].type->getBasicType() == EbtStruct) && + (*tmpTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) { + (*tmpTypeList)[member].type->getQualifier().layoutMatrix = qualifier.layoutMatrix; + } + } + } + + if ((*originTypeList)[member].type->getBasicType() == EbtStruct) { + TQualifier* memberQualifier = nullptr; + // block member can be declare a matrix style, so it should be update to the member's style + if ((*originTypeList)[member].type->getQualifier().layoutMatrix == ElmNone) { + memberQualifier = &qualifier; + } else { + memberQualifier = &((*originTypeList)[member].type->getQualifier()); + } + + const TType* tmpType = tmpTypeList == nullptr ? + (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type; + + fixBlockUniformLayoutMatrix(*memberQualifier, (*originTypeList)[member].type->getWritableStruct(), + tmpType->getWritableStruct()); + + const TTypeList* structure = recordStructCopy(matrixFixRecord, (*originTypeList)[member].type, tmpType); + + if (tmpTypeList == nullptr) { + (*originTypeList)[member].type->setStruct(const_cast(structure)); + } + if (tmpTypeList != nullptr) { + (*tmpTypeList)[member].type->setStruct(const_cast(structure)); + } + } + } +} + +// +// Spread LayoutPacking to block member, if a block member is a struct, we need spread LayoutPacking to +// this struct member too. and keep this rule for recursive. 
+// +void TParseContext::fixBlockUniformLayoutPacking(TQualifier& qualifier, TTypeList* originTypeList, + TTypeList* tmpTypeList) +{ + assert(tmpTypeList == nullptr || originTypeList->size() == tmpTypeList->size()); + for (unsigned int member = 0; member < originTypeList->size(); ++member) { + if (qualifier.layoutPacking != ElpNone) { + if (tmpTypeList == nullptr) { + if ((*originTypeList)[member].type->getQualifier().layoutPacking == ElpNone) { + (*originTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking; + } + } else { + if ((*tmpTypeList)[member].type->getQualifier().layoutPacking == ElpNone) { + (*tmpTypeList)[member].type->getQualifier().layoutPacking = qualifier.layoutPacking; + } + } + } + + if ((*originTypeList)[member].type->getBasicType() == EbtStruct) { + // Deep copy the type in pool. + // Because, struct use in different block may have different layout qualifier. + // We have to new a object to distinguish between them. + const TType* tmpType = tmpTypeList == nullptr ? + (*originTypeList)[member].type->clone() : (*tmpTypeList)[member].type; + + fixBlockUniformLayoutPacking(qualifier, (*originTypeList)[member].type->getWritableStruct(), + tmpType->getWritableStruct()); + + const TTypeList* structure = recordStructCopy(packingFixRecord, (*originTypeList)[member].type, tmpType); + + if (tmpTypeList == nullptr) { + (*originTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure)); + } + if (tmpTypeList != nullptr) { + (*tmpTypeList)[member].type->setStruct(const_cast<TTypeList*>(structure)); + } + } + } +} + +// For an identifier that is already declared, add more qualification to it. +void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, const TString& identifier) +{ + TSymbol* symbol = symbolTable.find(identifier); + + // A forward declaration of a block reference looks to the grammar like adding + // a qualifier to an existing symbol. Detect this and create the block reference + // type with an empty type list, which will be filled in later in + // TParseContext::declareBlock. + if (!symbol && qualifier.hasBufferReference()) { + TTypeList typeList; + TType blockType(&typeList, identifier, qualifier);; + TType blockNameType(EbtReference, blockType, identifier); + TVariable* blockNameVar = new TVariable(&identifier, blockNameType, true); + if (! symbolTable.insert(*blockNameVar)) { + error(loc, "block name cannot redefine a non-block name", blockName->c_str(), ""); + } + return; + } + + if (! symbol) { + error(loc, "identifier not previously declared", identifier.c_str(), ""); + return; + } + if (symbol->getAsFunction()) { + error(loc, "cannot re-qualify a function name", identifier.c_str(), ""); + return; + } + + if (qualifier.isAuxiliary() || + qualifier.isMemory() || + qualifier.isInterpolation() || + qualifier.hasLayout() || + qualifier.storage != EvqTemporary || + qualifier.precision != EpqNone) { + error(loc, "cannot add storage, auxiliary, memory, interpolation, layout, or precision qualifier to an existing variable", identifier.c_str(), ""); + return; + } + + // For read-only built-ins, add a new symbol for holding the modified qualifier. 
+ // This will bring up an entire block, if a block type has to be modified (e.g., gl_Position inside a block) + if (symbol->isReadOnly()) + symbol = symbolTable.copyUp(symbol); + + if (qualifier.invariant) { + if (intermediate.inIoAccessed(identifier)) + error(loc, "cannot change qualification after use", "invariant", ""); + symbol->getWritableType().getQualifier().invariant = true; + invariantCheck(loc, symbol->getType().getQualifier()); + } else if (qualifier.isNoContraction()) { + if (intermediate.inIoAccessed(identifier)) + error(loc, "cannot change qualification after use", "precise", ""); + symbol->getWritableType().getQualifier().setNoContraction(); + } else if (qualifier.specConstant) { + symbol->getWritableType().getQualifier().makeSpecConstant(); + if (qualifier.hasSpecConstantId()) + symbol->getWritableType().getQualifier().layoutSpecConstantId = qualifier.layoutSpecConstantId; + } else + warn(loc, "unknown requalification", "", ""); +} + +void TParseContext::addQualifierToExisting(const TSourceLoc& loc, TQualifier qualifier, TIdentifierList& identifiers) +{ + for (unsigned int i = 0; i < identifiers.size(); ++i) + addQualifierToExisting(loc, qualifier, *identifiers[i]); +} + +// Make sure 'invariant' isn't being applied to a non-allowed object. +void TParseContext::invariantCheck(const TSourceLoc& loc, const TQualifier& qualifier) +{ + if (! qualifier.invariant) + return; + + bool pipeOut = qualifier.isPipeOutput(); + bool pipeIn = qualifier.isPipeInput(); + if (version >= 300 || (!isEsProfile() && version >= 420)) { + if (! pipeOut) + error(loc, "can only apply to an output", "invariant", ""); + } else { + if ((language == EShLangVertex && pipeIn) || (! pipeOut && ! pipeIn)) + error(loc, "can only apply to an output, or to an input in a non-vertex stage\n", "invariant", ""); + } +} + +// +// Updating default qualifier for the case of a declaration with just a qualifier, +// no type, block, or identifier. +// +void TParseContext::updateStandaloneQualifierDefaults(const TSourceLoc& loc, const TPublicType& publicType) +{ +#ifndef GLSLANG_WEB + if (publicType.shaderQualifiers.vertices != TQualifier::layoutNotSet) { + assert(language == EShLangTessControl || language == EShLangGeometry || language == EShLangMeshNV); + const char* id = (language == EShLangTessControl) ? "vertices" : "max_vertices"; + + if (publicType.qualifier.storage != EvqVaryingOut) + error(loc, "can only apply to 'out'", id, ""); + if (! intermediate.setVertices(publicType.shaderQualifiers.vertices)) + error(loc, "cannot change previously set layout value", id, ""); + + if (language == EShLangTessControl) + checkIoArraysConsistency(loc); + } + if (publicType.shaderQualifiers.primitives != TQualifier::layoutNotSet) { + assert(language == EShLangMeshNV); + const char* id = "max_primitives"; + + if (publicType.qualifier.storage != EvqVaryingOut) + error(loc, "can only apply to 'out'", id, ""); + if (! intermediate.setPrimitives(publicType.shaderQualifiers.primitives)) + error(loc, "cannot change previously set layout value", id, ""); + } + if (publicType.shaderQualifiers.invocations != TQualifier::layoutNotSet) { + if (publicType.qualifier.storage != EvqVaryingIn) + error(loc, "can only apply to 'in'", "invocations", ""); + if (! 
intermediate.setInvocations(publicType.shaderQualifiers.invocations)) + error(loc, "cannot change previously set layout value", "invocations", ""); + } + if (publicType.shaderQualifiers.geometry != ElgNone) { + if (publicType.qualifier.storage == EvqVaryingIn) { + switch (publicType.shaderQualifiers.geometry) { + case ElgPoints: + case ElgLines: + case ElgLinesAdjacency: + case ElgTriangles: + case ElgTrianglesAdjacency: + case ElgQuads: + case ElgIsolines: + if (language == EShLangMeshNV) { + error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + } + if (intermediate.setInputPrimitive(publicType.shaderQualifiers.geometry)) { + if (language == EShLangGeometry) + checkIoArraysConsistency(loc); + } else + error(loc, "cannot change previously set input primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + default: + error(loc, "cannot apply to input", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + } + } else if (publicType.qualifier.storage == EvqVaryingOut) { + switch (publicType.shaderQualifiers.geometry) { + case ElgLines: + case ElgTriangles: + if (language != EShLangMeshNV) { + error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + } + // Fall through + case ElgPoints: + case ElgLineStrip: + case ElgTriangleStrip: + if (! intermediate.setOutputPrimitive(publicType.shaderQualifiers.geometry)) + error(loc, "cannot change previously set output primitive", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + break; + default: + error(loc, "cannot apply to 'out'", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), ""); + } + } else + error(loc, "cannot apply to:", TQualifier::getGeometryString(publicType.shaderQualifiers.geometry), GetStorageQualifierString(publicType.qualifier.storage)); + } + if (publicType.shaderQualifiers.spacing != EvsNone) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! intermediate.setVertexSpacing(publicType.shaderQualifiers.spacing)) + error(loc, "cannot change previously set vertex spacing", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), ""); + } else + error(loc, "can only apply to 'in'", TQualifier::getVertexSpacingString(publicType.shaderQualifiers.spacing), ""); + } + if (publicType.shaderQualifiers.order != EvoNone) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! intermediate.setVertexOrder(publicType.shaderQualifiers.order)) + error(loc, "cannot change previously set vertex order", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), ""); + } else + error(loc, "can only apply to 'in'", TQualifier::getVertexOrderString(publicType.shaderQualifiers.order), ""); + } + if (publicType.shaderQualifiers.pointMode) { + if (publicType.qualifier.storage == EvqVaryingIn) + intermediate.setPointMode(); + else + error(loc, "can only apply to 'in'", "point_mode", ""); + } +#endif + for (int i = 0; i < 3; ++i) { + if (publicType.shaderQualifiers.localSizeNotDefault[i]) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! 
intermediate.setLocalSize(i, publicType.shaderQualifiers.localSize[i])) + error(loc, "cannot change previously set size", "local_size", ""); + else { + int max = 0; + if (language == EShLangCompute) { + switch (i) { + case 0: max = resources.maxComputeWorkGroupSizeX; break; + case 1: max = resources.maxComputeWorkGroupSizeY; break; + case 2: max = resources.maxComputeWorkGroupSizeZ; break; + default: break; + } + if (intermediate.getLocalSize(i) > (unsigned int)max) + error(loc, "too large; see gl_MaxComputeWorkGroupSize", "local_size", ""); + } +#ifndef GLSLANG_WEB + else if (language == EShLangMeshNV) { + switch (i) { + case 0: max = resources.maxMeshWorkGroupSizeX_NV; break; + case 1: max = resources.maxMeshWorkGroupSizeY_NV; break; + case 2: max = resources.maxMeshWorkGroupSizeZ_NV; break; + default: break; + } + if (intermediate.getLocalSize(i) > (unsigned int)max) + error(loc, "too large; see gl_MaxMeshWorkGroupSizeNV", "local_size", ""); + } else if (language == EShLangTaskNV) { + switch (i) { + case 0: max = resources.maxTaskWorkGroupSizeX_NV; break; + case 1: max = resources.maxTaskWorkGroupSizeY_NV; break; + case 2: max = resources.maxTaskWorkGroupSizeZ_NV; break; + default: break; + } + if (intermediate.getLocalSize(i) > (unsigned int)max) + error(loc, "too large; see gl_MaxTaskWorkGroupSizeNV", "local_size", ""); + } +#endif + else { + assert(0); + } + + // Fix the existing constant gl_WorkGroupSize with this new information. + TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize"); + if (workGroupSize != nullptr) + workGroupSize->getWritableConstArray()[i].setUConst(intermediate.getLocalSize(i)); + } + } else + error(loc, "can only apply to 'in'", "local_size", ""); + } + if (publicType.shaderQualifiers.localSizeSpecId[i] != TQualifier::layoutNotSet) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (! 
intermediate.setLocalSizeSpecId(i, publicType.shaderQualifiers.localSizeSpecId[i])) + error(loc, "cannot change previously set size", "local_size", ""); + } else + error(loc, "can only apply to 'in'", "local_size id", ""); + // Set the workgroup built-in variable as a specialization constant + TVariable* workGroupSize = getEditableVariable("gl_WorkGroupSize"); + if (workGroupSize != nullptr) + workGroupSize->getWritableType().getQualifier().specConstant = true; + } + } + +#ifndef GLSLANG_WEB + if (publicType.shaderQualifiers.earlyFragmentTests) { + if (publicType.qualifier.storage == EvqVaryingIn) + intermediate.setEarlyFragmentTests(); + else + error(loc, "can only apply to 'in'", "early_fragment_tests", ""); + } + if (publicType.shaderQualifiers.postDepthCoverage) { + if (publicType.qualifier.storage == EvqVaryingIn) + intermediate.setPostDepthCoverage(); + else + error(loc, "can only apply to 'in'", "post_coverage_coverage", ""); + } + if (publicType.shaderQualifiers.hasBlendEquation()) { + if (publicType.qualifier.storage != EvqVaryingOut) + error(loc, "can only apply to 'out'", "blend equation", ""); + } + if (publicType.shaderQualifiers.interlockOrdering) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if (!intermediate.setInterlockOrdering(publicType.shaderQualifiers.interlockOrdering)) + error(loc, "cannot change previously set fragment shader interlock ordering", TQualifier::getInterlockOrderingString(publicType.shaderQualifiers.interlockOrdering), ""); + } + else + error(loc, "can only apply to 'in'", TQualifier::getInterlockOrderingString(publicType.shaderQualifiers.interlockOrdering), ""); + } + + if (publicType.shaderQualifiers.layoutDerivativeGroupQuads && + publicType.shaderQualifiers.layoutDerivativeGroupLinear) { + error(loc, "cannot be both specified", "derivative_group_quadsNV and derivative_group_linearNV", ""); + } + + if (publicType.shaderQualifiers.layoutDerivativeGroupQuads) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if ((intermediate.getLocalSize(0) & 1) || + (intermediate.getLocalSize(1) & 1)) + error(loc, "requires local_size_x and local_size_y to be multiple of two", "derivative_group_quadsNV", ""); + else + intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupQuads); + } + else + error(loc, "can only apply to 'in'", "derivative_group_quadsNV", ""); + } + if (publicType.shaderQualifiers.layoutDerivativeGroupLinear) { + if (publicType.qualifier.storage == EvqVaryingIn) { + if((intermediate.getLocalSize(0) * + intermediate.getLocalSize(1) * + intermediate.getLocalSize(2)) % 4 != 0) + error(loc, "requires total group size to be multiple of four", "derivative_group_linearNV", ""); + else + intermediate.setLayoutDerivativeMode(LayoutDerivativeGroupLinear); + } + else + error(loc, "can only apply to 'in'", "derivative_group_linearNV", ""); + } + // Check mesh out array sizes, once all the necessary out qualifiers are defined. 
+ if ((language == EShLangMeshNV) && + (intermediate.getVertices() != TQualifier::layoutNotSet) && + (intermediate.getPrimitives() != TQualifier::layoutNotSet) && + (intermediate.getOutputPrimitive() != ElgNone)) + { + checkIoArraysConsistency(loc); + } + + if (publicType.shaderQualifiers.layoutPrimitiveCulling) { + if (publicType.qualifier.storage != EvqTemporary) + error(loc, "layout qualifier can not have storage qualifiers", "primitive_culling","", ""); + else { + intermediate.setLayoutPrimitiveCulling(); + } + // Exit early as further checks are not valid + return; + } +#endif + const TQualifier& qualifier = publicType.qualifier; + + if (qualifier.isAuxiliary() || + qualifier.isMemory() || + qualifier.isInterpolation() || + qualifier.precision != EpqNone) + error(loc, "cannot use auxiliary, memory, interpolation, or precision qualifier in a default qualifier declaration (declaration with no type)", "qualifier", ""); + + // "The offset qualifier can only be used on block members of blocks..." + // "The align qualifier can only be used on blocks or block members..." + if (qualifier.hasOffset() || + qualifier.hasAlign()) + error(loc, "cannot use offset or align qualifiers in a default qualifier declaration (declaration with no type)", "layout qualifier", ""); + + layoutQualifierCheck(loc, qualifier); + + switch (qualifier.storage) { + case EvqUniform: + if (qualifier.hasMatrix()) + globalUniformDefaults.layoutMatrix = qualifier.layoutMatrix; + if (qualifier.hasPacking()) + globalUniformDefaults.layoutPacking = qualifier.layoutPacking; + break; + case EvqBuffer: + if (qualifier.hasMatrix()) + globalBufferDefaults.layoutMatrix = qualifier.layoutMatrix; + if (qualifier.hasPacking()) + globalBufferDefaults.layoutPacking = qualifier.layoutPacking; + break; + case EvqVaryingIn: + break; + case EvqVaryingOut: +#ifndef GLSLANG_WEB + if (qualifier.hasStream()) + globalOutputDefaults.layoutStream = qualifier.layoutStream; + if (qualifier.hasXfbBuffer()) + globalOutputDefaults.layoutXfbBuffer = qualifier.layoutXfbBuffer; + if (globalOutputDefaults.hasXfbBuffer() && qualifier.hasXfbStride()) { + if (! 
intermediate.setXfbBufferStride(globalOutputDefaults.layoutXfbBuffer, qualifier.layoutXfbStride)) + error(loc, "all stride settings must match for xfb buffer", "xfb_stride", "%d", qualifier.layoutXfbBuffer); + } +#endif + break; + default: + error(loc, "default qualifier requires 'uniform', 'buffer', 'in', or 'out' storage qualification", "", ""); + return; + } + + if (qualifier.hasBinding()) + error(loc, "cannot declare a default, include a type or full declaration", "binding", ""); + if (qualifier.hasAnyLocation()) + error(loc, "cannot declare a default, use a full declaration", "location/component/index", ""); + if (qualifier.hasXfbOffset()) + error(loc, "cannot declare a default, use a full declaration", "xfb_offset", ""); + if (qualifier.isPushConstant()) + error(loc, "cannot declare a default, can only be used on a block", "push_constant", ""); + if (qualifier.hasBufferReference()) + error(loc, "cannot declare a default, can only be used on a block", "buffer_reference", ""); + if (qualifier.hasSpecConstantId()) + error(loc, "cannot declare a default, can only be used on a scalar", "constant_id", ""); + if (qualifier.isShaderRecord()) + error(loc, "cannot declare a default, can only be used on a block", "shaderRecordNV", ""); +} + +// +// Take the sequence of statements that has been built up since the last case/default, +// put it on the list of top-level nodes for the current (inner-most) switch statement, +// and follow that by the case/default we are on now. (See switch topology comment on +// TIntermSwitch.) +// +void TParseContext::wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode) +{ + TIntermSequence* switchSequence = switchSequenceStack.back(); + + if (statements) { + if (switchSequence->size() == 0) + error(statements->getLoc(), "cannot have statements before first case/default label", "switch", ""); + statements->setOperator(EOpSequence); + switchSequence->push_back(statements); + } + if (branchNode) { + // check all previous cases for the same label (or both are 'default') + for (unsigned int s = 0; s < switchSequence->size(); ++s) { + TIntermBranch* prevBranch = (*switchSequence)[s]->getAsBranchNode(); + if (prevBranch) { + TIntermTyped* prevExpression = prevBranch->getExpression(); + TIntermTyped* newExpression = branchNode->getAsBranchNode()->getExpression(); + if (prevExpression == nullptr && newExpression == nullptr) + error(branchNode->getLoc(), "duplicate label", "default", ""); + else if (prevExpression != nullptr && + newExpression != nullptr && + prevExpression->getAsConstantUnion() && + newExpression->getAsConstantUnion() && + prevExpression->getAsConstantUnion()->getConstArray()[0].getIConst() == + newExpression->getAsConstantUnion()->getConstArray()[0].getIConst()) + error(branchNode->getLoc(), "duplicated value", "case", ""); + } + } + switchSequence->push_back(branchNode); + } +} + +// +// Turn the top-level node sequence built up of wrapupSwitchSubsequence9) +// into a switch node. 
+// +TIntermNode* TParseContext::addSwitch(const TSourceLoc& loc, TIntermTyped* expression, TIntermAggregate* lastStatements) +{ + profileRequires(loc, EEsProfile, 300, nullptr, "switch statements"); + profileRequires(loc, ENoProfile, 130, nullptr, "switch statements"); + + wrapupSwitchSubsequence(lastStatements, nullptr); + + if (expression == nullptr || + (expression->getBasicType() != EbtInt && expression->getBasicType() != EbtUint) || + expression->getType().isArray() || expression->getType().isMatrix() || expression->getType().isVector()) + error(loc, "condition must be a scalar integer expression", "switch", ""); + + // If there is nothing to do, drop the switch but still execute the expression + TIntermSequence* switchSequence = switchSequenceStack.back(); + if (switchSequence->size() == 0) + return expression; + + if (lastStatements == nullptr) { + // This was originally an ERROR, because early versions of the specification said + // "it is an error to have no statement between a label and the end of the switch statement." + // The specifications were updated to remove this (being ill-defined what a "statement" was), + // so, this became a warning. However, 3.0 tests still check for the error. + if (isEsProfile() && version <= 300 && ! relaxedErrors()) + error(loc, "last case/default label not followed by statements", "switch", ""); + else + warn(loc, "last case/default label not followed by statements", "switch", ""); + + // emulate a break for error recovery + lastStatements = intermediate.makeAggregate(intermediate.addBranch(EOpBreak, loc)); + lastStatements->setOperator(EOpSequence); + switchSequence->push_back(lastStatements); + } + + TIntermAggregate* body = new TIntermAggregate(EOpSequence); + body->getSequence() = *switchSequenceStack.back(); + body->setLoc(loc); + + TIntermSwitch* switchNode = new TIntermSwitch(expression, body); + switchNode->setLoc(loc); + + return switchNode; +} + +// +// When a struct is used in a block and has its own layout packing or layout matrix, +// record the origin structure of the struct in a map, and record the structure copy in the copy table. +// +const TTypeList* TParseContext::recordStructCopy(TStructRecord& record, const TType* originType, const TType* tmpType) +{ + size_t memberCount = tmpType->getStruct()->size(); + size_t originHash = 0, tmpHash = 0; + std::hash<size_t> hasher; + for (size_t i = 0; i < memberCount; i++) { + size_t originMemberHash = hasher(originType->getStruct()->at(i).type->getQualifier().layoutPacking + + originType->getStruct()->at(i).type->getQualifier().layoutMatrix); + size_t tmpMemberHash = hasher(tmpType->getStruct()->at(i).type->getQualifier().layoutPacking + + tmpType->getStruct()->at(i).type->getQualifier().layoutMatrix); + originHash = hasher((originHash ^ originMemberHash) << 1); + tmpHash = hasher((tmpHash ^ tmpMemberHash) << 1); + } + const TTypeList* originStruct = originType->getStruct(); + const TTypeList* tmpStruct = tmpType->getStruct(); + if (originHash != tmpHash) { + auto fixRecords = record.find(originStruct); + if (fixRecords != record.end()) { + auto fixRecord = fixRecords->second.find(tmpHash); + if (fixRecord != fixRecords->second.end()) { + return fixRecord->second; + } else { + record[originStruct][tmpHash] = tmpStruct; + return tmpStruct; + } + } else { + record[originStruct] = std::map<size_t, const TTypeList*>(); + record[originStruct][tmpHash] = tmpStruct; + return tmpStruct; + } + } + return originStruct; +} + +} // end namespace glslang + diff --git a/src/third_party/glslang/glslang/MachineIndependent/ParseHelper.h
b/src/third_party/glslang/glslang/MachineIndependent/ParseHelper.h new file mode 100644 index 0000000..0f09ada --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/ParseHelper.h @@ -0,0 +1,534 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// This header defines a two-level parse-helper hierarchy, derived from +// TParseVersions: +// - TParseContextBase: sharable across multiple parsers +// - TParseContext: GLSL specific helper +// + +#ifndef _PARSER_HELPER_INCLUDED_ +#define _PARSER_HELPER_INCLUDED_ + +#include +#include + +#include "parseVersions.h" +#include "../Include/ShHandle.h" +#include "SymbolTable.h" +#include "localintermediate.h" +#include "Scan.h" +#include "attribute.h" + +namespace glslang { + +struct TPragma { + TPragma(bool o, bool d) : optimize(o), debug(d) { } + bool optimize; + bool debug; + TPragmaTable pragmaTable; +}; + +class TScanContext; +class TPpContext; + +typedef std::set TIdSetType; +typedef std::map> TStructRecord; + +// +// Sharable code (as well as what's in TParseVersions) across +// parse helpers. 
+// +class TParseContextBase : public TParseVersions { +public: + TParseContextBase(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, int version, + EProfile profile, const SpvVersion& spvVersion, EShLanguage language, + TInfoSink& infoSink, bool forwardCompatible, EShMessages messages, + const TString* entryPoint = nullptr) + : TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages), + scopeMangler("::"), + symbolTable(symbolTable), + statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0), + currentFunctionType(nullptr), + postEntryPointReturn(false), + contextPragma(true, false), + beginInvocationInterlockCount(0), endInvocationInterlockCount(0), + parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr), + limits(resources.limits), + globalUniformBlock(nullptr), + globalUniformBinding(TQualifier::layoutBindingEnd), + globalUniformSet(TQualifier::layoutSetEnd) + { + if (entryPoint != nullptr) + sourceEntryPointName = *entryPoint; + } + virtual ~TParseContextBase() { } + +#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL) + virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); + virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); + virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); + virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...); +#endif + + virtual void setLimits(const TBuiltInResource&) = 0; + + void checkIndex(const TSourceLoc&, const TType&, int& index); + + EShLanguage getLanguage() const { return language; } + void setScanContext(TScanContext* c) { scanContext = c; } + TScanContext* getScanContext() const { return scanContext; } + void setPpContext(TPpContext* c) { ppContext = c; } + TPpContext* getPpContext() const { return ppContext; } + + virtual void setLineCallback(const std::function<void(int, int, bool, int, const char*)>& func) { lineCallback = func; } + virtual void setExtensionCallback(const std::function<void(int, const char*, const char*)>& func) { extensionCallback = func; } + virtual void setVersionCallback(const std::function<void(int, int, const char*)>& func) { versionCallback = func; } + virtual void setPragmaCallback(const std::function<void(int, const TVector<TString>&)>& func) { pragmaCallback = func; } + virtual void setErrorCallback(const std::function<void(int, const char*)>& func) { errorCallback = func; } + + virtual void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) = 0; + virtual bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) = 0; + virtual bool lineDirectiveShouldSetNextLine() const = 0; + virtual void handlePragma(const TSourceLoc&, const TVector<TString>&) = 0; + + virtual bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) = 0; + + virtual void notifyVersion(int line, int version, const char* type_string) + { + if (versionCallback) + versionCallback(line, version, type_string); + } + virtual void notifyErrorDirective(int line, const char* error_message) + { + if (errorCallback) + errorCallback(line, error_message); + } + virtual void notifyLineDirective(int curLineNo, int newLineNo, bool hasSource, int sourceNum, const char* sourceName) + { + if (lineCallback) + lineCallback(curLineNo, newLineNo, hasSource, sourceNum, sourceName); + } + virtual void notifyExtensionDirective(int line, const char*
extension, const char* behavior) + { + if (extensionCallback) + extensionCallback(line, extension, behavior); + } + +#ifdef ENABLE_HLSL + // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL) + virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr); +#endif + + // Potentially rename shader entry point function + void renameShaderFunction(TString*& name) const + { + // Replace the entry point name given in the shader with the real entry point name, + // if there is a substitution. + if (name != nullptr && *name == sourceEntryPointName && intermediate.getEntryPointName().size() > 0) + name = NewPoolTString(intermediate.getEntryPointName().c_str()); + } + + virtual bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*); + virtual void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*); + + const char* const scopeMangler; + + // Basic parsing state, easily accessible to the grammar + + TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile + int statementNestingLevel; // 0 if outside all flow control or compound statements + int loopNestingLevel; // 0 if outside all loops + int structNestingLevel; // 0 if outside blocks and structures + int controlFlowNestingLevel; // 0 if outside all flow control + const TType* currentFunctionType; // the return type of the function that's currently being parsed + bool functionReturnsValue; // true if a non-void function has a return + // if inside a function, true if the function is the entry point and this is after a return statement + bool postEntryPointReturn; + // case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting + TList<TIntermSequence*> switchSequenceStack; + // the statementNestingLevel the current switch statement is at, which must match the level of its case statements + TList<int> switchLevel; + struct TPragma contextPragma; + int beginInvocationInterlockCount; + int endInvocationInterlockCount; + +protected: + TParseContextBase(TParseContextBase&); + TParseContextBase& operator=(TParseContextBase&); + + const bool parsingBuiltins; // true if parsing built-in symbols/functions + TVector<TSymbol*> linkageSymbols; // will be transferred to 'linkage', after all editing is done, order preserving + TScanContext* scanContext; + TPpContext* ppContext; + TBuiltInResource resources; + TLimits& limits; + TString sourceEntryPointName; + + // These, if set, will be called when a line, pragma ... is preprocessed. + // They will be called with any parameters to the original directive.
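(Editor's aside, not part of the upstream header.) The members declared next are the hooks the notify*() functions above forward to; an embedder registers them through the set*Callback() methods. A minimal, hedged sketch: the `parseContext` object and the lambda are hypothetical, setVersionCallback() and its signature come from the declarations above, and printf assumes <cstdio>:

    // Hypothetical embedder code: observe every #version directive the preprocessor sees.
    parseContext.setVersionCallback([](int line, int version, const char* profile) {
        // profile may be null when the directive names no profile
        printf("line %d: #version %d %s\n", line, version, profile ? profile : "");
    });

The line, extension, pragma, and error callbacks follow the same pattern through their notify*() counterparts.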
+ std::function<void(int, int, bool, int, const char*)> lineCallback; + std::function<void(int, const TVector<TString>&)> pragmaCallback; + std::function<void(int, int, const char*)> versionCallback; + std::function<void(int, const char*, const char*)> extensionCallback; + std::function<void(int, const char*)> errorCallback; + + // see implementation for detail + const TFunction* selectFunction(const TVector<const TFunction*>, const TFunction&, + std::function<bool(const TType& from, const TType& to, TOperator op, int arg)>, + std::function<bool(const TType& from, const TType& to1, const TType& to2)>, + /* output */ bool& tie); + + virtual void parseSwizzleSelector(const TSourceLoc&, const TString&, int size, + TSwizzleSelectors<TVectorSelector>&); + + // Manage the global uniform block (default uniforms in GLSL, $Global in HLSL) + TVariable* globalUniformBlock; // the actual block, inserted into the symbol table + unsigned int globalUniformBinding; // the block's binding number + unsigned int globalUniformSet; // the block's set number + int firstNewMember; // the index of the first member not yet inserted into the symbol table + // override this to set the language-specific name + virtual const char* getGlobalUniformBlockName() const { return ""; } + virtual void setUniformBlockDefaults(TType&) const { } + virtual void finalizeGlobalUniformBlockLayout(TVariable&) { } + virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, TPrefixType prefix, + va_list args); + virtual void trackLinkage(TSymbol& symbol); + virtual void makeEditable(TSymbol*&); + virtual TVariable* getEditableVariable(const char* name); + virtual void finish(); +}; + +// +// Manage the state for when to respect precision qualifiers and when to warn about +// the defaults being different than might be expected. +// +class TPrecisionManager { +public: + TPrecisionManager() : obey(false), warn(false), explicitIntDefault(false), explicitFloatDefault(false){ } + virtual ~TPrecisionManager() {} + + void respectPrecisionQualifiers() { obey = true; } + bool respectingPrecisionQualifiers() const { return obey; } + bool shouldWarnAboutDefaults() const { return warn; } + void defaultWarningGiven() { warn = false; } + void warnAboutDefaults() { warn = true; } + void explicitIntDefaultSeen() + { + explicitIntDefault = true; + if (explicitFloatDefault) + warn = false; + } + void explicitFloatDefaultSeen() + { + explicitFloatDefault = true; + if (explicitIntDefault) + warn = false; + } + +protected: + bool obey; // respect precision qualifiers + bool warn; // need to give a warning about the defaults + bool explicitIntDefault; // user set the default for int/uint + bool explicitFloatDefault; // user set the default for float +}; + +// +// GLSL-specific parse helper. Should have GLSL in the name, but that's +// too big of a change for comparing branches at the moment, and perhaps +// impacts downstream consumers as well.
+// +class TParseContext : public TParseContextBase { +public: + TParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins, int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&, + bool forwardCompatible = false, EShMessages messages = EShMsgDefault, + const TString* entryPoint = nullptr); + virtual ~TParseContext(); + + bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); } + void setPrecisionDefaults(); + + void setLimits(const TBuiltInResource&) override; + bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override; + void parserError(const char* s); // for bison's yyerror + + void reservedErrorCheck(const TSourceLoc&, const TString&); + void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override; + bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override; + bool lineDirectiveShouldSetNextLine() const override; + bool builtInName(const TString&); + + void handlePragma(const TSourceLoc&, const TVector&) override; + TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string); + TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index); + void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index); + +#ifndef GLSLANG_WEB + void makeEditable(TSymbol*&) override; + void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier); +#endif + bool isIoResizeArray(const TType&) const; + void fixIoArraySize(const TSourceLoc&, TType&); + void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base); + void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false); + int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const; + void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&); + + TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right); + TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode); + TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field); + TIntermTyped* handleDotSwizzle(const TSourceLoc&, TIntermTyped* base, const TString& field); + void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName); + TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype); + TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&); + TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*); + TIntermTyped* handleBuiltInFunctionCall(TSourceLoc, TIntermNode* arguments, const TFunction& function); + void computeBuiltinPrecisions(TIntermTyped&, const TFunction&); + TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*); + void checkLocation(const TSourceLoc&, TOperator); + TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*); + void addInputArgumentConversions(const TFunction&, TIntermNode*&) const; + TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const; + TIntermTyped* addAssign(const TSourceLoc&, TOperator op, TIntermTyped* left, TIntermTyped* right); + void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&); + void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, 
TIntermAggregate&); + void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&); + void samplerConstructorLocationCheck(const TSourceLoc&, const char* token, TIntermNode*); + TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&); + void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier); + void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier); + void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode); + + void assignError(const TSourceLoc&, const char* op, TString left, TString right); + void unaryOpError(const TSourceLoc&, const char* op, TString operand); + void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right); + void variableCheck(TIntermTyped*& nodePtr); + bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override; + void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override; + void constantValueCheck(TIntermTyped* node, const char* token); + void integerCheck(const TIntermTyped* node, const char* token); + void globalCheck(const TSourceLoc&, const char* token); + bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&); + bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&); + void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType); + bool arrayQualifierError(const TSourceLoc&, const TQualifier&); + bool arrayError(const TSourceLoc&, const TType&); + void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&); + void structArrayCheck(const TSourceLoc&, const TType& structure); + void arraySizesCheck(const TSourceLoc&, const TQualifier&, TArraySizes*, const TIntermTyped* initializer, bool lastMember); + void arrayOfArrayVersionCheck(const TSourceLoc&, const TArraySizes*); + bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType); + void boolCheck(const TSourceLoc&, const TIntermTyped*); + void boolCheck(const TSourceLoc&, const TPublicType&); + void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer); + void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier); + void accStructCheck(const TSourceLoc & loc, const TType & type, const TString & identifier); + void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier); + void memberQualifierCheck(glslang::TPublicType&); + void globalQualifierFixCheck(const TSourceLoc&, TQualifier&); + void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&); + bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType); + void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force); + void setDefaultPrecision(const TSourceLoc&, TPublicType&, TPrecisionQualifier); + int computeSamplerTypeIndex(TSampler&); + TPrecisionQualifier getDefaultPrecision(TPublicType&); + void precisionQualifierCheck(const TSourceLoc&, TBasicType, TQualifier&); + void parameterTypeCheck(const TSourceLoc&, TStorageQualifier qualifier, const TType& type); + bool containsFieldWithBasicType(const TType& type ,TBasicType basicType); + TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&); + void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes); + void paramCheckFixStorage(const 
TSourceLoc&, const TStorageQualifier&, TType& type); + void paramCheckFix(const TSourceLoc&, const TQualifier&, TType& type); + void nestedBlockCheck(const TSourceLoc&); + void nestedStructCheck(const TSourceLoc&); + void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op); + void opaqueCheck(const TSourceLoc&, const TType&, const char* op); + void referenceCheck(const TSourceLoc&, const TType&, const char* op); + void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op); + void specializationCheck(const TSourceLoc&, const TType&, const char* op); + void structTypeCheck(const TSourceLoc&, TPublicType&); + void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop); + void arrayLimitCheck(const TSourceLoc&, const TString&, int size); + void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature); + + void inductiveLoopBodyCheck(TIntermNode*, int loopIndexId, TSymbolTable&); + void constantIndexExpressionCheck(TIntermNode*); + + void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&); + void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&, const TIntermTyped*); + void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly); + void layoutObjectCheck(const TSourceLoc&, const TSymbol&); + void layoutMemberLocationArrayCheck(const TSourceLoc&, bool memberWithLocation, TArraySizes* arraySizes); + void layoutTypeCheck(const TSourceLoc&, const TType&); + void layoutQualifierCheck(const TSourceLoc&, const TQualifier&); + void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&); + void fixOffset(const TSourceLoc&, TSymbol&); + + const TFunction* findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + const TFunction* findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn); + void declareTypeDefaults(const TSourceLoc&, const TPublicType&); + TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, const TPublicType&, TArraySizes* typeArray = 0, TIntermTyped* initializer = 0); + TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&); + TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&); + TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset); + void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to); + void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0); + void blockStageIoCheck(const TSourceLoc&, const TQualifier&); + void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName); + void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation); + void fixXfbOffsets(TQualifier&, TTypeList&); + void fixBlockUniformOffsets(TQualifier&, TTypeList&); + void fixBlockUniformLayoutMatrix(TQualifier&, TTypeList*, TTypeList*); + void fixBlockUniformLayoutPacking(TQualifier&, TTypeList*, TTypeList*); + void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier); + void addQualifierToExisting(const TSourceLoc&, TQualifier, 
TIdentifierList&); + void invariantCheck(const TSourceLoc&, const TQualifier&); + void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&); + void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode); + TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body); + const TTypeList* recordStructCopy(TStructRecord&, const TType*, const TType*); + +#ifndef GLSLANG_WEB + TAttributeType attributeFromName(const TString& name) const; + TAttributes* makeAttributes(const TString& identifier) const; + TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const; + TAttributes* mergeAttributes(TAttributes*, TAttributes*) const; + + // Determine selection control from attributes + void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*); + void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*); + // Determine loop control from attributes + void handleLoopAttributes(const TAttributes& attributes, TIntermNode*); +#endif + + void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember); + +protected: + void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type); + void inheritGlobalDefaults(TQualifier& dst) const; + TVariable* makeInternalVariable(const char* name, const TType&) const; + TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&); + void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&); + void checkRuntimeSizable(const TSourceLoc&, const TIntermTyped&); + bool isRuntimeLength(const TIntermTyped&) const; + TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable); + TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer); +#ifndef GLSLANG_WEB + void finish() override; +#endif + +public: + // + // Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access + // + + // Current state of parsing + bool inMain; // if inside a function, true if the function is main + const TString* blockName; + TQualifier currentBlockQualifier; + TPrecisionQualifier defaultPrecision[EbtNumTypes]; + TBuiltInResource resources; + TLimits& limits; + +protected: + TParseContext(TParseContext&); + TParseContext& operator=(TParseContext&); + + static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2 * 2 * 2)); // see computeSamplerTypeIndex() + TPrecisionQualifier defaultSamplerPrecision[maxSamplerIndex]; + TPrecisionManager precisionManager; + TQualifier globalBufferDefaults; + TQualifier globalUniformDefaults; + TQualifier globalInputDefaults; + TQualifier globalOutputDefaults; + TString currentCaller; // name of last function body entered (not valid when at global scope) +#ifndef GLSLANG_WEB + int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point + bool anyIndexLimits; + TIdSetType inductiveLoopIds; + TVector needsIndexLimitationChecking; + TStructRecord matrixFixRecord; + TStructRecord packingFixRecord; + + // + // Geometry shader input arrays: + // - array sizing is based on input primitive and/or explicit size + // + // Tessellation control output arrays: + // - array sizing is based on output layout(vertices=...) 
and/or explicit size + // + // Both: + // - array sizing is retroactive + // - built-in block redeclarations interact with this + // + // Design: + // - use a per-context "resize-list", a list of symbols whose array sizes + // can be fixed + // + // - the resize-list starts empty at beginning of user-shader compilation, it does + // not have built-ins in it + // + // - on built-in array use: copyUp() symbol and add it to the resize-list + // + // - on user array declaration: add it to the resize-list + // + // - on block redeclaration: copyUp() symbol and add it to the resize-list + // * note, that appropriately gives an error if redeclaring a block that + // was already used and hence already copied-up + // + // - on seeing a layout declaration that sizes the array, fix everything in the + // resize-list, giving errors for mismatch + // + // - on seeing an array size declaration, give errors on mismatch between it and previous + // array-sizing declarations + // + TVector ioArraySymbolResizeList; +#endif +}; + +} // end namespace glslang + +#endif // _PARSER_HELPER_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/PoolAlloc.cpp b/src/third_party/glslang/glslang/MachineIndependent/PoolAlloc.cpp new file mode 100644 index 0000000..84c40f4 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/PoolAlloc.cpp @@ -0,0 +1,315 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/Common.h" +#include "../Include/PoolAlloc.h" + +#include "../Include/InitializeGlobals.h" +#include "../OSDependent/osinclude.h" + +namespace glslang { + +// Process-wide TLS index +OS_TLSIndex PoolIndex; + +// Return the thread-specific current pool. +TPoolAllocator& GetThreadPoolAllocator() +{ + return *static_cast(OS_GetTLSValue(PoolIndex)); +} + +// Set the thread-specific current pool. 
+void SetThreadPoolAllocator(TPoolAllocator* poolAllocator) +{ + OS_SetTLSValue(PoolIndex, poolAllocator); +} + +// Process-wide set up of the TLS pool storage. +bool InitializePoolIndex() +{ + // Allocate a TLS index. + if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX) + return false; + + return true; +} + +// +// Implement the functionality of the TPoolAllocator class, which +// is documented in PoolAlloc.h. +// +TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) : + pageSize(growthIncrement), + alignment(allocationAlignment), + freeList(nullptr), + inUseList(nullptr), + numCalls(0) +{ + // + // Don't allow page sizes we know are smaller than all common + // OS page sizes. + // + if (pageSize < 4*1024) + pageSize = 4*1024; + + // + // A large currentPageOffset indicates a new page needs to + // be obtained to allocate memory. + // + currentPageOffset = pageSize; + + // + // Adjust alignment to be at least pointer aligned and + // power of 2. + // + size_t minAlign = sizeof(void*); + alignment &= ~(minAlign - 1); + if (alignment < minAlign) + alignment = minAlign; + size_t a = 1; + while (a < alignment) + a <<= 1; + alignment = a; + alignmentMask = a - 1; + + // + // Align header skip + // + headerSkip = minAlign; + if (headerSkip < sizeof(tHeader)) { + headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask; + } + + push(); +} + +TPoolAllocator::~TPoolAllocator() +{ + while (inUseList) { + tHeader* next = inUseList->nextPage; + inUseList->~tHeader(); + delete [] reinterpret_cast(inUseList); + inUseList = next; + } + + // + // Always delete the free list memory - it can't be being + // (correctly) referenced, whether the pool allocator was + // global or not. We should not check the guard blocks + // here, because we did it already when the block was + // placed into the free list. + // + while (freeList) { + tHeader* next = freeList->nextPage; + delete [] reinterpret_cast(freeList); + freeList = next; + } +} + +const unsigned char TAllocation::guardBlockBeginVal = 0xfb; +const unsigned char TAllocation::guardBlockEndVal = 0xfe; +const unsigned char TAllocation::userDataFill = 0xcd; + +# ifdef GUARD_BLOCKS + const size_t TAllocation::guardBlockSize = 16; +# else + const size_t TAllocation::guardBlockSize = 0; +# endif + +// +// Check a single guard block for damage +// +#ifdef GUARD_BLOCKS +void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const +#else +void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const +#endif +{ +#ifdef GUARD_BLOCKS + for (size_t x = 0; x < guardBlockSize; x++) { + if (blockMem[x] != val) { + const int maxSize = 80; + char assertMsg[maxSize]; + + // We don't print the assert message. It's here just to be helpful. + snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", + locText, size, data()); + assert(0 && "PoolAlloc: Damage in guard block"); + } + } +#else + assert(guardBlockSize == 0); +#endif +} + +void TPoolAllocator::push() +{ + tAllocState state = { currentPageOffset, inUseList }; + + stack.push_back(state); + + // + // Indicate there is no current page to allocate from. + // + currentPageOffset = pageSize; +} + +// +// Do a mass-deallocation of all the individual allocations +// that have occurred since the last push(), or since the +// last pop(), or since the object's creation. +// +// The deallocated pages are saved for future allocations. 
+// +void TPoolAllocator::pop() +{ + if (stack.size() < 1) + return; + + tHeader* page = stack.back().page; + currentPageOffset = stack.back().offset; + + while (inUseList != page) { + tHeader* nextInUse = inUseList->nextPage; + size_t pageCount = inUseList->pageCount; + + // This technically ends the lifetime of the header as a C++ object, + // but we will still control the memory and reuse it. + inUseList->~tHeader(); // currently, just a debug allocation checker + + if (pageCount > 1) { + delete [] reinterpret_cast<char*>(inUseList); + } else { + inUseList->nextPage = freeList; + freeList = inUseList; + } + inUseList = nextInUse; + } + + stack.pop_back(); +} + +// +// Do a mass-deallocation of all the individual allocations +// that have occurred. +// +void TPoolAllocator::popAll() +{ + while (stack.size() > 0) + pop(); +} + +void* TPoolAllocator::allocate(size_t numBytes) +{ + // If we are using guard blocks, all allocations are bracketed by + // them: [guardblock][allocation][guardblock]. numBytes is how + // much memory the caller asked for. allocationSize is the total + // size including guard blocks. In release build, + // guardBlockSize=0 and this all gets optimized away. + size_t allocationSize = TAllocation::allocationSize(numBytes); + + // + // Just keep some interesting statistics. + // + ++numCalls; + totalBytes += numBytes; + + // + // Do the allocation, most likely case first, for efficiency. + // This step could be moved to be inline sometime. + // + if (currentPageOffset + allocationSize <= pageSize) { + // + // Safe to allocate from currentPageOffset. + // + unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset; + currentPageOffset += allocationSize; + currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask; + + return initializeAllocation(inUseList, memory, numBytes); + } + + if (allocationSize + headerSkip > pageSize) { + // + // Do a multi-page allocation. Don't mix these with the others. + // The OS is efficient at allocating and freeing multiple pages. + // + size_t numBytesToAlloc = allocationSize + headerSkip; + tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]); + if (memory == 0) + return 0; + + // Use placement-new to initialize header + new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize); + inUseList = memory; + + currentPageOffset = pageSize; // make next allocation come from a new page + + // No guard blocks for multi-page allocations (yet) + return reinterpret_cast<void*>(reinterpret_cast<char*>(memory) + headerSkip); + } + + // + // Need a simple page to allocate from. + // + tHeader* memory; + if (freeList) { + memory = freeList; + freeList = freeList->nextPage; + } else { + memory = reinterpret_cast<tHeader*>(::new char[pageSize]); + if (memory == 0) + return 0; + } + + // Use placement-new to initialize header + new(memory) tHeader(inUseList, 1); + inUseList = memory; + + unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip; + currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask; + + return initializeAllocation(inUseList, ret, numBytes); +} + +// +// Check all allocations in a list for damage by calling check on each.
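(Editor's aside, not part of the upstream source.) push(), allocate() and pop() together give scoped bump-pointer allocation: everything allocated after a push() is reclaimed wholesale by the matching pop(), and freed single pages are parked on freeList for reuse rather than returned to the OS. A minimal usage sketch, assuming the default constructor arguments declared in PoolAlloc.h:

    TPoolAllocator pool;                 // page size is clamped to at least 4 KB in the constructor
    pool.push();                         // open an allocation scope
    void* a = pool.allocate(128);        // served from the current page
    void* b = pool.allocate(64 * 1024);  // larger than a page: takes the multi-page path
    pool.pop();                          // reclaims a and b; the single page goes back to freeList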
+// +void TAllocation::checkAllocList() const +{ + for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc) + alloc->check(); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/RemoveTree.cpp b/src/third_party/glslang/glslang/MachineIndependent/RemoveTree.cpp new file mode 100644 index 0000000..1d33bfd --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/RemoveTree.cpp @@ -0,0 +1,118 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "../Include/intermediate.h" +#include "RemoveTree.h" + +namespace glslang { + +// +// Code to recursively delete the intermediate tree. +// +struct TRemoveTraverser : TIntermTraverser { + TRemoveTraverser() : TIntermTraverser(false, false, true, false) {} + + virtual void visitSymbol(TIntermSymbol* node) + { + delete node; + } + + virtual bool visitBinary(TVisit /* visit*/ , TIntermBinary* node) + { + delete node; + + return true; + } + + virtual bool visitUnary(TVisit /* visit */, TIntermUnary* node) + { + delete node; + + return true; + } + + virtual bool visitAggregate(TVisit /* visit*/ , TIntermAggregate* node) + { + delete node; + + return true; + } + + virtual bool visitSelection(TVisit /* visit*/ , TIntermSelection* node) + { + delete node; + + return true; + } + + virtual bool visitSwitch(TVisit /* visit*/ , TIntermSwitch* node) + { + delete node; + + return true; + } + + virtual void visitConstantUnion(TIntermConstantUnion* node) + { + delete node; + } + + virtual bool visitLoop(TVisit /* visit*/ , TIntermLoop* node) + { + delete node; + + return true; + } + + virtual bool visitBranch(TVisit /* visit*/ , TIntermBranch* node) + { + delete node; + + return true; + } +}; + +// +// Entry point. 
+// +void RemoveAllTreeNodes(TIntermNode* root) +{ + TRemoveTraverser it; + + root->traverse(&it); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/RemoveTree.h b/src/third_party/glslang/glslang/MachineIndependent/RemoveTree.h new file mode 100644 index 0000000..1ed0156 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/RemoveTree.h @@ -0,0 +1,41 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#pragma once + +namespace glslang { + +void RemoveAllTreeNodes(TIntermNode*); + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/Scan.cpp b/src/third_party/glslang/glslang/MachineIndependent/Scan.cpp new file mode 100644 index 0000000..bd3181a --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Scan.cpp @@ -0,0 +1,1827 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2020 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// GLSL scanning, leveraging the scanning done by the preprocessor. +// + +#include +#include +#include + +#include "../Include/Types.h" +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "attribute.h" +#include "glslang_tab.cpp.h" +#include "ScanContext.h" +#include "Scan.h" + +// preprocessor includes +#include "preprocessor/PpContext.h" +#include "preprocessor/PpTokens.h" + +// Required to avoid missing prototype warnings for some compilers +int yylex(YYSTYPE*, glslang::TParseContext&); + +namespace glslang { + +// read past any white space +void TInputScanner::consumeWhiteSpace(bool& foundNonSpaceTab) +{ + int c = peek(); // don't accidentally consume anything other than whitespace + while (c == ' ' || c == '\t' || c == '\r' || c == '\n') { + if (c == '\r' || c == '\n') + foundNonSpaceTab = true; + get(); + c = peek(); + } +} + +// return true if a comment was actually consumed +bool TInputScanner::consumeComment() +{ + if (peek() != '/') + return false; + + get(); // consume the '/' + int c = peek(); + if (c == '/') { + + // a '//' style comment + get(); // consume the second '/' + c = get(); + do { + while (c != EndOfInput && c != '\\' && c != '\r' && c != '\n') + c = get(); + + if (c == EndOfInput || c == '\r' || c == '\n') { + while (c == '\r' || c == '\n') + c = get(); + + // we reached the end of the comment + break; + } else { + // it's a '\', so we need to keep going, after skipping what's escaped + + // read the skipped character + c = get(); + + // if it's a two-character newline, skip both characters + if (c == '\r' && peek() == '\n') + get(); + c = get(); + } + } while (true); + + // put back the last non-comment character + if (c != EndOfInput) + unget(); + + return true; + } else if (c == '*') { + + // a '/*' style comment + get(); // consume the '*' + c = get(); + do { + while (c != EndOfInput && c != '*') + c = get(); + if (c == '*') { + c = get(); + if (c == '/') + break; // end of comment + // not end of comment + } else // end of input + break; + } while (true); + + return true; + } else { + // it's not a comment, put the '/' back + unget(); + + return false; + } +} + +// skip whitespace, then skip a comment, rinse, repeat +void TInputScanner::consumeWhitespaceComment(bool& foundNonSpaceTab) +{ + do { + consumeWhiteSpace(foundNonSpaceTab); + + // if not starting a comment now, then done + int c = peek(); + if (c != '/' || c == EndOfInput) + return; + + // skip potential comment + foundNonSpaceTab = true; + if (! consumeComment()) + return; + + } while (true); +} + +// Returns true if there was non-white space (e.g., a comment, newline) before the #version +// or no #version was found; otherwise, returns false. 
There is no error case, it always +// succeeds, but will leave version == 0 if no #version was found. +// +// Sets notFirstToken based on whether tokens (beyond white space and comments) +// appeared before the #version. +// +// N.B. does not attempt to leave input in any particular known state. The assumption +// is that scanning will start anew, following the rules for the chosen version/profile, +// and with a corresponding parsing context. +// +bool TInputScanner::scanVersion(int& version, EProfile& profile, bool& notFirstToken) +{ + // This function doesn't have to get all the semantics correct, + // just find the #version if there is a correct one present. + // The preprocessor will have the responsibility of getting all the semantics right. + + bool versionNotFirst = false; // means not first WRT comments and white space, nothing more + notFirstToken = false; // means not first WRT to real tokens + version = 0; // means not found + profile = ENoProfile; + + bool foundNonSpaceTab = false; + bool lookingInMiddle = false; + int c; + do { + if (lookingInMiddle) { + notFirstToken = true; + // make forward progress by finishing off the current line plus extra new lines + if (peek() != '\n' && peek() != '\r') { + do { + c = get(); + } while (c != EndOfInput && c != '\n' && c != '\r'); + } + while (peek() == '\n' || peek() == '\r') + get(); + if (peek() == EndOfInput) + return true; + } + lookingInMiddle = true; + + // Nominal start, skipping the desktop allowed comments and white space, but tracking if + // something else was found for ES: + consumeWhitespaceComment(foundNonSpaceTab); + if (foundNonSpaceTab) + versionNotFirst = true; + + // "#" + if (get() != '#') { + versionNotFirst = true; + continue; + } + + // whitespace + do { + c = get(); + } while (c == ' ' || c == '\t'); + + // "version" + if ( c != 'v' || + get() != 'e' || + get() != 'r' || + get() != 's' || + get() != 'i' || + get() != 'o' || + get() != 'n') { + versionNotFirst = true; + continue; + } + + // whitespace + do { + c = get(); + } while (c == ' ' || c == '\t'); + + // version number + while (c >= '0' && c <= '9') { + version = 10 * version + (c - '0'); + c = get(); + } + if (version == 0) { + versionNotFirst = true; + continue; + } + + // whitespace + while (c == ' ' || c == '\t') + c = get(); + + // profile + const int maxProfileLength = 13; // not including any 0 + char profileString[maxProfileLength]; + int profileLength; + for (profileLength = 0; profileLength < maxProfileLength; ++profileLength) { + if (c == EndOfInput || c == ' ' || c == '\t' || c == '\n' || c == '\r') + break; + profileString[profileLength] = (char)c; + c = get(); + } + if (c != EndOfInput && c != ' ' && c != '\t' && c != '\n' && c != '\r') { + versionNotFirst = true; + continue; + } + + if (profileLength == 2 && strncmp(profileString, "es", profileLength) == 0) + profile = EEsProfile; + else if (profileLength == 4 && strncmp(profileString, "core", profileLength) == 0) + profile = ECoreProfile; + else if (profileLength == 13 && strncmp(profileString, "compatibility", profileLength) == 0) + profile = ECompatibilityProfile; + + return versionNotFirst; + } while (true); +} + +// Fill this in when doing glslang-level scanning, to hand back to the parser. 
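(Editor's aside, not part of the upstream source.) scanVersion() above only pre-scans for a plausible #version directive; full validation is left to the preprocessor. Assuming `scanner` is a TInputScanner over a shader that begins with `#version 310 es`, the expected outcome is:

    int version = 0;
    EProfile profile = ENoProfile;
    bool notFirstToken = false;
    bool notFirst = scanner.scanVersion(version, profile, notFirstToken);
    // version == 310, profile == EEsProfile; notFirst is false because only
    // white space or comments precede the directive in this example.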
+class TParserToken { +public: + explicit TParserToken(YYSTYPE& b) : sType(b) { } + + YYSTYPE& sType; +protected: + TParserToken(TParserToken&); + TParserToken& operator=(TParserToken&); +}; + +} // end namespace glslang + +// This is the function the glslang parser (i.e., bison) calls to get its next token +int yylex(YYSTYPE* glslangTokenDesc, glslang::TParseContext& parseContext) +{ + glslang::TParserToken token(*glslangTokenDesc); + + return parseContext.getScanContext()->tokenize(parseContext.getPpContext(), token); +} + +namespace { + +struct str_eq +{ + bool operator()(const char* lhs, const char* rhs) const + { + return strcmp(lhs, rhs) == 0; + } +}; + +struct str_hash +{ + size_t operator()(const char* str) const + { + // djb2 + unsigned long hash = 5381; + int c; + + while ((c = *str++) != 0) + hash = ((hash << 5) + hash) + c; + + return hash; + } +}; + +// A single global usable by all threads, by all versions, by all languages. +// After a single process-level initialization, this is read only and thread safe +std::unordered_map* KeywordMap = nullptr; +#ifndef GLSLANG_WEB +std::unordered_set* ReservedSet = nullptr; +#endif + +}; + +namespace glslang { + +void TScanContext::fillInKeywordMap() +{ + if (KeywordMap != nullptr) { + // this is really an error, as this should called only once per process + // but, the only risk is if two threads called simultaneously + return; + } + KeywordMap = new std::unordered_map; + + (*KeywordMap)["const"] = CONST; + (*KeywordMap)["uniform"] = UNIFORM; + (*KeywordMap)["buffer"] = BUFFER; + (*KeywordMap)["in"] = IN; + (*KeywordMap)["out"] = OUT; + (*KeywordMap)["smooth"] = SMOOTH; + (*KeywordMap)["flat"] = FLAT; + (*KeywordMap)["centroid"] = CENTROID; + (*KeywordMap)["invariant"] = INVARIANT; + (*KeywordMap)["packed"] = PACKED; + (*KeywordMap)["resource"] = RESOURCE; + (*KeywordMap)["inout"] = INOUT; + (*KeywordMap)["struct"] = STRUCT; + (*KeywordMap)["break"] = BREAK; + (*KeywordMap)["continue"] = CONTINUE; + (*KeywordMap)["do"] = DO; + (*KeywordMap)["for"] = FOR; + (*KeywordMap)["while"] = WHILE; + (*KeywordMap)["switch"] = SWITCH; + (*KeywordMap)["case"] = CASE; + (*KeywordMap)["default"] = DEFAULT; + (*KeywordMap)["if"] = IF; + (*KeywordMap)["else"] = ELSE; + (*KeywordMap)["discard"] = DISCARD; + (*KeywordMap)["return"] = RETURN; + (*KeywordMap)["void"] = VOID; + (*KeywordMap)["bool"] = BOOL; + (*KeywordMap)["float"] = FLOAT; + (*KeywordMap)["int"] = INT; + (*KeywordMap)["bvec2"] = BVEC2; + (*KeywordMap)["bvec3"] = BVEC3; + (*KeywordMap)["bvec4"] = BVEC4; + (*KeywordMap)["vec2"] = VEC2; + (*KeywordMap)["vec3"] = VEC3; + (*KeywordMap)["vec4"] = VEC4; + (*KeywordMap)["ivec2"] = IVEC2; + (*KeywordMap)["ivec3"] = IVEC3; + (*KeywordMap)["ivec4"] = IVEC4; + (*KeywordMap)["mat2"] = MAT2; + (*KeywordMap)["mat3"] = MAT3; + (*KeywordMap)["mat4"] = MAT4; + (*KeywordMap)["true"] = BOOLCONSTANT; + (*KeywordMap)["false"] = BOOLCONSTANT; + (*KeywordMap)["layout"] = LAYOUT; + (*KeywordMap)["shared"] = SHARED; + (*KeywordMap)["highp"] = HIGH_PRECISION; + (*KeywordMap)["mediump"] = MEDIUM_PRECISION; + (*KeywordMap)["lowp"] = LOW_PRECISION; + (*KeywordMap)["superp"] = SUPERP; + (*KeywordMap)["precision"] = PRECISION; + (*KeywordMap)["mat2x2"] = MAT2X2; + (*KeywordMap)["mat2x3"] = MAT2X3; + (*KeywordMap)["mat2x4"] = MAT2X4; + (*KeywordMap)["mat3x2"] = MAT3X2; + (*KeywordMap)["mat3x3"] = MAT3X3; + (*KeywordMap)["mat3x4"] = MAT3X4; + (*KeywordMap)["mat4x2"] = MAT4X2; + (*KeywordMap)["mat4x3"] = MAT4X3; + (*KeywordMap)["mat4x4"] = MAT4X4; + (*KeywordMap)["uint"] = 
UINT; + (*KeywordMap)["uvec2"] = UVEC2; + (*KeywordMap)["uvec3"] = UVEC3; + (*KeywordMap)["uvec4"] = UVEC4; + +#ifndef GLSLANG_WEB + (*KeywordMap)["nonuniformEXT"] = NONUNIFORM; + (*KeywordMap)["demote"] = DEMOTE; + (*KeywordMap)["attribute"] = ATTRIBUTE; + (*KeywordMap)["varying"] = VARYING; + (*KeywordMap)["noperspective"] = NOPERSPECTIVE; + (*KeywordMap)["coherent"] = COHERENT; + (*KeywordMap)["devicecoherent"] = DEVICECOHERENT; + (*KeywordMap)["queuefamilycoherent"] = QUEUEFAMILYCOHERENT; + (*KeywordMap)["workgroupcoherent"] = WORKGROUPCOHERENT; + (*KeywordMap)["subgroupcoherent"] = SUBGROUPCOHERENT; + (*KeywordMap)["shadercallcoherent"] = SHADERCALLCOHERENT; + (*KeywordMap)["nonprivate"] = NONPRIVATE; + (*KeywordMap)["restrict"] = RESTRICT; + (*KeywordMap)["readonly"] = READONLY; + (*KeywordMap)["writeonly"] = WRITEONLY; + (*KeywordMap)["atomic_uint"] = ATOMIC_UINT; + (*KeywordMap)["volatile"] = VOLATILE; + (*KeywordMap)["patch"] = PATCH; + (*KeywordMap)["sample"] = SAMPLE; + (*KeywordMap)["subroutine"] = SUBROUTINE; + (*KeywordMap)["dmat2"] = DMAT2; + (*KeywordMap)["dmat3"] = DMAT3; + (*KeywordMap)["dmat4"] = DMAT4; + (*KeywordMap)["dmat2x2"] = DMAT2X2; + (*KeywordMap)["dmat2x3"] = DMAT2X3; + (*KeywordMap)["dmat2x4"] = DMAT2X4; + (*KeywordMap)["dmat3x2"] = DMAT3X2; + (*KeywordMap)["dmat3x3"] = DMAT3X3; + (*KeywordMap)["dmat3x4"] = DMAT3X4; + (*KeywordMap)["dmat4x2"] = DMAT4X2; + (*KeywordMap)["dmat4x3"] = DMAT4X3; + (*KeywordMap)["dmat4x4"] = DMAT4X4; + (*KeywordMap)["image1D"] = IMAGE1D; + (*KeywordMap)["iimage1D"] = IIMAGE1D; + (*KeywordMap)["uimage1D"] = UIMAGE1D; + (*KeywordMap)["image2D"] = IMAGE2D; + (*KeywordMap)["iimage2D"] = IIMAGE2D; + (*KeywordMap)["uimage2D"] = UIMAGE2D; + (*KeywordMap)["image3D"] = IMAGE3D; + (*KeywordMap)["iimage3D"] = IIMAGE3D; + (*KeywordMap)["uimage3D"] = UIMAGE3D; + (*KeywordMap)["image2DRect"] = IMAGE2DRECT; + (*KeywordMap)["iimage2DRect"] = IIMAGE2DRECT; + (*KeywordMap)["uimage2DRect"] = UIMAGE2DRECT; + (*KeywordMap)["imageCube"] = IMAGECUBE; + (*KeywordMap)["iimageCube"] = IIMAGECUBE; + (*KeywordMap)["uimageCube"] = UIMAGECUBE; + (*KeywordMap)["imageBuffer"] = IMAGEBUFFER; + (*KeywordMap)["iimageBuffer"] = IIMAGEBUFFER; + (*KeywordMap)["uimageBuffer"] = UIMAGEBUFFER; + (*KeywordMap)["image1DArray"] = IMAGE1DARRAY; + (*KeywordMap)["iimage1DArray"] = IIMAGE1DARRAY; + (*KeywordMap)["uimage1DArray"] = UIMAGE1DARRAY; + (*KeywordMap)["image2DArray"] = IMAGE2DARRAY; + (*KeywordMap)["iimage2DArray"] = IIMAGE2DARRAY; + (*KeywordMap)["uimage2DArray"] = UIMAGE2DARRAY; + (*KeywordMap)["imageCubeArray"] = IMAGECUBEARRAY; + (*KeywordMap)["iimageCubeArray"] = IIMAGECUBEARRAY; + (*KeywordMap)["uimageCubeArray"] = UIMAGECUBEARRAY; + (*KeywordMap)["image2DMS"] = IMAGE2DMS; + (*KeywordMap)["iimage2DMS"] = IIMAGE2DMS; + (*KeywordMap)["uimage2DMS"] = UIMAGE2DMS; + (*KeywordMap)["image2DMSArray"] = IMAGE2DMSARRAY; + (*KeywordMap)["iimage2DMSArray"] = IIMAGE2DMSARRAY; + (*KeywordMap)["uimage2DMSArray"] = UIMAGE2DMSARRAY; + (*KeywordMap)["double"] = DOUBLE; + (*KeywordMap)["dvec2"] = DVEC2; + (*KeywordMap)["dvec3"] = DVEC3; + (*KeywordMap)["dvec4"] = DVEC4; + (*KeywordMap)["int64_t"] = INT64_T; + (*KeywordMap)["uint64_t"] = UINT64_T; + (*KeywordMap)["i64vec2"] = I64VEC2; + (*KeywordMap)["i64vec3"] = I64VEC3; + (*KeywordMap)["i64vec4"] = I64VEC4; + (*KeywordMap)["u64vec2"] = U64VEC2; + (*KeywordMap)["u64vec3"] = U64VEC3; + (*KeywordMap)["u64vec4"] = U64VEC4; + + // GL_EXT_shader_explicit_arithmetic_types + (*KeywordMap)["int8_t"] = INT8_T; + (*KeywordMap)["i8vec2"] = 
I8VEC2; + (*KeywordMap)["i8vec3"] = I8VEC3; + (*KeywordMap)["i8vec4"] = I8VEC4; + (*KeywordMap)["uint8_t"] = UINT8_T; + (*KeywordMap)["u8vec2"] = U8VEC2; + (*KeywordMap)["u8vec3"] = U8VEC3; + (*KeywordMap)["u8vec4"] = U8VEC4; + + (*KeywordMap)["int16_t"] = INT16_T; + (*KeywordMap)["i16vec2"] = I16VEC2; + (*KeywordMap)["i16vec3"] = I16VEC3; + (*KeywordMap)["i16vec4"] = I16VEC4; + (*KeywordMap)["uint16_t"] = UINT16_T; + (*KeywordMap)["u16vec2"] = U16VEC2; + (*KeywordMap)["u16vec3"] = U16VEC3; + (*KeywordMap)["u16vec4"] = U16VEC4; + + (*KeywordMap)["int32_t"] = INT32_T; + (*KeywordMap)["i32vec2"] = I32VEC2; + (*KeywordMap)["i32vec3"] = I32VEC3; + (*KeywordMap)["i32vec4"] = I32VEC4; + (*KeywordMap)["uint32_t"] = UINT32_T; + (*KeywordMap)["u32vec2"] = U32VEC2; + (*KeywordMap)["u32vec3"] = U32VEC3; + (*KeywordMap)["u32vec4"] = U32VEC4; + + (*KeywordMap)["float16_t"] = FLOAT16_T; + (*KeywordMap)["f16vec2"] = F16VEC2; + (*KeywordMap)["f16vec3"] = F16VEC3; + (*KeywordMap)["f16vec4"] = F16VEC4; + (*KeywordMap)["f16mat2"] = F16MAT2; + (*KeywordMap)["f16mat3"] = F16MAT3; + (*KeywordMap)["f16mat4"] = F16MAT4; + (*KeywordMap)["f16mat2x2"] = F16MAT2X2; + (*KeywordMap)["f16mat2x3"] = F16MAT2X3; + (*KeywordMap)["f16mat2x4"] = F16MAT2X4; + (*KeywordMap)["f16mat3x2"] = F16MAT3X2; + (*KeywordMap)["f16mat3x3"] = F16MAT3X3; + (*KeywordMap)["f16mat3x4"] = F16MAT3X4; + (*KeywordMap)["f16mat4x2"] = F16MAT4X2; + (*KeywordMap)["f16mat4x3"] = F16MAT4X3; + (*KeywordMap)["f16mat4x4"] = F16MAT4X4; + + (*KeywordMap)["float32_t"] = FLOAT32_T; + (*KeywordMap)["f32vec2"] = F32VEC2; + (*KeywordMap)["f32vec3"] = F32VEC3; + (*KeywordMap)["f32vec4"] = F32VEC4; + (*KeywordMap)["f32mat2"] = F32MAT2; + (*KeywordMap)["f32mat3"] = F32MAT3; + (*KeywordMap)["f32mat4"] = F32MAT4; + (*KeywordMap)["f32mat2x2"] = F32MAT2X2; + (*KeywordMap)["f32mat2x3"] = F32MAT2X3; + (*KeywordMap)["f32mat2x4"] = F32MAT2X4; + (*KeywordMap)["f32mat3x2"] = F32MAT3X2; + (*KeywordMap)["f32mat3x3"] = F32MAT3X3; + (*KeywordMap)["f32mat3x4"] = F32MAT3X4; + (*KeywordMap)["f32mat4x2"] = F32MAT4X2; + (*KeywordMap)["f32mat4x3"] = F32MAT4X3; + (*KeywordMap)["f32mat4x4"] = F32MAT4X4; + (*KeywordMap)["float64_t"] = FLOAT64_T; + (*KeywordMap)["f64vec2"] = F64VEC2; + (*KeywordMap)["f64vec3"] = F64VEC3; + (*KeywordMap)["f64vec4"] = F64VEC4; + (*KeywordMap)["f64mat2"] = F64MAT2; + (*KeywordMap)["f64mat3"] = F64MAT3; + (*KeywordMap)["f64mat4"] = F64MAT4; + (*KeywordMap)["f64mat2x2"] = F64MAT2X2; + (*KeywordMap)["f64mat2x3"] = F64MAT2X3; + (*KeywordMap)["f64mat2x4"] = F64MAT2X4; + (*KeywordMap)["f64mat3x2"] = F64MAT3X2; + (*KeywordMap)["f64mat3x3"] = F64MAT3X3; + (*KeywordMap)["f64mat3x4"] = F64MAT3X4; + (*KeywordMap)["f64mat4x2"] = F64MAT4X2; + (*KeywordMap)["f64mat4x3"] = F64MAT4X3; + (*KeywordMap)["f64mat4x4"] = F64MAT4X4; +#endif + + (*KeywordMap)["sampler2D"] = SAMPLER2D; + (*KeywordMap)["samplerCube"] = SAMPLERCUBE; + (*KeywordMap)["samplerCubeShadow"] = SAMPLERCUBESHADOW; + (*KeywordMap)["sampler2DArray"] = SAMPLER2DARRAY; + (*KeywordMap)["sampler2DArrayShadow"] = SAMPLER2DARRAYSHADOW; + (*KeywordMap)["isampler2D"] = ISAMPLER2D; + (*KeywordMap)["isampler3D"] = ISAMPLER3D; + (*KeywordMap)["isamplerCube"] = ISAMPLERCUBE; + (*KeywordMap)["isampler2DArray"] = ISAMPLER2DARRAY; + (*KeywordMap)["usampler2D"] = USAMPLER2D; + (*KeywordMap)["usampler3D"] = USAMPLER3D; + (*KeywordMap)["usamplerCube"] = USAMPLERCUBE; + (*KeywordMap)["usampler2DArray"] = USAMPLER2DARRAY; + (*KeywordMap)["sampler3D"] = SAMPLER3D; + (*KeywordMap)["sampler2DShadow"] = SAMPLER2DSHADOW; + + 
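+ // The texture*/itexture*/utexture* and sampler/samplerShadow entries below are the Vulkan-style separated texture and sampler objects; tokenizeIdentifier() returns them as keywords only when compiling for Vulkan (spvVersion.vulkan > 0).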
(*KeywordMap)["texture2D"] = TEXTURE2D; + (*KeywordMap)["textureCube"] = TEXTURECUBE; + (*KeywordMap)["texture2DArray"] = TEXTURE2DARRAY; + (*KeywordMap)["itexture2D"] = ITEXTURE2D; + (*KeywordMap)["itexture3D"] = ITEXTURE3D; + (*KeywordMap)["itextureCube"] = ITEXTURECUBE; + (*KeywordMap)["itexture2DArray"] = ITEXTURE2DARRAY; + (*KeywordMap)["utexture2D"] = UTEXTURE2D; + (*KeywordMap)["utexture3D"] = UTEXTURE3D; + (*KeywordMap)["utextureCube"] = UTEXTURECUBE; + (*KeywordMap)["utexture2DArray"] = UTEXTURE2DARRAY; + (*KeywordMap)["texture3D"] = TEXTURE3D; + + (*KeywordMap)["sampler"] = SAMPLER; + (*KeywordMap)["samplerShadow"] = SAMPLERSHADOW; + +#ifndef GLSLANG_WEB + (*KeywordMap)["textureCubeArray"] = TEXTURECUBEARRAY; + (*KeywordMap)["itextureCubeArray"] = ITEXTURECUBEARRAY; + (*KeywordMap)["utextureCubeArray"] = UTEXTURECUBEARRAY; + (*KeywordMap)["samplerCubeArray"] = SAMPLERCUBEARRAY; + (*KeywordMap)["samplerCubeArrayShadow"] = SAMPLERCUBEARRAYSHADOW; + (*KeywordMap)["isamplerCubeArray"] = ISAMPLERCUBEARRAY; + (*KeywordMap)["usamplerCubeArray"] = USAMPLERCUBEARRAY; + (*KeywordMap)["sampler1DArrayShadow"] = SAMPLER1DARRAYSHADOW; + (*KeywordMap)["isampler1DArray"] = ISAMPLER1DARRAY; + (*KeywordMap)["usampler1D"] = USAMPLER1D; + (*KeywordMap)["isampler1D"] = ISAMPLER1D; + (*KeywordMap)["usampler1DArray"] = USAMPLER1DARRAY; + (*KeywordMap)["samplerBuffer"] = SAMPLERBUFFER; + (*KeywordMap)["isampler2DRect"] = ISAMPLER2DRECT; + (*KeywordMap)["usampler2DRect"] = USAMPLER2DRECT; + (*KeywordMap)["isamplerBuffer"] = ISAMPLERBUFFER; + (*KeywordMap)["usamplerBuffer"] = USAMPLERBUFFER; + (*KeywordMap)["sampler2DMS"] = SAMPLER2DMS; + (*KeywordMap)["isampler2DMS"] = ISAMPLER2DMS; + (*KeywordMap)["usampler2DMS"] = USAMPLER2DMS; + (*KeywordMap)["sampler2DMSArray"] = SAMPLER2DMSARRAY; + (*KeywordMap)["isampler2DMSArray"] = ISAMPLER2DMSARRAY; + (*KeywordMap)["usampler2DMSArray"] = USAMPLER2DMSARRAY; + (*KeywordMap)["sampler1D"] = SAMPLER1D; + (*KeywordMap)["sampler1DShadow"] = SAMPLER1DSHADOW; + (*KeywordMap)["sampler2DRect"] = SAMPLER2DRECT; + (*KeywordMap)["sampler2DRectShadow"] = SAMPLER2DRECTSHADOW; + (*KeywordMap)["sampler1DArray"] = SAMPLER1DARRAY; + + (*KeywordMap)["samplerExternalOES"] = SAMPLEREXTERNALOES; // GL_OES_EGL_image_external + + (*KeywordMap)["__samplerExternal2DY2YEXT"] = SAMPLEREXTERNAL2DY2YEXT; // GL_EXT_YUV_target + + (*KeywordMap)["itexture1DArray"] = ITEXTURE1DARRAY; + (*KeywordMap)["utexture1D"] = UTEXTURE1D; + (*KeywordMap)["itexture1D"] = ITEXTURE1D; + (*KeywordMap)["utexture1DArray"] = UTEXTURE1DARRAY; + (*KeywordMap)["textureBuffer"] = TEXTUREBUFFER; + (*KeywordMap)["itexture2DRect"] = ITEXTURE2DRECT; + (*KeywordMap)["utexture2DRect"] = UTEXTURE2DRECT; + (*KeywordMap)["itextureBuffer"] = ITEXTUREBUFFER; + (*KeywordMap)["utextureBuffer"] = UTEXTUREBUFFER; + (*KeywordMap)["texture2DMS"] = TEXTURE2DMS; + (*KeywordMap)["itexture2DMS"] = ITEXTURE2DMS; + (*KeywordMap)["utexture2DMS"] = UTEXTURE2DMS; + (*KeywordMap)["texture2DMSArray"] = TEXTURE2DMSARRAY; + (*KeywordMap)["itexture2DMSArray"] = ITEXTURE2DMSARRAY; + (*KeywordMap)["utexture2DMSArray"] = UTEXTURE2DMSARRAY; + (*KeywordMap)["texture1D"] = TEXTURE1D; + (*KeywordMap)["texture2DRect"] = TEXTURE2DRECT; + (*KeywordMap)["texture1DArray"] = TEXTURE1DARRAY; + + (*KeywordMap)["subpassInput"] = SUBPASSINPUT; + (*KeywordMap)["subpassInputMS"] = SUBPASSINPUTMS; + (*KeywordMap)["isubpassInput"] = ISUBPASSINPUT; + (*KeywordMap)["isubpassInputMS"] = ISUBPASSINPUTMS; + (*KeywordMap)["usubpassInput"] = USUBPASSINPUT; + 
(*KeywordMap)["usubpassInputMS"] = USUBPASSINPUTMS; + + (*KeywordMap)["f16sampler1D"] = F16SAMPLER1D; + (*KeywordMap)["f16sampler2D"] = F16SAMPLER2D; + (*KeywordMap)["f16sampler3D"] = F16SAMPLER3D; + (*KeywordMap)["f16sampler2DRect"] = F16SAMPLER2DRECT; + (*KeywordMap)["f16samplerCube"] = F16SAMPLERCUBE; + (*KeywordMap)["f16sampler1DArray"] = F16SAMPLER1DARRAY; + (*KeywordMap)["f16sampler2DArray"] = F16SAMPLER2DARRAY; + (*KeywordMap)["f16samplerCubeArray"] = F16SAMPLERCUBEARRAY; + (*KeywordMap)["f16samplerBuffer"] = F16SAMPLERBUFFER; + (*KeywordMap)["f16sampler2DMS"] = F16SAMPLER2DMS; + (*KeywordMap)["f16sampler2DMSArray"] = F16SAMPLER2DMSARRAY; + (*KeywordMap)["f16sampler1DShadow"] = F16SAMPLER1DSHADOW; + (*KeywordMap)["f16sampler2DShadow"] = F16SAMPLER2DSHADOW; + (*KeywordMap)["f16sampler2DRectShadow"] = F16SAMPLER2DRECTSHADOW; + (*KeywordMap)["f16samplerCubeShadow"] = F16SAMPLERCUBESHADOW; + (*KeywordMap)["f16sampler1DArrayShadow"] = F16SAMPLER1DARRAYSHADOW; + (*KeywordMap)["f16sampler2DArrayShadow"] = F16SAMPLER2DARRAYSHADOW; + (*KeywordMap)["f16samplerCubeArrayShadow"] = F16SAMPLERCUBEARRAYSHADOW; + + (*KeywordMap)["f16image1D"] = F16IMAGE1D; + (*KeywordMap)["f16image2D"] = F16IMAGE2D; + (*KeywordMap)["f16image3D"] = F16IMAGE3D; + (*KeywordMap)["f16image2DRect"] = F16IMAGE2DRECT; + (*KeywordMap)["f16imageCube"] = F16IMAGECUBE; + (*KeywordMap)["f16image1DArray"] = F16IMAGE1DARRAY; + (*KeywordMap)["f16image2DArray"] = F16IMAGE2DARRAY; + (*KeywordMap)["f16imageCubeArray"] = F16IMAGECUBEARRAY; + (*KeywordMap)["f16imageBuffer"] = F16IMAGEBUFFER; + (*KeywordMap)["f16image2DMS"] = F16IMAGE2DMS; + (*KeywordMap)["f16image2DMSArray"] = F16IMAGE2DMSARRAY; + + (*KeywordMap)["f16texture1D"] = F16TEXTURE1D; + (*KeywordMap)["f16texture2D"] = F16TEXTURE2D; + (*KeywordMap)["f16texture3D"] = F16TEXTURE3D; + (*KeywordMap)["f16texture2DRect"] = F16TEXTURE2DRECT; + (*KeywordMap)["f16textureCube"] = F16TEXTURECUBE; + (*KeywordMap)["f16texture1DArray"] = F16TEXTURE1DARRAY; + (*KeywordMap)["f16texture2DArray"] = F16TEXTURE2DARRAY; + (*KeywordMap)["f16textureCubeArray"] = F16TEXTURECUBEARRAY; + (*KeywordMap)["f16textureBuffer"] = F16TEXTUREBUFFER; + (*KeywordMap)["f16texture2DMS"] = F16TEXTURE2DMS; + (*KeywordMap)["f16texture2DMSArray"] = F16TEXTURE2DMSARRAY; + + (*KeywordMap)["f16subpassInput"] = F16SUBPASSINPUT; + (*KeywordMap)["f16subpassInputMS"] = F16SUBPASSINPUTMS; + (*KeywordMap)["__explicitInterpAMD"] = EXPLICITINTERPAMD; + (*KeywordMap)["pervertexNV"] = PERVERTEXNV; + (*KeywordMap)["precise"] = PRECISE; + + (*KeywordMap)["rayPayloadNV"] = PAYLOADNV; + (*KeywordMap)["rayPayloadEXT"] = PAYLOADEXT; + (*KeywordMap)["rayPayloadInNV"] = PAYLOADINNV; + (*KeywordMap)["rayPayloadInEXT"] = PAYLOADINEXT; + (*KeywordMap)["hitAttributeNV"] = HITATTRNV; + (*KeywordMap)["hitAttributeEXT"] = HITATTREXT; + (*KeywordMap)["callableDataNV"] = CALLDATANV; + (*KeywordMap)["callableDataEXT"] = CALLDATAEXT; + (*KeywordMap)["callableDataInNV"] = CALLDATAINNV; + (*KeywordMap)["callableDataInEXT"] = CALLDATAINEXT; + (*KeywordMap)["accelerationStructureNV"] = ACCSTRUCTNV; + (*KeywordMap)["accelerationStructureEXT"] = ACCSTRUCTEXT; + (*KeywordMap)["rayQueryEXT"] = RAYQUERYEXT; + (*KeywordMap)["perprimitiveNV"] = PERPRIMITIVENV; + (*KeywordMap)["perviewNV"] = PERVIEWNV; + (*KeywordMap)["taskNV"] = PERTASKNV; + + (*KeywordMap)["fcoopmatNV"] = FCOOPMATNV; + (*KeywordMap)["icoopmatNV"] = ICOOPMATNV; + (*KeywordMap)["ucoopmatNV"] = UCOOPMATNV; + + ReservedSet = new std::unordered_set<const char*, str_hash, str_eq>; + + ReservedSet->insert("common"); +
ReservedSet->insert("partition"); + ReservedSet->insert("active"); + ReservedSet->insert("asm"); + ReservedSet->insert("class"); + ReservedSet->insert("union"); + ReservedSet->insert("enum"); + ReservedSet->insert("typedef"); + ReservedSet->insert("template"); + ReservedSet->insert("this"); + ReservedSet->insert("goto"); + ReservedSet->insert("inline"); + ReservedSet->insert("noinline"); + ReservedSet->insert("public"); + ReservedSet->insert("static"); + ReservedSet->insert("extern"); + ReservedSet->insert("external"); + ReservedSet->insert("interface"); + ReservedSet->insert("long"); + ReservedSet->insert("short"); + ReservedSet->insert("half"); + ReservedSet->insert("fixed"); + ReservedSet->insert("unsigned"); + ReservedSet->insert("input"); + ReservedSet->insert("output"); + ReservedSet->insert("hvec2"); + ReservedSet->insert("hvec3"); + ReservedSet->insert("hvec4"); + ReservedSet->insert("fvec2"); + ReservedSet->insert("fvec3"); + ReservedSet->insert("fvec4"); + ReservedSet->insert("sampler3DRect"); + ReservedSet->insert("filter"); + ReservedSet->insert("sizeof"); + ReservedSet->insert("cast"); + ReservedSet->insert("namespace"); + ReservedSet->insert("using"); +#endif +} + +void TScanContext::deleteKeywordMap() +{ + delete KeywordMap; + KeywordMap = nullptr; +#ifndef GLSLANG_WEB + delete ReservedSet; + ReservedSet = nullptr; +#endif +} + +// Called by yylex to get the next token. +// Returning 0 implies end of input. +int TScanContext::tokenize(TPpContext* pp, TParserToken& token) +{ + do { + parserToken = &token; + TPpToken ppToken; + int token = pp->tokenize(ppToken); + if (token == EndOfInput) + return 0; + + tokenText = ppToken.name; + loc = ppToken.loc; + parserToken->sType.lex.loc = loc; + switch (token) { + case ';': afterType = false; afterBuffer = false; return SEMICOLON; + case ',': afterType = false; return COMMA; + case ':': return COLON; + case '=': afterType = false; return EQUAL; + case '(': afterType = false; return LEFT_PAREN; + case ')': afterType = false; return RIGHT_PAREN; + case '.': field = true; return DOT; + case '!': return BANG; + case '-': return DASH; + case '~': return TILDE; + case '+': return PLUS; + case '*': return STAR; + case '/': return SLASH; + case '%': return PERCENT; + case '<': return LEFT_ANGLE; + case '>': return RIGHT_ANGLE; + case '|': return VERTICAL_BAR; + case '^': return CARET; + case '&': return AMPERSAND; + case '?': return QUESTION; + case '[': return LEFT_BRACKET; + case ']': return RIGHT_BRACKET; + case '{': afterStruct = false; afterBuffer = false; return LEFT_BRACE; + case '}': return RIGHT_BRACE; + case '\\': + parseContext.error(loc, "illegal use of escape character", "\\", ""); + break; + + case PPAtomAddAssign: return ADD_ASSIGN; + case PPAtomSubAssign: return SUB_ASSIGN; + case PPAtomMulAssign: return MUL_ASSIGN; + case PPAtomDivAssign: return DIV_ASSIGN; + case PPAtomModAssign: return MOD_ASSIGN; + + case PpAtomRight: return RIGHT_OP; + case PpAtomLeft: return LEFT_OP; + + case PpAtomRightAssign: return RIGHT_ASSIGN; + case PpAtomLeftAssign: return LEFT_ASSIGN; + case PpAtomAndAssign: return AND_ASSIGN; + case PpAtomOrAssign: return OR_ASSIGN; + case PpAtomXorAssign: return XOR_ASSIGN; + + case PpAtomAnd: return AND_OP; + case PpAtomOr: return OR_OP; + case PpAtomXor: return XOR_OP; + + case PpAtomEQ: return EQ_OP; + case PpAtomGE: return GE_OP; + case PpAtomNE: return NE_OP; + case PpAtomLE: return LE_OP; + + case PpAtomDecrement: return DEC_OP; + case PpAtomIncrement: return INC_OP; + + case PpAtomColonColon: + 
parseContext.error(loc, "not supported", "::", ""); + break; + + case PpAtomConstString: parserToken->sType.lex.string = NewPoolTString(tokenText); return STRING_LITERAL; + case PpAtomConstInt: parserToken->sType.lex.i = ppToken.ival; return INTCONSTANT; + case PpAtomConstUint: parserToken->sType.lex.i = ppToken.ival; return UINTCONSTANT; + case PpAtomConstFloat: parserToken->sType.lex.d = ppToken.dval; return FLOATCONSTANT; +#ifndef GLSLANG_WEB + case PpAtomConstInt16: parserToken->sType.lex.i = ppToken.ival; return INT16CONSTANT; + case PpAtomConstUint16: parserToken->sType.lex.i = ppToken.ival; return UINT16CONSTANT; + case PpAtomConstInt64: parserToken->sType.lex.i64 = ppToken.i64val; return INT64CONSTANT; + case PpAtomConstUint64: parserToken->sType.lex.i64 = ppToken.i64val; return UINT64CONSTANT; + case PpAtomConstDouble: parserToken->sType.lex.d = ppToken.dval; return DOUBLECONSTANT; + case PpAtomConstFloat16: parserToken->sType.lex.d = ppToken.dval; return FLOAT16CONSTANT; +#endif + case PpAtomIdentifier: + { + int token = tokenizeIdentifier(); + field = false; + return token; + } + + case EndOfInput: return 0; + + default: + char buf[2]; + buf[0] = (char)token; + buf[1] = 0; + parseContext.error(loc, "unexpected token", buf, ""); + break; + } + } while (true); +} + +int TScanContext::tokenizeIdentifier() +{ +#ifndef GLSLANG_WEB + if (ReservedSet->find(tokenText) != ReservedSet->end()) + return reservedWord(); +#endif + + auto it = KeywordMap->find(tokenText); + if (it == KeywordMap->end()) { + // Should have an identifier of some sort + return identifierOrType(); + } + keyword = it->second; + + switch (keyword) { + case CONST: + case UNIFORM: + case IN: + case OUT: + case INOUT: + case BREAK: + case CONTINUE: + case DO: + case FOR: + case WHILE: + case IF: + case ELSE: + case DISCARD: + case RETURN: + case CASE: + return keyword; + + case BUFFER: + afterBuffer = true; + if ((parseContext.isEsProfile() && parseContext.version < 310) || + (!parseContext.isEsProfile() && (parseContext.version < 430 && + !parseContext.extensionTurnedOn(E_GL_ARB_shader_storage_buffer_object)))) + return identifierOrType(); + return keyword; + + case STRUCT: + afterStruct = true; + return keyword; + + case SWITCH: + case DEFAULT: + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 130)) + reservedWord(); + return keyword; + + case VOID: + case BOOL: + case FLOAT: + case INT: + case BVEC2: + case BVEC3: + case BVEC4: + case VEC2: + case VEC3: + case VEC4: + case IVEC2: + case IVEC3: + case IVEC4: + case MAT2: + case MAT3: + case MAT4: + case SAMPLER2D: + case SAMPLERCUBE: + afterType = true; + return keyword; + + case BOOLCONSTANT: + if (strcmp("true", tokenText) == 0) + parserToken->sType.lex.b = true; + else + parserToken->sType.lex.b = false; + return keyword; + + case SMOOTH: + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 130)) + return identifierOrType(); + return keyword; + case FLAT: + if (parseContext.isEsProfile() && parseContext.version < 300) + reservedWord(); + else if (!parseContext.isEsProfile() && parseContext.version < 130) + return identifierOrType(); + return keyword; + case CENTROID: + if (parseContext.version < 120) + return identifierOrType(); + return keyword; + case INVARIANT: + if (!parseContext.isEsProfile() && parseContext.version < 120) + return identifierOrType(); + return keyword; + case PACKED: + if 
((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 330)) + return reservedWord(); + return identifierOrType(); + + case RESOURCE: + { + bool reserved = (parseContext.isEsProfile() && parseContext.version >= 300) || + (!parseContext.isEsProfile() && parseContext.version >= 420); + return identifierOrReserved(reserved); + } + case SUPERP: + { + bool reserved = parseContext.isEsProfile() || parseContext.version >= 130; + return identifierOrReserved(reserved); + } + +#ifndef GLSLANG_WEB + case NOPERSPECTIVE: + if (parseContext.extensionTurnedOn(E_GL_NV_shader_noperspective_interpolation)) + return keyword; + return es30ReservedFromGLSL(130); + + case NONUNIFORM: + if (parseContext.extensionTurnedOn(E_GL_EXT_nonuniform_qualifier)) + return keyword; + else + return identifierOrType(); + case ATTRIBUTE: + case VARYING: + if (parseContext.isEsProfile() && parseContext.version >= 300) + reservedWord(); + return keyword; + case PAYLOADNV: + case PAYLOADINNV: + case HITATTRNV: + case CALLDATANV: + case CALLDATAINNV: + case ACCSTRUCTNV: + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_NV_ray_tracing)) + return keyword; + return identifierOrType(); + case PAYLOADEXT: + case PAYLOADINEXT: + case HITATTREXT: + case CALLDATAEXT: + case CALLDATAINEXT: + case ACCSTRUCTEXT: + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_ray_tracing) || + parseContext.extensionTurnedOn(E_GL_EXT_ray_query)) + return keyword; + return identifierOrType(); + case RAYQUERYEXT: + if (parseContext.symbolTable.atBuiltInLevel() || + (!parseContext.isEsProfile() && parseContext.version >= 460 + && parseContext.extensionTurnedOn(E_GL_EXT_ray_query))) + return keyword; + return identifierOrType(); + case ATOMIC_UINT: + if ((parseContext.isEsProfile() && parseContext.version >= 310) || + parseContext.extensionTurnedOn(E_GL_ARB_shader_atomic_counters)) + return keyword; + return es30ReservedFromGLSL(420); + + case COHERENT: + case DEVICECOHERENT: + case QUEUEFAMILYCOHERENT: + case WORKGROUPCOHERENT: + case SUBGROUPCOHERENT: + case SHADERCALLCOHERENT: + case NONPRIVATE: + case RESTRICT: + case READONLY: + case WRITEONLY: + if (parseContext.isEsProfile() && parseContext.version >= 310) + return keyword; + return es30ReservedFromGLSL(parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store) ? 130 : 420); + case VOLATILE: + if (parseContext.isEsProfile() && parseContext.version >= 310) + return keyword; + if (! parseContext.symbolTable.atBuiltInLevel() && (parseContext.isEsProfile() || + (parseContext.version < 420 && ! 
parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store)))) + reservedWord(); + return keyword; + case PATCH: + if (parseContext.symbolTable.atBuiltInLevel() || + (parseContext.isEsProfile() && + (parseContext.version >= 320 || + parseContext.extensionsTurnedOn(Num_AEP_tessellation_shader, AEP_tessellation_shader))) || + (!parseContext.isEsProfile() && parseContext.extensionTurnedOn(E_GL_ARB_tessellation_shader))) + return keyword; + + return es30ReservedFromGLSL(400); + + case SAMPLE: + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(1, &E_GL_OES_shader_multisample_interpolation)) + return keyword; + return es30ReservedFromGLSL(400); + + case SUBROUTINE: + return es30ReservedFromGLSL(400); +#endif + case SHARED: + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 140)) + return identifierOrType(); + return keyword; + case LAYOUT: + { + const int numLayoutExts = 2; + const char* layoutExts[numLayoutExts] = { E_GL_ARB_shading_language_420pack, + E_GL_ARB_explicit_attrib_location }; + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 140 && + ! parseContext.extensionsTurnedOn(numLayoutExts, layoutExts))) + return identifierOrType(); + return keyword; + } + + case HIGH_PRECISION: + case MEDIUM_PRECISION: + case LOW_PRECISION: + case PRECISION: + return precisionKeyword(); + + case MAT2X2: + case MAT2X3: + case MAT2X4: + case MAT3X2: + case MAT3X3: + case MAT3X4: + case MAT4X2: + case MAT4X3: + case MAT4X4: + return matNxM(); + +#ifndef GLSLANG_WEB + case DMAT2: + case DMAT3: + case DMAT4: + case DMAT2X2: + case DMAT2X3: + case DMAT2X4: + case DMAT3X2: + case DMAT3X3: + case DMAT3X4: + case DMAT4X2: + case DMAT4X3: + case DMAT4X4: + return dMat(); + + case IMAGE1D: + case IIMAGE1D: + case UIMAGE1D: + case IMAGE1DARRAY: + case IIMAGE1DARRAY: + case UIMAGE1DARRAY: + case IMAGE2DRECT: + case IIMAGE2DRECT: + case UIMAGE2DRECT: + afterType = true; + return firstGenerationImage(false); + + case IMAGEBUFFER: + case IIMAGEBUFFER: + case UIMAGEBUFFER: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return firstGenerationImage(false); + + case IMAGE2D: + case IIMAGE2D: + case UIMAGE2D: + case IMAGE3D: + case IIMAGE3D: + case UIMAGE3D: + case IMAGECUBE: + case IIMAGECUBE: + case UIMAGECUBE: + case IMAGE2DARRAY: + case IIMAGE2DARRAY: + case UIMAGE2DARRAY: + afterType = true; + return firstGenerationImage(true); + + case IMAGECUBEARRAY: + case IIMAGECUBEARRAY: + case UIMAGECUBEARRAY: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array)) + return keyword; + return secondGenerationImage(); + + case IMAGE2DMS: + case IIMAGE2DMS: + case UIMAGE2DMS: + case IMAGE2DMSARRAY: + case IIMAGE2DMSARRAY: + case UIMAGE2DMSARRAY: + afterType = true; + return secondGenerationImage(); + + case DOUBLE: + case DVEC2: + case DVEC3: + case DVEC4: + afterType = true; + if (parseContext.isEsProfile() || parseContext.version < 150 || + (!parseContext.symbolTable.atBuiltInLevel() && + (parseContext.version < 400 && !parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_fp64) && + (parseContext.version < 410 && 
!parseContext.extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit))))) + reservedWord(); + return keyword; + + case INT64_T: + case UINT64_T: + case I64VEC2: + case I64VEC3: + case I64VEC4: + case U64VEC2: + case U64VEC3: + case U64VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_int64) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int64)) + return keyword; + return identifierOrType(); + + case INT8_T: + case UINT8_T: + case I8VEC2: + case I8VEC3: + case I8VEC4: + case U8VEC2: + case U8VEC3: + case U8VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_8bit_storage) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int8)) + return keyword; + return identifierOrType(); + + case INT16_T: + case UINT16_T: + case I16VEC2: + case I16VEC3: + case I16VEC4: + case U16VEC2: + case U16VEC3: + case U16VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_int16) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int16)) + return keyword; + return identifierOrType(); + case INT32_T: + case UINT32_T: + case I32VEC2: + case I32VEC3: + case I32VEC4: + case U32VEC2: + case U32VEC3: + case U32VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_int32)) + return keyword; + return identifierOrType(); + case FLOAT32_T: + case F32VEC2: + case F32VEC3: + case F32VEC4: + case F32MAT2: + case F32MAT3: + case F32MAT4: + case F32MAT2X2: + case F32MAT2X3: + case F32MAT2X4: + case F32MAT3X2: + case F32MAT3X3: + case F32MAT3X4: + case F32MAT4X2: + case F32MAT4X3: + case F32MAT4X4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float32)) + return keyword; + return identifierOrType(); + + case FLOAT64_T: + case F64VEC2: + case F64VEC3: + case F64VEC4: + case F64MAT2: + case F64MAT3: + case F64MAT4: + case F64MAT2X2: + case F64MAT2X3: + case F64MAT2X4: + case F64MAT3X2: + case F64MAT3X3: + case F64MAT3X4: + case F64MAT4X2: + case F64MAT4X3: + case F64MAT4X4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float64)) + return keyword; + return identifierOrType(); + + case FLOAT16_T: + case F16VEC2: + case F16VEC3: + case F16VEC4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_16bit_storage) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16)) + 
return keyword; + + return identifierOrType(); + + case F16MAT2: + case F16MAT3: + case F16MAT4: + case F16MAT2X2: + case F16MAT2X3: + case F16MAT2X4: + case F16MAT3X2: + case F16MAT3X3: + case F16MAT3X4: + case F16MAT4X2: + case F16MAT4X3: + case F16MAT4X4: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types) || + parseContext.extensionTurnedOn(E_GL_EXT_shader_explicit_arithmetic_types_float16)) + return keyword; + + return identifierOrType(); + + case SAMPLERCUBEARRAY: + case SAMPLERCUBEARRAYSHADOW: + case ISAMPLERCUBEARRAY: + case USAMPLERCUBEARRAY: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_cube_map_array, AEP_texture_cube_map_array)) + return keyword; + if (parseContext.isEsProfile() || (parseContext.version < 400 && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_cube_map_array))) + reservedWord(); + return keyword; + + case TEXTURECUBEARRAY: + case ITEXTURECUBEARRAY: + case UTEXTURECUBEARRAY: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); +#endif + + case UINT: + case UVEC2: + case UVEC3: + case UVEC4: + case SAMPLERCUBESHADOW: + case SAMPLER2DARRAY: + case SAMPLER2DARRAYSHADOW: + case ISAMPLER2D: + case ISAMPLER3D: + case ISAMPLERCUBE: + case ISAMPLER2DARRAY: + case USAMPLER2D: + case USAMPLER3D: + case USAMPLERCUBE: + case USAMPLER2DARRAY: + afterType = true; + return nonreservedKeyword(300, 130); + + case SAMPLER3D: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version < 300) { + if (!parseContext.extensionTurnedOn(E_GL_OES_texture_3D)) + reservedWord(); + } + return keyword; + + case SAMPLER2DSHADOW: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version < 300) { + if (!parseContext.extensionTurnedOn(E_GL_EXT_shadow_samplers)) + reservedWord(); + } + return keyword; + + case TEXTURE2D: + case TEXTURECUBE: + case TEXTURE2DARRAY: + case ITEXTURE2D: + case ITEXTURE3D: + case ITEXTURECUBE: + case ITEXTURE2DARRAY: + case UTEXTURE2D: + case UTEXTURE3D: + case UTEXTURECUBE: + case UTEXTURE2DARRAY: + case TEXTURE3D: + case SAMPLER: + case SAMPLERSHADOW: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); + +#ifndef GLSLANG_WEB + case ISAMPLER1D: + case ISAMPLER1DARRAY: + case SAMPLER1DARRAYSHADOW: + case USAMPLER1D: + case USAMPLER1DARRAY: + afterType = true; + return es30ReservedFromGLSL(130); + case ISAMPLER2DRECT: + case USAMPLER2DRECT: + afterType = true; + return es30ReservedFromGLSL(140); + + case SAMPLERBUFFER: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return es30ReservedFromGLSL(130); + + case ISAMPLERBUFFER: + case USAMPLERBUFFER: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(Num_AEP_texture_buffer, AEP_texture_buffer)) + return keyword; + return es30ReservedFromGLSL(140); + + case SAMPLER2DMS: + case ISAMPLER2DMS: + case USAMPLER2DMS: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version >= 310) + return keyword; + if (!parseContext.isEsProfile() && (parseContext.version > 140 || + (parseContext.version == 140 && 
parseContext.extensionsTurnedOn(1, &E_GL_ARB_texture_multisample)))) + return keyword; + return es30ReservedFromGLSL(150); + + case SAMPLER2DMSARRAY: + case ISAMPLER2DMSARRAY: + case USAMPLER2DMSARRAY: + afterType = true; + if ((parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionsTurnedOn(1, &E_GL_OES_texture_storage_multisample_2d_array)) + return keyword; + if (!parseContext.isEsProfile() && (parseContext.version > 140 || + (parseContext.version == 140 && parseContext.extensionsTurnedOn(1, &E_GL_ARB_texture_multisample)))) + return keyword; + return es30ReservedFromGLSL(150); + + case SAMPLER1D: + case SAMPLER1DSHADOW: + afterType = true; + if (parseContext.isEsProfile()) + reservedWord(); + return keyword; + + case SAMPLER2DRECT: + case SAMPLER2DRECTSHADOW: + afterType = true; + if (parseContext.isEsProfile()) + reservedWord(); + else if (parseContext.version < 140 && ! parseContext.symbolTable.atBuiltInLevel() && ! parseContext.extensionTurnedOn(E_GL_ARB_texture_rectangle)) { + if (parseContext.relaxedErrors()) + parseContext.requireExtensions(loc, 1, &E_GL_ARB_texture_rectangle, "texture-rectangle sampler keyword"); + else + reservedWord(); + } + return keyword; + + case SAMPLER1DARRAY: + afterType = true; + if (parseContext.isEsProfile() && parseContext.version == 300) + reservedWord(); + else if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < 130)) + return identifierOrType(); + return keyword; + + case SAMPLEREXTERNALOES: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external) || + parseContext.extensionTurnedOn(E_GL_OES_EGL_image_external_essl3)) + return keyword; + return identifierOrType(); + + case SAMPLEREXTERNAL2DY2YEXT: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_EXT_YUV_target)) + return keyword; + return identifierOrType(); + + case ITEXTURE1DARRAY: + case UTEXTURE1D: + case ITEXTURE1D: + case UTEXTURE1DARRAY: + case TEXTUREBUFFER: + case ITEXTURE2DRECT: + case UTEXTURE2DRECT: + case ITEXTUREBUFFER: + case UTEXTUREBUFFER: + case TEXTURE2DMS: + case ITEXTURE2DMS: + case UTEXTURE2DMS: + case TEXTURE2DMSARRAY: + case ITEXTURE2DMSARRAY: + case UTEXTURE2DMSARRAY: + case TEXTURE1D: + case TEXTURE2DRECT: + case TEXTURE1DARRAY: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); + + case SUBPASSINPUT: + case SUBPASSINPUTMS: + case ISUBPASSINPUT: + case ISUBPASSINPUTMS: + case USUBPASSINPUT: + case USUBPASSINPUTMS: + if (parseContext.spvVersion.vulkan > 0) + return keyword; + else + return identifierOrType(); + + case F16SAMPLER1D: + case F16SAMPLER2D: + case F16SAMPLER3D: + case F16SAMPLER2DRECT: + case F16SAMPLERCUBE: + case F16SAMPLER1DARRAY: + case F16SAMPLER2DARRAY: + case F16SAMPLERCUBEARRAY: + case F16SAMPLERBUFFER: + case F16SAMPLER2DMS: + case F16SAMPLER2DMSARRAY: + case F16SAMPLER1DSHADOW: + case F16SAMPLER2DSHADOW: + case F16SAMPLER1DARRAYSHADOW: + case F16SAMPLER2DARRAYSHADOW: + case F16SAMPLER2DRECTSHADOW: + case F16SAMPLERCUBESHADOW: + case F16SAMPLERCUBEARRAYSHADOW: + + case F16IMAGE1D: + case F16IMAGE2D: + case F16IMAGE3D: + case F16IMAGE2DRECT: + case F16IMAGECUBE: + case F16IMAGE1DARRAY: + case F16IMAGE2DARRAY: + case F16IMAGECUBEARRAY: + case F16IMAGEBUFFER: + case F16IMAGE2DMS: + case F16IMAGE2DMSARRAY: + + case F16TEXTURE1D: + case F16TEXTURE2D: + case F16TEXTURE3D: + case 
F16TEXTURE2DRECT: + case F16TEXTURECUBE: + case F16TEXTURE1DARRAY: + case F16TEXTURE2DARRAY: + case F16TEXTURECUBEARRAY: + case F16TEXTUREBUFFER: + case F16TEXTURE2DMS: + case F16TEXTURE2DMSARRAY: + + case F16SUBPASSINPUT: + case F16SUBPASSINPUTMS: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_AMD_gpu_shader_half_float_fetch)) + return keyword; + return identifierOrType(); + + case EXPLICITINTERPAMD: + if (parseContext.extensionTurnedOn(E_GL_AMD_shader_explicit_vertex_parameter)) + return keyword; + return identifierOrType(); + + case PERVERTEXNV: + if ((!parseContext.isEsProfile() && parseContext.version >= 450) || + parseContext.extensionTurnedOn(E_GL_NV_fragment_shader_barycentric)) + return keyword; + return identifierOrType(); + + case PRECISE: + if ((parseContext.isEsProfile() && + (parseContext.version >= 320 || parseContext.extensionsTurnedOn(Num_AEP_gpu_shader5, AEP_gpu_shader5))) || + (!parseContext.isEsProfile() && parseContext.version >= 400)) + return keyword; + if (parseContext.isEsProfile() && parseContext.version == 310) { + reservedWord(); + return keyword; + } + return identifierOrType(); + + case PERPRIMITIVENV: + case PERVIEWNV: + case PERTASKNV: + if ((!parseContext.isEsProfile() && parseContext.version >= 450) || + (parseContext.isEsProfile() && parseContext.version >= 320) || + parseContext.extensionTurnedOn(E_GL_NV_mesh_shader)) + return keyword; + return identifierOrType(); + + case FCOOPMATNV: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_NV_cooperative_matrix)) + return keyword; + return identifierOrType(); + + case UCOOPMATNV: + case ICOOPMATNV: + afterType = true; + if (parseContext.symbolTable.atBuiltInLevel() || + parseContext.extensionTurnedOn(E_GL_NV_integer_cooperative_matrix)) + return keyword; + return identifierOrType(); + + case DEMOTE: + if (parseContext.extensionTurnedOn(E_GL_EXT_demote_to_helper_invocation)) + return keyword; + else + return identifierOrType(); +#endif + + default: + parseContext.infoSink.info.message(EPrefixInternalError, "Unknown glslang keyword", loc); + return 0; + } +} + +int TScanContext::identifierOrType() +{ + parserToken->sType.lex.string = NewPoolTString(tokenText); + if (field) + return IDENTIFIER; + + parserToken->sType.lex.symbol = parseContext.symbolTable.find(*parserToken->sType.lex.string); + if ((afterType == false && afterStruct == false) && parserToken->sType.lex.symbol != nullptr) { + if (const TVariable* variable = parserToken->sType.lex.symbol->getAsVariable()) { + if (variable->isUserType() && + // treat redeclaration of forward-declared buffer/uniform reference as an identifier + !(variable->getType().isReference() && afterBuffer)) { + afterType = true; + + return TYPE_NAME; + } + } + } + + return IDENTIFIER; +} + +// Give an error for use of a reserved symbol. +// However, allow built-in declarations to use reserved words, to allow +// extension support before the extension is enabled. +int TScanContext::reservedWord() +{ + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.error(loc, "Reserved word.", tokenText, "", ""); + + return 0; +} + +int TScanContext::identifierOrReserved(bool reserved) +{ + if (reserved) { + reservedWord(); + + return 0; + } + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future reserved keyword", tokenText, ""); + + return identifierOrType(); +} + +// For keywords that suddenly showed up on non-ES (not previously reserved) +// but then got reserved by ES 3.0. +int TScanContext::es30ReservedFromGLSL(int version) +{ + if (parseContext.symbolTable.atBuiltInLevel()) + return keyword; + + if ((parseContext.isEsProfile() && parseContext.version < 300) || + (!parseContext.isEsProfile() && parseContext.version < version)) { + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "future reserved word in ES 300 and keyword in GLSL", tokenText, ""); + + return identifierOrType(); + } else if (parseContext.isEsProfile() && parseContext.version >= 300) + reservedWord(); + + return keyword; +} + +// For a keyword that was never reserved, until it suddenly +// showed up, both in an es version and a non-ES version. +int TScanContext::nonreservedKeyword(int esVersion, int nonEsVersion) +{ + if ((parseContext.isEsProfile() && parseContext.version < esVersion) || + (!parseContext.isEsProfile() && parseContext.version < nonEsVersion)) { + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future keyword", tokenText, ""); + + return identifierOrType(); + } + + return keyword; +} + +int TScanContext::precisionKeyword() +{ + if (parseContext.isEsProfile() || parseContext.version >= 130) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using ES precision qualifier keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::matNxM() +{ + afterType = true; + + if (parseContext.version > 110) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future non-square matrix type keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::dMat() +{ + afterType = true; + + if (parseContext.isEsProfile() && parseContext.version >= 300) { + reservedWord(); + + return keyword; + } + + if (!parseContext.isEsProfile() && (parseContext.version >= 400 || + parseContext.symbolTable.atBuiltInLevel() || + (parseContext.version >= 150 && parseContext.extensionTurnedOn(E_GL_ARB_gpu_shader_fp64)) || + (parseContext.version >= 150 && parseContext.extensionTurnedOn(E_GL_ARB_vertex_attrib_64bit) + && parseContext.language == EShLangVertex))) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future type keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::firstGenerationImage(bool inEs310) +{ + if (parseContext.symbolTable.atBuiltInLevel() || + (!parseContext.isEsProfile() && (parseContext.version >= 420 || + parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store))) || + (inEs310 && parseContext.isEsProfile() && parseContext.version >= 310)) + return keyword; + + if ((parseContext.isEsProfile() && parseContext.version >= 300) || + (!parseContext.isEsProfile() && parseContext.version >= 130)) { + reservedWord(); + + return keyword; + } + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future type keyword", tokenText, ""); + + return identifierOrType(); +} + +int TScanContext::secondGenerationImage() +{ + if (parseContext.isEsProfile() && 
parseContext.version >= 310) { + reservedWord(); + return keyword; + } + + if (parseContext.symbolTable.atBuiltInLevel() || + (!parseContext.isEsProfile() && + (parseContext.version >= 420 || parseContext.extensionTurnedOn(E_GL_ARB_shader_image_load_store)))) + return keyword; + + if (parseContext.isForwardCompatible()) + parseContext.warn(loc, "using future type keyword", tokenText, ""); + + return identifierOrType(); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/Scan.h b/src/third_party/glslang/glslang/MachineIndependent/Scan.h new file mode 100644 index 0000000..24b75cf --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Scan.h @@ -0,0 +1,276 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#ifndef _GLSLANG_SCAN_INCLUDED_ +#define _GLSLANG_SCAN_INCLUDED_ + +#include "Versions.h" + +namespace glslang { + +// Use a global end-of-input character, so no translation is needed across +// layers of encapsulation. Characters are all 8 bit, and positive, so there is +// no aliasing of character 255 onto -1, for example. +const int EndOfInput = -1; + +// +// A character scanner that seamlessly, on read-only strings, reads across an +// array of strings without assuming null termination. +// +class TInputScanner { +public: + TInputScanner(int n, const char* const s[], size_t L[], const char* const* names = nullptr, + int b = 0, int f = 0, bool single = false) : + numSources(n), + // up to this point, common usage is "char*", but now we need positive 8-bit characters + sources(reinterpret_cast<const unsigned char* const *>(s)), + lengths(L), currentSource(0), currentChar(0), stringBias(b), finale(f), singleLogical(single), + endOfFileReached(false) + { + loc = new TSourceLoc[numSources]; + for (int i = 0; i < numSources; ++i) { + loc[i].init(i - stringBias); + } + if (names != nullptr) { + for (int i = 0; i < numSources; ++i) + loc[i].name = names[i] != nullptr ?
NewPoolTString(names[i]) : nullptr; + } + loc[currentSource].line = 1; + logicalSourceLoc.init(1); + logicalSourceLoc.name = loc[0].name; + } + + virtual ~TInputScanner() + { + delete [] loc; + } + + // retrieve the next character and advance one character + int get() + { + int ret = peek(); + if (ret == EndOfInput) + return ret; + ++loc[currentSource].column; + ++logicalSourceLoc.column; + if (ret == '\n') { + ++loc[currentSource].line; + ++logicalSourceLoc.line; + logicalSourceLoc.column = 0; + loc[currentSource].column = 0; + } + advance(); + + return ret; + } + + // retrieve the next character, no advance + int peek() + { + if (currentSource >= numSources) { + endOfFileReached = true; + return EndOfInput; + } + // Make sure we do not read off the end of a string. + // N.B. Sources can have a length of 0. + int sourceToRead = currentSource; + size_t charToRead = currentChar; + while(charToRead >= lengths[sourceToRead]) { + charToRead = 0; + sourceToRead += 1; + if (sourceToRead >= numSources) { + return EndOfInput; + } + } + + // Here, we care about making negative valued characters positive + return sources[sourceToRead][charToRead]; + } + + // go back one character + void unget() + { + // Do not roll back once we've reached the end of the file. + if (endOfFileReached) + return; + + if (currentChar > 0) { + --currentChar; + --loc[currentSource].column; + --logicalSourceLoc.column; + if (loc[currentSource].column < 0) { + // We've moved back past a new line. Find the + // previous newline (or start of the file) to compute + // the column count on the now current line. + size_t chIndex = currentChar; + while (chIndex > 0) { + if (sources[currentSource][chIndex] == '\n') { + break; + } + --chIndex; + } + logicalSourceLoc.column = (int)(currentChar - chIndex); + loc[currentSource].column = (int)(currentChar - chIndex); + } + } else { + do { + --currentSource; + } while (currentSource > 0 && lengths[currentSource] == 0); + if (lengths[currentSource] == 0) { + // set to 0 if we've backed up to the start of an empty string + currentChar = 0; + } else + currentChar = lengths[currentSource] - 1; + } + if (peek() == '\n') { + --loc[currentSource].line; + --logicalSourceLoc.line; + } + } + + // for #line override + void setLine(int newLine) + { + logicalSourceLoc.line = newLine; + loc[getLastValidSourceIndex()].line = newLine; + } + + // for #line override in filename based parsing + void setFile(const char* filename) + { + TString* fn_tstr = NewPoolTString(filename); + logicalSourceLoc.name = fn_tstr; + loc[getLastValidSourceIndex()].name = fn_tstr; + } + + void setFile(const char* filename, int i) + { + TString* fn_tstr = NewPoolTString(filename); + if (i == getLastValidSourceIndex()) { + logicalSourceLoc.name = fn_tstr; + } + loc[i].name = fn_tstr; + } + + void setString(int newString) + { + logicalSourceLoc.string = newString; + loc[getLastValidSourceIndex()].string = newString; + logicalSourceLoc.name = nullptr; + loc[getLastValidSourceIndex()].name = nullptr; + } + + // for #include content indentation + void setColumn(int col) + { + logicalSourceLoc.column = col; + loc[getLastValidSourceIndex()].column = col; + } + + void setEndOfInput() + { + endOfFileReached = true; + currentSource = numSources; + } + + bool atEndOfInput() const { return endOfFileReached; } + + const TSourceLoc& getSourceLoc() const + { + if (singleLogical) { + return logicalSourceLoc; + } else { + return loc[std::max(0, std::min(currentSource, numSources - finale - 1))]; + } + } + // Returns the index (starting from 0) 
of the most recent valid source string we are reading from. + int getLastValidSourceIndex() const { return std::min(currentSource, numSources - 1); } + + void consumeWhiteSpace(bool& foundNonSpaceTab); + bool consumeComment(); + void consumeWhitespaceComment(bool& foundNonSpaceTab); + bool scanVersion(int& version, EProfile& profile, bool& notFirstToken); + +protected: + + // advance one character + void advance() + { + ++currentChar; + if (currentChar >= lengths[currentSource]) { + ++currentSource; + if (currentSource < numSources) { + loc[currentSource].string = loc[currentSource - 1].string + 1; + loc[currentSource].line = 1; + loc[currentSource].column = 0; + } + while (currentSource < numSources && lengths[currentSource] == 0) { + ++currentSource; + if (currentSource < numSources) { + loc[currentSource].string = loc[currentSource - 1].string + 1; + loc[currentSource].line = 1; + loc[currentSource].column = 0; + } + } + currentChar = 0; + } + } + + int numSources; // number of strings in source + const unsigned char* const *sources; // array of strings; must be converted to positive values on use, to avoid aliasing with -1 as EndOfInput + const size_t *lengths; // length of each string + int currentSource; + size_t currentChar; + + // This is for reporting what string/line an error occurred on, and can be overridden by #line. + // It remembers the last state of each source string as it is left for the next one, so unget() + // can restore that state. + TSourceLoc* loc; // an array + + int stringBias; // the first string that is the user's string number 0 + int finale; // number of internal strings after user's last string + + TSourceLoc logicalSourceLoc; + bool singleLogical; // treats the strings as a single logical string. + // locations will be reported from the first string. + + // Set to true once peek() returns EndOfFile, so that we won't roll back + // once we've reached EndOfFile. + bool endOfFileReached; +}; + +} // end namespace glslang + +#endif // _GLSLANG_SCAN_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/ScanContext.h b/src/third_party/glslang/glslang/MachineIndependent/ScanContext.h new file mode 100644 index 0000000..74b2b3c --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/ScanContext.h @@ -0,0 +1,93 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// This holds context specific to the GLSL scanner, which +// sits between the preprocessor scanner and parser. +// + +#pragma once + +#include "ParseHelper.h" + +namespace glslang { + +class TPpContext; +class TPpToken; +class TParserToken; + +class TScanContext { +public: + explicit TScanContext(TParseContextBase& pc) : + parseContext(pc), + afterType(false), afterStruct(false), + field(false), afterBuffer(false) { } + virtual ~TScanContext() { } + + static void fillInKeywordMap(); + static void deleteKeywordMap(); + + int tokenize(TPpContext*, TParserToken&); + +protected: + TScanContext(TScanContext&); + TScanContext& operator=(TScanContext&); + + int tokenizeIdentifier(); + int identifierOrType(); + int reservedWord(); + int identifierOrReserved(bool reserved); + int es30ReservedFromGLSL(int version); + int nonreservedKeyword(int esVersion, int nonEsVersion); + int precisionKeyword(); + int matNxM(); + int dMat(); + int firstGenerationImage(bool inEs310); + int secondGenerationImage(); + + TParseContextBase& parseContext; + bool afterType; // true if we've recognized a type, so can only be looking for an identifier + bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier + bool field; // true if we're on a field, right after a '.' + bool afterBuffer; // true if we've recognized the BUFFER keyword + TSourceLoc loc; + TParserToken* parserToken; + TPpToken* ppToken; + + const char* tokenText; + int keyword; +}; + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/ShaderLang.cpp b/src/third_party/glslang/glslang/MachineIndependent/ShaderLang.cpp new file mode 100644 index 0000000..c1a0ba0 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/ShaderLang.cpp @@ -0,0 +1,2146 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013-2016 LunarG, Inc. +// Copyright (C) 2015-2020 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Implement the top-level of interface to the compiler/linker, +// as defined in ShaderLang.h +// This is the platform independent interface between an OGL driver +// and the shading language compiler/linker. +// +#include +#include +#include +#include +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "Scan.h" +#include "ScanContext.h" + +#ifdef ENABLE_HLSL +#include "../HLSL/hlslParseHelper.h" +#include "../HLSL/hlslParseables.h" +#include "../HLSL/hlslScanContext.h" +#endif + +#include "../Include/ShHandle.h" +#include "../../OGLCompilersDLL/InitializeDll.h" + +#include "preprocessor/PpContext.h" + +#define SH_EXPORTING +#include "../Public/ShaderLang.h" +#include "reflection.h" +#include "iomapper.h" +#include "Initialize.h" + +// TODO: this really shouldn't be here, it is only because of the trial addition +// of printing pre-processed tokens, which requires knowing the string literal +// token to print ", but none of that seems appropriate for this file. +#include "preprocessor/PpTokens.h" + +// Build-time generated includes +#include "../build_info.h" + +namespace { // anonymous namespace for file-local functions and symbols + +// Total number of successful initializers of glslang: a refcount +// Shared global; access should be protected by a global mutex/critical section. +int NumberOfClients = 0; + +using namespace glslang; + +// Create a language specific version of parseables. +TBuiltInParseables* CreateBuiltInParseables(TInfoSink& infoSink, EShSource source) +{ + switch (source) { + case EShSourceGlsl: return new TBuiltIns(); // GLSL builtIns +#ifdef ENABLE_HLSL + case EShSourceHlsl: return new TBuiltInParseablesHlsl(); // HLSL intrinsics +#endif + + default: + infoSink.info.message(EPrefixInternalError, "Unable to determine source language"); + return nullptr; + } +} + +// Create a language specific version of a parse context. +TParseContextBase* CreateParseContext(TSymbolTable& symbolTable, TIntermediate& intermediate, + int version, EProfile profile, EShSource source, + EShLanguage language, TInfoSink& infoSink, + SpvVersion spvVersion, bool forwardCompatible, EShMessages messages, + bool parsingBuiltIns, std::string sourceEntryPointName = "") +{ + switch (source) { + case EShSourceGlsl: { + if (sourceEntryPointName.size() == 0) + intermediate.setEntryPointName("main"); + TString entryPoint = sourceEntryPointName.c_str(); + return new TParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion, + language, infoSink, forwardCompatible, messages, &entryPoint); + } +#ifdef ENABLE_HLSL + case EShSourceHlsl: + return new HlslParseContext(symbolTable, intermediate, parsingBuiltIns, version, profile, spvVersion, + language, infoSink, sourceEntryPointName.c_str(), forwardCompatible, messages); +#endif + default: + infoSink.info.message(EPrefixInternalError, "Unable to determine source language"); + return nullptr; + } +} + +// Local mapping functions for making arrays of symbol tables.... 
+ +const int VersionCount = 17; // index range in MapVersionToIndex + +int MapVersionToIndex(int version) +{ + int index = 0; + + switch (version) { + case 100: index = 0; break; + case 110: index = 1; break; + case 120: index = 2; break; + case 130: index = 3; break; + case 140: index = 4; break; + case 150: index = 5; break; + case 300: index = 6; break; + case 330: index = 7; break; + case 400: index = 8; break; + case 410: index = 9; break; + case 420: index = 10; break; + case 430: index = 11; break; + case 440: index = 12; break; + case 310: index = 13; break; + case 450: index = 14; break; + case 500: index = 0; break; // HLSL + case 320: index = 15; break; + case 460: index = 16; break; + default: assert(0); break; + } + + assert(index < VersionCount); + + return index; +} + +const int SpvVersionCount = 3; // index range in MapSpvVersionToIndex + +int MapSpvVersionToIndex(const SpvVersion& spvVersion) +{ + int index = 0; + + if (spvVersion.openGl > 0) + index = 1; + else if (spvVersion.vulkan > 0) + index = 2; + + assert(index < SpvVersionCount); + + return index; +} + +const int ProfileCount = 4; // index range in MapProfileToIndex + +int MapProfileToIndex(EProfile profile) +{ + int index = 0; + + switch (profile) { + case ENoProfile: index = 0; break; + case ECoreProfile: index = 1; break; + case ECompatibilityProfile: index = 2; break; + case EEsProfile: index = 3; break; + default: break; + } + + assert(index < ProfileCount); + + return index; +} + +const int SourceCount = 2; + +int MapSourceToIndex(EShSource source) +{ + int index = 0; + + switch (source) { + case EShSourceGlsl: index = 0; break; + case EShSourceHlsl: index = 1; break; + default: break; + } + + assert(index < SourceCount); + + return index; +} + +// only one of these needed for non-ES; ES needs 2 for different precision defaults of built-ins +enum EPrecisionClass { + EPcGeneral, + EPcFragment, + EPcCount +}; + +// A process-global symbol table per version per profile for built-ins common +// to multiple stages (languages), and a process-global symbol table per version +// per profile per stage for built-ins unique to each stage. They will be sparsely +// populated, so they will only be generated as needed. +// +// Each has a different set of built-ins, and we want to preserve that from +// compile to compile. +// +TSymbolTable* CommonSymbolTable[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EPcCount] = {}; +TSymbolTable* SharedSymbolTables[VersionCount][SpvVersionCount][ProfileCount][SourceCount][EShLangCount] = {}; + +TPoolAllocator* PerProcessGPA = nullptr; + +// +// Parse and add to the given symbol table the content of the given shader string. +// +bool InitializeSymbolTable(const TString& builtIns, int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, + EShSource source, TInfoSink& infoSink, TSymbolTable& symbolTable) +{ + TIntermediate intermediate(language, version, profile); + + intermediate.setSource(source); + + std::unique_ptr parseContext(CreateParseContext(symbolTable, intermediate, version, profile, source, + language, infoSink, spvVersion, true, EShMsgDefault, + true)); + + TShader::ForbidIncluder includer; + TPpContext ppContext(*parseContext, "", includer); + TScanContext scanContext(*parseContext); + parseContext->setScanContext(&scanContext); + parseContext->setPpContext(&ppContext); + + // + // Push the symbol table to give it an initial scope. 
This + // push should not have a corresponding pop, so that built-ins + // are preserved, and the test for an empty table fails. + // + + symbolTable.push(); + + const char* builtInShaders[2]; + size_t builtInLengths[2]; + builtInShaders[0] = builtIns.c_str(); + builtInLengths[0] = builtIns.size(); + + if (builtInLengths[0] == 0) + return true; + + TInputScanner input(1, builtInShaders, builtInLengths); + if (! parseContext->parseShaderStrings(ppContext, input) != 0) { + infoSink.info.message(EPrefixInternalError, "Unable to parse built-ins"); + printf("Unable to parse built-ins\n%s\n", infoSink.info.c_str()); + printf("%s\n", builtInShaders[0]); + + return false; + } + + return true; +} + +int CommonIndex(EProfile profile, EShLanguage language) +{ + return (profile == EEsProfile && language == EShLangFragment) ? EPcFragment : EPcGeneral; +} + +// +// To initialize per-stage shared tables, with the common table already complete. +// +void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int version, EProfile profile, const SpvVersion& spvVersion, + EShLanguage language, EShSource source, TInfoSink& infoSink, TSymbolTable** commonTable, + TSymbolTable** symbolTables) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#elif defined(GLSLANG_ANGLE) + profile = ECoreProfile; + version = 450; +#endif + + (*symbolTables[language]).adoptLevels(*commonTable[CommonIndex(profile, language)]); + InitializeSymbolTable(builtInParseables.getStageString(language), version, profile, spvVersion, language, source, + infoSink, *symbolTables[language]); + builtInParseables.identifyBuiltIns(version, profile, spvVersion, language, *symbolTables[language]); + if (profile == EEsProfile && version >= 300) + (*symbolTables[language]).setNoBuiltInRedeclarations(); + if (version == 110) + (*symbolTables[language]).setSeparateNameSpaces(); +} + +// +// Initialize the full set of shareable symbol tables; +// The common (cross-stage) and those shareable per-stage. 
+// +bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TSymbolTable** symbolTables, int version, EProfile profile, const SpvVersion& spvVersion, EShSource source) +{ +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#elif defined(GLSLANG_ANGLE) + profile = ECoreProfile; + version = 450; +#endif + + std::unique_ptr builtInParseables(CreateBuiltInParseables(infoSink, source)); + + if (builtInParseables == nullptr) + return false; + + builtInParseables->initialize(version, profile, spvVersion); + + // do the common tables + InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangVertex, source, + infoSink, *commonTable[EPcGeneral]); + if (profile == EEsProfile) + InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, EShLangFragment, source, + infoSink, *commonTable[EPcFragment]); + + // do the per-stage tables + + // always have vertex and fragment + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangVertex, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source, + infoSink, commonTable, symbolTables); + +#ifndef GLSLANG_WEB + // check for tessellation + if ((profile != EEsProfile && version >= 150) || + (profile == EEsProfile && version >= 310)) { + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessControl, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTessEvaluation, source, + infoSink, commonTable, symbolTables); + } + + // check for geometry + if ((profile != EEsProfile && version >= 150) || + (profile == EEsProfile && version >= 310)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangGeometry, source, + infoSink, commonTable, symbolTables); + + // check for compute + if ((profile != EEsProfile && version >= 420) || + (profile == EEsProfile && version >= 310)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source, + infoSink, commonTable, symbolTables); + +#ifndef GLSLANG_ANGLE + // check for ray tracing stages + if (profile != EEsProfile && version >= 450) { + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGen, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangIntersect, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangAnyHit, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangClosestHit, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMiss, source, + infoSink, commonTable, symbolTables); + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCallable, source, + infoSink, commonTable, symbolTables); + } + + // check for mesh + if ((profile != EEsProfile && version >= 450) || + (profile == EEsProfile && version >= 320)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangMeshNV, source, + infoSink, commonTable, symbolTables); + + // check for task + if ((profile != EEsProfile && version >= 450) || + (profile 
== EEsProfile && version >= 320)) + InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTaskNV, source, + infoSink, commonTable, symbolTables); +#endif // !GLSLANG_ANGLE +#endif // !GLSLANG_WEB + + return true; +} + +bool AddContextSpecificSymbols(const TBuiltInResource* resources, TInfoSink& infoSink, TSymbolTable& symbolTable, int version, + EProfile profile, const SpvVersion& spvVersion, EShLanguage language, EShSource source) +{ + std::unique_ptr builtInParseables(CreateBuiltInParseables(infoSink, source)); + + if (builtInParseables == nullptr) + return false; + + builtInParseables->initialize(*resources, version, profile, spvVersion, language); + InitializeSymbolTable(builtInParseables->getCommonString(), version, profile, spvVersion, language, source, infoSink, symbolTable); + builtInParseables->identifyBuiltIns(version, profile, spvVersion, language, symbolTable, *resources); + + return true; +} + +// +// To do this on the fly, we want to leave the current state of our thread's +// pool allocator intact, so: +// - Switch to a new pool for parsing the built-ins +// - Do the parsing, which builds the symbol table, using the new pool +// - Switch to the process-global pool to save a copy of the resulting symbol table +// - Free up the new pool used to parse the built-ins +// - Switch back to the original thread's pool +// +// This only gets done the first time any thread needs a particular symbol table +// (lazy evaluation). +// +void SetupBuiltinSymbolTable(int version, EProfile profile, const SpvVersion& spvVersion, EShSource source) +{ + TInfoSink infoSink; + + // Make sure only one thread tries to do this at a time + glslang::GetGlobalLock(); + + // See if it's already been done for this version/profile combination + int versionIndex = MapVersionToIndex(version); + int spvVersionIndex = MapSpvVersionToIndex(spvVersion); + int profileIndex = MapProfileToIndex(profile); + int sourceIndex = MapSourceToIndex(source); + if (CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][EPcGeneral]) { + glslang::ReleaseGlobalLock(); + + return; + } + + // Switch to a new pool + TPoolAllocator& previousAllocator = GetThreadPoolAllocator(); + TPoolAllocator* builtInPoolAllocator = new TPoolAllocator; + SetThreadPoolAllocator(builtInPoolAllocator); + + // Dynamically allocate the local symbol tables so we can control when they are deallocated WRT when the pool is popped. + TSymbolTable* commonTable[EPcCount]; + TSymbolTable* stageTables[EShLangCount]; + for (int precClass = 0; precClass < EPcCount; ++precClass) + commonTable[precClass] = new TSymbolTable; + for (int stage = 0; stage < EShLangCount; ++stage) + stageTables[stage] = new TSymbolTable; + + // Generate the local symbol tables using the new pool + InitializeSymbolTables(infoSink, commonTable, stageTables, version, profile, spvVersion, source); + + // Switch to the process-global pool + SetThreadPoolAllocator(PerProcessGPA); + + // Copy the local symbol tables from the new pool to the global tables using the process-global pool + for (int precClass = 0; precClass < EPcCount; ++precClass) { + if (! 
commonTable[precClass]->isEmpty()) { + CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass] = new TSymbolTable; + CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->copyTable(*commonTable[precClass]); + CommonSymbolTable[versionIndex][spvVersionIndex][profileIndex][sourceIndex][precClass]->readOnly(); + } + } + for (int stage = 0; stage < EShLangCount; ++stage) { + if (! stageTables[stage]->isEmpty()) { + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage] = new TSymbolTable; + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->adoptLevels(*CommonSymbolTable + [versionIndex][spvVersionIndex][profileIndex][sourceIndex][CommonIndex(profile, (EShLanguage)stage)]); + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->copyTable(*stageTables[stage]); + SharedSymbolTables[versionIndex][spvVersionIndex][profileIndex][sourceIndex][stage]->readOnly(); + } + } + + // Clean up the local tables before deleting the pool they used. + for (int precClass = 0; precClass < EPcCount; ++precClass) + delete commonTable[precClass]; + for (int stage = 0; stage < EShLangCount; ++stage) + delete stageTables[stage]; + + delete builtInPoolAllocator; + SetThreadPoolAllocator(&previousAllocator); + + glslang::ReleaseGlobalLock(); +} + +// Function to Print all builtins +void DumpBuiltinSymbolTable(TInfoSink& infoSink, const TSymbolTable& symbolTable) +{ +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + infoSink.debug << "BuiltinSymbolTable {\n"; + + symbolTable.dump(infoSink, true); + + infoSink.debug << "}\n"; +#endif +} + +// Return true if the shader was correctly specified for version/profile/stage. +bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNotFirst, int defaultVersion, + EShSource source, int& version, EProfile& profile, const SpvVersion& spvVersion) +{ + const int FirstProfileVersion = 150; + bool correct = true; + + if (source == EShSourceHlsl) { + version = 500; // shader model; currently a characteristic of glslang, not the input + profile = ECoreProfile; // allow doubles in prototype parsing + return correct; + } + + // Get a version... + if (version == 0) { + version = defaultVersion; + // infoSink.info.message(EPrefixWarning, "#version: statement missing; use #version on first line of shader"); + } + + // Get a good profile... + if (profile == ENoProfile) { + if (version == 300 || version == 310 || version == 320) { + correct = false; + infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 require specifying the 'es' profile"); + profile = EEsProfile; + } else if (version == 100) + profile = EEsProfile; + else if (version >= FirstProfileVersion) + profile = ECoreProfile; + else + profile = ENoProfile; + } else { + // a profile was provided... 
+ if (version < 150) { + correct = false; + infoSink.info.message(EPrefixError, "#version: versions before 150 do not allow a profile token"); + if (version == 100) + profile = EEsProfile; + else + profile = ENoProfile; + } else if (version == 300 || version == 310 || version == 320) { + if (profile != EEsProfile) { + correct = false; + infoSink.info.message(EPrefixError, "#version: versions 300, 310, and 320 support only the es profile"); + } + profile = EEsProfile; + } else { + if (profile == EEsProfile) { + correct = false; + infoSink.info.message(EPrefixError, "#version: only version 300, 310, and 320 support the es profile"); + if (version >= FirstProfileVersion) + profile = ECoreProfile; + else + profile = ENoProfile; + } + // else: typical desktop case... e.g., "#version 410 core" + } + } + + // Fix version... + switch (version) { + // ES versions + case 100: break; + case 300: break; + case 310: break; + case 320: break; + + // desktop versions + case 110: break; + case 120: break; + case 130: break; + case 140: break; + case 150: break; + case 330: break; + case 400: break; + case 410: break; + case 420: break; + case 430: break; + case 440: break; + case 450: break; + case 460: break; + + // unknown version + default: + correct = false; + infoSink.info.message(EPrefixError, "version not supported"); + if (profile == EEsProfile) + version = 310; + else { + version = 450; + profile = ECoreProfile; + } + break; + } + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + // Correct for stage type... + switch (stage) { + case EShLangGeometry: + if ((profile == EEsProfile && version < 310) || + (profile != EEsProfile && version < 150)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: geometry shaders require es profile with version 310 or non-es profile with version 150 or above"); + version = (profile == EEsProfile) ? 310 : 150; + if (profile == EEsProfile || profile == ENoProfile) + profile = ECoreProfile; + } + break; + case EShLangTessControl: + case EShLangTessEvaluation: + if ((profile == EEsProfile && version < 310) || + (profile != EEsProfile && version < 150)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: tessellation shaders require es profile with version 310 or non-es profile with version 150 or above"); + version = (profile == EEsProfile) ? 310 : 400; // 150 supports the extension, correction is to 400 which does not + if (profile == EEsProfile || profile == ENoProfile) + profile = ECoreProfile; + } + break; + case EShLangCompute: + if ((profile == EEsProfile && version < 310) || + (profile != EEsProfile && version < 420)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: compute shaders require es profile with version 310 or above, or non-es profile with version 420 or above"); + version = profile == EEsProfile ? 
310 : 420; + } + break; + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + if (profile == EEsProfile || version < 460) { + correct = false; + infoSink.info.message(EPrefixError, "#version: ray tracing shaders require non-es profile with version 460 or above"); + version = 460; + } + break; + case EShLangMeshNV: + case EShLangTaskNV: + if ((profile == EEsProfile && version < 320) || + (profile != EEsProfile && version < 450)) { + correct = false; + infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above"); + version = profile == EEsProfile ? 320 : 450; + } + default: + break; + } + + if (profile == EEsProfile && version >= 300 && versionNotFirst) { + correct = false; + infoSink.info.message(EPrefixError, "#version: statement must appear first in es-profile shader; before comments or newlines"); + } + + // Check for SPIR-V compatibility + if (spvVersion.spv != 0) { + switch (profile) { + case EEsProfile: + if (version < 310) { + correct = false; + infoSink.info.message(EPrefixError, "#version: ES shaders for SPIR-V require version 310 or higher"); + version = 310; + } + break; + case ECompatibilityProfile: + infoSink.info.message(EPrefixError, "#version: compilation for SPIR-V does not support the compatibility profile"); + break; + default: + if (spvVersion.vulkan > 0 && version < 140) { + correct = false; + infoSink.info.message(EPrefixError, "#version: Desktop shaders for Vulkan SPIR-V require version 140 or higher"); + version = 140; + } + if (spvVersion.openGl >= 100 && version < 330) { + correct = false; + infoSink.info.message(EPrefixError, "#version: Desktop shaders for OpenGL SPIR-V require version 330 or higher"); + version = 330; + } + break; + } + } +#endif + + return correct; +} + +// There are multiple paths in for setting environment stuff. +// TEnvironment takes precedence, for what it sets, so sort all this out. +// Ideally, the internal code could be made to use TEnvironment, but for +// now, translate it to the historically used parameters. +void TranslateEnvironment(const TEnvironment* environment, EShMessages& messages, EShSource& source, + EShLanguage& stage, SpvVersion& spvVersion) +{ + // Set up environmental defaults, first ignoring 'environment'. + if (messages & EShMsgSpvRules) + spvVersion.spv = EShTargetSpv_1_0; + if (messages & EShMsgVulkanRules) { + spvVersion.vulkan = EShTargetVulkan_1_0; + spvVersion.vulkanGlsl = 100; + } else if (spvVersion.spv != 0) + spvVersion.openGl = 100; + + // Now, override, based on any content set in 'environment'. + // 'environment' must be cleared to ESh*None settings when items + // are not being set. 
+ if (environment != nullptr) { + // input language + if (environment->input.languageFamily != EShSourceNone) { + stage = environment->input.stage; + switch (environment->input.dialect) { + case EShClientNone: + break; + case EShClientVulkan: + spvVersion.vulkanGlsl = environment->input.dialectVersion; + break; + case EShClientOpenGL: + spvVersion.openGl = environment->input.dialectVersion; + break; + case EShClientCount: + assert(0); + break; + } + switch (environment->input.languageFamily) { + case EShSourceNone: + break; + case EShSourceGlsl: + source = EShSourceGlsl; + messages = static_cast(messages & ~EShMsgReadHlsl); + break; + case EShSourceHlsl: + source = EShSourceHlsl; + messages = static_cast(messages | EShMsgReadHlsl); + break; + case EShSourceCount: + assert(0); + break; + } + } + + // client + switch (environment->client.client) { + case EShClientVulkan: + spvVersion.vulkan = environment->client.version; + break; + default: + break; + } + + // generated code + switch (environment->target.language) { + case EshTargetSpv: + spvVersion.spv = environment->target.version; + break; + default: + break; + } + } +} + +// Most processes are recorded when set in the intermediate representation, +// These are the few that are not. +void RecordProcesses(TIntermediate& intermediate, EShMessages messages, const std::string& sourceEntryPointName) +{ + if ((messages & EShMsgRelaxedErrors) != 0) + intermediate.addProcess("relaxed-errors"); + if ((messages & EShMsgSuppressWarnings) != 0) + intermediate.addProcess("suppress-warnings"); + if ((messages & EShMsgKeepUncalled) != 0) + intermediate.addProcess("keep-uncalled"); + if (sourceEntryPointName.size() > 0) { + intermediate.addProcess("source-entrypoint"); + intermediate.addProcessArgument(sourceEntryPointName); + } +} + +// This is the common setup and cleanup code for PreprocessDeferred and +// CompileDeferred. +// It takes any callable with a signature of +// bool (TParseContextBase& parseContext, TPpContext& ppContext, +// TInputScanner& input, bool versionWillBeError, +// TSymbolTable& , TIntermediate& , +// EShOptimizationLevel , EShMessages ); +// Which returns false if a failure was detected and true otherwise. +// +template +bool ProcessDeferred( + TCompiler* compiler, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const char* const stringNames[], + const char* customPreamble, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int defaultVersion, // use 100 for ES environment, 110 for desktop; this is the GLSL version, not SPIR-V or Vulkan + EProfile defaultProfile, + // set version/profile to defaultVersion/defaultProfile regardless of the #version + // directive in the source code + bool forceDefaultVersionAndProfile, + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages, // warnings/errors/AST; things to print out + TIntermediate& intermediate, // returned tree, etc. + ProcessingContext& processingContext, + bool requireNonempty, + TShader::Includer& includer, + const std::string sourceEntryPointName = "", + const TEnvironment* environment = nullptr) // optional way of fully setting all versions, overriding the above +{ + // This must be undone (.pop()) by the caller, after it finishes consuming the created tree. + GetThreadPoolAllocator().push(); + + if (numStrings == 0) + return true; + + // Move to length-based strings, rather than null-terminated strings. 
+ // Also, add strings to include the preamble and to ensure the shader is not null, + // which lets the grammar accept what was a null (post preprocessing) shader. + // + // Shader will look like + // string 0: system preamble + // string 1: custom preamble + // string 2...numStrings+1: user's shader + // string numStrings+2: "int;" + const int numPre = 2; + const int numPost = requireNonempty? 1 : 0; + const int numTotal = numPre + numStrings + numPost; + std::unique_ptr lengths(new size_t[numTotal]); + std::unique_ptr strings(new const char*[numTotal]); + std::unique_ptr names(new const char*[numTotal]); + for (int s = 0; s < numStrings; ++s) { + strings[s + numPre] = shaderStrings[s]; + if (inputLengths == nullptr || inputLengths[s] < 0) + lengths[s + numPre] = strlen(shaderStrings[s]); + else + lengths[s + numPre] = inputLengths[s]; + } + if (stringNames != nullptr) { + for (int s = 0; s < numStrings; ++s) + names[s + numPre] = stringNames[s]; + } else { + for (int s = 0; s < numStrings; ++s) + names[s + numPre] = nullptr; + } + + // Get all the stages, languages, clients, and other environment + // stuff sorted out. + EShSource sourceGuess = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl; + SpvVersion spvVersion; + EShLanguage stage = compiler->getLanguage(); + TranslateEnvironment(environment, messages, sourceGuess, stage, spvVersion); +#ifdef ENABLE_HLSL + EShSource source = sourceGuess; + if (environment != nullptr && environment->target.hlslFunctionality1) + intermediate.setHlslFunctionality1(); +#else + const EShSource source = EShSourceGlsl; +#endif + // First, without using the preprocessor or parser, find the #version, so we know what + // symbol tables, processing rules, etc. to set up. This does not need the extra strings + // outlined above, just the user shader, after the system and user preambles. + glslang::TInputScanner userInput(numStrings, &strings[numPre], &lengths[numPre]); + int version = 0; + EProfile profile = ENoProfile; + bool versionNotFirstToken = false; + bool versionNotFirst = (source == EShSourceHlsl) + ? true + : userInput.scanVersion(version, profile, versionNotFirstToken); + bool versionNotFound = version == 0; + if (forceDefaultVersionAndProfile && source == EShSourceGlsl) { +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + if (! (messages & EShMsgSuppressWarnings) && ! versionNotFound && + (version != defaultVersion || profile != defaultProfile)) { + compiler->infoSink.info << "Warning, (version, profile) forced to be (" + << defaultVersion << ", " << ProfileName(defaultProfile) + << "), while in source code it is (" + << version << ", " << ProfileName(profile) << ")\n"; + } +#endif + if (versionNotFound) { + versionNotFirstToken = false; + versionNotFirst = false; + versionNotFound = false; + } + version = defaultVersion; + profile = defaultProfile; + } + + bool goodVersion = DeduceVersionProfile(compiler->infoSink, stage, + versionNotFirst, defaultVersion, source, version, profile, spvVersion); +#ifdef GLSLANG_WEB + profile = EEsProfile; + version = 310; +#elif defined(GLSLANG_ANGLE) + profile = ECoreProfile; + version = 450; +#endif + + bool versionWillBeError = (versionNotFound || (profile == EEsProfile && version >= 300 && versionNotFirst)); +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + bool warnVersionNotFirst = false; + if (! 
versionWillBeError && versionNotFirstToken) { + if (messages & EShMsgRelaxedErrors) + warnVersionNotFirst = true; + else + versionWillBeError = true; + } +#endif + + intermediate.setSource(source); + intermediate.setVersion(version); + intermediate.setProfile(profile); + intermediate.setSpv(spvVersion); + RecordProcesses(intermediate, messages, sourceEntryPointName); + if (spvVersion.vulkan > 0) + intermediate.setOriginUpperLeft(); +#ifdef ENABLE_HLSL + if ((messages & EShMsgHlslOffsets) || source == EShSourceHlsl) + intermediate.setHlslOffsets(); +#endif + if (messages & EShMsgDebugInfo) { + intermediate.setSourceFile(names[numPre]); + for (int s = 0; s < numStrings; ++s) { + // The string may not be null-terminated, so make sure we provide + // the length along with the string. + intermediate.addSourceText(strings[numPre + s], lengths[numPre + s]); + } + } + SetupBuiltinSymbolTable(version, profile, spvVersion, source); + + TSymbolTable* cachedTable = SharedSymbolTables[MapVersionToIndex(version)] + [MapSpvVersionToIndex(spvVersion)] + [MapProfileToIndex(profile)] + [MapSourceToIndex(source)] + [stage]; + + // Dynamically allocate the symbol table so we can control when it is deallocated WRT the pool. + std::unique_ptr symbolTable(new TSymbolTable); + if (cachedTable) + symbolTable->adoptLevels(*cachedTable); + + // Add built-in symbols that are potentially context dependent; + // they get popped again further down. + if (! AddContextSpecificSymbols(resources, compiler->infoSink, *symbolTable, version, profile, spvVersion, + stage, source)) { + return false; + } + + if (messages & EShMsgBuiltinSymbolTable) + DumpBuiltinSymbolTable(compiler->infoSink, *symbolTable); + + // + // Now we can process the full shader under proper symbols and rules. + // + + std::unique_ptr parseContext(CreateParseContext(*symbolTable, intermediate, version, profile, source, + stage, compiler->infoSink, + spvVersion, forwardCompatible, messages, false, sourceEntryPointName)); + TPpContext ppContext(*parseContext, names[numPre] ? names[numPre] : "", includer); + + // only GLSL (bison triggered, really) needs an externally set scan context + glslang::TScanContext scanContext(*parseContext); + if (source == EShSourceGlsl) + parseContext->setScanContext(&scanContext); + + parseContext->setPpContext(&ppContext); + parseContext->setLimits(*resources); + if (! goodVersion) + parseContext->addError(); +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + if (warnVersionNotFirst) { + TSourceLoc loc; + loc.init(); + parseContext->warn(loc, "Illegal to have non-comment, non-whitespace tokens before #version", "#version", ""); + } +#endif + + parseContext->initializeExtensionBehavior(); + + // Fill in the strings as outlined above. + std::string preamble; + parseContext->getPreamble(preamble); + strings[0] = preamble.c_str(); + lengths[0] = strlen(strings[0]); + names[0] = nullptr; + strings[1] = customPreamble; + lengths[1] = strlen(strings[1]); + names[1] = nullptr; + assert(2 == numPre); + if (requireNonempty) { + const int postIndex = numStrings + numPre; + strings[postIndex] = "\n int;"; + lengths[postIndex] = strlen(strings[numStrings + numPre]); + names[postIndex] = nullptr; + } + TInputScanner fullInput(numStrings + numPre + numPost, strings.get(), lengths.get(), names.get(), numPre, numPost); + + // Push a new symbol allocation scope that will get used for the shader's globals. 
+ symbolTable->push(); + + bool success = processingContext(*parseContext, ppContext, fullInput, + versionWillBeError, *symbolTable, + intermediate, optLevel, messages); + return success; +} + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +// Responsible for keeping track of the most recent source string and line in +// the preprocessor and outputting newlines appropriately if the source string +// or line changes. +class SourceLineSynchronizer { +public: + SourceLineSynchronizer(const std::function& lastSourceIndex, + std::string* output) + : getLastSourceIndex(lastSourceIndex), output(output), lastSource(-1), lastLine(0) {} +// SourceLineSynchronizer(const SourceLineSynchronizer&) = delete; +// SourceLineSynchronizer& operator=(const SourceLineSynchronizer&) = delete; + + // Sets the internally tracked source string index to that of the most + // recently read token. If we switched to a new source string, returns + // true and inserts a newline. Otherwise, returns false and outputs nothing. + bool syncToMostRecentString() { + if (getLastSourceIndex() != lastSource) { + // After switching to a new source string, we need to reset lastLine + // because line number resets every time a new source string is + // used. We also need to output a newline to separate the output + // from the previous source string (if there is one). + if (lastSource != -1 || lastLine != 0) + *output += '\n'; + lastSource = getLastSourceIndex(); + lastLine = -1; + return true; + } + return false; + } + + // Calls syncToMostRecentString() and then sets the internally tracked line + // number to tokenLine. If we switched to a new line, returns true and inserts + // newlines appropriately. Otherwise, returns false and outputs nothing. + bool syncToLine(int tokenLine) { + syncToMostRecentString(); + const bool newLineStarted = lastLine < tokenLine; + for (; lastLine < tokenLine; ++lastLine) { + if (lastLine > 0) *output += '\n'; + } + return newLineStarted; + } + + // Sets the internally tracked line number to newLineNum. + void setLineNum(int newLineNum) { lastLine = newLineNum; } + +private: + SourceLineSynchronizer& operator=(const SourceLineSynchronizer&); + + // A function for getting the index of the last valid source string we've + // read tokens from. + const std::function getLastSourceIndex; + // output string for newlines. + std::string* output; + // lastSource is the source string index (starting from 0) of the last token + // processed. It is tracked in order for newlines to be inserted when a new + // source string starts. -1 means we haven't started processing any source + // string. + int lastSource; + // lastLine is the line number (starting from 1) of the last token processed. + // It is tracked in order for newlines to be inserted when a token appears + // on a new line. 0 means we haven't started processing any line in the + // current source string. + int lastLine; +}; + +// DoPreprocessing is a valid ProcessingContext template argument, +// which only performs the preprocessing step of compilation. +// It places the result in the "string" argument to its constructor. +// +// This is not an officially supported or fully working path. 
+struct DoPreprocessing { + explicit DoPreprocessing(std::string* string): outputString(string) {} + bool operator()(TParseContextBase& parseContext, TPpContext& ppContext, + TInputScanner& input, bool versionWillBeError, + TSymbolTable&, TIntermediate&, + EShOptimizationLevel, EShMessages) + { + // This is a list of tokens that do not require a space before or after. + static const std::string unNeededSpaceTokens = ";()[]"; + static const std::string noSpaceBeforeTokens = ","; + glslang::TPpToken ppToken; + + parseContext.setScanner(&input); + ppContext.setInput(input, versionWillBeError); + + std::string outputBuffer; + SourceLineSynchronizer lineSync( + std::bind(&TInputScanner::getLastValidSourceIndex, &input), &outputBuffer); + + parseContext.setExtensionCallback([&lineSync, &outputBuffer]( + int line, const char* extension, const char* behavior) { + lineSync.syncToLine(line); + outputBuffer += "#extension "; + outputBuffer += extension; + outputBuffer += " : "; + outputBuffer += behavior; + }); + + parseContext.setLineCallback([&lineSync, &outputBuffer, &parseContext]( + int curLineNum, int newLineNum, bool hasSource, int sourceNum, const char* sourceName) { + // SourceNum is the number of the source-string that is being parsed. + lineSync.syncToLine(curLineNum); + outputBuffer += "#line "; + outputBuffer += std::to_string(newLineNum); + if (hasSource) { + outputBuffer += ' '; + if (sourceName != nullptr) { + outputBuffer += '\"'; + outputBuffer += sourceName; + outputBuffer += '\"'; + } else { + outputBuffer += std::to_string(sourceNum); + } + } + if (parseContext.lineDirectiveShouldSetNextLine()) { + // newLineNum is the new line number for the line following the #line + // directive. So the new line number for the current line is + newLineNum -= 1; + } + outputBuffer += '\n'; + // And we are at the next line of the #line directive now. + lineSync.setLineNum(newLineNum + 1); + }); + + parseContext.setVersionCallback( + [&lineSync, &outputBuffer](int line, int version, const char* str) { + lineSync.syncToLine(line); + outputBuffer += "#version "; + outputBuffer += std::to_string(version); + if (str) { + outputBuffer += ' '; + outputBuffer += str; + } + }); + + parseContext.setPragmaCallback([&lineSync, &outputBuffer]( + int line, const glslang::TVector& ops) { + lineSync.syncToLine(line); + outputBuffer += "#pragma "; + for(size_t i = 0; i < ops.size(); ++i) { + outputBuffer += ops[i].c_str(); + } + }); + + parseContext.setErrorCallback([&lineSync, &outputBuffer]( + int line, const char* errorMessage) { + lineSync.syncToLine(line); + outputBuffer += "#error "; + outputBuffer += errorMessage; + }); + + int lastToken = EndOfInput; // lastToken records the last token processed. + do { + int token = ppContext.tokenize(ppToken); + if (token == EndOfInput) + break; + + bool isNewString = lineSync.syncToMostRecentString(); + bool isNewLine = lineSync.syncToLine(ppToken.loc.line); + + if (isNewLine) { + // Don't emit whitespace onto empty lines. + // Copy any whitespace characters at the start of a line + // from the input to the output. + outputBuffer += std::string(ppToken.loc.column - 1, ' '); + } + + // Output a space in between tokens, but not at the start of a line, + // and also not around special tokens. This helps with readability + // and consistency. 
+ if (!isNewString && !isNewLine && lastToken != EndOfInput && + (unNeededSpaceTokens.find((char)token) == std::string::npos) && + (unNeededSpaceTokens.find((char)lastToken) == std::string::npos) && + (noSpaceBeforeTokens.find((char)token) == std::string::npos)) { + outputBuffer += ' '; + } + lastToken = token; + if (token == PpAtomConstString) + outputBuffer += "\""; + outputBuffer += ppToken.name; + if (token == PpAtomConstString) + outputBuffer += "\""; + } while (true); + outputBuffer += '\n'; + *outputString = std::move(outputBuffer); + + bool success = true; + if (parseContext.getNumErrors() > 0) { + success = false; + parseContext.infoSink.info.prefix(EPrefixError); + parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n"; + } + return success; + } + std::string* outputString; +}; + +#endif + +// DoFullParse is a valid ProcessingConext template argument for fully +// parsing the shader. It populates the "intermediate" with the AST. +struct DoFullParse{ + bool operator()(TParseContextBase& parseContext, TPpContext& ppContext, + TInputScanner& fullInput, bool versionWillBeError, + TSymbolTable&, TIntermediate& intermediate, + EShOptimizationLevel optLevel, EShMessages messages) + { + bool success = true; + // Parse the full shader. + if (! parseContext.parseShaderStrings(ppContext, fullInput, versionWillBeError)) + success = false; + + if (success && intermediate.getTreeRoot()) { + if (optLevel == EShOptNoGeneration) + parseContext.infoSink.info.message(EPrefixNone, "No errors. No code generation or linking was requested."); + else + success = intermediate.postProcess(intermediate.getTreeRoot(), parseContext.getLanguage()); + } else if (! success) { + parseContext.infoSink.info.prefix(EPrefixError); + parseContext.infoSink.info << parseContext.getNumErrors() << " compilation errors. No code generated.\n\n"; + } + +#ifndef GLSLANG_ANGLE + if (messages & EShMsgAST) + intermediate.output(parseContext.infoSink, true); +#endif + + return success; + } +}; + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) +// Take a single compilation unit, and run the preprocessor on it. +// Return: True if there were no issues found in preprocessing, +// False if during preprocessing any unknown version, pragmas or +// extensions were found. +// +// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string +// is not an officially supported or fully working path. +bool PreprocessDeferred( + TCompiler* compiler, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const char* const stringNames[], + const char* preamble, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int defaultVersion, // use 100 for ES environment, 110 for desktop + EProfile defaultProfile, + bool forceDefaultVersionAndProfile, + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages, // warnings/errors/AST; things to print out + TShader::Includer& includer, + TIntermediate& intermediate, // returned tree, etc. 
+ std::string* outputString) +{ + DoPreprocessing parser(outputString); + return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames, + preamble, optLevel, resources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, messages, intermediate, parser, + false, includer); +} +#endif + +// +// do a partial compile on the given strings for a single compilation unit +// for a potential deferred link into a single stage (and deferred full compile of that +// stage through machine-dependent compilation). +// +// all preprocessing, parsing, semantic checks, etc. for a single compilation unit +// are done here. +// +// return: the tree and other information is filled into the intermediate argument, +// and true is returned by the function for success. +// +bool CompileDeferred( + TCompiler* compiler, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const char* const stringNames[], + const char* preamble, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int defaultVersion, // use 100 for ES environment, 110 for desktop + EProfile defaultProfile, + bool forceDefaultVersionAndProfile, + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages, // warnings/errors/AST; things to print out + TIntermediate& intermediate,// returned tree, etc. + TShader::Includer& includer, + const std::string sourceEntryPointName = "", + TEnvironment* environment = nullptr) +{ + DoFullParse parser; + return ProcessDeferred(compiler, shaderStrings, numStrings, inputLengths, stringNames, + preamble, optLevel, resources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, messages, intermediate, parser, + true, includer, sourceEntryPointName, environment); +} + +} // end anonymous namespace for local functions + +// +// ShInitialize() should be called exactly once per process, not per thread. +// +int ShInitialize() +{ + glslang::InitGlobalLock(); + + if (! InitProcess()) + return 0; + + glslang::GetGlobalLock(); + ++NumberOfClients; + glslang::ReleaseGlobalLock(); + + if (PerProcessGPA == nullptr) + PerProcessGPA = new TPoolAllocator(); + + glslang::TScanContext::fillInKeywordMap(); +#ifdef ENABLE_HLSL + glslang::HlslScanContext::fillInKeywordMap(); +#endif + + return 1; +} + +// +// Driver calls these to create and destroy compiler/linker +// objects. 
+// + +ShHandle ShConstructCompiler(const EShLanguage language, int debugOptions) +{ + if (!InitThread()) + return 0; + + TShHandleBase* base = static_cast(ConstructCompiler(language, debugOptions)); + + return reinterpret_cast(base); +} + +ShHandle ShConstructLinker(const EShExecutable executable, int debugOptions) +{ + if (!InitThread()) + return 0; + + TShHandleBase* base = static_cast(ConstructLinker(executable, debugOptions)); + + return reinterpret_cast(base); +} + +ShHandle ShConstructUniformMap() +{ + if (!InitThread()) + return 0; + + TShHandleBase* base = static_cast(ConstructUniformMap()); + + return reinterpret_cast(base); +} + +void ShDestruct(ShHandle handle) +{ + if (handle == 0) + return; + + TShHandleBase* base = static_cast(handle); + + if (base->getAsCompiler()) + DeleteCompiler(base->getAsCompiler()); + else if (base->getAsLinker()) + DeleteLinker(base->getAsLinker()); + else if (base->getAsUniformMap()) + DeleteUniformMap(base->getAsUniformMap()); +} + +// +// Cleanup symbol tables +// +int ShFinalize() +{ + glslang::GetGlobalLock(); + --NumberOfClients; + assert(NumberOfClients >= 0); + bool finalize = NumberOfClients == 0; + glslang::ReleaseGlobalLock(); + if (! finalize) + return 1; + + for (int version = 0; version < VersionCount; ++version) { + for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) { + for (int p = 0; p < ProfileCount; ++p) { + for (int source = 0; source < SourceCount; ++source) { + for (int stage = 0; stage < EShLangCount; ++stage) { + delete SharedSymbolTables[version][spvVersion][p][source][stage]; + SharedSymbolTables[version][spvVersion][p][source][stage] = 0; + } + } + } + } + } + + for (int version = 0; version < VersionCount; ++version) { + for (int spvVersion = 0; spvVersion < SpvVersionCount; ++spvVersion) { + for (int p = 0; p < ProfileCount; ++p) { + for (int source = 0; source < SourceCount; ++source) { + for (int pc = 0; pc < EPcCount; ++pc) { + delete CommonSymbolTable[version][spvVersion][p][source][pc]; + CommonSymbolTable[version][spvVersion][p][source][pc] = 0; + } + } + } + } + } + + if (PerProcessGPA != nullptr) { + delete PerProcessGPA; + PerProcessGPA = nullptr; + } + + glslang::TScanContext::deleteKeywordMap(); +#ifdef ENABLE_HLSL + glslang::HlslScanContext::deleteKeywordMap(); +#endif + + return 1; +} + +// +// Do a full compile on the given strings for a single compilation unit +// forming a complete stage. The result of the machine dependent compilation +// is left in the provided compile object. +// +// Return: The return value is really boolean, indicating +// success (1) or failure (0). 
+// +int ShCompile( + const ShHandle handle, + const char* const shaderStrings[], + const int numStrings, + const int* inputLengths, + const EShOptimizationLevel optLevel, + const TBuiltInResource* resources, + int /*debugOptions*/, + int defaultVersion, // use 100 for ES environment, 110 for desktop + bool forwardCompatible, // give errors for use of deprecated features + EShMessages messages // warnings/errors/AST; things to print out + ) +{ + // Map the generic handle to the C++ object + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TCompiler* compiler = base->getAsCompiler(); + if (compiler == 0) + return 0; + + SetThreadPoolAllocator(compiler->getPool()); + + compiler->infoSink.info.erase(); + compiler->infoSink.debug.erase(); + + TIntermediate intermediate(compiler->getLanguage()); + TShader::ForbidIncluder includer; + bool success = CompileDeferred(compiler, shaderStrings, numStrings, inputLengths, nullptr, + "", optLevel, resources, defaultVersion, ENoProfile, false, + forwardCompatible, messages, intermediate, includer); + + // + // Call the machine dependent compiler + // + if (success && intermediate.getTreeRoot() && optLevel != EShOptNoGeneration) + success = compiler->compile(intermediate.getTreeRoot(), intermediate.getVersion(), intermediate.getProfile()); + + intermediate.removeTree(); + + // Throw away all the temporary memory used by the compilation process. + // The push was done in the CompileDeferred() call above. + GetThreadPoolAllocator().pop(); + + return success ? 1 : 0; +} + +// +// Link the given compile objects. +// +// Return: The return value of is really boolean, indicating +// success or failure. +// +int ShLinkExt( + const ShHandle linkHandle, + const ShHandle compHandles[], + const int numHandles) +{ + if (linkHandle == 0 || numHandles == 0) + return 0; + + THandleList cObjects; + + for (int i = 0; i < numHandles; ++i) { + if (compHandles[i] == 0) + return 0; + TShHandleBase* base = reinterpret_cast(compHandles[i]); + if (base->getAsLinker()) { + cObjects.push_back(base->getAsLinker()); + } + if (base->getAsCompiler()) + cObjects.push_back(base->getAsCompiler()); + + if (cObjects[i] == 0) + return 0; + } + + TShHandleBase* base = reinterpret_cast(linkHandle); + TLinker* linker = static_cast(base->getAsLinker()); + + SetThreadPoolAllocator(linker->getPool()); + + if (linker == 0) + return 0; + + linker->infoSink.info.erase(); + + for (int i = 0; i < numHandles; ++i) { + if (cObjects[i]->getAsCompiler()) { + if (! cObjects[i]->getAsCompiler()->linkable()) { + linker->infoSink.info.message(EPrefixError, "Not all shaders have valid object code."); + return 0; + } + } + } + + bool ret = linker->link(cObjects); + + return ret ? 1 : 0; +} + +// +// ShSetEncrpytionMethod is a place-holder for specifying +// how source code is encrypted. +// +void ShSetEncryptionMethod(ShHandle handle) +{ + if (handle == 0) + return; +} + +// +// Return any compiler/linker/uniformmap log of messages for the application. +// +const char* ShGetInfoLog(const ShHandle handle) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = static_cast(handle); + TInfoSink* infoSink; + + if (base->getAsCompiler()) + infoSink = &(base->getAsCompiler()->getInfoSink()); + else if (base->getAsLinker()) + infoSink = &(base->getAsLinker()->getInfoSink()); + else + return 0; + + infoSink->info << infoSink->debug.c_str(); + return infoSink->info.c_str(); +} + +// +// Return the resulting binary code from the link process. Structure +// is machine dependent. 
+// +const void* ShGetExecutable(const ShHandle handle) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + + TLinker* linker = static_cast(base->getAsLinker()); + if (linker == 0) + return 0; + + return linker->getObjectCode(); +} + +// +// Let the linker know where the application said it's attributes are bound. +// The linker does not use these values, they are remapped by the ICD or +// hardware. It just needs them to know what's aliased. +// +// Return: The return value of is really boolean, indicating +// success or failure. +// +int ShSetVirtualAttributeBindings(const ShHandle handle, const ShBindingTable* table) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TLinker* linker = static_cast(base->getAsLinker()); + + if (linker == 0) + return 0; + + linker->setAppAttributeBindings(table); + + return 1; +} + +// +// Let the linker know where the predefined attributes have to live. +// +int ShSetFixedAttributeBindings(const ShHandle handle, const ShBindingTable* table) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TLinker* linker = static_cast(base->getAsLinker()); + + if (linker == 0) + return 0; + + linker->setFixedAttributeBindings(table); + return 1; +} + +// +// Some attribute locations are off-limits to the linker... +// +int ShExcludeAttributes(const ShHandle handle, int *attributes, int count) +{ + if (handle == 0) + return 0; + + TShHandleBase* base = reinterpret_cast(handle); + TLinker* linker = static_cast(base->getAsLinker()); + if (linker == 0) + return 0; + + linker->setExcludedAttributes(attributes, count); + + return 1; +} + +// +// Return the index for OpenGL to use for knowing where a uniform lives. +// +// Return: The return value of is really boolean, indicating +// success or failure. +// +int ShGetUniformLocation(const ShHandle handle, const char* name) +{ + if (handle == 0) + return -1; + + TShHandleBase* base = reinterpret_cast(handle); + TUniformMap* uniformMap= base->getAsUniformMap(); + if (uniformMap == 0) + return -1; + + return uniformMap->getLocation(name); +} + +//////////////////////////////////////////////////////////////////////////////////////////// +// +// Deferred-Lowering C++ Interface +// ----------------------------------- +// +// Below is a new alternate C++ interface that might potentially replace the above +// opaque handle-based interface. +// +// See more detailed comment in ShaderLang.h +// + +namespace glslang { + +Version GetVersion() +{ + Version version; + version.major = GLSLANG_VERSION_MAJOR; + version.minor = GLSLANG_VERSION_MINOR; + version.patch = GLSLANG_VERSION_PATCH; + version.flavor = GLSLANG_VERSION_FLAVOR; + return version; +} + +#define QUOTE(s) #s +#define STR(n) QUOTE(n) + +const char* GetEsslVersionString() +{ + return "OpenGL ES GLSL 3.20 glslang Khronos. " STR(GLSLANG_VERSION_MAJOR) "." STR(GLSLANG_VERSION_MINOR) "." STR( + GLSLANG_VERSION_PATCH) GLSLANG_VERSION_FLAVOR; +} + +const char* GetGlslVersionString() +{ + return "4.60 glslang Khronos. " STR(GLSLANG_VERSION_MAJOR) "." STR(GLSLANG_VERSION_MINOR) "." 
STR( + GLSLANG_VERSION_PATCH) GLSLANG_VERSION_FLAVOR; +} + +int GetKhronosToolId() +{ + return 8; +} + +bool InitializeProcess() +{ + return ShInitialize() != 0; +} + +void FinalizeProcess() +{ + ShFinalize(); +} + +class TDeferredCompiler : public TCompiler { +public: + TDeferredCompiler(EShLanguage s, TInfoSink& i) : TCompiler(s, i) { } + virtual bool compile(TIntermNode*, int = 0, EProfile = ENoProfile) { return true; } +}; + +TShader::TShader(EShLanguage s) + : stage(s), lengths(nullptr), stringNames(nullptr), preamble("") +{ + pool = new TPoolAllocator; + infoSink = new TInfoSink; + compiler = new TDeferredCompiler(stage, *infoSink); + intermediate = new TIntermediate(s); + + // clear environment (avoid constructors in them for use in a C interface) + environment.input.languageFamily = EShSourceNone; + environment.input.dialect = EShClientNone; + environment.client.client = EShClientNone; + environment.target.language = EShTargetNone; + environment.target.hlslFunctionality1 = false; +} + +TShader::~TShader() +{ + delete infoSink; + delete compiler; + delete intermediate; + delete pool; +} + +void TShader::setStrings(const char* const* s, int n) +{ + strings = s; + numStrings = n; + lengths = nullptr; +} + +void TShader::setStringsWithLengths(const char* const* s, const int* l, int n) +{ + strings = s; + numStrings = n; + lengths = l; +} + +void TShader::setStringsWithLengthsAndNames( + const char* const* s, const int* l, const char* const* names, int n) +{ + strings = s; + numStrings = n; + lengths = l; + stringNames = names; +} + +void TShader::setEntryPoint(const char* entryPoint) +{ + intermediate->setEntryPointName(entryPoint); +} + +void TShader::setSourceEntryPoint(const char* name) +{ + sourceEntryPointName = name; +} + +// Log initial settings and transforms. +// See comment for class TProcesses. +void TShader::addProcesses(const std::vector& p) +{ + intermediate->addProcesses(p); +} + +void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); } +void TShader::setNanMinMaxClamp(bool useNonNan) { intermediate->setNanMinMaxClamp(useNonNan); } + +#ifndef GLSLANG_WEB + +// Set binding base for given resource type +void TShader::setShiftBinding(TResourceType res, unsigned int base) { + intermediate->setShiftBinding(res, base); +} + +// Set binding base for given resource type for a given binding set. +void TShader::setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set) { + intermediate->setShiftBindingForSet(res, base, set); +} + +// Set binding base for sampler types +void TShader::setShiftSamplerBinding(unsigned int base) { setShiftBinding(EResSampler, base); } +// Set binding base for texture types (SRV) +void TShader::setShiftTextureBinding(unsigned int base) { setShiftBinding(EResTexture, base); } +// Set binding base for image types +void TShader::setShiftImageBinding(unsigned int base) { setShiftBinding(EResImage, base); } +// Set binding base for uniform buffer objects (CBV) +void TShader::setShiftUboBinding(unsigned int base) { setShiftBinding(EResUbo, base); } +// Synonym for setShiftUboBinding, to match HLSL language. 
+void TShader::setShiftCbufferBinding(unsigned int base) { setShiftBinding(EResUbo, base); } +// Set binding base for UAV (unordered access view) +void TShader::setShiftUavBinding(unsigned int base) { setShiftBinding(EResUav, base); } +// Set binding base for SSBOs +void TShader::setShiftSsboBinding(unsigned int base) { setShiftBinding(EResSsbo, base); } +// Enables binding automapping using TIoMapper +void TShader::setAutoMapBindings(bool map) { intermediate->setAutoMapBindings(map); } +// Enables position.Y output negation in vertex shader + +// Fragile: currently within one stage: simple auto-assignment of location +void TShader::setAutoMapLocations(bool map) { intermediate->setAutoMapLocations(map); } +void TShader::addUniformLocationOverride(const char* name, int loc) +{ + intermediate->addUniformLocationOverride(name, loc); +} +void TShader::setUniformLocationBase(int base) +{ + intermediate->setUniformLocationBase(base); +} +void TShader::setNoStorageFormat(bool useUnknownFormat) { intermediate->setNoStorageFormat(useUnknownFormat); } +void TShader::setResourceSetBinding(const std::vector& base) { intermediate->setResourceSetBinding(base); } +void TShader::setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { intermediate->setTextureSamplerTransformMode(mode); } +#endif + +#ifdef ENABLE_HLSL +// See comment above TDefaultHlslIoMapper in iomapper.cpp: +void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); } +void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); } +#endif + +// +// Turn the shader strings into a parse tree in the TIntermediate. +// +// Returns true for success. +// +bool TShader::parse(const TBuiltInResource* builtInResources, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile, + bool forwardCompatible, EShMessages messages, Includer& includer) +{ + if (! InitThread()) + return false; + SetThreadPoolAllocator(pool); + + if (! preamble) + preamble = ""; + + return CompileDeferred(compiler, strings, numStrings, lengths, stringNames, + preamble, EShOptNone, builtInResources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, messages, *intermediate, includer, sourceEntryPointName, + &environment); +} + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) +// Fill in a string with the result of preprocessing ShaderStrings +// Returns true if all extensions, pragmas and version strings were valid. +// +// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string +// is not an officially supported or fully working path. +bool TShader::preprocess(const TBuiltInResource* builtInResources, + int defaultVersion, EProfile defaultProfile, + bool forceDefaultVersionAndProfile, + bool forwardCompatible, EShMessages message, + std::string* output_string, + Includer& includer) +{ + if (! InitThread()) + return false; + SetThreadPoolAllocator(pool); + + if (! 
preamble) + preamble = ""; + + return PreprocessDeferred(compiler, strings, numStrings, lengths, stringNames, preamble, + EShOptNone, builtInResources, defaultVersion, + defaultProfile, forceDefaultVersionAndProfile, + forwardCompatible, message, includer, *intermediate, output_string); +} +#endif + +const char* TShader::getInfoLog() +{ + return infoSink->info.c_str(); +} + +const char* TShader::getInfoDebugLog() +{ + return infoSink->debug.c_str(); +} + +TProgram::TProgram() : +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + reflection(0), +#endif + linked(false) +{ + pool = new TPoolAllocator; + infoSink = new TInfoSink; + for (int s = 0; s < EShLangCount; ++s) { + intermediate[s] = 0; + newedIntermediate[s] = false; + } +} + +TProgram::~TProgram() +{ + delete infoSink; +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + delete reflection; +#endif + + for (int s = 0; s < EShLangCount; ++s) + if (newedIntermediate[s]) + delete intermediate[s]; + + delete pool; +} + +// +// Merge the compilation units within each stage into a single TIntermediate. +// All starting compilation units need to be the result of calling TShader::parse(). +// +// Return true for success. +// +bool TProgram::link(EShMessages messages) +{ + if (linked) + return false; + linked = true; + + bool error = false; + + SetThreadPoolAllocator(pool); + + for (int s = 0; s < EShLangCount; ++s) { + if (! linkStage((EShLanguage)s, messages)) + error = true; + } + + // TODO: Link: cross-stage error checking + + return ! error; +} + +// +// Merge the compilation units within the given stage into a single TIntermediate. +// +// Return true for success. +// +bool TProgram::linkStage(EShLanguage stage, EShMessages messages) +{ + if (stages[stage].size() == 0) + return true; + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + int numEsShaders = 0, numNonEsShaders = 0; + for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) { + if ((*it)->intermediate->getProfile() == EEsProfile) { + numEsShaders++; + } else { + numNonEsShaders++; + } + } + + if (numEsShaders > 0 && numNonEsShaders > 0) { + infoSink->info.message(EPrefixError, "Cannot mix ES profile with non-ES profile shaders"); + return false; + } else if (numEsShaders > 1) { + infoSink->info.message(EPrefixError, "Cannot attach multiple ES shaders of the same type to a single program"); + return false; + } + + // + // Be efficient for the common single compilation unit per stage case, + // reusing it's TIntermediate instead of merging into a new one. + // + TIntermediate *firstIntermediate = stages[stage].front()->intermediate; + if (stages[stage].size() == 1) + intermediate[stage] = firstIntermediate; + else { + intermediate[stage] = new TIntermediate(stage, + firstIntermediate->getVersion(), + firstIntermediate->getProfile()); + + + // The new TIntermediate must use the same origin as the original TIntermediates. + // Otherwise linking will fail due to different coordinate systems. 
+ if (firstIntermediate->getOriginUpperLeft()) { + intermediate[stage]->setOriginUpperLeft(); + } + intermediate[stage]->setSpv(firstIntermediate->getSpv()); + + newedIntermediate[stage] = true; + } + + if (messages & EShMsgAST) + infoSink->info << "\nLinked " << StageName(stage) << " stage:\n\n"; + + if (stages[stage].size() > 1) { + std::list::const_iterator it; + for (it = stages[stage].begin(); it != stages[stage].end(); ++it) + intermediate[stage]->merge(*infoSink, *(*it)->intermediate); + } +#else + intermediate[stage] = stages[stage].front()->intermediate; +#endif + intermediate[stage]->finalCheck(*infoSink, (messages & EShMsgKeepUncalled) != 0); + +#ifndef GLSLANG_ANGLE + if (messages & EShMsgAST) + intermediate[stage]->output(*infoSink, true); +#endif + + return intermediate[stage]->getNumErrors() == 0; +} + +const char* TProgram::getInfoLog() +{ + return infoSink->info.c_str(); +} + +const char* TProgram::getInfoDebugLog() +{ + return infoSink->debug.c_str(); +} + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +// +// Reflection implementation. +// + +bool TProgram::buildReflection(int opts) +{ + if (! linked || reflection != nullptr) + return false; + + int firstStage = EShLangVertex, lastStage = EShLangFragment; + + if (opts & EShReflectionIntermediateIO) { + // if we're reflecting intermediate I/O, determine the first and last stage linked and use those as the + // boundaries for which stages generate pipeline inputs/outputs + firstStage = EShLangCount; + lastStage = 0; + for (int s = 0; s < EShLangCount; ++s) { + if (intermediate[s]) { + firstStage = std::min(firstStage, s); + lastStage = std::max(lastStage, s); + } + } + } + + reflection = new TReflection((EShReflectionOptions)opts, (EShLanguage)firstStage, (EShLanguage)lastStage); + + for (int s = 0; s < EShLangCount; ++s) { + if (intermediate[s]) { + if (! 
reflection->addStage((EShLanguage)s, *intermediate[s])) + return false; + } + } + + return true; +} + +unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); } +int TProgram::getReflectionIndex(const char* name) const { return reflection->getIndex(name); } +int TProgram::getReflectionPipeIOIndex(const char* name, const bool inOrOut) const + { return reflection->getPipeIOIndex(name, inOrOut); } + +int TProgram::getNumUniformVariables() const { return reflection->getNumUniforms(); } +const TObjectReflection& TProgram::getUniform(int index) const { return reflection->getUniform(index); } +int TProgram::getNumUniformBlocks() const { return reflection->getNumUniformBlocks(); } +const TObjectReflection& TProgram::getUniformBlock(int index) const { return reflection->getUniformBlock(index); } +int TProgram::getNumPipeInputs() const { return reflection->getNumPipeInputs(); } +const TObjectReflection& TProgram::getPipeInput(int index) const { return reflection->getPipeInput(index); } +int TProgram::getNumPipeOutputs() const { return reflection->getNumPipeOutputs(); } +const TObjectReflection& TProgram::getPipeOutput(int index) const { return reflection->getPipeOutput(index); } +int TProgram::getNumBufferVariables() const { return reflection->getNumBufferVariables(); } +const TObjectReflection& TProgram::getBufferVariable(int index) const { return reflection->getBufferVariable(index); } +int TProgram::getNumBufferBlocks() const { return reflection->getNumStorageBuffers(); } +const TObjectReflection& TProgram::getBufferBlock(int index) const { return reflection->getStorageBufferBlock(index); } +int TProgram::getNumAtomicCounters() const { return reflection->getNumAtomicCounters(); } +const TObjectReflection& TProgram::getAtomicCounter(int index) const { return reflection->getAtomicCounter(index); } +void TProgram::dumpReflection() { if (reflection != nullptr) reflection->dump(); } + +// +// I/O mapping implementation. +// +bool TProgram::mapIO(TIoMapResolver* pResolver, TIoMapper* pIoMapper) +{ + if (! linked) + return false; + TIoMapper* ioMapper = nullptr; + TIoMapper defaultIOMapper; + if (pIoMapper == nullptr) + ioMapper = &defaultIOMapper; + else + ioMapper = pIoMapper; + for (int s = 0; s < EShLangCount; ++s) { + if (intermediate[s]) { + if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, pResolver)) + return false; + } + } + + return ioMapper->doMap(pResolver, *infoSink); +} + +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/SymbolTable.cpp b/src/third_party/glslang/glslang/MachineIndependent/SymbolTable.cpp new file mode 100644 index 0000000..007f22c --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/SymbolTable.cpp @@ -0,0 +1,446 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
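An editorial sketch, not part of any vendored file, of how an application typically drives the TShader/TProgram interface implemented in ShaderLang.cpp above. It leans on several declarations added elsewhere in this patch and therefore treated here as assumptions: DefaultTBuiltInResource from StandAlone/ResourceLimits.h, TProgram::addShader, TShader::ForbidIncluder, the default argument of buildReflection(), and the name member of TObjectReflection; the include paths and the GLSL source string are placeholders.

    #include <cstdio>
    #include "glslang/Public/ShaderLang.h"      // public API (path assumed)
    #include "StandAlone/ResourceLimits.h"      // glslang::DefaultTBuiltInResource (path assumed)

    // Hypothetical driver: compile one vertex shader, link it, and walk the
    // reflection data. Error handling is reduced to log printing.
    bool compileAndReflect(const char* vertexGlsl)
    {
        glslang::InitializeProcess();

        glslang::TShader shader(EShLangVertex);
        shader.setStrings(&vertexGlsl, 1);

        glslang::TShader::ForbidIncluder includer;   // no #include support needed here
        bool ok = shader.parse(&glslang::DefaultTBuiltInResource,
                               110,                  // default #version when the source has none
                               ENoProfile,           // default profile
                               false,                // don't force the defaults above
                               false,                // not forward compatible
                               EShMsgDefault,
                               includer);

        glslang::TProgram program;
        program.addShader(&shader);
        ok = ok && program.link(EShMsgDefault);
        ok = ok && program.buildReflection();

        if (ok) {
            for (int i = 0; i < program.getNumUniformVariables(); ++i)
                printf("uniform: %s\n", program.getUniform(i).name.c_str());
        } else {
            printf("%s\n%s\n", shader.getInfoLog(), program.getInfoLog());
        }

        glslang::FinalizeProcess();
        return ok;
    }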
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Symbol table for parsing. Most functionality and main ideas +// are documented in the header file. +// + +#include "SymbolTable.h" + +namespace glslang { + +// +// TType helper function needs a place to live. +// + +// +// Recursively generate mangled names. +// +void TType::buildMangledName(TString& mangledName) const +{ + if (isMatrix()) + mangledName += 'm'; + else if (isVector()) + mangledName += 'v'; + + switch (basicType) { + case EbtFloat: mangledName += 'f'; break; + case EbtInt: mangledName += 'i'; break; + case EbtUint: mangledName += 'u'; break; + case EbtBool: mangledName += 'b'; break; +#ifndef GLSLANG_WEB + case EbtDouble: mangledName += 'd'; break; + case EbtFloat16: mangledName += "f16"; break; + case EbtInt8: mangledName += "i8"; break; + case EbtUint8: mangledName += "u8"; break; + case EbtInt16: mangledName += "i16"; break; + case EbtUint16: mangledName += "u16"; break; + case EbtInt64: mangledName += "i64"; break; + case EbtUint64: mangledName += "u64"; break; + case EbtAtomicUint: mangledName += "au"; break; + case EbtAccStruct: mangledName += "as"; break; + case EbtRayQuery: mangledName += "rq"; break; +#endif + case EbtSampler: + switch (sampler.type) { +#ifndef GLSLANG_WEB + case EbtFloat16: mangledName += "f16"; break; +#endif + case EbtInt: mangledName += "i"; break; + case EbtUint: mangledName += "u"; break; + default: break; // some compilers want this + } + if (sampler.isImageClass()) + mangledName += "I"; // a normal image or subpass + else if (sampler.isPureSampler()) + mangledName += "p"; // a "pure" sampler + else if (!sampler.isCombined()) + mangledName += "t"; // a "pure" texture + else + mangledName += "s"; // traditional combined sampler + if (sampler.isArrayed()) + mangledName += "A"; + if (sampler.isShadow()) + mangledName += "S"; + if (sampler.isExternal()) + mangledName += "E"; + if (sampler.isYuv()) + mangledName += "Y"; + switch (sampler.dim) { + case Esd2D: mangledName += "2"; break; + case Esd3D: mangledName += "3"; break; + case EsdCube: mangledName += "C"; break; +#ifndef GLSLANG_WEB + case Esd1D: mangledName += "1"; break; + case EsdRect: mangledName += "R2"; break; + case EsdBuffer: mangledName += "B"; break; + case EsdSubpass: mangledName += "P"; break; +#endif + default: break; // some compilers want this + } + 
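A few worked examples of the names this function produces, traced by hand from the code in this file; an editorial illustration, not part of the vendored source, and the remark about functions relies on the TFunction constructor shown later in this patch:

    //   float        ->  "f1"      ('f', then vector size 1)
    //   vec3         ->  "vf3"     ('v' vector prefix, 'f', vector size 3)
    //   mat4         ->  "mf44"    ('m' matrix prefix, 'f', then columns and rows)
    //   float x[5]   ->  "f1[5]"   (array dimensions appended in brackets)
    //
    // TFunction starts its mangled name with "name(" and appends each
    // parameter's type mangle, so overloads end up under distinct keys in the
    // symbol table.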
+#ifdef ENABLE_HLSL + if (sampler.hasReturnStruct()) { + // Name mangle for sampler return struct uses struct table index. + mangledName += "-tx-struct"; + + char text[16]; // plenty enough space for the small integers. + snprintf(text, sizeof(text), "%u-", sampler.getStructReturnIndex()); + mangledName += text; + } else { + switch (sampler.getVectorSize()) { + case 1: mangledName += "1"; break; + case 2: mangledName += "2"; break; + case 3: mangledName += "3"; break; + case 4: break; // default to prior name mangle behavior + } + } +#endif + + if (sampler.isMultiSample()) + mangledName += "M"; + break; + case EbtStruct: + case EbtBlock: + if (basicType == EbtStruct) + mangledName += "struct-"; + else + mangledName += "block-"; + if (typeName) + mangledName += *typeName; + for (unsigned int i = 0; i < structure->size(); ++i) { + mangledName += '-'; + (*structure)[i].type->buildMangledName(mangledName); + } + default: + break; + } + + if (getVectorSize() > 0) + mangledName += static_cast('0' + getVectorSize()); + else { + mangledName += static_cast('0' + getMatrixCols()); + mangledName += static_cast('0' + getMatrixRows()); + } + + if (arraySizes) { + const int maxSize = 11; + char buf[maxSize]; + for (int i = 0; i < arraySizes->getNumDims(); ++i) { + if (arraySizes->getDimNode(i)) { + if (arraySizes->getDimNode(i)->getAsSymbolNode()) + snprintf(buf, maxSize, "s%d", arraySizes->getDimNode(i)->getAsSymbolNode()->getId()); + else + snprintf(buf, maxSize, "s%p", arraySizes->getDimNode(i)); + } else + snprintf(buf, maxSize, "%d", arraySizes->getDimSize(i)); + mangledName += '['; + mangledName += buf; + mangledName += ']'; + } + } +} + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +// +// Dump functions. +// + +void TSymbol::dumpExtensions(TInfoSink& infoSink) const +{ + int numExtensions = getNumExtensions(); + if (numExtensions) { + infoSink.debug << " <"; + + for (int i = 0; i < numExtensions; i++) + infoSink.debug << getExtensions()[i] << ","; + + infoSink.debug << ">"; + } +} + +void TVariable::dump(TInfoSink& infoSink, bool complete) const +{ + if (complete) { + infoSink.debug << getName().c_str() << ": " << type.getCompleteString(); + dumpExtensions(infoSink); + } else { + infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " " + << type.getBasicTypeString(); + + if (type.isArray()) + infoSink.debug << "[0]"; + } + + infoSink.debug << "\n"; +} + +void TFunction::dump(TInfoSink& infoSink, bool complete) const +{ + if (complete) { + infoSink.debug << getName().c_str() << ": " << returnType.getCompleteString() << " " << getName().c_str() + << "("; + + int numParams = getParamCount(); + for (int i = 0; i < numParams; i++) { + const TParameter ¶m = parameters[i]; + infoSink.debug << param.type->getCompleteString() << " " + << (param.type->isStruct() ? "of " + param.type->getTypeName() + " " : "") + << (param.name ? *param.name : "") << (i < numParams - 1 ? 
"," : ""); + } + + infoSink.debug << ")"; + dumpExtensions(infoSink); + } else { + infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " " + << getMangledName().c_str() << "n"; + } + + infoSink.debug << "\n"; +} + +void TAnonMember::dump(TInfoSink& TInfoSink, bool) const +{ + TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str() + << "\n"; +} + +void TSymbolTableLevel::dump(TInfoSink& infoSink, bool complete) const +{ + tLevel::const_iterator it; + for (it = level.begin(); it != level.end(); ++it) + (*it).second->dump(infoSink, complete); +} + +void TSymbolTable::dump(TInfoSink& infoSink, bool complete) const +{ + for (int level = currentLevel(); level >= 0; --level) { + infoSink.debug << "LEVEL " << level << "\n"; + table[level]->dump(infoSink, complete); + } +} + +#endif + +// +// Functions have buried pointers to delete. +// +TFunction::~TFunction() +{ + for (TParamList::iterator i = parameters.begin(); i != parameters.end(); ++i) + delete (*i).type; +} + +// +// Symbol table levels are a map of pointers to symbols that have to be deleted. +// +TSymbolTableLevel::~TSymbolTableLevel() +{ + for (tLevel::iterator it = level.begin(); it != level.end(); ++it) + delete (*it).second; + + delete [] defaultPrecision; +} + +// +// Change all function entries in the table with the non-mangled name +// to be related to the provided built-in operation. +// +void TSymbolTableLevel::relateToOperator(const char* name, TOperator op) +{ + tLevel::const_iterator candidate = level.lower_bound(name); + while (candidate != level.end()) { + const TString& candidateName = (*candidate).first; + TString::size_type parenAt = candidateName.find_first_of('('); + if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) { + TFunction* function = (*candidate).second->getAsFunction(); + function->relateToOperator(op); + } else + break; + ++candidate; + } +} + +// Make all function overloads of the given name require an extension(s). +// Should only be used for a version/profile that actually needs the extension(s). +void TSymbolTableLevel::setFunctionExtensions(const char* name, int num, const char* const extensions[]) +{ + tLevel::const_iterator candidate = level.lower_bound(name); + while (candidate != level.end()) { + const TString& candidateName = (*candidate).first; + TString::size_type parenAt = candidateName.find_first_of('('); + if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) { + TSymbol* symbol = candidate->second; + symbol->setExtensions(num, extensions); + } else + break; + ++candidate; + } +} + +// +// Make all symbols in this table level read only. +// +void TSymbolTableLevel::readOnly() +{ + for (tLevel::iterator it = level.begin(); it != level.end(); ++it) + (*it).second->makeReadOnly(); +} + +// +// Copy a symbol, but the copy is writable; call readOnly() afterward if that's not desired. 
+// +TSymbol::TSymbol(const TSymbol& copyOf) +{ + name = NewPoolTString(copyOf.name->c_str()); + uniqueId = copyOf.uniqueId; + writable = true; +} + +TVariable::TVariable(const TVariable& copyOf) : TSymbol(copyOf) +{ + type.deepCopy(copyOf.type); + userType = copyOf.userType; + + // we don't support specialization-constant subtrees in cloned tables, only extensions + constSubtree = nullptr; + extensions = nullptr; + memberExtensions = nullptr; + if (copyOf.getNumExtensions() > 0) + setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions()); + if (copyOf.hasMemberExtensions()) { + for (int m = 0; m < (int)copyOf.type.getStruct()->size(); ++m) { + if (copyOf.getNumMemberExtensions(m) > 0) + setMemberExtensions(m, copyOf.getNumMemberExtensions(m), copyOf.getMemberExtensions(m)); + } + } + + if (! copyOf.constArray.empty()) { + assert(! copyOf.type.isStruct()); + TConstUnionArray newArray(copyOf.constArray, 0, copyOf.constArray.size()); + constArray = newArray; + } +} + +TVariable* TVariable::clone() const +{ + TVariable *variable = new TVariable(*this); + + return variable; +} + +TFunction::TFunction(const TFunction& copyOf) : TSymbol(copyOf) +{ + for (unsigned int i = 0; i < copyOf.parameters.size(); ++i) { + TParameter param; + parameters.push_back(param); + parameters.back().copyParam(copyOf.parameters[i]); + } + + extensions = nullptr; + if (copyOf.getNumExtensions() > 0) + setExtensions(copyOf.getNumExtensions(), copyOf.getExtensions()); + returnType.deepCopy(copyOf.returnType); + mangledName = copyOf.mangledName; + op = copyOf.op; + defined = copyOf.defined; + prototyped = copyOf.prototyped; + implicitThis = copyOf.implicitThis; + illegalImplicitThis = copyOf.illegalImplicitThis; + defaultParamCount = copyOf.defaultParamCount; +} + +TFunction* TFunction::clone() const +{ + TFunction *function = new TFunction(*this); + + return function; +} + +TAnonMember* TAnonMember::clone() const +{ + // Anonymous members of a given block should be cloned at a higher level, + // where they can all be assured to still end up pointing to a single + // copy of the original container. + assert(0); + + return 0; +} + +TSymbolTableLevel* TSymbolTableLevel::clone() const +{ + TSymbolTableLevel *symTableLevel = new TSymbolTableLevel(); + symTableLevel->anonId = anonId; + symTableLevel->thisLevel = thisLevel; + std::vector containerCopied(anonId, false); + tLevel::const_iterator iter; + for (iter = level.begin(); iter != level.end(); ++iter) { + const TAnonMember* anon = iter->second->getAsAnonMember(); + if (anon) { + // Insert all the anonymous members of this same container at once, + // avoid inserting the remaining members in the future, once this has been done, + // allowing them to all be part of the same new container. + if (! 
containerCopied[anon->getAnonId()]) { + TVariable* container = anon->getAnonContainer().clone(); + container->changeName(NewPoolTString("")); + // insert the container and all its members + symTableLevel->insert(*container, false); + containerCopied[anon->getAnonId()] = true; + } + } else + symTableLevel->insert(*iter->second->clone(), false); + } + + return symTableLevel; +} + +void TSymbolTable::copyTable(const TSymbolTable& copyOf) +{ + assert(adoptedLevels == copyOf.adoptedLevels); + + uniqueId = copyOf.uniqueId; + noBuiltInRedeclarations = copyOf.noBuiltInRedeclarations; + separateNameSpaces = copyOf.separateNameSpaces; + for (unsigned int i = copyOf.adoptedLevels; i < copyOf.table.size(); ++i) + table.push_back(copyOf.table[i]->clone()); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/SymbolTable.h b/src/third_party/glslang/glslang/MachineIndependent/SymbolTable.h new file mode 100644 index 0000000..ec4bc3c --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/SymbolTable.h @@ -0,0 +1,885 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _SYMBOL_TABLE_INCLUDED_ +#define _SYMBOL_TABLE_INCLUDED_ + +// +// Symbol table for parsing. Has these design characteristics: +// +// * Same symbol table can be used to compile many shaders, to preserve +// effort of creating and loading with the large numbers of built-in +// symbols. +// +// --> This requires a copy mechanism, so initial pools used to create +// the shared information can be popped. Done through "clone" +// methods. +// +// * Name mangling will be used to give each function a unique name +// so that symbol table lookups are never ambiguous. This allows +// a simpler symbol table structure. +// +// * Pushing and popping of scope, so symbol table will really be a stack +// of symbol tables. 
Searched from the top, with new inserts going into +// the top. +// +// * Constants: Compile time constant symbols will keep their values +// in the symbol table. The parser can substitute constants at parse +// time, including doing constant folding and constant propagation. +// +// * No temporaries: Temporaries made from operations (+, --, .xy, etc.) +// are tracked in the intermediate representation, not the symbol table. +// + +#include "../Include/Common.h" +#include "../Include/intermediate.h" +#include "../Include/InfoSink.h" + +namespace glslang { + +// +// Symbol base class. (Can build functions or variables out of these...) +// + +class TVariable; +class TFunction; +class TAnonMember; + +typedef TVector TExtensionList; + +class TSymbol { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + explicit TSymbol(const TString *n) : name(n), extensions(0), writable(true) { } + virtual TSymbol* clone() const = 0; + virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool + + virtual const TString& getName() const { return *name; } + virtual void changeName(const TString* newName) { name = newName; } + virtual void addPrefix(const char* prefix) + { + TString newName(prefix); + newName.append(*name); + changeName(NewPoolTString(newName.c_str())); + } + virtual const TString& getMangledName() const { return getName(); } + virtual TFunction* getAsFunction() { return 0; } + virtual const TFunction* getAsFunction() const { return 0; } + virtual TVariable* getAsVariable() { return 0; } + virtual const TVariable* getAsVariable() const { return 0; } + virtual const TAnonMember* getAsAnonMember() const { return 0; } + virtual const TType& getType() const = 0; + virtual TType& getWritableType() = 0; + virtual void setUniqueId(int id) { uniqueId = id; } + virtual int getUniqueId() const { return uniqueId; } + virtual void setExtensions(int numExts, const char* const exts[]) + { + assert(extensions == 0); + assert(numExts > 0); + extensions = NewPoolObject(extensions); + for (int e = 0; e < numExts; ++e) + extensions->push_back(exts[e]); + } + virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); } + virtual const char** getExtensions() const { return extensions->data(); } + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0; + void dumpExtensions(TInfoSink& infoSink) const; +#endif + + virtual bool isReadOnly() const { return ! writable; } + virtual void makeReadOnly() { writable = false; } + +protected: + explicit TSymbol(const TSymbol&); + TSymbol& operator=(const TSymbol&); + + const TString *name; + unsigned int uniqueId; // For cross-scope comparing during code generation + + // For tracking what extensions must be present + // (don't use if correct version/profile is present). + TExtensionList* extensions; // an array of pointers to existing constant char strings + + // + // N.B.: Non-const functions that will be generally used should assert on this, + // to avoid overwriting shared symbol-table information. + // + bool writable; +}; + +// +// Variable class, meaning a symbol that's not a function. 
+// +// There could be a separate class hierarchy for Constant variables; +// Only one of int, bool, or float, (or none) is correct for +// any particular use, but it's easy to do this way, and doesn't +// seem worth having separate classes, and "getConst" can't simply return +// different values for different types polymorphically, so this is +// just simple and pragmatic. +// +class TVariable : public TSymbol { +public: + TVariable(const TString *name, const TType& t, bool uT = false ) + : TSymbol(name), + userType(uT), + constSubtree(nullptr), + memberExtensions(nullptr), + anonId(-1) + { type.shallowCopy(t); } + virtual TVariable* clone() const; + virtual ~TVariable() { } + + virtual TVariable* getAsVariable() { return this; } + virtual const TVariable* getAsVariable() const { return this; } + virtual const TType& getType() const { return type; } + virtual TType& getWritableType() { assert(writable); return type; } + virtual bool isUserType() const { return userType; } + virtual const TConstUnionArray& getConstArray() const { return constArray; } + virtual TConstUnionArray& getWritableConstArray() { assert(writable); return constArray; } + virtual void setConstArray(const TConstUnionArray& array) { constArray = array; } + virtual void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; } + virtual TIntermTyped* getConstSubtree() const { return constSubtree; } + virtual void setAnonId(int i) { anonId = i; } + virtual int getAnonId() const { return anonId; } + + virtual void setMemberExtensions(int member, int numExts, const char* const exts[]) + { + assert(type.isStruct()); + assert(numExts > 0); + if (memberExtensions == nullptr) { + memberExtensions = NewPoolObject(memberExtensions); + memberExtensions->resize(type.getStruct()->size()); + } + for (int e = 0; e < numExts; ++e) + (*memberExtensions)[member].push_back(exts[e]); + } + virtual bool hasMemberExtensions() const { return memberExtensions != nullptr; } + virtual int getNumMemberExtensions(int member) const + { + return memberExtensions == nullptr ? 0 : (int)(*memberExtensions)[member].size(); + } + virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); } + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + virtual void dump(TInfoSink& infoSink, bool complete = false) const; +#endif + +protected: + explicit TVariable(const TVariable&); + TVariable& operator=(const TVariable&); + + TType type; + bool userType; + + // we are assuming that Pool Allocator will free the memory allocated to unionArray + // when this object is destroyed + + TConstUnionArray constArray; // for compile-time constant value + TIntermTyped* constSubtree; // for specialization constant computation + TVector* memberExtensions; // per-member extension list, allocated only when needed + int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose +}; + +// +// The function sub-class of symbols and the parser will need to +// share this definition of a function parameter. +// +struct TParameter { + TString *name; + TType* type; + TIntermTyped* defaultValue; + void copyParam(const TParameter& param) + { + if (param.name) + name = NewPoolTString(param.name->c_str()); + else + name = 0; + type = param.type->clone(); + defaultValue = param.defaultValue; + } + TBuiltInVariable getDeclaredBuiltIn() const { return type->getQualifier().declaredBuiltIn; } +}; + +// +// The function sub-class of a symbol. 
+// +class TFunction : public TSymbol { +public: + explicit TFunction(TOperator o) : + TSymbol(0), + op(o), + defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) { } + TFunction(const TString *name, const TType& retType, TOperator tOp = EOpNull) : + TSymbol(name), + mangledName(*name + '('), + op(tOp), + defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) + { + returnType.shallowCopy(retType); + declaredBuiltIn = retType.getQualifier().builtIn; + } + virtual TFunction* clone() const override; + virtual ~TFunction(); + + virtual TFunction* getAsFunction() override { return this; } + virtual const TFunction* getAsFunction() const override { return this; } + + // Install 'p' as the (non-'this') last parameter. + // Non-'this' parameters are reflected in both the list of parameters and the + // mangled name. + virtual void addParameter(TParameter& p) + { + assert(writable); + parameters.push_back(p); + p.type->appendMangledName(mangledName); + + if (p.defaultValue != nullptr) + defaultParamCount++; + } + + // Install 'this' as the first parameter. + // 'this' is reflected in the list of parameters, but not the mangled name. + virtual void addThisParameter(TType& type, const char* name) + { + TParameter p = { NewPoolTString(name), new TType, nullptr }; + p.type->shallowCopy(type); + parameters.insert(parameters.begin(), p); + } + + virtual void addPrefix(const char* prefix) override + { + TSymbol::addPrefix(prefix); + mangledName.insert(0, prefix); + } + + virtual void removePrefix(const TString& prefix) + { + assert(mangledName.compare(0, prefix.size(), prefix) == 0); + mangledName.erase(0, prefix.size()); + } + + virtual const TString& getMangledName() const override { return mangledName; } + virtual const TType& getType() const override { return returnType; } + virtual TBuiltInVariable getDeclaredBuiltInType() const { return declaredBuiltIn; } + virtual TType& getWritableType() override { return returnType; } + virtual void relateToOperator(TOperator o) { assert(writable); op = o; } + virtual TOperator getBuiltInOp() const { return op; } + virtual void setDefined() { assert(writable); defined = true; } + virtual bool isDefined() const { return defined; } + virtual void setPrototyped() { assert(writable); prototyped = true; } + virtual bool isPrototyped() const { return prototyped; } + virtual void setImplicitThis() { assert(writable); implicitThis = true; } + virtual bool hasImplicitThis() const { return implicitThis; } + virtual void setIllegalImplicitThis() { assert(writable); illegalImplicitThis = true; } + virtual bool hasIllegalImplicitThis() const { return illegalImplicitThis; } + + // Return total number of parameters + virtual int getParamCount() const { return static_cast(parameters.size()); } + // Return number of parameters with default values. 
+ virtual int getDefaultParamCount() const { return defaultParamCount; } + // Return number of fixed parameters (without default values) + virtual int getFixedParamCount() const { return getParamCount() - getDefaultParamCount(); } + + virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; } + virtual const TParameter& operator[](int i) const { return parameters[i]; } + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + virtual void dump(TInfoSink& infoSink, bool complete = false) const override; +#endif + +protected: + explicit TFunction(const TFunction&); + TFunction& operator=(const TFunction&); + + typedef TVector TParamList; + TParamList parameters; + TType returnType; + TBuiltInVariable declaredBuiltIn; + + TString mangledName; + TOperator op; + bool defined; + bool prototyped; + bool implicitThis; // True if this function is allowed to see all members of 'this' + bool illegalImplicitThis; // True if this function is not supposed to have access to dynamic members of 'this', + // even if it finds member variables in the symbol table. + // This is important for a static member function that has member variables in scope, + // but is not allowed to use them, or see hidden symbols instead. + int defaultParamCount; +}; + +// +// Members of anonymous blocks are a kind of TSymbol. They are not hidden in +// the symbol table behind a container; rather they are visible and point to +// their anonymous container. (The anonymous container is found through the +// member, not the other way around.) +// +class TAnonMember : public TSymbol { +public: + TAnonMember(const TString* n, unsigned int m, TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { } + virtual TAnonMember* clone() const override; + virtual ~TAnonMember() { } + + virtual const TAnonMember* getAsAnonMember() const override { return this; } + virtual const TVariable& getAnonContainer() const { return anonContainer; } + virtual unsigned int getMemberNumber() const { return memberNumber; } + + virtual const TType& getType() const override + { + const TTypeList& types = *anonContainer.getType().getStruct(); + return *types[memberNumber].type; + } + + virtual TType& getWritableType() override + { + assert(writable); + const TTypeList& types = *anonContainer.getType().getStruct(); + return *types[memberNumber].type; + } + + virtual void setExtensions(int numExts, const char* const exts[]) override + { + anonContainer.setMemberExtensions(memberNumber, numExts, exts); + } + virtual int getNumExtensions() const override { return anonContainer.getNumMemberExtensions(memberNumber); } + virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); } + + virtual int getAnonId() const { return anonId; } +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + virtual void dump(TInfoSink& infoSink, bool complete = false) const override; +#endif + +protected: + explicit TAnonMember(const TAnonMember&); + TAnonMember& operator=(const TAnonMember&); + + TVariable& anonContainer; + unsigned int memberNumber; + int anonId; +}; + +class TSymbolTableLevel { +public: + POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator()) + TSymbolTableLevel() : defaultPrecision(0), anonId(0), thisLevel(false) { } + ~TSymbolTableLevel(); + + bool insert(TSymbol& symbol, bool separateNameSpaces) + { + // + // returning true means symbol was added to the table with no semantic errors + // + const TString& name = symbol.getName(); + if (name == "") { + 
symbol.getAsVariable()->setAnonId(anonId++); + // An empty name means an anonymous container, exposing its members to the external scope. + // Give it a name and insert its members in the symbol table, pointing to the container. + char buf[20]; + snprintf(buf, 20, "%s%d", AnonymousPrefix, symbol.getAsVariable()->getAnonId()); + symbol.changeName(NewPoolTString(buf)); + + return insertAnonymousMembers(symbol, 0); + } else { + // Check for redefinition errors: + // - STL itself will tell us if there is a direct name collision, with name mangling, at this level + // - additionally, check for function-redefining-variable name collisions + const TString& insertName = symbol.getMangledName(); + if (symbol.getAsFunction()) { + // make sure there isn't a variable of this name + if (! separateNameSpaces && level.find(name) != level.end()) + return false; + + // insert, and whatever happens is okay + level.insert(tLevelPair(insertName, &symbol)); + + return true; + } else + return level.insert(tLevelPair(insertName, &symbol)).second; + } + } + + // Add more members to an already inserted aggregate object + bool amend(TSymbol& symbol, int firstNewMember) + { + // See insert() for comments on basic explanation of insert. + // This operates similarly, but more simply. + // Only supporting amend of anonymous blocks so far. + if (IsAnonymous(symbol.getName())) + return insertAnonymousMembers(symbol, firstNewMember); + else + return false; + } + + bool insertAnonymousMembers(TSymbol& symbol, int firstMember) + { + const TTypeList& types = *symbol.getAsVariable()->getType().getStruct(); + for (unsigned int m = firstMember; m < types.size(); ++m) { + TAnonMember* member = new TAnonMember(&types[m].type->getFieldName(), m, *symbol.getAsVariable(), symbol.getAsVariable()->getAnonId()); + if (! level.insert(tLevelPair(member->getMangledName(), member)).second) + return false; + } + + return true; + } + + TSymbol* find(const TString& name) const + { + tLevel::const_iterator it = level.find(name); + if (it == level.end()) + return 0; + else + return (*it).second; + } + + void findFunctionNameList(const TString& name, TVector& list) + { + size_t parenAt = name.find_first_of('('); + TString base(name, 0, parenAt + 1); + + tLevel::const_iterator begin = level.lower_bound(base); + base[parenAt] = ')'; // assume ')' is lexically after '(' + tLevel::const_iterator end = level.upper_bound(base); + for (tLevel::const_iterator it = begin; it != end; ++it) + list.push_back(it->second->getAsFunction()); + } + + // See if there is already a function in the table having the given non-function-style name. + bool hasFunctionName(const TString& name) const + { + tLevel::const_iterator candidate = level.lower_bound(name); + if (candidate != level.end()) { + const TString& candidateName = (*candidate).first; + TString::size_type parenAt = candidateName.find_first_of('('); + if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) + + return true; + } + + return false; + } + + // See if there is a variable at this level having the given non-function-style name. + // Return true if name is found, and set variable to true if the name was a variable. 
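As an editorial illustration of the anonymous path above (the GLSL is shown inside the comment; AnonymousPrefix and IsAnonymous are referenced here but defined elsewhere in glslang's headers):

    // uniform Transforms { mat4 mvp; };   // block with no instance name -> anonymous container
    //
    // insert() stores one container symbol named AnonymousPrefix plus a running
    // id, then insertAnonymousMembers() adds a TAnonMember per field, keyed by
    // the field name, so a later find("mvp") at this level returns the member,
    // which in turn points back at its container.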
+ bool findFunctionVariableName(const TString& name, bool& variable) const + { + tLevel::const_iterator candidate = level.lower_bound(name); + if (candidate != level.end()) { + const TString& candidateName = (*candidate).first; + TString::size_type parenAt = candidateName.find_first_of('('); + if (parenAt == candidateName.npos) { + // not a mangled name + if (candidateName == name) { + // found a variable name match + variable = true; + return true; + } + } else { + // a mangled name + if (candidateName.compare(0, parenAt, name) == 0) { + // found a function name match + variable = false; + return true; + } + } + } + + return false; + } + + // Use this to do a lazy 'push' of precision defaults the first time + // a precision statement is seen in a new scope. Leave it at 0 for + // when no push was needed. Thus, it is not the current defaults, + // it is what to restore the defaults to when popping a level. + void setPreviousDefaultPrecisions(const TPrecisionQualifier *p) + { + // can call multiple times at one scope, will only latch on first call, + // as we're tracking the previous scope's values, not the current values + if (defaultPrecision != 0) + return; + + defaultPrecision = new TPrecisionQualifier[EbtNumTypes]; + for (int t = 0; t < EbtNumTypes; ++t) + defaultPrecision[t] = p[t]; + } + + void getPreviousDefaultPrecisions(TPrecisionQualifier *p) + { + // can be called for table level pops that didn't set the + // defaults + if (defaultPrecision == 0 || p == 0) + return; + + for (int t = 0; t < EbtNumTypes; ++t) + p[t] = defaultPrecision[t]; + } + + void relateToOperator(const char* name, TOperator op); + void setFunctionExtensions(const char* name, int num, const char* const extensions[]); +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + void dump(TInfoSink& infoSink, bool complete = false) const; +#endif + TSymbolTableLevel* clone() const; + void readOnly(); + + void setThisLevel() { thisLevel = true; } + bool isThisLevel() const { return thisLevel; } + +protected: + explicit TSymbolTableLevel(TSymbolTableLevel&); + TSymbolTableLevel& operator=(TSymbolTableLevel&); + + typedef std::map, pool_allocator > > tLevel; + typedef const tLevel::value_type tLevelPair; + typedef std::pair tInsertResult; + + tLevel level; // named mappings + TPrecisionQualifier *defaultPrecision; + int anonId; + bool thisLevel; // True if this level of the symbol table is a structure scope containing member function + // that are supposed to see anonymous access to member variables. +}; + +class TSymbolTable { +public: + TSymbolTable() : uniqueId(0), noBuiltInRedeclarations(false), separateNameSpaces(false), adoptedLevels(0) + { + // + // This symbol table cannot be used until push() is called. 
+ // + } + ~TSymbolTable() + { + // this can be called explicitly; safest to code it so it can be called multiple times + + // don't deallocate levels passed in from elsewhere + while (table.size() > adoptedLevels) + pop(0); + } + + void adoptLevels(TSymbolTable& symTable) + { + for (unsigned int level = 0; level < symTable.table.size(); ++level) { + table.push_back(symTable.table[level]); + ++adoptedLevels; + } + uniqueId = symTable.uniqueId; + noBuiltInRedeclarations = symTable.noBuiltInRedeclarations; + separateNameSpaces = symTable.separateNameSpaces; + } + + // + // While level adopting is generic, the methods below enact a the following + // convention for levels: + // 0: common built-ins shared across all stages, all compiles, only one copy for all symbol tables + // 1: per-stage built-ins, shared across all compiles, but a different copy per stage + // 2: built-ins specific to a compile, like resources that are context-dependent, or redeclared built-ins + // 3: user-shader globals + // +protected: + static const int globalLevel = 3; + bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels + bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals + bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals +public: + bool isEmpty() { return table.size() == 0; } + bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); } + bool atGlobalLevel() { return isGlobalLevel(currentLevel()); } + + void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; } + void setSeparateNameSpaces() { separateNameSpaces = true; } + + void push() + { + table.push_back(new TSymbolTableLevel); + } + + // Make a new symbol-table level to represent the scope introduced by a structure + // containing member functions, such that the member functions can find anonymous + // references to member variables. + // + // 'thisSymbol' should have a name of "" to trigger anonymous structure-member + // symbol finds. + void pushThis(TSymbol& thisSymbol) + { + assert(thisSymbol.getName().size() == 0); + table.push_back(new TSymbolTableLevel); + table.back()->setThisLevel(); + insert(thisSymbol); + } + + void pop(TPrecisionQualifier *p) + { + table[currentLevel()]->getPreviousDefaultPrecisions(p); + delete table.back(); + table.pop_back(); + } + + // + // Insert a visible symbol into the symbol table so it can + // be found later by name. + // + // Returns false if the was a name collision. + // + bool insert(TSymbol& symbol) + { + symbol.setUniqueId(++uniqueId); + + // make sure there isn't a function of this variable name + if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName())) + return false; + + // check for not overloading or redefining a built-in function + if (noBuiltInRedeclarations) { + if (atGlobalLevel() && currentLevel() > 0) { + if (table[0]->hasFunctionName(symbol.getName())) + return false; + if (currentLevel() > 1 && table[1]->hasFunctionName(symbol.getName())) + return false; + } + } + + return table[currentLevel()]->insert(symbol, separateNameSpaces); + } + + // Add more members to an already inserted aggregate object + bool amend(TSymbol& symbol, int firstNewMember) + { + // See insert() for comments on basic explanation of insert. + // This operates similarly, but more simply. 
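Taken together, push(), insert(), find(), and pop() implement the scope stack described at the top of this header. A minimal editorial sketch of that round trip, assuming the thread's pool allocator is already active (TShader::parse arranges this), that TType has the basic-type constructor from Types.h, and that "radius" is an arbitrary name:

    #include "SymbolTable.h"   // in-tree header; include path assumed

    // Hypothetical sketch: open a scope, declare a float, look it up, close the scope.
    bool scopeRoundTrip(glslang::TSymbolTable& symbolTable)
    {
        symbolTable.push();                                          // '{' : new level on top
        glslang::TVariable* var = new glslang::TVariable(
            glslang::NewPoolTString("radius"), glslang::TType(glslang::EbtFloat));
        bool ok = symbolTable.insert(*var);                          // false on a name collision
        ok = ok && symbolTable.find(glslang::TString("radius")) != nullptr;
        symbolTable.pop(nullptr);                                    // '}' : no precision defaults tracked here
        return ok;
    }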
+ return table[currentLevel()]->amend(symbol, firstNewMember); + } + + // + // To allocate an internal temporary, which will need to be uniquely + // identified by the consumer of the AST, but never need to + // found by doing a symbol table search by name, hence allowed an + // arbitrary name in the symbol with no worry of collision. + // + void makeInternalVariable(TSymbol& symbol) + { + symbol.setUniqueId(++uniqueId); + } + + // + // Copy a variable or anonymous member's structure from a shared level so that + // it can be added (soon after return) to the symbol table where it can be + // modified without impacting other users of the shared table. + // + TSymbol* copyUpDeferredInsert(TSymbol* shared) + { + if (shared->getAsVariable()) { + TSymbol* copy = shared->clone(); + copy->setUniqueId(shared->getUniqueId()); + return copy; + } else { + const TAnonMember* anon = shared->getAsAnonMember(); + assert(anon); + TVariable* container = anon->getAnonContainer().clone(); + container->changeName(NewPoolTString("")); + container->setUniqueId(anon->getAnonContainer().getUniqueId()); + return container; + } + } + + TSymbol* copyUp(TSymbol* shared) + { + TSymbol* copy = copyUpDeferredInsert(shared); + table[globalLevel]->insert(*copy, separateNameSpaces); + if (shared->getAsVariable()) + return copy; + else { + // return the copy of the anonymous member + return table[globalLevel]->find(shared->getName()); + } + } + + // Normal find of a symbol, that can optionally say whether the symbol was found + // at a built-in level or the current top-scope level. + TSymbol* find(const TString& name, bool* builtIn = 0, bool* currentScope = 0, int* thisDepthP = 0) + { + int level = currentLevel(); + TSymbol* symbol; + int thisDepth = 0; + do { + if (table[level]->isThisLevel()) + ++thisDepth; + symbol = table[level]->find(name); + --level; + } while (symbol == nullptr && level >= 0); + level++; + if (builtIn) + *builtIn = isBuiltInLevel(level); + if (currentScope) + *currentScope = isGlobalLevel(currentLevel()) || level == currentLevel(); // consider shared levels as "current scope" WRT user globals + if (thisDepthP != nullptr) { + if (! table[level]->isThisLevel()) + thisDepth = 0; + *thisDepthP = thisDepth; + } + + return symbol; + } + + // Find of a symbol that returns how many layers deep of nested + // structures-with-member-functions ('this' scopes) deep the symbol was + // found in. + TSymbol* find(const TString& name, int& thisDepth) + { + int level = currentLevel(); + TSymbol* symbol; + thisDepth = 0; + do { + if (table[level]->isThisLevel()) + ++thisDepth; + symbol = table[level]->find(name); + --level; + } while (symbol == 0 && level >= 0); + + if (! table[level + 1]->isThisLevel()) + thisDepth = 0; + + return symbol; + } + + bool isFunctionNameVariable(const TString& name) const + { + if (separateNameSpaces) + return false; + + int level = currentLevel(); + do { + bool variable; + bool found = table[level]->findFunctionVariableName(name, variable); + if (found) + return variable; + --level; + } while (level >= 0); + + return false; + } + + void findFunctionNameList(const TString& name, TVector& list, bool& builtIn) + { + // For user levels, return the set found in the first scope with a match + builtIn = false; + int level = currentLevel(); + do { + table[level]->findFunctionNameList(name, list); + --level; + } while (list.empty() && level >= globalLevel); + + if (! 
list.empty()) + return; + + // Gather across all built-in levels; they don't hide each other + builtIn = true; + do { + table[level]->findFunctionNameList(name, list); + --level; + } while (level >= 0); + } + + void relateToOperator(const char* name, TOperator op) + { + for (unsigned int level = 0; level < table.size(); ++level) + table[level]->relateToOperator(name, op); + } + + void setFunctionExtensions(const char* name, int num, const char* const extensions[]) + { + for (unsigned int level = 0; level < table.size(); ++level) + table[level]->setFunctionExtensions(name, num, extensions); + } + + void setVariableExtensions(const char* name, int numExts, const char* const extensions[]) + { + TSymbol* symbol = find(TString(name)); + if (symbol == nullptr) + return; + + symbol->setExtensions(numExts, extensions); + } + + void setVariableExtensions(const char* blockName, const char* name, int numExts, const char* const extensions[]) + { + TSymbol* symbol = find(TString(blockName)); + if (symbol == nullptr) + return; + TVariable* variable = symbol->getAsVariable(); + assert(variable != nullptr); + + const TTypeList& structure = *variable->getAsVariable()->getType().getStruct(); + for (int member = 0; member < (int)structure.size(); ++member) { + if (structure[member].type->getFieldName().compare(name) == 0) { + variable->setMemberExtensions(member, numExts, extensions); + return; + } + } + } + + int getMaxSymbolId() { return uniqueId; } +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + void dump(TInfoSink& infoSink, bool complete = false) const; +#endif + void copyTable(const TSymbolTable& copyOf); + + void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); } + + void readOnly() + { + for (unsigned int level = 0; level < table.size(); ++level) + table[level]->readOnly(); + } + +protected: + TSymbolTable(TSymbolTable&); + TSymbolTable& operator=(TSymbolTableLevel&); + + int currentLevel() const { return static_cast(table.size()) - 1; } + + std::vector table; + int uniqueId; // for unique identification in code generation + bool noBuiltInRedeclarations; + bool separateNameSpaces; + unsigned int adoptedLevels; +}; + +} // end namespace glslang + +#endif // _SYMBOL_TABLE_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/Versions.cpp b/src/third_party/glslang/glslang/MachineIndependent/Versions.cpp new file mode 100644 index 0000000..896fd5a --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Versions.cpp @@ -0,0 +1,1285 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2020 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Help manage multiple profiles, versions, extensions etc. +// +// These don't return error codes, as the presumption is parsing will +// always continue as if the tested feature were enabled, and thus there +// is no error recovery needed. +// + +// +// HOW TO add a feature enabled by an extension. +// +// To add a new hypothetical "Feature F" to the front end, where an extension +// "XXX_extension_X" can be used to enable the feature, do the following. +// +// OVERVIEW: Specific features are what are error-checked for, not +// extensions: A specific Feature F might be enabled by an extension, or a +// particular version in a particular profile, or a stage, or combinations, etc. +// +// The basic mechanism is to use the following to "declare" all the things that +// enable/disable Feature F, in a code path that implements Feature F: +// +// requireProfile() +// profileRequires() +// requireStage() +// checkDeprecated() +// requireNotRemoved() +// requireExtensions() +// extensionRequires() +// +// Typically, only the first two calls are needed. They go into a code path that +// implements Feature F, and will log the proper error/warning messages. Parsing +// will then always continue as if the tested feature was enabled. +// +// There is typically no if-testing or conditional parsing, just insertion of the calls above. +// However, if symbols specific to the extension are added (step 5), they will +// only be added under tests that the minimum version and profile are present. +// +// 1) Add a symbol name for the extension string at the bottom of Versions.h: +// +// const char* const XXX_extension_X = "XXX_extension_X"; +// +// 2) Add extension initialization to TParseVersions::initializeExtensionBehavior(), +// the first function below and optionally a entry to extensionData for additional +// error checks: +// +// extensionBehavior[XXX_extension_X] = EBhDisable; +// (Optional) exts[] = {XXX_extension_X, EShTargetSpv_1_4} +// +// 3) Add any preprocessor directives etc. in the next function, TParseVersions::getPreamble(): +// +// "#define XXX_extension_X 1\n" +// +// The new-line is important, as that ends preprocess tokens. +// +// 4) Insert a profile check in the feature's path (unless all profiles support the feature, +// for some version level). That is, call requireProfile() to constrain the profiles, e.g.: +// +// // ... in a path specific to Feature F... 
+// requireProfile(loc, +// ECoreProfile | ECompatibilityProfile, +// "Feature F"); +// +// 5) For each profile that supports the feature, insert version/extension checks: +// +// The mostly likely scenario is that Feature F can only be used with a +// particular profile if XXX_extension_X is present or the version is +// high enough that the core specification already incorporated it. +// +// // following the requireProfile() call... +// profileRequires(loc, +// ECoreProfile | ECompatibilityProfile, +// 420, // 0 if no version incorporated the feature into the core spec. +// XXX_extension_X, // can be a list of extensions that all add the feature +// "Feature F Description"); +// +// This allows the feature if either A) one of the extensions is enabled or +// B) the version is high enough. If no version yet incorporates the feature +// into core, pass in 0. +// +// This can be called multiple times, if different profiles support the +// feature starting at different version numbers or with different +// extensions. +// +// This must be called for each profile allowed by the initial call to requireProfile(). +// +// Profiles are all masks, which can be "or"-ed together. +// +// ENoProfile +// ECoreProfile +// ECompatibilityProfile +// EEsProfile +// +// The ENoProfile profile is only for desktop, before profiles showed up in version 150; +// All other #version with no profile default to either es or core, and so have profiles. +// +// You can select all but a particular profile using ~. The following basically means "desktop": +// +// ~EEsProfile +// +// 6) If built-in symbols are added by the extension, add them in Initialize.cpp: Their use +// will be automatically error checked against the extensions enabled at that moment. +// see the comment at the top of Initialize.cpp for where to put them. Establish them at +// the earliest release that supports the extension. Then, tag them with the +// set of extensions that both enable them and are necessary, given the version of the symbol +// table. (There is a different symbol table for each version.) +// +// 7) If the extension has additional requirements like minimum SPIR-V version required, add them +// to extensionRequires() + +#include "parseVersions.h" +#include "localintermediate.h" + +namespace glslang { + +#ifndef GLSLANG_WEB + +// +// Initialize all extensions, almost always to 'disable', as once their features +// are incorporated into a core version, their features are supported through allowing that +// core version, not through a pseudo-enablement of the extension. 
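+// As an illustration of step 2 in the comment block above (XXX_extension_X is
+// the hypothetical name used there, not a real extension): a new extension that
+// also requires SPIR-V 1.4 or later would add both
+//     extensionBehavior[XXX_extension_X] = EBhDisable;
+// and an entry { XXX_extension_X, EShTargetSpv_1_4 } to the exts[] table below,
+// mirroring what is done for E_GL_EXT_ray_tracing.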
+// +void TParseVersions::initializeExtensionBehavior() +{ + typedef struct { + const char *const extensionName; + EShTargetLanguageVersion minSpvVersion; + } extensionData; + + const extensionData exts[] = { {E_GL_EXT_ray_tracing, EShTargetSpv_1_4} }; + + for (size_t ii = 0; ii < sizeof(exts) / sizeof(exts[0]); ii++) { + // Add only extensions which require > spv1.0 to save space in map + if (exts[ii].minSpvVersion > EShTargetSpv_1_0) { + extensionMinSpv[E_GL_EXT_ray_tracing] = exts[ii].minSpvVersion; + } + } + + extensionBehavior[E_GL_OES_texture_3D] = EBhDisable; + extensionBehavior[E_GL_OES_standard_derivatives] = EBhDisable; + extensionBehavior[E_GL_EXT_frag_depth] = EBhDisable; + extensionBehavior[E_GL_OES_EGL_image_external] = EBhDisable; + extensionBehavior[E_GL_OES_EGL_image_external_essl3] = EBhDisable; + extensionBehavior[E_GL_EXT_YUV_target] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_texture_lod] = EBhDisable; + extensionBehavior[E_GL_EXT_shadow_samplers] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_rectangle] = EBhDisable; + extensionBehavior[E_GL_3DL_array_objects] = EBhDisable; + extensionBehavior[E_GL_ARB_shading_language_420pack] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_gather] = EBhDisable; + extensionBehavior[E_GL_ARB_gpu_shader5] = EBhDisablePartial; + extensionBehavior[E_GL_ARB_separate_shader_objects] = EBhDisable; + extensionBehavior[E_GL_ARB_compute_shader] = EBhDisable; + extensionBehavior[E_GL_ARB_tessellation_shader] = EBhDisable; + extensionBehavior[E_GL_ARB_enhanced_layouts] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_cube_map_array] = EBhDisable; + extensionBehavior[E_GL_ARB_texture_multisample] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_texture_lod] = EBhDisable; + extensionBehavior[E_GL_ARB_explicit_attrib_location] = EBhDisable; + extensionBehavior[E_GL_ARB_explicit_uniform_location] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_image_load_store] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_atomic_counters] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_draw_parameters] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_group_vote] = EBhDisable; + extensionBehavior[E_GL_ARB_derivative_control] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_texture_image_samples] = EBhDisable; + extensionBehavior[E_GL_ARB_viewport_array] = EBhDisable; + extensionBehavior[E_GL_ARB_gpu_shader_int64] = EBhDisable; + extensionBehavior[E_GL_ARB_gpu_shader_fp64] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_ballot] = EBhDisable; + extensionBehavior[E_GL_ARB_sparse_texture2] = EBhDisable; + extensionBehavior[E_GL_ARB_sparse_texture_clamp] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_stencil_export] = EBhDisable; +// extensionBehavior[E_GL_ARB_cull_distance] = EBhDisable; // present for 4.5, but need extension control over block members + extensionBehavior[E_GL_ARB_post_depth_coverage] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_viewport_layer_array] = EBhDisable; + extensionBehavior[E_GL_ARB_fragment_shader_interlock] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_clock] = EBhDisable; + extensionBehavior[E_GL_ARB_uniform_buffer_object] = EBhDisable; + extensionBehavior[E_GL_ARB_sample_shading] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_bit_encoding] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_image_size] = EBhDisable; + extensionBehavior[E_GL_ARB_shader_storage_buffer_object] = EBhDisable; + extensionBehavior[E_GL_ARB_shading_language_packing] = EBhDisable; + 
extensionBehavior[E_GL_ARB_texture_query_lod] = EBhDisable; + extensionBehavior[E_GL_ARB_vertex_attrib_64bit] = EBhDisable; + + extensionBehavior[E_GL_KHR_shader_subgroup_basic] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_vote] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_arithmetic] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_ballot] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_shuffle] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_shuffle_relative] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_clustered] = EBhDisable; + extensionBehavior[E_GL_KHR_shader_subgroup_quad] = EBhDisable; + extensionBehavior[E_GL_KHR_memory_scope_semantics] = EBhDisable; + + extensionBehavior[E_GL_EXT_shader_atomic_int64] = EBhDisable; + + extensionBehavior[E_GL_EXT_shader_non_constant_global_initializers] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_image_load_formatted] = EBhDisable; + extensionBehavior[E_GL_EXT_post_depth_coverage] = EBhDisable; + extensionBehavior[E_GL_EXT_control_flow_attributes] = EBhDisable; + extensionBehavior[E_GL_EXT_nonuniform_qualifier] = EBhDisable; + extensionBehavior[E_GL_EXT_samplerless_texture_functions] = EBhDisable; + extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable; + extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable; + extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable; + extensionBehavior[E_GL_EXT_buffer_reference2] = EBhDisable; + extensionBehavior[E_GL_EXT_buffer_reference_uvec2] = EBhDisable; + extensionBehavior[E_GL_EXT_demote_to_helper_invocation] = EBhDisable; + extensionBehavior[E_GL_EXT_debug_printf] = EBhDisable; + + extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable; + + // #line and #include + extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable; + extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable; + + extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable; + extensionBehavior[E_GL_AMD_gcn_shader] = EBhDisable; + extensionBehavior[E_GL_AMD_gpu_shader_half_float] = EBhDisable; + extensionBehavior[E_GL_AMD_texture_gather_bias_lod] = EBhDisable; + extensionBehavior[E_GL_AMD_gpu_shader_int16] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable; + extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable; + extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable; + + extensionBehavior[E_GL_INTEL_shader_integer_functions2] = EBhDisable; + + extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable; + extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable; + extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable; + extensionBehavior[E_GL_NV_stereo_view_rendering] = EBhDisable; + extensionBehavior[E_GL_NVX_multiview_per_view_attributes] = EBhDisable; + extensionBehavior[E_GL_NV_shader_atomic_int64] = EBhDisable; + extensionBehavior[E_GL_NV_conservative_raster_underestimation] = EBhDisable; + extensionBehavior[E_GL_NV_shader_noperspective_interpolation] = EBhDisable; + extensionBehavior[E_GL_NV_shader_subgroup_partitioned] = EBhDisable; + extensionBehavior[E_GL_NV_shading_rate_image] = EBhDisable; + extensionBehavior[E_GL_NV_ray_tracing] = EBhDisable; + extensionBehavior[E_GL_NV_fragment_shader_barycentric] = EBhDisable; + 
extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable; + extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable; + extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable; + + extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable; + extensionBehavior[E_GL_NV_shader_sm_builtins] = EBhDisable; + extensionBehavior[E_GL_NV_integer_cooperative_matrix] = EBhDisable; + + // AEP + extensionBehavior[E_GL_ANDROID_extension_pack_es31a] = EBhDisable; + extensionBehavior[E_GL_KHR_blend_equation_advanced] = EBhDisable; + extensionBehavior[E_GL_OES_sample_variables] = EBhDisable; + extensionBehavior[E_GL_OES_shader_image_atomic] = EBhDisable; + extensionBehavior[E_GL_OES_shader_multisample_interpolation] = EBhDisable; + extensionBehavior[E_GL_OES_texture_storage_multisample_2d_array] = EBhDisable; + extensionBehavior[E_GL_EXT_geometry_shader] = EBhDisable; + extensionBehavior[E_GL_EXT_geometry_point_size] = EBhDisable; + extensionBehavior[E_GL_EXT_gpu_shader5] = EBhDisable; + extensionBehavior[E_GL_EXT_primitive_bounding_box] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_io_blocks] = EBhDisable; + extensionBehavior[E_GL_EXT_tessellation_shader] = EBhDisable; + extensionBehavior[E_GL_EXT_tessellation_point_size] = EBhDisable; + extensionBehavior[E_GL_EXT_texture_buffer] = EBhDisable; + extensionBehavior[E_GL_EXT_texture_cube_map_array] = EBhDisable; + + // OES matching AEP + extensionBehavior[E_GL_OES_geometry_shader] = EBhDisable; + extensionBehavior[E_GL_OES_geometry_point_size] = EBhDisable; + extensionBehavior[E_GL_OES_gpu_shader5] = EBhDisable; + extensionBehavior[E_GL_OES_primitive_bounding_box] = EBhDisable; + extensionBehavior[E_GL_OES_shader_io_blocks] = EBhDisable; + extensionBehavior[E_GL_OES_tessellation_shader] = EBhDisable; + extensionBehavior[E_GL_OES_tessellation_point_size] = EBhDisable; + extensionBehavior[E_GL_OES_texture_buffer] = EBhDisable; + extensionBehavior[E_GL_OES_texture_cube_map_array] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_integer_mix] = EBhDisable; + + // EXT extensions + extensionBehavior[E_GL_EXT_device_group] = EBhDisable; + extensionBehavior[E_GL_EXT_multiview] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_realtime_clock] = EBhDisable; + extensionBehavior[E_GL_EXT_ray_tracing] = EBhDisable; + extensionBehavior[E_GL_EXT_ray_query] = EBhDisable; + extensionBehavior[E_GL_EXT_ray_flags_primitive_culling] = EBhDisable; + extensionBehavior[E_GL_EXT_blend_func_extended] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_implicit_conversions] = EBhDisable; + + // OVR extensions + extensionBehavior[E_GL_OVR_multiview] = EBhDisable; + extensionBehavior[E_GL_OVR_multiview2] = EBhDisable; + + // explicit types + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int8] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int16] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int32] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_int64] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float16] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable; + + // subgroup extended types + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int8] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int16] = 
EBhDisable; + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int64] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_float16] = EBhDisable; + extensionBehavior[E_GL_EXT_shader_atomic_float] = EBhDisable; +} + +#endif // GLSLANG_WEB + +// Get code that is not part of a shared symbol table, is specific to this shader, +// or needed by the preprocessor (which does not use a shared symbol table). +void TParseVersions::getPreamble(std::string& preamble) +{ + if (isEsProfile()) { + preamble = + "#define GL_ES 1\n" + "#define GL_FRAGMENT_PRECISION_HIGH 1\n" +#ifdef GLSLANG_WEB + ; +#else + "#define GL_OES_texture_3D 1\n" + "#define GL_OES_standard_derivatives 1\n" + "#define GL_EXT_frag_depth 1\n" + "#define GL_OES_EGL_image_external 1\n" + "#define GL_OES_EGL_image_external_essl3 1\n" + "#define GL_EXT_YUV_target 1\n" + "#define GL_EXT_shader_texture_lod 1\n" + "#define GL_EXT_shadow_samplers 1\n" + + // AEP + "#define GL_ANDROID_extension_pack_es31a 1\n" + "#define GL_OES_sample_variables 1\n" + "#define GL_OES_shader_image_atomic 1\n" + "#define GL_OES_shader_multisample_interpolation 1\n" + "#define GL_OES_texture_storage_multisample_2d_array 1\n" + "#define GL_EXT_geometry_shader 1\n" + "#define GL_EXT_geometry_point_size 1\n" + "#define GL_EXT_gpu_shader5 1\n" + "#define GL_EXT_primitive_bounding_box 1\n" + "#define GL_EXT_shader_io_blocks 1\n" + "#define GL_EXT_tessellation_shader 1\n" + "#define GL_EXT_tessellation_point_size 1\n" + "#define GL_EXT_texture_buffer 1\n" + "#define GL_EXT_texture_cube_map_array 1\n" + "#define GL_EXT_shader_implicit_conversions 1\n" + "#define GL_EXT_shader_integer_mix 1\n" + "#define GL_EXT_blend_func_extended 1\n" + + // OES matching AEP + "#define GL_OES_geometry_shader 1\n" + "#define GL_OES_geometry_point_size 1\n" + "#define GL_OES_gpu_shader5 1\n" + "#define GL_OES_primitive_bounding_box 1\n" + "#define GL_OES_shader_io_blocks 1\n" + "#define GL_OES_tessellation_shader 1\n" + "#define GL_OES_tessellation_point_size 1\n" + "#define GL_OES_texture_buffer 1\n" + "#define GL_OES_texture_cube_map_array 1\n" + "#define GL_EXT_shader_non_constant_global_initializers 1\n" + ; + + if (isEsProfile() && version >= 300) { + preamble += "#define GL_NV_shader_noperspective_interpolation 1\n"; + } + + } else { + preamble = + "#define GL_FRAGMENT_PRECISION_HIGH 1\n" + "#define GL_ARB_texture_rectangle 1\n" + "#define GL_ARB_shading_language_420pack 1\n" + "#define GL_ARB_texture_gather 1\n" + "#define GL_ARB_gpu_shader5 1\n" + "#define GL_ARB_separate_shader_objects 1\n" + "#define GL_ARB_compute_shader 1\n" + "#define GL_ARB_tessellation_shader 1\n" + "#define GL_ARB_enhanced_layouts 1\n" + "#define GL_ARB_texture_cube_map_array 1\n" + "#define GL_ARB_texture_multisample 1\n" + "#define GL_ARB_shader_texture_lod 1\n" + "#define GL_ARB_explicit_attrib_location 1\n" + "#define GL_ARB_explicit_uniform_location 1\n" + "#define GL_ARB_shader_image_load_store 1\n" + "#define GL_ARB_shader_atomic_counters 1\n" + "#define GL_ARB_shader_draw_parameters 1\n" + "#define GL_ARB_shader_group_vote 1\n" + "#define GL_ARB_derivative_control 1\n" + "#define GL_ARB_shader_texture_image_samples 1\n" + "#define GL_ARB_viewport_array 1\n" + "#define GL_ARB_gpu_shader_int64 1\n" + "#define GL_ARB_gpu_shader_fp64 1\n" + "#define GL_ARB_shader_ballot 1\n" + "#define GL_ARB_sparse_texture2 1\n" + "#define GL_ARB_sparse_texture_clamp 1\n" + "#define GL_ARB_shader_stencil_export 1\n" + "#define GL_ARB_sample_shading 1\n" + "#define GL_ARB_shader_image_size 
1\n" + "#define GL_ARB_shading_language_packing 1\n" +// "#define GL_ARB_cull_distance 1\n" // present for 4.5, but need extension control over block members + "#define GL_ARB_post_depth_coverage 1\n" + "#define GL_ARB_fragment_shader_interlock 1\n" + "#define GL_ARB_uniform_buffer_object 1\n" + "#define GL_ARB_shader_bit_encoding 1\n" + "#define GL_ARB_shader_storage_buffer_object 1\n" + "#define GL_ARB_texture_query_lod 1\n" + "#define GL_ARB_vertex_attrib_64bit 1\n" + "#define GL_EXT_shader_non_constant_global_initializers 1\n" + "#define GL_EXT_shader_image_load_formatted 1\n" + "#define GL_EXT_post_depth_coverage 1\n" + "#define GL_EXT_control_flow_attributes 1\n" + "#define GL_EXT_nonuniform_qualifier 1\n" + "#define GL_EXT_shader_16bit_storage 1\n" + "#define GL_EXT_shader_8bit_storage 1\n" + "#define GL_EXT_samplerless_texture_functions 1\n" + "#define GL_EXT_scalar_block_layout 1\n" + "#define GL_EXT_fragment_invocation_density 1\n" + "#define GL_EXT_buffer_reference 1\n" + "#define GL_EXT_buffer_reference2 1\n" + "#define GL_EXT_buffer_reference_uvec2 1\n" + "#define GL_EXT_demote_to_helper_invocation 1\n" + "#define GL_EXT_debug_printf 1\n" + + // GL_KHR_shader_subgroup + "#define GL_KHR_shader_subgroup_basic 1\n" + "#define GL_KHR_shader_subgroup_vote 1\n" + "#define GL_KHR_shader_subgroup_arithmetic 1\n" + "#define GL_KHR_shader_subgroup_ballot 1\n" + "#define GL_KHR_shader_subgroup_shuffle 1\n" + "#define GL_KHR_shader_subgroup_shuffle_relative 1\n" + "#define GL_KHR_shader_subgroup_clustered 1\n" + "#define GL_KHR_shader_subgroup_quad 1\n" + + "#define GL_EXT_shader_atomic_int64 1\n" + "#define GL_EXT_shader_realtime_clock 1\n" + "#define GL_EXT_ray_tracing 1\n" + "#define GL_EXT_ray_query 1\n" + "#define GL_EXT_ray_flags_primitive_culling 1\n" + + "#define GL_AMD_shader_ballot 1\n" + "#define GL_AMD_shader_trinary_minmax 1\n" + "#define GL_AMD_shader_explicit_vertex_parameter 1\n" + "#define GL_AMD_gcn_shader 1\n" + "#define GL_AMD_gpu_shader_half_float 1\n" + "#define GL_AMD_texture_gather_bias_lod 1\n" + "#define GL_AMD_gpu_shader_int16 1\n" + "#define GL_AMD_shader_image_load_store_lod 1\n" + "#define GL_AMD_shader_fragment_mask 1\n" + "#define GL_AMD_gpu_shader_half_float_fetch 1\n" + + "#define GL_INTEL_shader_integer_functions2 1\n" + + "#define GL_NV_sample_mask_override_coverage 1\n" + "#define GL_NV_geometry_shader_passthrough 1\n" + "#define GL_NV_viewport_array2 1\n" + "#define GL_NV_shader_atomic_int64 1\n" + "#define GL_NV_conservative_raster_underestimation 1\n" + "#define GL_NV_shader_subgroup_partitioned 1\n" + "#define GL_NV_shading_rate_image 1\n" + "#define GL_NV_ray_tracing 1\n" + "#define GL_NV_fragment_shader_barycentric 1\n" + "#define GL_NV_compute_shader_derivatives 1\n" + "#define GL_NV_shader_texture_footprint 1\n" + "#define GL_NV_mesh_shader 1\n" + "#define GL_NV_cooperative_matrix 1\n" + "#define GL_NV_integer_cooperative_matrix 1\n" + + "#define GL_EXT_shader_explicit_arithmetic_types 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int8 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int16 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int32 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_int64 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_float16 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_float32 1\n" + "#define GL_EXT_shader_explicit_arithmetic_types_float64 1\n" + + "#define GL_EXT_shader_subgroup_extended_types_int8 1\n" + "#define GL_EXT_shader_subgroup_extended_types_int16 1\n" + 
"#define GL_EXT_shader_subgroup_extended_types_int64 1\n" + "#define GL_EXT_shader_subgroup_extended_types_float16 1\n" + + "#define GL_EXT_shader_atomic_float 1\n" + ; + + if (version >= 150) { + // define GL_core_profile and GL_compatibility_profile + preamble += "#define GL_core_profile 1\n"; + + if (profile == ECompatibilityProfile) + preamble += "#define GL_compatibility_profile 1\n"; + } +#endif // GLSLANG_WEB + } + +#ifndef GLSLANG_WEB + if ((!isEsProfile() && version >= 140) || + (isEsProfile() && version >= 310)) { + preamble += + "#define GL_EXT_device_group 1\n" + "#define GL_EXT_multiview 1\n" + "#define GL_NV_shader_sm_builtins 1\n" + ; + } + + if (version >= 300 /* both ES and non-ES */) { + preamble += + "#define GL_OVR_multiview 1\n" + "#define GL_OVR_multiview2 1\n" + ; + } + + // #line and #include + preamble += + "#define GL_GOOGLE_cpp_style_line_directive 1\n" + "#define GL_GOOGLE_include_directive 1\n" + "#define GL_KHR_blend_equation_advanced 1\n" + ; +#endif + + // #define VULKAN XXXX + const int numberBufSize = 12; + char numberBuf[numberBufSize]; + if (spvVersion.vulkanGlsl > 0) { + preamble += "#define VULKAN "; + snprintf(numberBuf, numberBufSize, "%d", spvVersion.vulkanGlsl); + preamble += numberBuf; + preamble += "\n"; + } + +#ifndef GLSLANG_WEB + // #define GL_SPIRV XXXX + if (spvVersion.openGl > 0) { + preamble += "#define GL_SPIRV "; + snprintf(numberBuf, numberBufSize, "%d", spvVersion.openGl); + preamble += numberBuf; + preamble += "\n"; + } +#endif +} + +// +// Map from stage enum to externally readable text name. +// +const char* StageName(EShLanguage stage) +{ + switch(stage) { + case EShLangVertex: return "vertex"; + case EShLangFragment: return "fragment"; + case EShLangCompute: return "compute"; +#ifndef GLSLANG_WEB + case EShLangTessControl: return "tessellation control"; + case EShLangTessEvaluation: return "tessellation evaluation"; + case EShLangGeometry: return "geometry"; + case EShLangRayGen: return "ray-generation"; + case EShLangIntersect: return "intersection"; + case EShLangAnyHit: return "any-hit"; + case EShLangClosestHit: return "closest-hit"; + case EShLangMiss: return "miss"; + case EShLangCallable: return "callable"; + case EShLangMeshNV: return "mesh"; + case EShLangTaskNV: return "task"; +#endif + default: return "unknown stage"; + } +} + +// +// When to use requireStage() +// +// If only some stages support a feature. +// +// Operation: If the current stage is not present, give an error message. +// +void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguageMask languageMask, const char* featureDesc) +{ + if (((1 << language) & languageMask) == 0) + error(loc, "not supported in this stage:", featureDesc, StageName(language)); +} + +// If only one stage supports a feature, this can be called. But, all supporting stages +// must be specified with one call. +void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, const char* featureDesc) +{ + requireStage(loc, static_cast(1 << stage), featureDesc); +} + +#ifndef GLSLANG_WEB +// +// When to use requireProfile(): +// +// Use if only some profiles support a feature. However, if within a profile the feature +// is version or extension specific, follow this call with calls to profileRequires(). +// +// Operation: If the current profile is not one of the profileMask, +// give an error message. +// +void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc) +{ + if (! 
(profile & profileMask)) + error(loc, "not supported with this profile:", featureDesc, ProfileName(profile)); +} + +// +// When to use profileRequires(): +// +// If a set of profiles have the same requirements for what version or extensions +// are needed to support a feature. +// +// It must be called for each profile that needs protection. Use requireProfile() first +// to reduce that set of profiles. +// +// Operation: Will issue warnings/errors based on the current profile, version, and extension +// behaviors. It only checks extensions when the current profile is one of the profileMask. +// +// A minVersion of 0 means no version of the profileMask support this in core, +// the extension must be present. +// + +// entry point that takes multiple extensions +void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, + const char* const extensions[], const char* featureDesc) +{ + if (profile & profileMask) { + bool okay = minVersion > 0 && version >= minVersion; +#ifndef GLSLANG_WEB + for (int i = 0; i < numExtensions; ++i) { + switch (getExtensionBehavior(extensions[i])) { + case EBhWarn: + infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc); + // fall through + case EBhRequire: + case EBhEnable: + okay = true; + break; + default: break; // some compilers want this + } + } +#endif + if (! okay) + error(loc, "not supported for this version or the enabled extensions", featureDesc, ""); + } +} + +// entry point for the above that takes a single extension +void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, + const char* featureDesc) +{ + profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc); +} + +void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc) +{ + error(loc, "feature not yet implemented", featureDesc, ""); +} + +// +// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether +// a future compatibility context is being use. +// +void TParseVersions::checkDeprecated(const TSourceLoc& loc, int profileMask, int depVersion, const char* featureDesc) +{ + if (profile & profileMask) { + if (version >= depVersion) { + if (forwardCompatible) + error(loc, "deprecated, may be removed in future release", featureDesc, ""); + else if (! suppressWarnings()) + infoSink.info.message(EPrefixWarning, (TString(featureDesc) + " deprecated in version " + + String(depVersion) + "; may be removed in future release").c_str(), loc); + } + } +} + +// +// Within a set of profiles, see if a feature has now been removed and if so, give an error. +// The version argument is the first version no longer having the feature. +// +void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, int removedVersion, const char* featureDesc) +{ + if (profile & profileMask) { + if (version >= removedVersion) { + const int maxSize = 60; + char buf[maxSize]; + snprintf(buf, maxSize, "%s profile; removed in version %d", ProfileName(profile), removedVersion); + error(loc, "no longer supported in", featureDesc, buf); + } + } +} + +// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false. +// Warns appropriately if the requested behavior of an extension is "warn". 
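+// For example, requireExtensions() and ppRequireExtensions() below call this
+// helper first, and only issue their "required extension not requested" errors
+// when it returns false.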
+bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc) +{ + // First, see if any of the extensions are enabled + for (int i = 0; i < numExtensions; ++i) { + TExtensionBehavior behavior = getExtensionBehavior(extensions[i]); + if (behavior == EBhEnable || behavior == EBhRequire) + return true; + } + + // See if any extensions want to give a warning on use; give warnings for all such extensions + bool warned = false; + for (int i = 0; i < numExtensions; ++i) { + TExtensionBehavior behavior = getExtensionBehavior(extensions[i]); + if (behavior == EBhDisable && relaxedErrors()) { + infoSink.info.message(EPrefixWarning, "The following extension must be enabled to use this feature:", loc); + behavior = EBhWarn; + } + if (behavior == EBhWarn) { + infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc); + warned = true; + } + } + if (warned) + return true; + return false; +} + +// +// Use when there are no profile/version to check, it's just an error if one of the +// extensions is not present. +// +void TParseVersions::requireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], + const char* featureDesc) +{ + if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc)) + return; + + // If we get this far, give errors explaining what extensions are needed + if (numExtensions == 1) + error(loc, "required extension not requested:", featureDesc, extensions[0]); + else { + error(loc, "required extension not requested:", featureDesc, "Possible extensions include:"); + for (int i = 0; i < numExtensions; ++i) + infoSink.info.message(EPrefixNone, extensions[i]); + } +} + +// +// Use by preprocessor when there are no profile/version to check, it's just an error if one of the +// extensions is not present. +// +void TParseVersions::ppRequireExtensions(const TSourceLoc& loc, int numExtensions, const char* const extensions[], + const char* featureDesc) +{ + if (checkExtensionsRequested(loc, numExtensions, extensions, featureDesc)) + return; + + // If we get this far, give errors explaining what extensions are needed + if (numExtensions == 1) + ppError(loc, "required extension not requested:", featureDesc, extensions[0]); + else { + ppError(loc, "required extension not requested:", featureDesc, "Possible extensions include:"); + for (int i = 0; i < numExtensions; ++i) + infoSink.info.message(EPrefixNone, extensions[i]); + } +} + +TExtensionBehavior TParseVersions::getExtensionBehavior(const char* extension) +{ + auto iter = extensionBehavior.find(TString(extension)); + if (iter == extensionBehavior.end()) + return EBhMissing; + else + return iter->second; +} + +// Returns true if the given extension is set to enable, require, or warn. +bool TParseVersions::extensionTurnedOn(const char* const extension) +{ + switch (getExtensionBehavior(extension)) { + case EBhEnable: + case EBhRequire: + case EBhWarn: + return true; + default: + break; + } + return false; +} +// See if any of the extensions are set to enable, require, or warn. +bool TParseVersions::extensionsTurnedOn(int numExtensions, const char* const extensions[]) +{ + for (int i = 0; i < numExtensions; ++i) { + if (extensionTurnedOn(extensions[i])) + return true; + } + return false; +} + +// +// Change the current state of an extension's behavior. 
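+// For example, a shader directive such as
+//     #extension GL_EXT_ray_tracing : enable
+// reaches this function with extension = "GL_EXT_ray_tracing" and
+// behaviorString = "enable". Some extensions implicitly update others as well,
+// e.g. GL_ANDROID_extension_pack_es31a propagates to the whole AEP set below.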
+// +void TParseVersions::updateExtensionBehavior(int line, const char* extension, const char* behaviorString) +{ + // Translate from text string of extension's behavior to an enum. + TExtensionBehavior behavior = EBhDisable; + if (! strcmp("require", behaviorString)) + behavior = EBhRequire; + else if (! strcmp("enable", behaviorString)) + behavior = EBhEnable; + else if (! strcmp("disable", behaviorString)) + behavior = EBhDisable; + else if (! strcmp("warn", behaviorString)) + behavior = EBhWarn; + else { + error(getCurrentLoc(), "behavior not supported:", "#extension", behaviorString); + return; + } + bool on = behavior != EBhDisable; + + // check if extension is used with correct shader stage + checkExtensionStage(getCurrentLoc(), extension); + + // check if extension has additional requirements + extensionRequires(getCurrentLoc(), extension ,behaviorString); + + // update the requested extension + updateExtensionBehavior(extension, behavior); + + // see if need to propagate to implicitly modified things + if (strcmp(extension, "GL_ANDROID_extension_pack_es31a") == 0) { + // to everything in AEP + updateExtensionBehavior(line, "GL_KHR_blend_equation_advanced", behaviorString); + updateExtensionBehavior(line, "GL_OES_sample_variables", behaviorString); + updateExtensionBehavior(line, "GL_OES_shader_image_atomic", behaviorString); + updateExtensionBehavior(line, "GL_OES_shader_multisample_interpolation", behaviorString); + updateExtensionBehavior(line, "GL_OES_texture_storage_multisample_2d_array", behaviorString); + updateExtensionBehavior(line, "GL_EXT_geometry_shader", behaviorString); + updateExtensionBehavior(line, "GL_EXT_gpu_shader5", behaviorString); + updateExtensionBehavior(line, "GL_EXT_primitive_bounding_box", behaviorString); + updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString); + updateExtensionBehavior(line, "GL_EXT_tessellation_shader", behaviorString); + updateExtensionBehavior(line, "GL_EXT_texture_buffer", behaviorString); + updateExtensionBehavior(line, "GL_EXT_texture_cube_map_array", behaviorString); + } + // geometry to io_blocks + else if (strcmp(extension, "GL_EXT_geometry_shader") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString); + else if (strcmp(extension, "GL_OES_geometry_shader") == 0) + updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString); + // tessellation to io_blocks + else if (strcmp(extension, "GL_EXT_tessellation_shader") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_io_blocks", behaviorString); + else if (strcmp(extension, "GL_OES_tessellation_shader") == 0) + updateExtensionBehavior(line, "GL_OES_shader_io_blocks", behaviorString); + else if (strcmp(extension, "GL_GOOGLE_include_directive") == 0) + updateExtensionBehavior(line, "GL_GOOGLE_cpp_style_line_directive", behaviorString); + // subgroup_* to subgroup_basic + else if (strcmp(extension, "GL_KHR_shader_subgroup_vote") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_arithmetic") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_ballot") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_shuffle") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, 
"GL_KHR_shader_subgroup_shuffle_relative") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_clustered") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0) + updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString); + else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0 || + strcmp(extension, "GL_EXT_buffer_reference_uvec2") == 0) + updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString); + else if (strcmp(extension, "GL_NV_integer_cooperative_matrix") == 0) + updateExtensionBehavior(line, "GL_NV_cooperative_matrix", behaviorString); + // subgroup extended types to explicit types + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int8") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int8", behaviorString); + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int16") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int16", behaviorString); + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int64") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int64", behaviorString); + else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_float16") == 0) + updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_float16", behaviorString); + + // see if we need to update the numeric features + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_int8") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_int8, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_int16") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_int16, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_int32") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_int32, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_int64") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_int64, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_float16") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_float16, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_float32") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_float32, on); + else if (strcmp(extension, "GL_EXT_shader_explicit_arithmetic_types_float64") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_explicit_arithmetic_types_float64, on); + else if (strcmp(extension, "GL_EXT_shader_implicit_conversions") == 0) + intermediate.updateNumericFeature(TNumericFeatures::shader_implicit_conversions, on); + else if (strcmp(extension, "GL_ARB_gpu_shader_fp64") == 0) + intermediate.updateNumericFeature(TNumericFeatures::gpu_shader_fp64, on); + else if (strcmp(extension, 
"GL_AMD_gpu_shader_int16") == 0) + intermediate.updateNumericFeature(TNumericFeatures::gpu_shader_int16, on); + else if (strcmp(extension, "GL_AMD_gpu_shader_half_float") == 0) + intermediate.updateNumericFeature(TNumericFeatures::gpu_shader_half_float, on); +} + +void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior) +{ + // Update the current behavior + if (strcmp(extension, "all") == 0) { + // special case for the 'all' extension; apply it to every extension present + if (behavior == EBhRequire || behavior == EBhEnable) { + error(getCurrentLoc(), "extension 'all' cannot have 'require' or 'enable' behavior", "#extension", ""); + return; + } else { + for (auto iter = extensionBehavior.begin(); iter != extensionBehavior.end(); ++iter) + iter->second = behavior; + } + } else { + // Do the update for this single extension + auto iter = extensionBehavior.find(TString(extension)); + if (iter == extensionBehavior.end()) { + switch (behavior) { + case EBhRequire: + error(getCurrentLoc(), "extension not supported:", "#extension", extension); + break; + case EBhEnable: + case EBhWarn: + case EBhDisable: + warn(getCurrentLoc(), "extension not supported:", "#extension", extension); + break; + default: + assert(0 && "unexpected behavior"); + } + + return; + } else { + if (iter->second == EBhDisablePartial) + warn(getCurrentLoc(), "extension is only partially supported:", "#extension", extension); + if (behavior != EBhDisable) + intermediate.addRequestedExtension(extension); + iter->second = behavior; + } + } +} + +// Check if extension is used with correct shader stage. +void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension) +{ + // GL_NV_mesh_shader extension is only allowed in task/mesh shaders + if (strcmp(extension, "GL_NV_mesh_shader") == 0) { + requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask), + "#extension GL_NV_mesh_shader"); + profileRequires(loc, ECoreProfile, 450, 0, "#extension GL_NV_mesh_shader"); + profileRequires(loc, EEsProfile, 320, 0, "#extension GL_NV_mesh_shader"); + } +} + +// Check if extension has additional requirements +void TParseVersions::extensionRequires(const TSourceLoc &loc, const char * const extension, const char *behaviorString) +{ + bool isEnabled = false; + if (!strcmp("require", behaviorString)) + isEnabled = true; + else if (!strcmp("enable", behaviorString)) + isEnabled = true; + + if (isEnabled) { + unsigned int minSpvVersion = 0; + auto iter = extensionMinSpv.find(TString(extension)); + if (iter != extensionMinSpv.end()) + minSpvVersion = iter->second; + requireSpv(loc, extension, minSpvVersion); + } +} + +// Call for any operation needing full GLSL integer data-type support. +void TParseVersions::fullIntegerCheck(const TSourceLoc& loc, const char* op) +{ + profileRequires(loc, ENoProfile, 130, nullptr, op); + profileRequires(loc, EEsProfile, 300, nullptr, op); +} + +// Call for any operation needing GLSL double data-type support. 
+void TParseVersions::doubleCheck(const TSourceLoc& loc, const char* op) +{ + + //requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + if (language == EShLangVertex) { + const char* const f64_Extensions[] = {E_GL_ARB_gpu_shader_fp64, E_GL_ARB_vertex_attrib_64bit}; + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, 2, f64_Extensions, op); + } else + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader_fp64, op); +} + +// Call for any operation needing GLSL float16 data-type support. +void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +bool TParseVersions::float16Arithmetic() +{ + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions); +} + +bool TParseVersions::int16Arithmetic() +{ + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions); +} + +bool TParseVersions::int8Arithmetic() +{ + const char* const extensions[] = { + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions); +} + +void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) +{ + TString combined; + combined = op; + combined += ": "; + combined += featureDesc; + + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str()); +} + +void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) +{ + TString combined; + combined = op; + combined += ": "; + combined += featureDesc; + + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str()); +} + +void TParseVersions::requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) +{ + TString combined; + combined = op; + combined += ": "; + combined += featureDesc; + + const char* const extensions[] = { + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str()); +} + +void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_half_float, + E_GL_EXT_shader_16bit_storage, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); 
+ } +} + +// Call for any operation needing GLSL float32 data-type support. +void TParseVersions::explicitFloat32Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float32}; + requireExtensions(loc, 2, extensions, op); + } +} + +// Call for any operation needing GLSL float64 data-type support. +void TParseVersions::explicitFloat64Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_float64}; + requireExtensions(loc, 2, extensions, op); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op); + } +} + +// Call for any operation needing GLSL explicit int8 data-type support. +void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + requireExtensions(loc, 2, extensions, op); + } +} + +// Call for any operation needing GLSL float16 opaque-type support +void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + requireExtensions(loc, 1, &E_GL_AMD_gpu_shader_half_float_fetch, op); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op); + } +} + +// Call for any operation needing GLSL explicit int16 data-type support. +void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_16bit_storage, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +void TParseVersions::int8ScalarVectorCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[] = { + E_GL_EXT_shader_8bit_storage, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int8}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +// Call for any operation needing GLSL explicit int32 data-type support. +void TParseVersions::explicitInt32Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! builtIn) { + const char* const extensions[2] = {E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int32}; + requireExtensions(loc, 2, extensions, op); + } +} + +// Call for any operation needing GLSL 64-bit integer data-type support. +void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (! 
builtIn) { + const char* const extensions[3] = {E_GL_ARB_gpu_shader_int64, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int64}; + requireExtensions(loc, 3, extensions, op); + requireProfile(loc, ECoreProfile | ECompatibilityProfile, op); + profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op); + } +} + +void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = {E_GL_NV_cooperative_matrix}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} + +void TParseVersions::intcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn) +{ + if (!builtIn) { + const char* const extensions[] = {E_GL_NV_integer_cooperative_matrix}; + requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op); + } +} +#endif // GLSLANG_WEB +// Call for any operation removed because SPIR-V is in use. +void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op) +{ + if (spvVersion.spv != 0) + error(loc, "not allowed when generating SPIR-V", op, ""); +} + +// Call for any operation removed because Vulkan SPIR-V is being generated. +void TParseVersions::vulkanRemoved(const TSourceLoc& loc, const char* op) +{ + if (spvVersion.vulkan > 0) + error(loc, "not allowed when using GLSL for Vulkan", op, ""); +} + +// Call for any operation that requires Vulkan. +void TParseVersions::requireVulkan(const TSourceLoc& loc, const char* op) +{ +#ifndef GLSLANG_WEB + if (spvVersion.vulkan == 0) + error(loc, "only allowed when using GLSL for Vulkan", op, ""); +#endif +} + +// Call for any operation that requires SPIR-V. +void TParseVersions::requireSpv(const TSourceLoc& loc, const char* op) +{ +#ifndef GLSLANG_WEB + if (spvVersion.spv == 0) + error(loc, "only allowed when generating SPIR-V", op, ""); +#endif +} +void TParseVersions::requireSpv(const TSourceLoc& loc, const char *op, unsigned int version) +{ +#ifndef GLSLANG_WEB + if (spvVersion.spv < version) + error(loc, "not supported for current targeted SPIR-V version", op, ""); +#endif +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/Versions.h b/src/third_party/glslang/glslang/MachineIndependent/Versions.h new file mode 100644 index 0000000..f52f605 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/Versions.h @@ -0,0 +1,334 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _VERSIONS_INCLUDED_ +#define _VERSIONS_INCLUDED_ + +#define LAST_ELEMENT_MARKER(x) x + +// +// Help manage multiple profiles, versions, extensions etc. +// + +// +// Profiles are set up for masking operations, so queries can be done on multiple +// profiles at the same time. +// +// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible +// defects from mixing the two different forms. +// +typedef enum : unsigned { + EBadProfile = 0, + ENoProfile = (1 << 0), // only for desktop, before profiles showed up + ECoreProfile = (1 << 1), + ECompatibilityProfile = (1 << 2), + EEsProfile = (1 << 3), + LAST_ELEMENT_MARKER(EProfileCount), +} EProfile; + +namespace glslang { + +// +// Map from profile enum to externally readable text name. +// +inline const char* ProfileName(EProfile profile) +{ + switch (profile) { + case ENoProfile: return "none"; + case ECoreProfile: return "core"; + case ECompatibilityProfile: return "compatibility"; + case EEsProfile: return "es"; + default: return "unknown profile"; + } +} + +// +// What source rules, validation rules, target language, etc. are needed or +// desired for SPIR-V? +// +// 0 means a target or rule set is not enabled (ignore rules from that entity). +// Non-0 means to apply semantic rules arising from that version of its rule set. +// The union of all requested rule sets will be applied. +// +struct SpvVersion { + SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0) {} + unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header + int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX" + int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use + int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX" +}; + +// +// The behaviors from the GLSL "#extension extension_name : behavior" +// +typedef enum { + EBhMissing = 0, + EBhRequire, + EBhEnable, + EBhWarn, + EBhDisable, + EBhDisablePartial // use as initial state of an extension that is only partially implemented +} TExtensionBehavior; + +// +// Symbolic names for extensions. Strings may be directly used when calling the +// functions, but better to have the compiler do spelling checks. 
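+// For example, prefer
+//     requireExtensions(loc, 1, &E_GL_EXT_ray_tracing, "feature description");
+// over passing the raw string literal "GL_EXT_ray_tracing": a misspelled
+// constant fails to compile, while a misspelled string silently never matches.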
+// +const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D"; +const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives"; +const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth"; +const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external"; +const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3"; +const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target"; +const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod"; +const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers"; + +const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle"; +const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects"; +const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack"; +const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather"; +const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5"; +const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects"; +const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader"; +const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader"; +const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts"; +const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array"; +const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample"; +const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod"; +const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location"; +const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location"; +const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store"; +const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters"; +const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters"; +const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote"; +const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control"; +const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples"; +const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array"; +const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64"; +const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64"; +const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot"; +const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2"; +const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp"; +const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export"; +// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members +const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage"; +const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array"; +const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock"; +const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock"; +const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object"; +const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading"; +const char* const E_GL_ARB_shader_bit_encoding = "GL_ARB_shader_bit_encoding"; +const char* const E_GL_ARB_shader_image_size = "GL_ARB_shader_image_size"; +const char* const 
E_GL_ARB_shader_storage_buffer_object = "GL_ARB_shader_storage_buffer_object"; +const char* const E_GL_ARB_shading_language_packing = "GL_ARB_shading_language_packing"; +const char* const E_GL_ARB_texture_query_lod = "GL_ARB_texture_query_lod"; +const char* const E_GL_ARB_vertex_attrib_64bit = "GL_ARB_vertex_attrib_64bit"; + +const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic"; +const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote"; +const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic"; +const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot"; +const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle"; +const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative"; +const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered"; +const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad"; +const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics"; + +const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64"; + +const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers"; +const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted"; + +const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage"; +const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage"; + + +// EXT extensions +const char* const E_GL_EXT_device_group = "GL_EXT_device_group"; +const char* const E_GL_EXT_multiview = "GL_EXT_multiview"; +const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage"; +const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes"; +const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier"; +const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions"; +const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout"; +const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density"; +const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference"; +const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2"; +const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2"; +const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation"; +const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock"; +const char* const E_GL_EXT_debug_printf = "GL_EXT_debug_printf"; +const char* const E_GL_EXT_ray_tracing = "GL_EXT_ray_tracing"; +const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query"; +const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling"; +const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended"; +const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions"; + +// Arrays of extensions for the above viewportEXTs duplications + +const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage }; +const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]); + +// OVR extensions +const char* const E_GL_OVR_multiview = "GL_OVR_multiview"; +const char* const E_GL_OVR_multiview2 
= "GL_OVR_multiview2"; + +const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 }; +const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]); + +// #line and #include +const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive"; +const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive"; + +const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot"; +const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax"; +const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter"; +const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader"; +const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float"; +const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod"; +const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16"; +const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod"; +const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask"; +const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch"; + +const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2"; + +const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage"; +const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough"; +const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2"; +const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering"; +const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes"; +const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64"; +const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation"; +const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation"; +const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned"; +const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image"; +const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing"; +const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric"; +const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives"; +const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint"; +const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader"; + +// Arrays of extensions for the above viewportEXTs duplications + +const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 }; +const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]); + +const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix"; +const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins"; +const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix"; + +// AEP +const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a"; +const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced"; +const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables"; +const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic"; +const char* const 
E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation"; +const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array"; +const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader"; +const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size"; +const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5"; +const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box"; +const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks"; +const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader"; +const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size"; +const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer"; +const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array"; +const char* const E_GL_EXT_shader_integer_mix = "GL_EXT_shader_integer_mix"; + +// OES matching AEP +const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader"; +const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size"; +const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5"; +const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box"; +const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks"; +const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader"; +const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size"; +const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer"; +const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array"; + +// EXT +const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32"; +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64"; + +const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8"; +const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16"; +const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64"; +const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16"; + +const char* const E_GL_EXT_shader_atomic_float = "GL_EXT_shader_atomic_float"; + +// Arrays of extensions for the above AEP duplications + +const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader }; +const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]); + +const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, 
E_GL_OES_geometry_point_size }; +const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]); + +const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 }; +const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]); + +const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box }; +const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]); + +const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks }; +const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]); + +const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader }; +const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]); + +const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size }; +const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]); + +const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer }; +const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]); + +const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array }; +const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]); + +} // end namespace glslang + +#endif // _VERSIONS_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/attribute.cpp b/src/third_party/glslang/glslang/MachineIndependent/attribute.cpp new file mode 100644 index 0000000..9585518 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/attribute.cpp @@ -0,0 +1,346 @@ +// +// Copyright (C) 2017 LunarG, Inc. +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google, Inc., nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef GLSLANG_WEB + +#include "attribute.h" +#include "../Include/intermediate.h" +#include "ParseHelper.h" + +namespace glslang { + +// extract integers out of attribute arguments stored in attribute aggregate +bool TAttributeArgs::getInt(int& value, int argNum) const +{ + const TConstUnion* intConst = getConstUnion(EbtInt, argNum); + + if (intConst == nullptr) + return false; + + value = intConst->getIConst(); + return true; +} + + +// extract strings out of attribute arguments stored in attribute aggregate. +// convert to lower case if converToLower is true (for case-insensitive compare convenience) +bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const +{ + const TConstUnion* stringConst = getConstUnion(EbtString, argNum); + + if (stringConst == nullptr) + return false; + + value = *stringConst->getSConst(); + + // Convenience. + if (convertToLower) + std::transform(value.begin(), value.end(), value.begin(), ::tolower); + + return true; +} + +// How many arguments were supplied? +int TAttributeArgs::size() const +{ + return args == nullptr ? 0 : (int)args->getSequence().size(); +} + +// Helper to get attribute const union. Returns nullptr on failure. +const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNum) const +{ + if (args == nullptr) + return nullptr; + + if (argNum >= (int)args->getSequence().size()) + return nullptr; + + if (args->getSequence()[argNum]->getAsConstantUnion() == nullptr) + return nullptr; + + const TConstUnion* constVal = &args->getSequence()[argNum]->getAsConstantUnion()->getConstArray()[0]; + if (constVal == nullptr || constVal->getType() != basicType) + return nullptr; + + return constVal; +} + +// Implementation of TParseContext parts of attributes +TAttributeType TParseContext::attributeFromName(const TString& name) const +{ + if (name == "branch" || name == "dont_flatten") + return EatBranch; + else if (name == "flatten") + return EatFlatten; + else if (name == "unroll") + return EatUnroll; + else if (name == "loop" || name == "dont_unroll") + return EatLoop; + else if (name == "dependency_infinite") + return EatDependencyInfinite; + else if (name == "dependency_length") + return EatDependencyLength; + else if (name == "min_iterations") + return EatMinIterations; + else if (name == "max_iterations") + return EatMaxIterations; + else if (name == "iteration_multiple") + return EatIterationMultiple; + else if (name == "peel_count") + return EatPeelCount; + else if (name == "partial_count") + return EatPartialCount; + else + return EatNone; +} + +// Make an initial leaf for the grammar from a no-argument attribute +TAttributes* TParseContext::makeAttributes(const TString& identifier) const +{ + TAttributes *attributes = nullptr; + attributes = NewPoolObject(attributes); + TAttributeArgs args = { attributeFromName(identifier), nullptr }; + attributes->push_back(args); + return attributes; +} + +// Make an initial leaf for the grammar from a one-argument 
attribute +TAttributes* TParseContext::makeAttributes(const TString& identifier, TIntermNode* node) const +{ + TAttributes *attributes = nullptr; + attributes = NewPoolObject(attributes); + + // for now, node is always a simple single expression, but other code expects + // a list, so make it so + TIntermAggregate* agg = intermediate.makeAggregate(node); + TAttributeArgs args = { attributeFromName(identifier), agg }; + attributes->push_back(args); + return attributes; +} + +// Merge two sets of attributes into a single set. +// The second argument is destructively consumed. +TAttributes* TParseContext::mergeAttributes(TAttributes* attr1, TAttributes* attr2) const +{ + attr1->splice(attr1->end(), *attr2); + return attr1; +} + +// +// Selection attributes +// +void TParseContext::handleSelectionAttributes(const TAttributes& attributes, TIntermNode* node) +{ + TIntermSelection* selection = node->getAsSelectionNode(); + if (selection == nullptr) + return; + + for (auto it = attributes.begin(); it != attributes.end(); ++it) { + if (it->size() > 0) { + warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", ""); + continue; + } + + switch (it->name) { + case EatFlatten: + selection->setFlatten(); + break; + case EatBranch: + selection->setDontFlatten(); + break; + default: + warn(node->getLoc(), "attribute does not apply to a selection", "", ""); + break; + } + } +} + +// +// Switch attributes +// +void TParseContext::handleSwitchAttributes(const TAttributes& attributes, TIntermNode* node) +{ + TIntermSwitch* selection = node->getAsSwitchNode(); + if (selection == nullptr) + return; + + for (auto it = attributes.begin(); it != attributes.end(); ++it) { + if (it->size() > 0) { + warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", ""); + continue; + } + + switch (it->name) { + case EatFlatten: + selection->setFlatten(); + break; + case EatBranch: + selection->setDontFlatten(); + break; + default: + warn(node->getLoc(), "attribute does not apply to a switch", "", ""); + break; + } + } +} + +// +// Loop attributes +// +void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermNode* node) +{ + TIntermLoop* loop = node->getAsLoopNode(); + if (loop == nullptr) { + // the actual loop might be part of a sequence + TIntermAggregate* agg = node->getAsAggregate(); + if (agg == nullptr) + return; + for (auto it = agg->getSequence().begin(); it != agg->getSequence().end(); ++it) { + loop = (*it)->getAsLoopNode(); + if (loop != nullptr) + break; + } + if (loop == nullptr) + return; + } + + for (auto it = attributes.begin(); it != attributes.end(); ++it) { + + const auto noArgument = [&](const char* feature) { + if (it->size() > 0) { + warn(node->getLoc(), "expected no arguments", feature, ""); + return false; + } + return true; + }; + + const auto positiveSignedArgument = [&](const char* feature, int& value) { + if (it->size() == 1 && it->getInt(value)) { + if (value <= 0) { + error(node->getLoc(), "must be positive", feature, ""); + return false; + } + } else { + warn(node->getLoc(), "expected a single integer argument", feature, ""); + return false; + } + return true; + }; + + const auto unsignedArgument = [&](const char* feature, unsigned int& uiValue) { + int value; + if (!(it->size() == 1 && it->getInt(value))) { + warn(node->getLoc(), "expected a single integer argument", feature, ""); + return false; + } + uiValue = (unsigned int)value; + return true; + }; + + const auto positiveUnsignedArgument = [&](const char* feature, unsigned int& 
uiValue) { + int value; + if (it->size() == 1 && it->getInt(value)) { + if (value == 0) { + error(node->getLoc(), "must be greater than or equal to 1", feature, ""); + return false; + } + } else { + warn(node->getLoc(), "expected a single integer argument", feature, ""); + return false; + } + uiValue = (unsigned int)value; + return true; + }; + + const auto spirv14 = [&](const char* feature) { + if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4) + warn(node->getLoc(), "attribute requires a SPIR-V 1.4 target-env", feature, ""); + }; + + int value = 0; + unsigned uiValue = 0; + switch (it->name) { + case EatUnroll: + if (noArgument("unroll")) + loop->setUnroll(); + break; + case EatLoop: + if (noArgument("dont_unroll")) + loop->setDontUnroll(); + break; + case EatDependencyInfinite: + if (noArgument("dependency_infinite")) + loop->setLoopDependency(TIntermLoop::dependencyInfinite); + break; + case EatDependencyLength: + if (positiveSignedArgument("dependency_length", value)) + loop->setLoopDependency(value); + break; + case EatMinIterations: + spirv14("min_iterations"); + if (unsignedArgument("min_iterations", uiValue)) + loop->setMinIterations(uiValue); + break; + case EatMaxIterations: + spirv14("max_iterations"); + if (unsignedArgument("max_iterations", uiValue)) + loop->setMaxIterations(uiValue); + break; + case EatIterationMultiple: + spirv14("iteration_multiple"); + if (positiveUnsignedArgument("iteration_multiple", uiValue)) + loop->setIterationMultiple(uiValue); + break; + case EatPeelCount: + spirv14("peel_count"); + if (unsignedArgument("peel_count", uiValue)) + loop->setPeelCount(uiValue); + break; + case EatPartialCount: + spirv14("partial_count"); + if (unsignedArgument("partial_count", uiValue)) + loop->setPartialCount(uiValue); + break; + default: + warn(node->getLoc(), "attribute does not apply to a loop", "", ""); + break; + } + } +} + +} // end namespace glslang + +#endif // GLSLANG_WEB diff --git a/src/third_party/glslang/glslang/MachineIndependent/attribute.h b/src/third_party/glslang/glslang/MachineIndependent/attribute.h new file mode 100644 index 0000000..38a943d --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/attribute.h @@ -0,0 +1,149 @@ +// +// Copyright (C) 2017 LunarG, Inc. +// Copyright (C) 2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef _ATTRIBUTE_INCLUDED_ +#define _ATTRIBUTE_INCLUDED_ + +#include "../Include/Common.h" +#include "../Include/ConstantUnion.h" + +namespace glslang { + + enum TAttributeType { + EatNone, + EatAllow_uav_condition, + EatBranch, + EatCall, + EatDomain, + EatEarlyDepthStencil, + EatFastOpt, + EatFlatten, + EatForceCase, + EatInstance, + EatMaxTessFactor, + EatNumThreads, + EatMaxVertexCount, + EatOutputControlPoints, + EatOutputTopology, + EatPartitioning, + EatPatchConstantFunc, + EatPatchSize, + EatUnroll, + EatLoop, + EatBinding, + EatGlobalBinding, + EatLocation, + EatInputAttachment, + EatBuiltIn, + EatPushConstant, + EatConstantId, + EatDependencyInfinite, + EatDependencyLength, + EatMinIterations, + EatMaxIterations, + EatIterationMultiple, + EatPeelCount, + EatPartialCount, + EatFormatRgba32f, + EatFormatRgba16f, + EatFormatR32f, + EatFormatRgba8, + EatFormatRgba8Snorm, + EatFormatRg32f, + EatFormatRg16f, + EatFormatR11fG11fB10f, + EatFormatR16f, + EatFormatRgba16, + EatFormatRgb10A2, + EatFormatRg16, + EatFormatRg8, + EatFormatR16, + EatFormatR8, + EatFormatRgba16Snorm, + EatFormatRg16Snorm, + EatFormatRg8Snorm, + EatFormatR16Snorm, + EatFormatR8Snorm, + EatFormatRgba32i, + EatFormatRgba16i, + EatFormatRgba8i, + EatFormatR32i, + EatFormatRg32i, + EatFormatRg16i, + EatFormatRg8i, + EatFormatR16i, + EatFormatR8i, + EatFormatRgba32ui, + EatFormatRgba16ui, + EatFormatRgba8ui, + EatFormatR32ui, + EatFormatRgb10a2ui, + EatFormatRg32ui, + EatFormatRg16ui, + EatFormatRg8ui, + EatFormatR16ui, + EatFormatR8ui, + EatFormatUnknown, + EatNonWritable, + EatNonReadable + }; + + class TIntermAggregate; + + struct TAttributeArgs { + TAttributeType name; + const TIntermAggregate* args; + + // Obtain attribute as integer + // Return false if it cannot be obtained + bool getInt(int& value, int argNum = 0) const; + + // Obtain attribute as string, with optional to-lower transform + // Return false if it cannot be obtained + bool getString(TString& value, int argNum = 0, bool convertToLower = true) const; + + // How many arguments were provided to the attribute? + int size() const; + + protected: + const TConstUnion* getConstUnion(TBasicType basicType, int argNum) const; + }; + + typedef TList TAttributes; + +} // end namespace glslang + +#endif // _ATTRIBUTE_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/gl_types.h b/src/third_party/glslang/glslang/MachineIndependent/gl_types.h new file mode 100644 index 0000000..b9372d4 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/gl_types.h @@ -0,0 +1,218 @@ +/* +** Copyright (c) 2013 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a +** copy of this software and/or associated documentation files (the +** "Materials"), to deal in the Materials without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Materials, and to +** permit persons to whom the Materials are furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be included +** in all copies or substantial portions of the Materials. +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +*/ + +#pragma once + +#define GL_FLOAT 0x1406 +#define GL_FLOAT_VEC2 0x8B50 +#define GL_FLOAT_VEC3 0x8B51 +#define GL_FLOAT_VEC4 0x8B52 + +#define GL_DOUBLE 0x140A +#define GL_DOUBLE_VEC2 0x8FFC +#define GL_DOUBLE_VEC3 0x8FFD +#define GL_DOUBLE_VEC4 0x8FFE + +#define GL_INT 0x1404 +#define GL_INT_VEC2 0x8B53 +#define GL_INT_VEC3 0x8B54 +#define GL_INT_VEC4 0x8B55 + +#define GL_UNSIGNED_INT 0x1405 +#define GL_UNSIGNED_INT_VEC2 0x8DC6 +#define GL_UNSIGNED_INT_VEC3 0x8DC7 +#define GL_UNSIGNED_INT_VEC4 0x8DC8 + +#define GL_INT64_ARB 0x140E +#define GL_INT64_VEC2_ARB 0x8FE9 +#define GL_INT64_VEC3_ARB 0x8FEA +#define GL_INT64_VEC4_ARB 0x8FEB + +#define GL_UNSIGNED_INT64_ARB 0x140F +#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FE5 +#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FE6 +#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FE7 +#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1 +#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2 +#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3 + +#define GL_INT16_NV 0x8FE4 +#define GL_INT16_VEC2_NV 0x8FE5 +#define GL_INT16_VEC3_NV 0x8FE6 +#define GL_INT16_VEC4_NV 0x8FE7 + +#define GL_BOOL 0x8B56 +#define GL_BOOL_VEC2 0x8B57 +#define GL_BOOL_VEC3 0x8B58 +#define GL_BOOL_VEC4 0x8B59 + +#define GL_FLOAT_MAT2 0x8B5A +#define GL_FLOAT_MAT3 0x8B5B +#define GL_FLOAT_MAT4 0x8B5C +#define GL_FLOAT_MAT2x3 0x8B65 +#define GL_FLOAT_MAT2x4 0x8B66 +#define GL_FLOAT_MAT3x2 0x8B67 +#define GL_FLOAT_MAT3x4 0x8B68 +#define GL_FLOAT_MAT4x2 0x8B69 +#define GL_FLOAT_MAT4x3 0x8B6A + +#define GL_DOUBLE_MAT2 0x8F46 +#define GL_DOUBLE_MAT3 0x8F47 +#define GL_DOUBLE_MAT4 0x8F48 +#define GL_DOUBLE_MAT2x3 0x8F49 +#define GL_DOUBLE_MAT2x4 0x8F4A +#define GL_DOUBLE_MAT3x2 0x8F4B +#define GL_DOUBLE_MAT3x4 0x8F4C +#define GL_DOUBLE_MAT4x2 0x8F4D +#define GL_DOUBLE_MAT4x3 0x8F4E + +// Those constants are borrowed from extension NV_gpu_shader5 +#define GL_FLOAT16_NV 0x8FF8 +#define GL_FLOAT16_VEC2_NV 0x8FF9 +#define GL_FLOAT16_VEC3_NV 0x8FFA +#define GL_FLOAT16_VEC4_NV 0x8FFB + +#define GL_FLOAT16_MAT2_AMD 0x91C5 +#define GL_FLOAT16_MAT3_AMD 0x91C6 +#define GL_FLOAT16_MAT4_AMD 0x91C7 +#define GL_FLOAT16_MAT2x3_AMD 0x91C8 +#define GL_FLOAT16_MAT2x4_AMD 0x91C9 +#define GL_FLOAT16_MAT3x2_AMD 0x91CA +#define GL_FLOAT16_MAT3x4_AMD 0x91CB +#define GL_FLOAT16_MAT4x2_AMD 0x91CC +#define GL_FLOAT16_MAT4x3_AMD 0x91CD + +#define GL_SAMPLER_1D 0x8B5D +#define GL_SAMPLER_2D 0x8B5E +#define GL_SAMPLER_3D 0x8B5F +#define GL_SAMPLER_CUBE 0x8B60 +#define 
GL_SAMPLER_BUFFER 0x8DC2 +#define GL_SAMPLER_1D_ARRAY 0x8DC0 +#define GL_SAMPLER_2D_ARRAY 0x8DC1 +#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3 +#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4 +#define GL_SAMPLER_CUBE_SHADOW 0x8DC5 +#define GL_SAMPLER_1D_SHADOW 0x8B61 +#define GL_SAMPLER_2D_SHADOW 0x8B62 +#define GL_SAMPLER_2D_RECT 0x8B63 +#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64 +#define GL_SAMPLER_2D_MULTISAMPLE 0x9108 +#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B +#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D +#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C +#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D + +#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE +#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF +#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0 +#define GL_FLOAT16_SAMPLER_CUBE_AMD 0x91D1 +#define GL_FLOAT16_SAMPLER_2D_RECT_AMD 0x91D2 +#define GL_FLOAT16_SAMPLER_1D_ARRAY_AMD 0x91D3 +#define GL_FLOAT16_SAMPLER_2D_ARRAY_AMD 0x91D4 +#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD 0x91D5 +#define GL_FLOAT16_SAMPLER_BUFFER_AMD 0x91D6 +#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD 0x91D7 +#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD 0x91D8 + +#define GL_FLOAT16_SAMPLER_1D_SHADOW_AMD 0x91D9 +#define GL_FLOAT16_SAMPLER_2D_SHADOW_AMD 0x91DA +#define GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD 0x91DB +#define GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD 0x91DC +#define GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD 0x91DD +#define GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD 0x91DE +#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD 0x91DF + +#define GL_FLOAT16_IMAGE_1D_AMD 0x91E0 +#define GL_FLOAT16_IMAGE_2D_AMD 0x91E1 +#define GL_FLOAT16_IMAGE_3D_AMD 0x91E2 +#define GL_FLOAT16_IMAGE_2D_RECT_AMD 0x91E3 +#define GL_FLOAT16_IMAGE_CUBE_AMD 0x91E4 +#define GL_FLOAT16_IMAGE_1D_ARRAY_AMD 0x91E5 +#define GL_FLOAT16_IMAGE_2D_ARRAY_AMD 0x91E6 +#define GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD 0x91E7 +#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8 +#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9 +#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA + +#define GL_INT_SAMPLER_1D 0x8DC9 +#define GL_INT_SAMPLER_2D 0x8DCA +#define GL_INT_SAMPLER_3D 0x8DCB +#define GL_INT_SAMPLER_CUBE 0x8DCC +#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE +#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF +#define GL_INT_SAMPLER_2D_RECT 0x8DCD +#define GL_INT_SAMPLER_BUFFER 0x8DD0 +#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109 +#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E +#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E + +#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1 +#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2 +#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3 +#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4 +#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6 +#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7 +#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5 +#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8 +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F +#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F +#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A + +#define GL_IMAGE_1D 0x904C +#define GL_IMAGE_2D 0x904D +#define GL_IMAGE_3D 0x904E +#define GL_IMAGE_2D_RECT 0x904F +#define GL_IMAGE_CUBE 0x9050 +#define GL_IMAGE_BUFFER 0x9051 +#define GL_IMAGE_1D_ARRAY 0x9052 +#define GL_IMAGE_2D_ARRAY 0x9053 +#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054 +#define GL_IMAGE_2D_MULTISAMPLE 0x9055 +#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056 +#define GL_INT_IMAGE_1D 
0x9057 +#define GL_INT_IMAGE_2D 0x9058 +#define GL_INT_IMAGE_3D 0x9059 +#define GL_INT_IMAGE_2D_RECT 0x905A +#define GL_INT_IMAGE_CUBE 0x905B +#define GL_INT_IMAGE_BUFFER 0x905C +#define GL_INT_IMAGE_1D_ARRAY 0x905D +#define GL_INT_IMAGE_2D_ARRAY 0x905E +#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F +#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060 +#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061 +#define GL_UNSIGNED_INT_IMAGE_1D 0x9062 +#define GL_UNSIGNED_INT_IMAGE_2D 0x9063 +#define GL_UNSIGNED_INT_IMAGE_3D 0x9064 +#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065 +#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066 +#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067 +#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068 +#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069 +#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B +#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C + +#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB diff --git a/src/third_party/glslang/glslang/MachineIndependent/glslang.y b/src/third_party/glslang/glslang/MachineIndependent/glslang.y new file mode 100644 index 0000000..23adcb0 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/glslang.y @@ -0,0 +1,3906 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2019 Google, Inc. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do not edit the .y file, only edit the .m4 file. +// The .y bison file is not a source file, it is a derivative of the .m4 file. +// The m4 file needs to be processed by m4 to generate the .y bison file. +// +// Code sandwiched between a pair: +// +// GLSLANG_WEB_EXCLUDE_ON +// ... +// ... +// ... 
+// GLSLANG_WEB_EXCLUDE_OFF +// +// Will be excluded from the grammar when m4 is executed as: +// +// m4 -P -DGLSLANG_WEB +// +// It will be included when m4 is executed as: +// +// m4 -P +// + + + + +/** + * This is bison grammar and productions for parsing all versions of the + * GLSL shading languages. + */ +%{ + +/* Based on: +ANSI C Yacc grammar + +In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a +matching Lex specification) for the April 30, 1985 draft version of the +ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that +original, as mentioned in the answer to question 17.25 of the comp.lang.c +FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z. + +I intend to keep this version as close to the current C Standard grammar as +possible; please let me know if you discover discrepancies. + +Jutta Degener, 1995 +*/ + +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "../Public/ShaderLang.h" +#include "attribute.h" + +using namespace glslang; + +%} + +%define parse.error verbose + +%union { + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; +} + +%{ + +/* windows only pragma */ +#ifdef _MSC_VER + #pragma warning(disable : 4065) + #pragma warning(disable : 4127) + #pragma warning(disable : 4244) +#endif + +#define parseContext (*pParseContext) +#define yyerror(context, msg) context->parserError(msg) + +extern int yylex(YYSTYPE*, TParseContext&); + +%} + +%parse-param {glslang::TParseContext* pParseContext} +%lex-param {parseContext} +%pure-parser // enable thread safety +%expect 1 // One shift reduce conflict because of if | else + +%token CONST BOOL INT UINT FLOAT +%token BVEC2 BVEC3 BVEC4 +%token IVEC2 IVEC3 IVEC4 +%token UVEC2 UVEC3 UVEC4 +%token VEC2 VEC3 VEC4 +%token MAT2 MAT3 MAT4 +%token MAT2X2 MAT2X3 MAT2X4 +%token MAT3X2 MAT3X3 MAT3X4 +%token MAT4X2 MAT4X3 MAT4X4 + +// combined image/sampler +%token SAMPLER2D SAMPLER3D SAMPLERCUBE SAMPLER2DSHADOW +%token SAMPLERCUBESHADOW SAMPLER2DARRAY +%token SAMPLER2DARRAYSHADOW ISAMPLER2D ISAMPLER3D ISAMPLERCUBE +%token ISAMPLER2DARRAY USAMPLER2D USAMPLER3D +%token USAMPLERCUBE USAMPLER2DARRAY + +// separate image/sampler +%token SAMPLER SAMPLERSHADOW +%token TEXTURE2D TEXTURE3D TEXTURECUBE TEXTURE2DARRAY +%token ITEXTURE2D ITEXTURE3D ITEXTURECUBE ITEXTURE2DARRAY +%token UTEXTURE2D UTEXTURE3D UTEXTURECUBE UTEXTURE2DARRAY + + + +%token ATTRIBUTE VARYING +%token FLOAT16_T FLOAT32_T DOUBLE FLOAT64_T +%token INT64_T UINT64_T INT32_T UINT32_T INT16_T UINT16_T INT8_T UINT8_T +%token I64VEC2 I64VEC3 I64VEC4 +%token U64VEC2 U64VEC3 U64VEC4 +%token I32VEC2 I32VEC3 I32VEC4 +%token U32VEC2 U32VEC3 U32VEC4 +%token I16VEC2 I16VEC3 I16VEC4 +%token U16VEC2 U16VEC3 U16VEC4 +%token I8VEC2 I8VEC3 I8VEC4 +%token U8VEC2 U8VEC3 U8VEC4 +%token DVEC2 DVEC3 DVEC4 DMAT2 DMAT3 DMAT4 +%token F16VEC2 F16VEC3 F16VEC4 F16MAT2 
F16MAT3 F16MAT4 +%token F32VEC2 F32VEC3 F32VEC4 F32MAT2 F32MAT3 F32MAT4 +%token F64VEC2 F64VEC3 F64VEC4 F64MAT2 F64MAT3 F64MAT4 +%token DMAT2X2 DMAT2X3 DMAT2X4 +%token DMAT3X2 DMAT3X3 DMAT3X4 +%token DMAT4X2 DMAT4X3 DMAT4X4 +%token F16MAT2X2 F16MAT2X3 F16MAT2X4 +%token F16MAT3X2 F16MAT3X3 F16MAT3X4 +%token F16MAT4X2 F16MAT4X3 F16MAT4X4 +%token F32MAT2X2 F32MAT2X3 F32MAT2X4 +%token F32MAT3X2 F32MAT3X3 F32MAT3X4 +%token F32MAT4X2 F32MAT4X3 F32MAT4X4 +%token F64MAT2X2 F64MAT2X3 F64MAT2X4 +%token F64MAT3X2 F64MAT3X3 F64MAT3X4 +%token F64MAT4X2 F64MAT4X3 F64MAT4X4 +%token ATOMIC_UINT +%token ACCSTRUCTNV +%token ACCSTRUCTEXT +%token RAYQUERYEXT +%token FCOOPMATNV ICOOPMATNV UCOOPMATNV + +// combined image/sampler +%token SAMPLERCUBEARRAY SAMPLERCUBEARRAYSHADOW +%token ISAMPLERCUBEARRAY USAMPLERCUBEARRAY +%token SAMPLER1D SAMPLER1DARRAY SAMPLER1DARRAYSHADOW ISAMPLER1D SAMPLER1DSHADOW +%token SAMPLER2DRECT SAMPLER2DRECTSHADOW ISAMPLER2DRECT USAMPLER2DRECT +%token SAMPLERBUFFER ISAMPLERBUFFER USAMPLERBUFFER +%token SAMPLER2DMS ISAMPLER2DMS USAMPLER2DMS +%token SAMPLER2DMSARRAY ISAMPLER2DMSARRAY USAMPLER2DMSARRAY +%token SAMPLEREXTERNALOES +%token SAMPLEREXTERNAL2DY2YEXT +%token ISAMPLER1DARRAY USAMPLER1D USAMPLER1DARRAY +%token F16SAMPLER1D F16SAMPLER2D F16SAMPLER3D F16SAMPLER2DRECT F16SAMPLERCUBE +%token F16SAMPLER1DARRAY F16SAMPLER2DARRAY F16SAMPLERCUBEARRAY +%token F16SAMPLERBUFFER F16SAMPLER2DMS F16SAMPLER2DMSARRAY +%token F16SAMPLER1DSHADOW F16SAMPLER2DSHADOW F16SAMPLER1DARRAYSHADOW F16SAMPLER2DARRAYSHADOW +%token F16SAMPLER2DRECTSHADOW F16SAMPLERCUBESHADOW F16SAMPLERCUBEARRAYSHADOW + +// images +%token IMAGE1D IIMAGE1D UIMAGE1D IMAGE2D IIMAGE2D +%token UIMAGE2D IMAGE3D IIMAGE3D UIMAGE3D +%token IMAGE2DRECT IIMAGE2DRECT UIMAGE2DRECT +%token IMAGECUBE IIMAGECUBE UIMAGECUBE +%token IMAGEBUFFER IIMAGEBUFFER UIMAGEBUFFER +%token IMAGE1DARRAY IIMAGE1DARRAY UIMAGE1DARRAY +%token IMAGE2DARRAY IIMAGE2DARRAY UIMAGE2DARRAY +%token IMAGECUBEARRAY IIMAGECUBEARRAY UIMAGECUBEARRAY +%token IMAGE2DMS IIMAGE2DMS UIMAGE2DMS +%token IMAGE2DMSARRAY IIMAGE2DMSARRAY UIMAGE2DMSARRAY + +%token F16IMAGE1D F16IMAGE2D F16IMAGE3D F16IMAGE2DRECT +%token F16IMAGECUBE F16IMAGE1DARRAY F16IMAGE2DARRAY F16IMAGECUBEARRAY +%token F16IMAGEBUFFER F16IMAGE2DMS F16IMAGE2DMSARRAY + +// texture without sampler +%token TEXTURECUBEARRAY ITEXTURECUBEARRAY UTEXTURECUBEARRAY +%token TEXTURE1D ITEXTURE1D UTEXTURE1D +%token TEXTURE1DARRAY ITEXTURE1DARRAY UTEXTURE1DARRAY +%token TEXTURE2DRECT ITEXTURE2DRECT UTEXTURE2DRECT +%token TEXTUREBUFFER ITEXTUREBUFFER UTEXTUREBUFFER +%token TEXTURE2DMS ITEXTURE2DMS UTEXTURE2DMS +%token TEXTURE2DMSARRAY ITEXTURE2DMSARRAY UTEXTURE2DMSARRAY + +%token F16TEXTURE1D F16TEXTURE2D F16TEXTURE3D F16TEXTURE2DRECT F16TEXTURECUBE +%token F16TEXTURE1DARRAY F16TEXTURE2DARRAY F16TEXTURECUBEARRAY +%token F16TEXTUREBUFFER F16TEXTURE2DMS F16TEXTURE2DMSARRAY + +// input attachments +%token SUBPASSINPUT SUBPASSINPUTMS ISUBPASSINPUT ISUBPASSINPUTMS USUBPASSINPUT USUBPASSINPUTMS +%token F16SUBPASSINPUT F16SUBPASSINPUTMS + + + +%token LEFT_OP RIGHT_OP +%token INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP +%token AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN +%token MOD_ASSIGN LEFT_ASSIGN RIGHT_ASSIGN AND_ASSIGN XOR_ASSIGN OR_ASSIGN +%token SUB_ASSIGN +%token STRING_LITERAL + +%token LEFT_PAREN RIGHT_PAREN LEFT_BRACKET RIGHT_BRACKET LEFT_BRACE RIGHT_BRACE DOT +%token COMMA COLON EQUAL SEMICOLON BANG DASH TILDE PLUS STAR SLASH PERCENT +%token LEFT_ANGLE RIGHT_ANGLE VERTICAL_BAR CARET AMPERSAND QUESTION + +%token INVARIANT 
+%token HIGH_PRECISION MEDIUM_PRECISION LOW_PRECISION PRECISION +%token PACKED RESOURCE SUPERP + +%token FLOATCONSTANT INTCONSTANT UINTCONSTANT BOOLCONSTANT +%token IDENTIFIER TYPE_NAME +%token CENTROID IN OUT INOUT +%token STRUCT VOID WHILE +%token BREAK CONTINUE DO ELSE FOR IF DISCARD RETURN SWITCH CASE DEFAULT +%token UNIFORM SHARED BUFFER +%token FLAT SMOOTH LAYOUT + + +%token DOUBLECONSTANT INT16CONSTANT UINT16CONSTANT FLOAT16CONSTANT INT32CONSTANT UINT32CONSTANT +%token INT64CONSTANT UINT64CONSTANT +%token SUBROUTINE DEMOTE +%token PAYLOADNV PAYLOADINNV HITATTRNV CALLDATANV CALLDATAINNV +%token PAYLOADEXT PAYLOADINEXT HITATTREXT CALLDATAEXT CALLDATAINEXT +%token PATCH SAMPLE NONUNIFORM +%token COHERENT VOLATILE RESTRICT READONLY WRITEONLY DEVICECOHERENT QUEUEFAMILYCOHERENT WORKGROUPCOHERENT +%token SUBGROUPCOHERENT NONPRIVATE SHADERCALLCOHERENT +%token NOPERSPECTIVE EXPLICITINTERPAMD PERVERTEXNV PERPRIMITIVENV PERVIEWNV PERTASKNV +%token PRECISE + + +%type assignment_operator unary_operator +%type variable_identifier primary_expression postfix_expression +%type expression integer_expression assignment_expression +%type unary_expression multiplicative_expression additive_expression +%type relational_expression equality_expression +%type conditional_expression constant_expression +%type logical_or_expression logical_xor_expression logical_and_expression +%type shift_expression and_expression exclusive_or_expression inclusive_or_expression +%type function_call initializer condition conditionopt + +%type translation_unit function_definition +%type statement simple_statement +%type statement_list switch_statement_list compound_statement +%type declaration_statement selection_statement selection_statement_nonattributed expression_statement +%type switch_statement switch_statement_nonattributed case_label +%type declaration external_declaration +%type for_init_statement compound_statement_no_new_scope +%type selection_rest_statement for_rest_statement +%type iteration_statement iteration_statement_nonattributed jump_statement statement_no_new_scope statement_scoped +%type single_declaration init_declarator_list + +%type parameter_declaration parameter_declarator parameter_type_specifier + +%type array_specifier +%type invariant_qualifier interpolation_qualifier storage_qualifier precision_qualifier +%type layout_qualifier layout_qualifier_id_list layout_qualifier_id + +%type type_parameter_specifier +%type type_parameter_specifier_opt +%type type_parameter_specifier_list + +%type type_qualifier fully_specified_type type_specifier +%type single_type_qualifier +%type type_specifier_nonarray +%type struct_specifier +%type struct_declarator +%type struct_declarator_list struct_declaration struct_declaration_list +%type block_structure +%type function_header function_declarator +%type function_header_with_parameters +%type function_call_header_with_parameters function_call_header_no_parameters function_call_generic function_prototype +%type function_call_or_method function_identifier function_call_header + +%type identifier_list + + +%type precise_qualifier non_uniform_qualifier +%type type_name_list +%type attribute attribute_list single_attribute +%type demote_statement +%type initializer_list + + +%start translation_unit +%% + +variable_identifier + : IDENTIFIER { + $$ = parseContext.handleVariable($1.loc, $1.symbol, $1.string); + } + ; + +primary_expression + : variable_identifier { + $$ = $1; + } + | LEFT_PAREN expression RIGHT_PAREN { + $$ = $2; + if ($$->getAsConstantUnion()) + 
$$->getAsConstantUnion()->setExpression(); + } + | FLOATCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat, $1.loc, true); + } + | INTCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true); + } + | UINTCONSTANT { + parseContext.fullIntegerCheck($1.loc, "unsigned literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true); + } + | BOOLCONSTANT { + $$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true); + } + + | STRING_LITERAL { + $$ = parseContext.intermediate.addConstantUnion($1.string, $1.loc, true); + } + | INT32CONSTANT { + parseContext.explicitInt32Check($1.loc, "32-bit signed literal"); + $$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true); + } + | UINT32CONSTANT { + parseContext.explicitInt32Check($1.loc, "32-bit signed literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true); + } + | INT64CONSTANT { + parseContext.int64Check($1.loc, "64-bit integer literal"); + $$ = parseContext.intermediate.addConstantUnion($1.i64, $1.loc, true); + } + | UINT64CONSTANT { + parseContext.int64Check($1.loc, "64-bit unsigned integer literal"); + $$ = parseContext.intermediate.addConstantUnion($1.u64, $1.loc, true); + } + | INT16CONSTANT { + parseContext.explicitInt16Check($1.loc, "16-bit integer literal"); + $$ = parseContext.intermediate.addConstantUnion((short)$1.i, $1.loc, true); + } + | UINT16CONSTANT { + parseContext.explicitInt16Check($1.loc, "16-bit unsigned integer literal"); + $$ = parseContext.intermediate.addConstantUnion((unsigned short)$1.u, $1.loc, true); + } + | DOUBLECONSTANT { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double literal"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double literal"); + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true); + } + | FLOAT16CONSTANT { + parseContext.float16Check($1.loc, "half float literal"); + $$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true); + } + + ; + +postfix_expression + : primary_expression { + $$ = $1; + } + | postfix_expression LEFT_BRACKET integer_expression RIGHT_BRACKET { + $$ = parseContext.handleBracketDereference($2.loc, $1, $3); + } + | function_call { + $$ = $1; + } + | postfix_expression DOT IDENTIFIER { + $$ = parseContext.handleDotDereference($3.loc, $1, *$3.string); + } + | postfix_expression INC_OP { + parseContext.variableCheck($1); + parseContext.lValueErrorCheck($2.loc, "++", $1); + $$ = parseContext.handleUnaryMath($2.loc, "++", EOpPostIncrement, $1); + } + | postfix_expression DEC_OP { + parseContext.variableCheck($1); + parseContext.lValueErrorCheck($2.loc, "--", $1); + $$ = parseContext.handleUnaryMath($2.loc, "--", EOpPostDecrement, $1); + } + ; + +integer_expression + : expression { + parseContext.integerCheck($1, "[]"); + $$ = $1; + } + ; + +function_call + : function_call_or_method { + $$ = parseContext.handleFunctionCall($1.loc, $1.function, $1.intermNode); + delete $1.function; + } + ; + +function_call_or_method + : function_call_generic { + $$ = $1; + } + ; + +function_call_generic + : function_call_header_with_parameters RIGHT_PAREN { + $$ = $1; + $$.loc = $2.loc; + } + | function_call_header_no_parameters RIGHT_PAREN { + $$ = $1; + $$.loc = $2.loc; + } + ; + +function_call_header_no_parameters + : function_call_header VOID { + $$ = $1; + } + | function_call_header { + $$ = $1; + } + ; + +function_call_header_with_parameters + : function_call_header 
assignment_expression { + TParameter param = { 0, new TType }; + param.type->shallowCopy($2->getType()); + $1.function->addParameter(param); + $$.function = $1.function; + $$.intermNode = $2; + } + | function_call_header_with_parameters COMMA assignment_expression { + TParameter param = { 0, new TType }; + param.type->shallowCopy($3->getType()); + $1.function->addParameter(param); + $$.function = $1.function; + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, $3, $2.loc); + } + ; + +function_call_header + : function_identifier LEFT_PAREN { + $$ = $1; + } + ; + +// Grammar Note: Constructors look like functions, but are recognized as types. + +function_identifier + : type_specifier { + // Constructor + $$.intermNode = 0; + $$.function = parseContext.handleConstructorCall($1.loc, $1); + } + | postfix_expression { + // + // Should be a method or subroutine call, but we haven't recognized the arguments yet. + // + $$.function = 0; + $$.intermNode = 0; + + TIntermMethod* method = $1->getAsMethodNode(); + if (method) { + $$.function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength); + $$.intermNode = method->getObject(); + } else { + TIntermSymbol* symbol = $1->getAsSymbolNode(); + if (symbol) { + parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName()); + TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid)); + $$.function = function; + } else + parseContext.error($1->getLoc(), "function call, method, or subroutine call expected", "", ""); + } + + if ($$.function == 0) { + // error recover + TString* empty = NewPoolTString(""); + $$.function = new TFunction(empty, TType(EbtVoid), EOpNull); + } + } + + | non_uniform_qualifier { + // Constructor + $$.intermNode = 0; + $$.function = parseContext.handleConstructorCall($1.loc, $1); + } + + ; + +unary_expression + : postfix_expression { + parseContext.variableCheck($1); + $$ = $1; + if (TIntermMethod* method = $1->getAsMethodNode()) + parseContext.error($1->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), ""); + } + | INC_OP unary_expression { + parseContext.lValueErrorCheck($1.loc, "++", $2); + $$ = parseContext.handleUnaryMath($1.loc, "++", EOpPreIncrement, $2); + } + | DEC_OP unary_expression { + parseContext.lValueErrorCheck($1.loc, "--", $2); + $$ = parseContext.handleUnaryMath($1.loc, "--", EOpPreDecrement, $2); + } + | unary_operator unary_expression { + if ($1.op != EOpNull) { + char errorOp[2] = {0, 0}; + switch($1.op) { + case EOpNegative: errorOp[0] = '-'; break; + case EOpLogicalNot: errorOp[0] = '!'; break; + case EOpBitwiseNot: errorOp[0] = '~'; break; + default: break; // some compilers want this + } + $$ = parseContext.handleUnaryMath($1.loc, errorOp, $1.op, $2); + } else { + $$ = $2; + if ($$->getAsConstantUnion()) + $$->getAsConstantUnion()->setExpression(); + } + } + ; +// Grammar Note: No traditional style type casts. + +unary_operator + : PLUS { $$.loc = $1.loc; $$.op = EOpNull; } + | DASH { $$.loc = $1.loc; $$.op = EOpNegative; } + | BANG { $$.loc = $1.loc; $$.op = EOpLogicalNot; } + | TILDE { $$.loc = $1.loc; $$.op = EOpBitwiseNot; + parseContext.fullIntegerCheck($1.loc, "bitwise not"); } + ; +// Grammar Note: No '*' or '&' unary ops. Pointers are not supported. 
+ +multiplicative_expression + : unary_expression { $$ = $1; } + | multiplicative_expression STAR unary_expression { + $$ = parseContext.handleBinaryMath($2.loc, "*", EOpMul, $1, $3); + if ($$ == 0) + $$ = $1; + } + | multiplicative_expression SLASH unary_expression { + $$ = parseContext.handleBinaryMath($2.loc, "/", EOpDiv, $1, $3); + if ($$ == 0) + $$ = $1; + } + | multiplicative_expression PERCENT unary_expression { + parseContext.fullIntegerCheck($2.loc, "%"); + $$ = parseContext.handleBinaryMath($2.loc, "%", EOpMod, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +additive_expression + : multiplicative_expression { $$ = $1; } + | additive_expression PLUS multiplicative_expression { + $$ = parseContext.handleBinaryMath($2.loc, "+", EOpAdd, $1, $3); + if ($$ == 0) + $$ = $1; + } + | additive_expression DASH multiplicative_expression { + $$ = parseContext.handleBinaryMath($2.loc, "-", EOpSub, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +shift_expression + : additive_expression { $$ = $1; } + | shift_expression LEFT_OP additive_expression { + parseContext.fullIntegerCheck($2.loc, "bit shift left"); + $$ = parseContext.handleBinaryMath($2.loc, "<<", EOpLeftShift, $1, $3); + if ($$ == 0) + $$ = $1; + } + | shift_expression RIGHT_OP additive_expression { + parseContext.fullIntegerCheck($2.loc, "bit shift right"); + $$ = parseContext.handleBinaryMath($2.loc, ">>", EOpRightShift, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +relational_expression + : shift_expression { $$ = $1; } + | relational_expression LEFT_ANGLE shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, "<", EOpLessThan, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression RIGHT_ANGLE shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, ">", EOpGreaterThan, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression LE_OP shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, "<=", EOpLessThanEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | relational_expression GE_OP shift_expression { + $$ = parseContext.handleBinaryMath($2.loc, ">=", EOpGreaterThanEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +equality_expression + : relational_expression { $$ = $1; } + | equality_expression EQ_OP relational_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison"); + parseContext.opaqueCheck($2.loc, $1->getType(), "=="); + parseContext.specializationCheck($2.loc, $1->getType(), "=="); + parseContext.referenceCheck($2.loc, $1->getType(), "=="); + $$ = parseContext.handleBinaryMath($2.loc, "==", EOpEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + | equality_expression NE_OP relational_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array comparison"); + parseContext.opaqueCheck($2.loc, $1->getType(), "!="); + parseContext.specializationCheck($2.loc, $1->getType(), "!="); + parseContext.referenceCheck($2.loc, $1->getType(), "!="); + $$ = parseContext.handleBinaryMath($2.loc, "!=", EOpNotEqual, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +and_expression + : equality_expression { $$ = $1; } + | and_expression AMPERSAND equality_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise and"); + $$ = 
parseContext.handleBinaryMath($2.loc, "&", EOpAnd, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +exclusive_or_expression + : and_expression { $$ = $1; } + | exclusive_or_expression CARET and_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise exclusive or"); + $$ = parseContext.handleBinaryMath($2.loc, "^", EOpExclusiveOr, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +inclusive_or_expression + : exclusive_or_expression { $$ = $1; } + | inclusive_or_expression VERTICAL_BAR exclusive_or_expression { + parseContext.fullIntegerCheck($2.loc, "bitwise inclusive or"); + $$ = parseContext.handleBinaryMath($2.loc, "|", EOpInclusiveOr, $1, $3); + if ($$ == 0) + $$ = $1; + } + ; + +logical_and_expression + : inclusive_or_expression { $$ = $1; } + | logical_and_expression AND_OP inclusive_or_expression { + $$ = parseContext.handleBinaryMath($2.loc, "&&", EOpLogicalAnd, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +logical_xor_expression + : logical_and_expression { $$ = $1; } + | logical_xor_expression XOR_OP logical_and_expression { + $$ = parseContext.handleBinaryMath($2.loc, "^^", EOpLogicalXor, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +logical_or_expression + : logical_xor_expression { $$ = $1; } + | logical_or_expression OR_OP logical_xor_expression { + $$ = parseContext.handleBinaryMath($2.loc, "||", EOpLogicalOr, $1, $3); + if ($$ == 0) + $$ = parseContext.intermediate.addConstantUnion(false, $2.loc); + } + ; + +conditional_expression + : logical_or_expression { $$ = $1; } + | logical_or_expression QUESTION { + ++parseContext.controlFlowNestingLevel; + } + expression COLON assignment_expression { + --parseContext.controlFlowNestingLevel; + parseContext.boolCheck($2.loc, $1); + parseContext.rValueErrorCheck($2.loc, "?", $1); + parseContext.rValueErrorCheck($5.loc, ":", $4); + parseContext.rValueErrorCheck($5.loc, ":", $6); + $$ = parseContext.intermediate.addSelection($1, $4, $6, $2.loc); + if ($$ == 0) { + parseContext.binaryOpError($2.loc, ":", $4->getCompleteString(), $6->getCompleteString()); + $$ = $6; + } + } + ; + +assignment_expression + : conditional_expression { $$ = $1; } + | unary_expression assignment_operator assignment_expression { + parseContext.arrayObjectCheck($2.loc, $1->getType(), "array assignment"); + parseContext.opaqueCheck($2.loc, $1->getType(), "="); + parseContext.storage16BitAssignmentCheck($2.loc, $1->getType(), "="); + parseContext.specializationCheck($2.loc, $1->getType(), "="); + parseContext.lValueErrorCheck($2.loc, "assign", $1); + parseContext.rValueErrorCheck($2.loc, "assign", $3); + $$ = parseContext.addAssign($2.loc, $2.op, $1, $3); + if ($$ == 0) { + parseContext.assignError($2.loc, "assign", $1->getCompleteString(), $3->getCompleteString()); + $$ = $1; + } + } + ; + +assignment_operator + : EQUAL { + $$.loc = $1.loc; + $$.op = EOpAssign; + } + | MUL_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpMulAssign; + } + | DIV_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpDivAssign; + } + | MOD_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "%="); + $$.loc = $1.loc; + $$.op = EOpModAssign; + } + | ADD_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpAddAssign; + } + | SUB_ASSIGN { + $$.loc = $1.loc; + $$.op = EOpSubAssign; + } + | LEFT_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bit-shift left assign"); + $$.loc = $1.loc; $$.op = EOpLeftShiftAssign; + } + | RIGHT_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bit-shift right assign"); + $$.loc = 
$1.loc; $$.op = EOpRightShiftAssign; + } + | AND_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-and assign"); + $$.loc = $1.loc; $$.op = EOpAndAssign; + } + | XOR_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-xor assign"); + $$.loc = $1.loc; $$.op = EOpExclusiveOrAssign; + } + | OR_ASSIGN { + parseContext.fullIntegerCheck($1.loc, "bitwise-or assign"); + $$.loc = $1.loc; $$.op = EOpInclusiveOrAssign; + } + ; + +expression + : assignment_expression { + $$ = $1; + } + | expression COMMA assignment_expression { + parseContext.samplerConstructorLocationCheck($2.loc, ",", $3); + $$ = parseContext.intermediate.addComma($1, $3, $2.loc); + if ($$ == 0) { + parseContext.binaryOpError($2.loc, ",", $1->getCompleteString(), $3->getCompleteString()); + $$ = $3; + } + } + ; + +constant_expression + : conditional_expression { + parseContext.constantValueCheck($1, ""); + $$ = $1; + } + ; + +declaration + : function_prototype SEMICOLON { + parseContext.handleFunctionDeclarator($1.loc, *$1.function, true /* prototype */); + $$ = 0; + // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature + } + | init_declarator_list SEMICOLON { + if ($1.intermNode && $1.intermNode->getAsAggregate()) + $1.intermNode->getAsAggregate()->setOperator(EOpSequence); + $$ = $1.intermNode; + } + | PRECISION precision_qualifier type_specifier SEMICOLON { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "precision statement"); + // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope + parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]); + parseContext.setDefaultPrecision($1.loc, $3, $2.qualifier.precision); + $$ = 0; + } + | block_structure SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList); + $$ = 0; + } + | block_structure IDENTIFIER SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList, $2.string); + $$ = 0; + } + | block_structure IDENTIFIER array_specifier SEMICOLON { + parseContext.declareBlock($1.loc, *$1.typeList, $2.string, $3.arraySizes); + $$ = 0; + } + | type_qualifier SEMICOLON { + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.updateStandaloneQualifierDefaults($1.loc, $1); + $$ = 0; + } + | type_qualifier IDENTIFIER SEMICOLON { + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$2.string); + $$ = 0; + } + | type_qualifier IDENTIFIER identifier_list SEMICOLON { + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + $3->push_back($2.string); + parseContext.addQualifierToExisting($1.loc, $1.qualifier, *$3); + $$ = 0; + } + ; + +block_structure + : type_qualifier IDENTIFIER LEFT_BRACE { parseContext.nestedBlockCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + --parseContext.structNestingLevel; + parseContext.blockName = $2.string; + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.currentBlockQualifier = $1.qualifier; + $$.loc = $1.loc; + $$.typeList = $5; + } + +identifier_list + : COMMA IDENTIFIER { + $$ = new TIdentifierList; + $$->push_back($2.string); + } + | identifier_list COMMA IDENTIFIER { + $$ = $1; + $$->push_back($3.string); + } + ; + +function_prototype + : function_declarator RIGHT_PAREN { + $$.function = $1; + $$.loc = $2.loc; + } + ; + +function_declarator + : function_header { + $$ = $1; + } + | 
function_header_with_parameters { + $$ = $1; + } + ; + + +function_header_with_parameters + : function_header parameter_declaration { + // Add the parameter + $$ = $1; + if ($2.param.type->getBasicType() != EbtVoid) + $1->addParameter($2.param); + else + delete $2.param.type; + } + | function_header_with_parameters COMMA parameter_declaration { + // + // Only first parameter of one-parameter functions can be void + // The check for named parameters not being void is done in parameter_declarator + // + if ($3.param.type->getBasicType() == EbtVoid) { + // + // This parameter > first is void + // + parseContext.error($2.loc, "cannot be an argument type except for '(void)'", "void", ""); + delete $3.param.type; + } else { + // Add the parameter + $$ = $1; + $1->addParameter($3.param); + } + } + ; + +function_header + : fully_specified_type IDENTIFIER LEFT_PAREN { + if ($1.qualifier.storage != EvqGlobal && $1.qualifier.storage != EvqTemporary) { + parseContext.error($2.loc, "no qualifiers allowed for function return", + GetStorageQualifierString($1.qualifier.storage), ""); + } + if ($1.arraySizes) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + + // Add the function as a prototype after parsing it (we do not support recursion) + TFunction *function; + TType type($1); + + // Potentially rename shader entry point function. No-op most of the time. + parseContext.renameShaderFunction($2.string); + + // Make the function + function = new TFunction($2.string, type); + $$ = function; + } + ; + +parameter_declarator + // Type + name + : type_specifier IDENTIFIER { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + if ($1.basicType == EbtVoid) { + parseContext.error($2.loc, "illegal use of type 'void'", $2.string->c_str(), ""); + } + parseContext.reservedErrorCheck($2.loc, *$2.string); + + TParameter param = {$2.string, new TType($1)}; + $$.loc = $2.loc; + $$.param = param; + } + | type_specifier IDENTIFIER array_specifier { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + TType* type = new TType($1); + type->transferArraySizes($3.arraySizes); + type->copyArrayInnerSizes($1.arraySizes); + + parseContext.arrayOfArrayVersionCheck($2.loc, type->getArraySizes()); + parseContext.arraySizeRequiredCheck($3.loc, *$3.arraySizes); + parseContext.reservedErrorCheck($2.loc, *$2.string); + + TParameter param = { $2.string, type }; + + $$.loc = $2.loc; + $$.param = param; + } + ; + +parameter_declaration + // + // With name + // + : type_qualifier parameter_declarator { + $$ = $2; + if ($1.qualifier.precision != EpqNone) + $$.param.type->getQualifier().precision = $1.qualifier.precision; + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type); + parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type); + + } + | parameter_declarator { + $$ = $1; + + parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type); + parseContext.paramCheckFixStorage($1.loc, EvqTemporary, 
*$$.param.type); + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + } + // + // Without name + // + | type_qualifier parameter_type_specifier { + $$ = $2; + if ($1.qualifier.precision != EpqNone) + $$.param.type->getQualifier().precision = $1.qualifier.precision; + parseContext.precisionQualifierCheck($1.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + + parseContext.checkNoShaderLayouts($1.loc, $1.shaderQualifiers); + parseContext.parameterTypeCheck($2.loc, $1.qualifier.storage, *$$.param.type); + parseContext.paramCheckFix($1.loc, $1.qualifier, *$$.param.type); + } + | parameter_type_specifier { + $$ = $1; + + parseContext.parameterTypeCheck($1.loc, EvqIn, *$1.param.type); + parseContext.paramCheckFixStorage($1.loc, EvqTemporary, *$$.param.type); + parseContext.precisionQualifierCheck($$.loc, $$.param.type->getBasicType(), $$.param.type->getQualifier()); + } + ; + +parameter_type_specifier + : type_specifier { + TParameter param = { 0, new TType($1) }; + $$.param = param; + if ($1.arraySizes) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + ; + +init_declarator_list + : single_declaration { + $$ = $1; + } + | init_declarator_list COMMA IDENTIFIER { + $$ = $1; + parseContext.declareVariable($3.loc, *$3.string, $1.type); + } + | init_declarator_list COMMA IDENTIFIER array_specifier { + $$ = $1; + parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes); + } + | init_declarator_list COMMA IDENTIFIER array_specifier EQUAL initializer { + $$.type = $1.type; + TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, $4.arraySizes, $6); + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $5.loc); + } + | init_declarator_list COMMA IDENTIFIER EQUAL initializer { + $$.type = $1.type; + TIntermNode* initNode = parseContext.declareVariable($3.loc, *$3.string, $1.type, 0, $5); + $$.intermNode = parseContext.intermediate.growAggregate($1.intermNode, initNode, $4.loc); + } + ; + +single_declaration + : fully_specified_type { + $$.type = $1; + $$.intermNode = 0; + + parseContext.declareTypeDefaults($$.loc, $$.type); + + } + | fully_specified_type IDENTIFIER { + $$.type = $1; + $$.intermNode = 0; + parseContext.declareVariable($2.loc, *$2.string, $1); + } + | fully_specified_type IDENTIFIER array_specifier { + $$.type = $1; + $$.intermNode = 0; + parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes); + } + | fully_specified_type IDENTIFIER array_specifier EQUAL initializer { + $$.type = $1; + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, $3.arraySizes, $5); + $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $4.loc); + } + | fully_specified_type IDENTIFIER EQUAL initializer { + $$.type = $1; + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4); + $$.intermNode = parseContext.intermediate.growAggregate(0, initNode, $3.loc); + } + +// Grammar Note: No 'enum', or 'typedef'. 
+ +fully_specified_type + : type_specifier { + $$ = $1; + + parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $$); + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + } + parseContext.precisionQualifierCheck($$.loc, $$.basicType, $$.qualifier); + } + | type_qualifier type_specifier { + parseContext.globalQualifierFixCheck($1.loc, $1.qualifier); + parseContext.globalQualifierTypeCheck($1.loc, $1.qualifier, $2); + + if ($2.arraySizes) { + parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type"); + } + + if ($2.arraySizes && parseContext.arrayQualifierError($2.loc, $1.qualifier)) + $2.arraySizes = nullptr; + + parseContext.checkNoShaderLayouts($2.loc, $1.shaderQualifiers); + $2.shaderQualifiers.merge($1.shaderQualifiers); + parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true); + parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier); + + $$ = $2; + + if (! $$.qualifier.isInterpolation() && + ((parseContext.language == EShLangVertex && $$.qualifier.storage == EvqVaryingOut) || + (parseContext.language == EShLangFragment && $$.qualifier.storage == EvqVaryingIn))) + $$.qualifier.smooth = true; + } + ; + +invariant_qualifier + : INVARIANT { + parseContext.globalCheck($1.loc, "invariant"); + parseContext.profileRequires($$.loc, ENoProfile, 120, 0, "invariant"); + $$.init($1.loc); + $$.qualifier.invariant = true; + } + ; + +interpolation_qualifier + : SMOOTH { + parseContext.globalCheck($1.loc, "smooth"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "smooth"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "smooth"); + $$.init($1.loc); + $$.qualifier.smooth = true; + } + | FLAT { + parseContext.globalCheck($1.loc, "flat"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "flat"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "flat"); + $$.init($1.loc); + $$.qualifier.flat = true; + } + + | NOPERSPECTIVE { + parseContext.globalCheck($1.loc, "noperspective"); + parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective"); + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "noperspective"); + $$.init($1.loc); + $$.qualifier.nopersp = true; + } + | EXPLICITINTERPAMD { + parseContext.globalCheck($1.loc, "__explicitInterpAMD"); + parseContext.profileRequires($1.loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + parseContext.profileRequires($1.loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + $$.init($1.loc); + $$.qualifier.explicitInterp = true; + } + | PERVERTEXNV { + parseContext.globalCheck($1.loc, "pervertexNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires($1.loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + $$.init($1.loc); + $$.qualifier.pervertexNV = true; + } + | PERPRIMITIVENV { + // No need for profile version or extension check. Shader stage already checks both. 
+ parseContext.globalCheck($1.loc, "perprimitiveNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV"); + // Fragment shader stage doesn't check for extension. So we explicitly add below extension check. + if (parseContext.language == EShLangFragment) + parseContext.requireExtensions($1.loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV"); + $$.init($1.loc); + $$.qualifier.perPrimitiveNV = true; + } + | PERVIEWNV { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck($1.loc, "perviewNV"); + parseContext.requireStage($1.loc, EShLangMeshNV, "perviewNV"); + $$.init($1.loc); + $$.qualifier.perViewNV = true; + } + | PERTASKNV { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck($1.loc, "taskNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV"); + $$.init($1.loc); + $$.qualifier.perTaskNV = true; + } + + ; + +layout_qualifier + : LAYOUT LEFT_PAREN layout_qualifier_id_list RIGHT_PAREN { + $$ = $3; + } + ; + +layout_qualifier_id_list + : layout_qualifier_id { + $$ = $1; + } + | layout_qualifier_id_list COMMA layout_qualifier_id { + $$ = $1; + $$.shaderQualifiers.merge($3.shaderQualifiers); + parseContext.mergeObjectLayoutQualifiers($$.qualifier, $3.qualifier, false); + } + +layout_qualifier_id + : IDENTIFIER { + $$.init($1.loc); + parseContext.setLayoutQualifier($1.loc, $$, *$1.string); + } + | IDENTIFIER EQUAL constant_expression { + $$.init($1.loc); + parseContext.setLayoutQualifier($1.loc, $$, *$1.string, $3); + } + | SHARED { // because "shared" is both an identifier and a keyword + $$.init($1.loc); + TString strShared("shared"); + parseContext.setLayoutQualifier($1.loc, $$, strShared); + } + ; + + +precise_qualifier + : PRECISE { + parseContext.profileRequires($$.loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise"); + parseContext.profileRequires($1.loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise"); + $$.init($1.loc); + $$.qualifier.noContraction = true; + } + ; + + +type_qualifier + : single_type_qualifier { + $$ = $1; + } + | type_qualifier single_type_qualifier { + $$ = $1; + if ($$.basicType == EbtVoid) + $$.basicType = $2.basicType; + + $$.shaderQualifiers.merge($2.shaderQualifiers); + parseContext.mergeQualifiers($$.loc, $$.qualifier, $2.qualifier, false); + } + ; + +single_type_qualifier + : storage_qualifier { + $$ = $1; + } + | layout_qualifier { + $$ = $1; + } + | precision_qualifier { + parseContext.checkPrecisionQualifier($1.loc, $1.qualifier.precision); + $$ = $1; + } + | interpolation_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + | invariant_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + + | precise_qualifier { + // allow inheritance of storage qualifier from block declaration + $$ = $1; + } + | non_uniform_qualifier { + $$ = $1; + } + + ; + +storage_qualifier + : CONST { + $$.init($1.loc); + $$.qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant + } + | INOUT { + parseContext.globalCheck($1.loc, "inout"); + $$.init($1.loc); + $$.qualifier.storage = EvqInOut; + } + | IN { + parseContext.globalCheck($1.loc, "in"); + $$.init($1.loc); + // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later + $$.qualifier.storage = 
EvqIn; + } + | OUT { + parseContext.globalCheck($1.loc, "out"); + $$.init($1.loc); + // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later + $$.qualifier.storage = EvqOut; + } + | CENTROID { + parseContext.profileRequires($1.loc, ENoProfile, 120, 0, "centroid"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "centroid"); + parseContext.globalCheck($1.loc, "centroid"); + $$.init($1.loc); + $$.qualifier.centroid = true; + } + | UNIFORM { + parseContext.globalCheck($1.loc, "uniform"); + $$.init($1.loc); + $$.qualifier.storage = EvqUniform; + } + | SHARED { + parseContext.globalCheck($1.loc, "shared"); + parseContext.profileRequires($1.loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared"); + parseContext.profileRequires($1.loc, EEsProfile, 310, 0, "shared"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared"); + $$.init($1.loc); + $$.qualifier.storage = EvqShared; + } + | BUFFER { + parseContext.globalCheck($1.loc, "buffer"); + $$.init($1.loc); + $$.qualifier.storage = EvqBuffer; + } + + | ATTRIBUTE { + parseContext.requireStage($1.loc, EShLangVertex, "attribute"); + parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "attribute"); + parseContext.checkDeprecated($1.loc, ENoProfile, 130, "attribute"); + parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "attribute"); + parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "attribute"); + + parseContext.globalCheck($1.loc, "attribute"); + + $$.init($1.loc); + $$.qualifier.storage = EvqVaryingIn; + } + | VARYING { + parseContext.checkDeprecated($1.loc, ENoProfile, 130, "varying"); + parseContext.checkDeprecated($1.loc, ECoreProfile, 130, "varying"); + parseContext.requireNotRemoved($1.loc, ECoreProfile, 420, "varying"); + parseContext.requireNotRemoved($1.loc, EEsProfile, 300, "varying"); + + parseContext.globalCheck($1.loc, "varying"); + + $$.init($1.loc); + if (parseContext.language == EShLangVertex) + $$.qualifier.storage = EvqVaryingOut; + else + $$.qualifier.storage = EvqVaryingIn; + } + | PATCH { + parseContext.globalCheck($1.loc, "patch"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch"); + $$.init($1.loc); + $$.qualifier.patch = true; + } + | SAMPLE { + parseContext.globalCheck($1.loc, "sample"); + $$.init($1.loc); + $$.qualifier.sample = true; + } + | HITATTRNV { + parseContext.globalCheck($1.loc, "hitAttributeNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqHitAttr; + } + | HITATTREXT { + parseContext.globalCheck($1.loc, "hitAttributeEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "hitAttributeNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqHitAttr; + } + | PAYLOADNV { + parseContext.globalCheck($1.loc, "rayPayloadNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV"); + $$.init($1.loc); + 
$$.qualifier.storage = EvqPayload; + } + | PAYLOADEXT { + parseContext.globalCheck($1.loc, "rayPayloadEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayload; + } + | PAYLOADINNV { + parseContext.globalCheck($1.loc, "rayPayloadInNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayloadIn; + } + | PAYLOADINEXT { + parseContext.globalCheck($1.loc, "rayPayloadInEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadInEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqPayloadIn; + } + | CALLDATANV { + parseContext.globalCheck($1.loc, "callableDataNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableData; + } + | CALLDATAEXT { + parseContext.globalCheck($1.loc, "callableDataEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableData; + } + | CALLDATAINNV { + parseContext.globalCheck($1.loc, "callableDataInNV"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableDataIn; + } + | CALLDATAINEXT { + parseContext.globalCheck($1.loc, "callableDataInEXT"); + parseContext.requireStage($1.loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInEXT"); + parseContext.profileRequires($1.loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataInEXT"); + $$.init($1.loc); + $$.qualifier.storage = EvqCallableDataIn; + } + | COHERENT { + $$.init($1.loc); + $$.qualifier.coherent = true; + } + | DEVICECOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent"); + $$.qualifier.devicecoherent = true; + } + | QUEUEFAMILYCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent"); + $$.qualifier.queuefamilycoherent = true; + } + | WORKGROUPCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent"); + $$.qualifier.workgroupcoherent = true; + } + | SUBGROUPCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent"); + $$.qualifier.subgroupcoherent = true; + } + | NONPRIVATE { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, 
&E_GL_KHR_memory_scope_semantics, "nonprivate"); + $$.qualifier.nonprivate = true; + } + | SHADERCALLCOHERENT { + $$.init($1.loc); + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_ray_tracing, "shadercallcoherent"); + $$.qualifier.shadercallcoherent = true; + } + | VOLATILE { + $$.init($1.loc); + $$.qualifier.volatil = true; + } + | RESTRICT { + $$.init($1.loc); + $$.qualifier.restrict = true; + } + | READONLY { + $$.init($1.loc); + $$.qualifier.readonly = true; + } + | WRITEONLY { + $$.init($1.loc); + $$.qualifier.writeonly = true; + } + | SUBROUTINE { + parseContext.spvRemoved($1.loc, "subroutine"); + parseContext.globalCheck($1.loc, "subroutine"); + parseContext.unimplemented($1.loc, "subroutine"); + $$.init($1.loc); + } + | SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN { + parseContext.spvRemoved($1.loc, "subroutine"); + parseContext.globalCheck($1.loc, "subroutine"); + parseContext.unimplemented($1.loc, "subroutine"); + $$.init($1.loc); + } + + ; + + +non_uniform_qualifier + : NONUNIFORM { + $$.init($1.loc); + $$.qualifier.nonUniform = true; + } + ; + +type_name_list + : IDENTIFIER { + // TODO + } + | type_name_list COMMA IDENTIFIER { + // TODO: 4.0 semantics: subroutines + // 1) make sure each identifier is a type declared earlier with SUBROUTINE + // 2) save all of the identifiers for future comparison with the declared function + } + ; + + +type_specifier + : type_specifier_nonarray type_parameter_specifier_opt { + $$ = $1; + $$.qualifier.precision = parseContext.getDefaultPrecision($$); + $$.typeParameters = $2; + } + | type_specifier_nonarray type_parameter_specifier_opt array_specifier { + parseContext.arrayOfArrayVersionCheck($3.loc, $3.arraySizes); + $$ = $1; + $$.qualifier.precision = parseContext.getDefaultPrecision($$); + $$.typeParameters = $2; + $$.arraySizes = $3.arraySizes; + } + ; + +array_specifier + : LEFT_BRACKET RIGHT_BRACKET { + $$.loc = $1.loc; + $$.arraySizes = new TArraySizes; + $$.arraySizes->addInnerSize(); + } + | LEFT_BRACKET conditional_expression RIGHT_BRACKET { + $$.loc = $1.loc; + $$.arraySizes = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck($2->getLoc(), $2, size, "array size"); + $$.arraySizes->addInnerSize(size); + } + | array_specifier LEFT_BRACKET RIGHT_BRACKET { + $$ = $1; + $$.arraySizes->addInnerSize(); + } + | array_specifier LEFT_BRACKET conditional_expression RIGHT_BRACKET { + $$ = $1; + + TArraySize size; + parseContext.arraySizeCheck($3->getLoc(), $3, size, "array size"); + $$.arraySizes->addInnerSize(size); + } + ; + +type_parameter_specifier_opt + : type_parameter_specifier { + $$ = $1; + } + | /* May be null */ { + $$ = 0; + } + ; + +type_parameter_specifier + : LEFT_ANGLE type_parameter_specifier_list RIGHT_ANGLE { + $$ = $2; + } + ; + +type_parameter_specifier_list + : unary_expression { + $$ = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck($1->getLoc(), $1, size, "type parameter"); + $$->addInnerSize(size); + } + | type_parameter_specifier_list COMMA unary_expression { + $$ = $1; + + TArraySize size; + parseContext.arraySizeCheck($3->getLoc(), $3, size, "type parameter"); + $$->addInnerSize(size); + } + ; + +type_specifier_nonarray + : VOID { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtVoid; + } + | FLOAT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + } + | INT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + } + | UINT { + parseContext.fullIntegerCheck($1.loc, 
"unsigned integer"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + } + | BOOL { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + } + | VEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(2); + } + | VEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(3); + } + | VEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(4); + } + | BVEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(2); + } + | BVEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(3); + } + | BVEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtBool; + $$.setVector(4); + } + | IVEC2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(2); + } + | IVEC3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(3); + } + | IVEC4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(4); + } + | UVEC2 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(2); + } + | UVEC3 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(3); + } + | UVEC4 { + parseContext.fullIntegerCheck($1.loc, "unsigned integer vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(4); + } + | MAT2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | MAT3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | MAT4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | MAT2X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | MAT2X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 3); + } + | MAT2X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 4); + } + | MAT3X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 2); + } + | MAT3X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | MAT3X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 4); + } + | MAT4X2 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 2); + } + | MAT4X3 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 3); + } + | MAT4X4 { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + + | DOUBLE { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + } + | FLOAT16_T { + parseContext.float16ScalarVectorCheck($1.loc, "float16_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + } + | FLOAT32_T { + parseContext.explicitFloat32Check($1.loc, "float32_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + } + | FLOAT64_T { + parseContext.explicitFloat64Check($1.loc, "float64_t", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + } + | INT8_T { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + } + | UINT8_T { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + } + | INT16_T { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + } + | UINT16_T { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + } + | INT32_T { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + } + | UINT32_T { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + } + | INT64_T { + parseContext.int64Check($1.loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + } + | UINT64_T { + parseContext.int64Check($1.loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + } + | DVEC2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(2); + } + | DVEC3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(3); + } + | DVEC4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double vector"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(4); + } + | F16VEC2 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(2); + } + | F16VEC3 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(3); + } + | F16VEC4 { + parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setVector(4); + } + | F32VEC2 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(2); + } + | F32VEC3 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(3); + } + | F32VEC4 { + parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setVector(4); + } + | F64VEC2 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(2); + } + | F64VEC3 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(3); + } + | F64VEC4 { + parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setVector(4); + } + | I8VEC2 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(2); + } + | I8VEC3 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(3); + } + | I8VEC4 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt8; + $$.setVector(4); + } + | I16VEC2 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(2); + } + | I16VEC3 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(3); + } + 
| I16VEC4 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt16; + $$.setVector(4); + } + | I32VEC2 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(2); + } + | I32VEC3 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(3); + } + | I32VEC4 { + parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.setVector(4); + } + | I64VEC2 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(2); + } + | I64VEC3 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(3); + } + | I64VEC4 { + parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt64; + $$.setVector(4); + } + | U8VEC2 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(2); + } + | U8VEC3 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(3); + } + | U8VEC4 { + parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint8; + $$.setVector(4); + } + | U16VEC2 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(2); + } + | U16VEC3 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(3); + } + | U16VEC4 { + parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint16; + $$.setVector(4); + } + | U32VEC2 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(2); + } + | U32VEC3 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(3); + } + | U32VEC4 { + parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.setVector(4); + } + | U64VEC2 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(2); + } + | U64VEC3 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(3); + } + | U64VEC4 { + parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint64; + $$.setVector(4); + } + | DMAT2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | DMAT3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | DMAT4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | DMAT2X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | DMAT2X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 3); + } + | DMAT2X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 4); + } + | DMAT3X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 2); + } + | DMAT3X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | DMAT3X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 4); + } + | DMAT4X2 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 2); + } + | DMAT4X3 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 3); + } + | DMAT4X4 { + parseContext.requireProfile($1.loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck($1.loc, "double matrix"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | F16MAT2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 2); + } + | F16MAT3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 3); + } + | F16MAT4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 4); + } + | F16MAT2X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 2); + } + | F16MAT2X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 3); + } + | F16MAT2X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(2, 4); + } + | F16MAT3X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 2); + } + | F16MAT3X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(3, 3); + } + | F16MAT3X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); 
+ $$.basicType = EbtFloat16; + $$.setMatrix(3, 4); + } + | F16MAT4X2 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 2); + } + | F16MAT4X3 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 3); + } + | F16MAT4X4 { + parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat16; + $$.setMatrix(4, 4); + } + | F32MAT2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | F32MAT3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | F32MAT4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | F32MAT2X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 2); + } + | F32MAT2X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 3); + } + | F32MAT2X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(2, 4); + } + | F32MAT3X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 2); + } + | F32MAT3X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 3); + } + | F32MAT3X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(3, 4); + } + | F32MAT4X2 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 2); + } + | F32MAT4X3 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.setMatrix(4, 3); + } + | F32MAT4X4 { + parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + 
$$.basicType = EbtFloat; + $$.setMatrix(4, 4); + } + | F64MAT2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | F64MAT3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | F64MAT4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | F64MAT2X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 2); + } + | F64MAT2X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 3); + } + | F64MAT2X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(2, 4); + } + | F64MAT3X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 2); + } + | F64MAT3X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 3); + } + | F64MAT3X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(3, 4); + } + | F64MAT4X2 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 2); + } + | F64MAT4X3 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 3); + } + | F64MAT4X4 { + parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtDouble; + $$.setMatrix(4, 4); + } + | ACCSTRUCTNV { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAccStruct; + } + | ACCSTRUCTEXT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAccStruct; + } + | RAYQUERYEXT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtRayQuery; + } + | ATOMIC_UINT { + parseContext.vulkanRemoved($1.loc, "atomic counter types"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtAtomicUint; + } + | SAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.set(EbtFloat, Esd1D); + } + + | SAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + } + | SAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd3D); + } + | SAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube); + } + | SAMPLER2DSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, false, true); + } + | SAMPLERCUBESHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, false, true); + } + | SAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true); + } + | SAMPLER2DARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true, true); + } + + | SAMPLER1DSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, false, true); + } + | SAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, true); + } + | SAMPLER1DARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd1D, true, true); + } + | SAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, true); + } + | SAMPLERCUBEARRAYSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdCube, true, true); + } + | F16SAMPLER1D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D); + } + | F16SAMPLER2D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D); + } + | F16SAMPLER3D { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd3D); + } + | F16SAMPLERCUBE { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube); + } + | F16SAMPLER1DSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, false, true); + } + | F16SAMPLER2DSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, false, true); + } + | 
F16SAMPLERCUBESHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, false, true); + } + | F16SAMPLER1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, true); + } + | F16SAMPLER2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true); + } + | F16SAMPLER1DARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd1D, true, true); + } + | F16SAMPLER2DARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true, true); + } + | F16SAMPLERCUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, true); + } + | F16SAMPLERCUBEARRAYSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdCube, true, true); + } + | ISAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd1D); + } + + | ISAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D); + } + | ISAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd3D); + } + | ISAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdCube); + } + | ISAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, true); + } + | USAMPLER2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D); + } + | USAMPLER3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd3D); + } + | USAMPLERCUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdCube); + } + + | ISAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd1D, true); + } + | ISAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdCube, true); + } + | USAMPLER1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.set(EbtUint, Esd1D); + } + | USAMPLER1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd1D, true); + } + | USAMPLERCUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdCube, true); + } + | TEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdCube, true); + } + | ITEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdCube, true); + } + | UTEXTURECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdCube, true); + } + + | USAMPLER2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, true); + } + | TEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D); + } + | TEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd3D); + } + | TEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, true); + } + | TEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdCube); + } + | ITEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D); + } + | ITEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd3D); + } + | ITEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdCube); + } + | ITEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, true); + } + | UTEXTURE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D); + } + | UTEXTURE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd3D); + } + | UTEXTURECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdCube); + } + | UTEXTURE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, true); + } + | SAMPLER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setPureSampler(false); + } + | SAMPLERSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setPureSampler(true); + } + + | SAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdRect); + } + | SAMPLER2DRECTSHADOW { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdRect, false, true); + } + | F16SAMPLER2DRECT { + 
parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdRect); + } + | F16SAMPLER2DRECTSHADOW { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdRect, false, true); + } + | ISAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdRect); + } + | USAMPLER2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdRect); + } + | SAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, EsdBuffer); + } + | F16SAMPLERBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, EsdBuffer); + } + | ISAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, EsdBuffer); + } + | USAMPLERBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, EsdBuffer); + } + | SAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, false, false, true); + } + | F16SAMPLER2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, false, false, true); + } + | ISAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, false, false, true); + } + | USAMPLER2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, false, false, true); + } + | SAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D, true, false, true); + } + | F16SAMPLER2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat16, Esd2D, true, false, true); + } + | ISAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtInt, Esd2D, true, false, true); + } + | USAMPLER2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtUint, Esd2D, true, false, true); + } + | TEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd1D); + } + | F16TEXTURE1D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd1D); + } + | 
F16TEXTURE2D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D); + } + | F16TEXTURE3D { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd3D); + } + | F16TEXTURECUBE { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdCube); + } + | TEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd1D, true); + } + | F16TEXTURE1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd1D, true); + } + | F16TEXTURE2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, true); + } + | F16TEXTURECUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdCube, true); + } + | ITEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd1D); + } + | ITEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd1D, true); + } + | UTEXTURE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd1D); + } + | UTEXTURE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd1D, true); + } + | TEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdRect); + } + | F16TEXTURE2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, EsdRect); + } + | ITEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdRect); + } + | UTEXTURE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdRect); + } + | TEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, EsdBuffer); + } + | F16TEXTUREBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + 
$$.sampler.setTexture(EbtFloat16, EsdBuffer); + } + | ITEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, EsdBuffer); + } + | UTEXTUREBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, EsdBuffer); + } + | TEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, false, false, true); + } + | F16TEXTURE2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, false, false, true); + } + | ITEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, false, false, true); + } + | UTEXTURE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, false, false, true); + } + | TEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat, Esd2D, true, false, true); + } + | F16TEXTURE2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtFloat16, Esd2D, true, false, true); + } + | ITEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtInt, Esd2D, true, false, true); + } + | UTEXTURE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setTexture(EbtUint, Esd2D, true, false, true); + } + | IMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd1D); + } + | F16IMAGE1D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd1D); + } + | IIMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd1D); + } + | UIMAGE1D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd1D); + } + | IMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D); + } + | F16IMAGE2D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D); + } + | IIMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D); + } + | UIMAGE2D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D); + } + | IMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd3D); + } + 
| F16IMAGE3D { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd3D); + } + | IIMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd3D); + } + | UIMAGE3D { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd3D); + } + | IMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdRect); + } + | F16IMAGE2DRECT { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdRect); + } + | IIMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdRect); + } + | UIMAGE2DRECT { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdRect); + } + | IMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdCube); + } + | F16IMAGECUBE { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdCube); + } + | IIMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdCube); + } + | UIMAGECUBE { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdCube); + } + | IMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdBuffer); + } + | F16IMAGEBUFFER { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdBuffer); + } + | IIMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdBuffer); + } + | UIMAGEBUFFER { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdBuffer); + } + | IMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd1D, true); + } + | F16IMAGE1DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd1D, true); + } + | IIMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd1D, true); + } + | UIMAGE1DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd1D, true); + } + | IMAGE2DARRAY { + $$.init($1.loc, 
parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, true); + } + | F16IMAGE2DARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, true); + } + | IIMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, true); + } + | UIMAGE2DARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, true); + } + | IMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, EsdCube, true); + } + | F16IMAGECUBEARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, EsdCube, true); + } + | IIMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, EsdCube, true); + } + | UIMAGECUBEARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, EsdCube, true); + } + | IMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, false, false, true); + } + | F16IMAGE2DMS { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, false, false, true); + } + | IIMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, false, false, true); + } + | UIMAGE2DMS { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, false, false, true); + } + | IMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat, Esd2D, true, false, true); + } + | F16IMAGE2DMSARRAY { + parseContext.float16OpaqueCheck($1.loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtFloat16, Esd2D, true, false, true); + } + | IIMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtInt, Esd2D, true, false, true); + } + | UIMAGE2DMSARRAY { + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setImage(EbtUint, Esd2D, true, false, true); + } + | SAMPLEREXTERNALOES { // GL_OES_EGL_image_external + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + $$.sampler.external = true; + } + | SAMPLEREXTERNAL2DY2YEXT { // GL_EXT_YUV_target + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.set(EbtFloat, Esd2D); + $$.sampler.yuv = true; + } + | SUBPASSINPUT { + parseContext.requireStage($1.loc, 
EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat); + } + | SUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat, true); + } + | F16SUBPASSINPUT { + parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat16); + } + | F16SUBPASSINPUTMS { + parseContext.float16OpaqueCheck($1.loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtFloat16, true); + } + | ISUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtInt); + } + | ISUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtInt, true); + } + | USUBPASSINPUT { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtUint); + } + | USUBPASSINPUTMS { + parseContext.requireStage($1.loc, EShLangFragment, "subpass input"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtSampler; + $$.sampler.setSubpass(EbtUint, true); + } + | FCOOPMATNV { + parseContext.fcoopmatCheck($1.loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtFloat; + $$.coopmat = true; + } + | ICOOPMATNV { + parseContext.intcoopmatCheck($1.loc, "icoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtInt; + $$.coopmat = true; + } + | UCOOPMATNV { + parseContext.intcoopmatCheck($1.loc, "ucoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtUint; + $$.coopmat = true; + } + + | struct_specifier { + $$ = $1; + $$.qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary; + parseContext.structTypeCheck($$.loc, $$); + } + | TYPE_NAME { + // + // This is for user defined type names. The lexical phase looked up the + // type. 
+ // + if (const TVariable* variable = ($1.symbol)->getAsVariable()) { + const TType& structure = variable->getType(); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + $$.basicType = EbtStruct; + $$.userDef = &structure; + } else + parseContext.error($1.loc, "expected type name", $1.string->c_str(), ""); + } + ; + +precision_qualifier + : HIGH_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "highp precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqHigh); + } + | MEDIUM_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "mediump precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqMedium); + } + | LOW_PRECISION { + parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "lowp precision qualifier"); + $$.init($1.loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier($1.loc, $$.qualifier, EpqLow); + } + ; + +struct_specifier + : STRUCT IDENTIFIER LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + TType* structure = new TType($5, *$2.string); + parseContext.structArrayCheck($2.loc, *structure); + TVariable* userTypeDef = new TVariable($2.string, *structure, true); + if (! parseContext.symbolTable.insert(*userTypeDef)) + parseContext.error($2.loc, "redefinition", $2.string->c_str(), "struct"); + $$.init($1.loc); + $$.basicType = EbtStruct; + $$.userDef = structure; + --parseContext.structNestingLevel; + } + | STRUCT LEFT_BRACE { parseContext.nestedStructCheck($1.loc); } struct_declaration_list RIGHT_BRACE { + TType* structure = new TType($4, TString("")); + $$.init($1.loc); + $$.basicType = EbtStruct; + $$.userDef = structure; + --parseContext.structNestingLevel; + } + ; + +struct_declaration_list + : struct_declaration { + $$ = $1; + } + | struct_declaration_list struct_declaration { + $$ = $1; + for (unsigned int i = 0; i < $2->size(); ++i) { + for (unsigned int j = 0; j < $$->size(); ++j) { + if ((*$$)[j].type->getFieldName() == (*$2)[i].type->getFieldName()) + parseContext.error((*$2)[i].loc, "duplicate member name:", "", (*$2)[i].type->getFieldName().c_str()); + } + $$->push_back((*$2)[i]); + } + } + ; + +struct_declaration + : type_specifier struct_declarator_list SEMICOLON { + if ($1.arraySizes) { + parseContext.profileRequires($1.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($1.loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck($1.loc, *$1.arraySizes); + } + + $$ = $2; + + parseContext.voidErrorCheck($1.loc, (*$2)[0].type->getFieldName(), $1.basicType); + parseContext.precisionQualifierCheck($1.loc, $1.basicType, $1.qualifier); + + for (unsigned int i = 0; i < $$->size(); ++i) { + TType type($1); + type.setFieldName((*$$)[i].type->getFieldName()); + type.transferArraySizes((*$$)[i].type->getArraySizes()); + type.copyArrayInnerSizes($1.arraySizes); + parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes()); + (*$$)[i].type->shallowCopy(type); + } + } + | type_qualifier type_specifier struct_declarator_list SEMICOLON { + if ($2.arraySizes) { + parseContext.profileRequires($2.loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires($2.loc, EEsProfile, 300, 0, "arrayed type"); + if 
(parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck($2.loc, *$2.arraySizes); + } + + $$ = $3; + + parseContext.memberQualifierCheck($1); + parseContext.voidErrorCheck($2.loc, (*$3)[0].type->getFieldName(), $2.basicType); + parseContext.mergeQualifiers($2.loc, $2.qualifier, $1.qualifier, true); + parseContext.precisionQualifierCheck($2.loc, $2.basicType, $2.qualifier); + + for (unsigned int i = 0; i < $$->size(); ++i) { + TType type($2); + type.setFieldName((*$$)[i].type->getFieldName()); + type.transferArraySizes((*$$)[i].type->getArraySizes()); + type.copyArrayInnerSizes($2.arraySizes); + parseContext.arrayOfArrayVersionCheck((*$$)[i].loc, type.getArraySizes()); + (*$$)[i].type->shallowCopy(type); + } + } + ; + +struct_declarator_list + : struct_declarator { + $$ = new TTypeList; + $$->push_back($1); + } + | struct_declarator_list COMMA struct_declarator { + $$->push_back($3); + } + ; + +struct_declarator + : IDENTIFIER { + $$.type = new TType(EbtVoid); + $$.loc = $1.loc; + $$.type->setFieldName(*$1.string); + } + | IDENTIFIER array_specifier { + parseContext.arrayOfArrayVersionCheck($1.loc, $2.arraySizes); + + $$.type = new TType(EbtVoid); + $$.loc = $1.loc; + $$.type->setFieldName(*$1.string); + $$.type->transferArraySizes($2.arraySizes); + } + ; + +initializer + : assignment_expression { + $$ = $1; + } + + | LEFT_BRACE initializer_list RIGHT_BRACE { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile($1.loc, ~EEsProfile, initFeature); + parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + $$ = $2; + } + | LEFT_BRACE initializer_list COMMA RIGHT_BRACE { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile($1.loc, ~EEsProfile, initFeature); + parseContext.profileRequires($1.loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + $$ = $2; + } + + ; + + +initializer_list + : initializer { + $$ = parseContext.intermediate.growAggregate(0, $1, $1->getLoc()); + } + | initializer_list COMMA initializer { + $$ = parseContext.intermediate.growAggregate($1, $3); + } + ; + + +declaration_statement + : declaration { $$ = $1; } + ; + +statement + : compound_statement { $$ = $1; } + | simple_statement { $$ = $1; } + ; + +// Grammar Note: labeled statements for switch statements only; 'goto' is not supported. 
+ +simple_statement + : declaration_statement { $$ = $1; } + | expression_statement { $$ = $1; } + | selection_statement { $$ = $1; } + | switch_statement { $$ = $1; } + | case_label { $$ = $1; } + | iteration_statement { $$ = $1; } + | jump_statement { $$ = $1; } + + | demote_statement { $$ = $1; } + + ; + + +demote_statement + : DEMOTE SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "demote"); + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_demote_to_helper_invocation, "demote"); + $$ = parseContext.intermediate.addBranch(EOpDemote, $1.loc); + } + ; + + +compound_statement + : LEFT_BRACE RIGHT_BRACE { $$ = 0; } + | LEFT_BRACE { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } + statement_list { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } + RIGHT_BRACE { + if ($3 && $3->getAsAggregate()) + $3->getAsAggregate()->setOperator(EOpSequence); + $$ = $3; + } + ; + +statement_no_new_scope + : compound_statement_no_new_scope { $$ = $1; } + | simple_statement { $$ = $1; } + ; + +statement_scoped + : { + ++parseContext.controlFlowNestingLevel; + } + compound_statement { + --parseContext.controlFlowNestingLevel; + $$ = $2; + } + | { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + simple_statement { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + $$ = $2; + } + +compound_statement_no_new_scope + // Statement that doesn't create a new scope, for selection_statement, iteration_statement + : LEFT_BRACE RIGHT_BRACE { + $$ = 0; + } + | LEFT_BRACE statement_list RIGHT_BRACE { + if ($2 && $2->getAsAggregate()) + $2->getAsAggregate()->setOperator(EOpSequence); + $$ = $2; + } + ; + +statement_list + : statement { + $$ = parseContext.intermediate.makeAggregate($1); + if ($1 && $1->getAsBranchNode() && ($1->getAsBranchNode()->getFlowOp() == EOpCase || + $1->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence(0, $1); + $$ = 0; // start a fresh subsequence for what's after this case + } + } + | statement_list statement { + if ($2 && $2->getAsBranchNode() && ($2->getAsBranchNode()->getFlowOp() == EOpCase || + $2->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence($1 ? 
$1->getAsAggregate() : 0, $2); + $$ = 0; // start a fresh subsequence for what's after this case + } else + $$ = parseContext.intermediate.growAggregate($1, $2); + } + ; + +expression_statement + : SEMICOLON { $$ = 0; } + | expression SEMICOLON { $$ = static_cast<TIntermNode*>($1); } + ; + +selection_statement + : selection_statement_nonattributed { + $$ = $1; + } + + | attribute selection_statement_nonattributed { + parseContext.handleSelectionAttributes(*$1, $2); + $$ = $2; + } + + +selection_statement_nonattributed + : IF LEFT_PAREN expression RIGHT_PAREN selection_rest_statement { + parseContext.boolCheck($1.loc, $3); + $$ = parseContext.intermediate.addSelection($3, $5, $1.loc); + } + ; + +selection_rest_statement + : statement_scoped ELSE statement_scoped { + $$.node1 = $1; + $$.node2 = $3; + } + | statement_scoped { + $$.node1 = $1; + $$.node2 = 0; + } + ; + +condition + // In 1996 c++ draft, conditions can include single declarations + : expression { + $$ = $1; + parseContext.boolCheck($1->getLoc(), $1); + } + | fully_specified_type IDENTIFIER EQUAL initializer { + parseContext.boolCheck($2.loc, $1); + + TType type($1); + TIntermNode* initNode = parseContext.declareVariable($2.loc, *$2.string, $1, 0, $4); + if (initNode) + $$ = initNode->getAsTyped(); + else + $$ = 0; + } + ; + +switch_statement + : switch_statement_nonattributed { + $$ = $1; + } + + | attribute switch_statement_nonattributed { + parseContext.handleSwitchAttributes(*$1, $2); + $$ = $2; + } + + +switch_statement_nonattributed + : SWITCH LEFT_PAREN expression RIGHT_PAREN { + // start new switch sequence on the switch stack + ++parseContext.controlFlowNestingLevel; + ++parseContext.statementNestingLevel; + parseContext.switchSequenceStack.push_back(new TIntermSequence); + parseContext.switchLevel.push_back(parseContext.statementNestingLevel); + parseContext.symbolTable.push(); + } + LEFT_BRACE switch_statement_list RIGHT_BRACE { + $$ = parseContext.addSwitch($1.loc, $3, $7 ?
$7->getAsAggregate() : 0); + delete parseContext.switchSequenceStack.back(); + parseContext.switchSequenceStack.pop_back(); + parseContext.switchLevel.pop_back(); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + ; + +switch_statement_list + : /* nothing */ { + $$ = 0; + } + | statement_list { + $$ = $1; + } + ; + +case_label + : CASE expression COLON { + $$ = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error($1.loc, "cannot appear outside switch statement", "case", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error($1.loc, "cannot be nested inside control flow", "case", ""); + else { + parseContext.constantValueCheck($2, "case"); + parseContext.integerCheck($2, "case"); + $$ = parseContext.intermediate.addBranch(EOpCase, $2, $1.loc); + } + } + | DEFAULT COLON { + $$ = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error($1.loc, "cannot appear outside switch statement", "default", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error($1.loc, "cannot be nested inside control flow", "default", ""); + else + $$ = parseContext.intermediate.addBranch(EOpDefault, $1.loc); + } + ; + +iteration_statement + : iteration_statement_nonattributed { + $$ = $1; + } + + | attribute iteration_statement_nonattributed { + parseContext.handleLoopAttributes(*$1, $2); + $$ = $2; + } + + +iteration_statement_nonattributed + : WHILE LEFT_PAREN { + if (! parseContext.limits.whileLoops) + parseContext.error($1.loc, "while loops not available", "limitation", ""); + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + condition RIGHT_PAREN statement_no_new_scope { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.addLoop($6, $4, 0, true, $1.loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + | DO { + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + statement WHILE LEFT_PAREN expression RIGHT_PAREN SEMICOLON { + if (! parseContext.limits.whileLoops) + parseContext.error($1.loc, "do-while loops not available", "limitation", ""); + + parseContext.boolCheck($8.loc, $6); + + $$ = parseContext.intermediate.addLoop($3, $6, 0, false, $4.loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + | FOR LEFT_PAREN { + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } + for_init_statement for_rest_statement RIGHT_PAREN statement_no_new_scope { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.makeAggregate($4, $2.loc); + TIntermLoop* forLoop = parseContext.intermediate.addLoop($7, reinterpret_cast<TIntermTyped*>($5.node1), reinterpret_cast<TIntermTyped*>($5.node2), true, $1.loc); + if (!
parseContext.limits.nonInductiveForLoops) + parseContext.inductiveLoopCheck($1.loc, $4, forLoop); + $$ = parseContext.intermediate.growAggregate($$, forLoop, $1.loc); + $$->getAsAggregate()->setOperator(EOpSequence); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } + ; + +for_init_statement + : expression_statement { + $$ = $1; + } + | declaration_statement { + $$ = $1; + } + ; + +conditionopt + : condition { + $$ = $1; + } + | /* May be null */ { + $$ = 0; + } + ; + +for_rest_statement + : conditionopt SEMICOLON { + $$.node1 = $1; + $$.node2 = 0; + } + | conditionopt SEMICOLON expression { + $$.node1 = $1; + $$.node2 = $3; + } + ; + +jump_statement + : CONTINUE SEMICOLON { + if (parseContext.loopNestingLevel <= 0) + parseContext.error($1.loc, "continue statement only allowed in loops", "", ""); + $$ = parseContext.intermediate.addBranch(EOpContinue, $1.loc); + } + | BREAK SEMICOLON { + if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0) + parseContext.error($1.loc, "break statement only allowed in switch and loops", "", ""); + $$ = parseContext.intermediate.addBranch(EOpBreak, $1.loc); + } + | RETURN SEMICOLON { + $$ = parseContext.intermediate.addBranch(EOpReturn, $1.loc); + if (parseContext.currentFunctionType->getBasicType() != EbtVoid) + parseContext.error($1.loc, "non-void function must return a value", "return", ""); + if (parseContext.inMain) + parseContext.postEntryPointReturn = true; + } + | RETURN expression SEMICOLON { + $$ = parseContext.handleReturnValue($1.loc, $2); + } + | DISCARD SEMICOLON { + parseContext.requireStage($1.loc, EShLangFragment, "discard"); + $$ = parseContext.intermediate.addBranch(EOpKill, $1.loc); + } + ; + +// Grammar Note: No 'goto'. Gotos are not supported. + +translation_unit + : external_declaration { + $$ = $1; + parseContext.intermediate.setTreeRoot($$); + } + | translation_unit external_declaration { + if ($2 != nullptr) { + $$ = parseContext.intermediate.growAggregate($1, $2); + parseContext.intermediate.setTreeRoot($$); + } + } + ; + +external_declaration + : function_definition { + $$ = $1; + } + | declaration { + $$ = $1; + } + + | SEMICOLON { + parseContext.requireProfile($1.loc, ~EEsProfile, "extraneous semicolon"); + parseContext.profileRequires($1.loc, ~EEsProfile, 460, nullptr, "extraneous semicolon"); + $$ = nullptr; + } + + ; + +function_definition + : function_prototype { + $1.function = parseContext.handleFunctionDeclarator($1.loc, *$1.function, false /* not prototype */); + $1.intermNode = parseContext.handleFunctionDefinition($1.loc, *$1.function); + + // For ES 100 only, according to ES shading language 100 spec: A function + // body has a scope nested inside the function's definition. + if (parseContext.profile == EEsProfile && parseContext.version == 100) + { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } + } + compound_statement_no_new_scope { + // May be best done as post process phase on intermediate code + if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! 
parseContext.functionReturnsValue) + parseContext.error($1.loc, "function does not return a value:", "", $1.function->getName().c_str()); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + $$ = parseContext.intermediate.growAggregate($1.intermNode, $3); + parseContext.intermediate.setAggregateOperator($$, EOpFunction, $1.function->getType(), $1.loc); + $$->getAsAggregate()->setName($1.function->getMangledName().c_str()); + + // store the pragma information for debug and optimize and other vendor specific + // information. This information can be queried from the parse tree + $$->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize); + $$->getAsAggregate()->setDebug(parseContext.contextPragma.debug); + $$->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable); + + // Set currentFunctionType to empty pointer when goes outside of the function + parseContext.currentFunctionType = nullptr; + + // For ES 100 only, according to ES shading language 100 spec: A function + // body has a scope nested inside the function's definition. + if (parseContext.profile == EEsProfile && parseContext.version == 100) + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } + } + ; + + +attribute + : LEFT_BRACKET LEFT_BRACKET attribute_list RIGHT_BRACKET RIGHT_BRACKET { + $$ = $3; + parseContext.requireExtensions($1.loc, 1, &E_GL_EXT_control_flow_attributes, "attribute"); + } + +attribute_list + : single_attribute { + $$ = $1; + } + | attribute_list COMMA single_attribute { + $$ = parseContext.mergeAttributes($1, $3); + } + +single_attribute + : IDENTIFIER { + $$ = parseContext.makeAttributes(*$1.string); + } + | IDENTIFIER LEFT_PAREN constant_expression RIGHT_PAREN { + $$ = parseContext.makeAttributes(*$1.string, $3); + } + + +%% diff --git a/src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp b/src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp new file mode 100644 index 0000000..ac35797 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp @@ -0,0 +1,10615 @@ +/* A Bison parser, made by GNU Bison 3.0.4. */ + +/* Bison implementation for Yacc-like parsers in C + + Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. */ + +/* As a special exception, you may create a larger work that contains + part or all of the Bison parser skeleton and distribute that work + under terms of your choice, so long as that work isn't itself a + parser generator using the skeleton or a modified version thereof + as a parser skeleton. Alternatively, if you modify or redistribute + the parser skeleton itself, you may (at your option) remove this + special exception, which will cause the skeleton and the resulting + Bison output files to be licensed under the GNU General Public + License without this special exception.
+ + This special exception was added by the Free Software Foundation in + version 2.2 of Bison. */ + +/* C LALR(1) parser skeleton written by Richard Stallman, by + simplifying the original so-called "semantic" parser. */ + +/* All symbols defined below should begin with yy or YY, to avoid + infringing on user name space. This should be done even for local + variables, as they might otherwise be expanded by user macros. + There are some unavoidable exceptions within include files to + define necessary library symbols; they are noted "INFRINGES ON + USER NAME SPACE" below. */ + +/* Identify Bison output. */ +#define YYBISON 1 + +/* Bison version. */ +#define YYBISON_VERSION "3.0.4" + +/* Skeleton name. */ +#define YYSKELETON_NAME "yacc.c" + +/* Pure parsers. */ +#define YYPURE 1 + +/* Push parsers. */ +#define YYPUSH 0 + +/* Pull parsers. */ +#define YYPULL 1 + + + + +/* Copy the first part of user declarations. */ +#line 69 "MachineIndependent/glslang.y" /* yacc.c:339 */ + + +/* Based on: +ANSI C Yacc grammar + +In 1985, Jeff Lee published his Yacc grammar (which is accompanied by a +matching Lex specification) for the April 30, 1985 draft version of the +ANSI C standard. Tom Stockfisch reposted it to net.sources in 1987; that +original, as mentioned in the answer to question 17.25 of the comp.lang.c +FAQ, can be ftp'ed from ftp.uu.net, file usenet/net.sources/ansi.c.grammar.Z. + +I intend to keep this version as close to the current C Standard grammar as +possible; please let me know if you discover discrepancies. + +Jutta Degener, 1995 +*/ + +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "../Public/ShaderLang.h" +#include "attribute.h" + +using namespace glslang; + + +#line 92 "MachineIndependent/glslang_tab.cpp" /* yacc.c:339 */ + +# ifndef YY_NULLPTR +# if defined __cplusplus && 201103L <= __cplusplus +# define YY_NULLPTR nullptr +# else +# define YY_NULLPTR 0 +# endif +# endif + +/* Enabling verbose error messages. */ +#ifdef YYERROR_VERBOSE +# undef YYERROR_VERBOSE +# define YYERROR_VERBOSE 1 +#else +# define YYERROR_VERBOSE 1 +#endif + +/* In a future release of Bison, this section will be replaced + by #include "glslang_tab.cpp.h". */ +#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED +# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED +/* Debug traces. */ +#ifndef YYDEBUG +# define YYDEBUG 1 +#endif +#if YYDEBUG +extern int yydebug; +#endif + +/* Token type. 
*/ +#ifndef YYTOKENTYPE +# define YYTOKENTYPE + enum yytokentype + { + CONST = 258, + BOOL = 259, + INT = 260, + UINT = 261, + FLOAT = 262, + BVEC2 = 263, + BVEC3 = 264, + BVEC4 = 265, + IVEC2 = 266, + IVEC3 = 267, + IVEC4 = 268, + UVEC2 = 269, + UVEC3 = 270, + UVEC4 = 271, + VEC2 = 272, + VEC3 = 273, + VEC4 = 274, + MAT2 = 275, + MAT3 = 276, + MAT4 = 277, + MAT2X2 = 278, + MAT2X3 = 279, + MAT2X4 = 280, + MAT3X2 = 281, + MAT3X3 = 282, + MAT3X4 = 283, + MAT4X2 = 284, + MAT4X3 = 285, + MAT4X4 = 286, + SAMPLER2D = 287, + SAMPLER3D = 288, + SAMPLERCUBE = 289, + SAMPLER2DSHADOW = 290, + SAMPLERCUBESHADOW = 291, + SAMPLER2DARRAY = 292, + SAMPLER2DARRAYSHADOW = 293, + ISAMPLER2D = 294, + ISAMPLER3D = 295, + ISAMPLERCUBE = 296, + ISAMPLER2DARRAY = 297, + USAMPLER2D = 298, + USAMPLER3D = 299, + USAMPLERCUBE = 300, + USAMPLER2DARRAY = 301, + SAMPLER = 302, + SAMPLERSHADOW = 303, + TEXTURE2D = 304, + TEXTURE3D = 305, + TEXTURECUBE = 306, + TEXTURE2DARRAY = 307, + ITEXTURE2D = 308, + ITEXTURE3D = 309, + ITEXTURECUBE = 310, + ITEXTURE2DARRAY = 311, + UTEXTURE2D = 312, + UTEXTURE3D = 313, + UTEXTURECUBE = 314, + UTEXTURE2DARRAY = 315, + ATTRIBUTE = 316, + VARYING = 317, + FLOAT16_T = 318, + FLOAT32_T = 319, + DOUBLE = 320, + FLOAT64_T = 321, + INT64_T = 322, + UINT64_T = 323, + INT32_T = 324, + UINT32_T = 325, + INT16_T = 326, + UINT16_T = 327, + INT8_T = 328, + UINT8_T = 329, + I64VEC2 = 330, + I64VEC3 = 331, + I64VEC4 = 332, + U64VEC2 = 333, + U64VEC3 = 334, + U64VEC4 = 335, + I32VEC2 = 336, + I32VEC3 = 337, + I32VEC4 = 338, + U32VEC2 = 339, + U32VEC3 = 340, + U32VEC4 = 341, + I16VEC2 = 342, + I16VEC3 = 343, + I16VEC4 = 344, + U16VEC2 = 345, + U16VEC3 = 346, + U16VEC4 = 347, + I8VEC2 = 348, + I8VEC3 = 349, + I8VEC4 = 350, + U8VEC2 = 351, + U8VEC3 = 352, + U8VEC4 = 353, + DVEC2 = 354, + DVEC3 = 355, + DVEC4 = 356, + DMAT2 = 357, + DMAT3 = 358, + DMAT4 = 359, + F16VEC2 = 360, + F16VEC3 = 361, + F16VEC4 = 362, + F16MAT2 = 363, + F16MAT3 = 364, + F16MAT4 = 365, + F32VEC2 = 366, + F32VEC3 = 367, + F32VEC4 = 368, + F32MAT2 = 369, + F32MAT3 = 370, + F32MAT4 = 371, + F64VEC2 = 372, + F64VEC3 = 373, + F64VEC4 = 374, + F64MAT2 = 375, + F64MAT3 = 376, + F64MAT4 = 377, + DMAT2X2 = 378, + DMAT2X3 = 379, + DMAT2X4 = 380, + DMAT3X2 = 381, + DMAT3X3 = 382, + DMAT3X4 = 383, + DMAT4X2 = 384, + DMAT4X3 = 385, + DMAT4X4 = 386, + F16MAT2X2 = 387, + F16MAT2X3 = 388, + F16MAT2X4 = 389, + F16MAT3X2 = 390, + F16MAT3X3 = 391, + F16MAT3X4 = 392, + F16MAT4X2 = 393, + F16MAT4X3 = 394, + F16MAT4X4 = 395, + F32MAT2X2 = 396, + F32MAT2X3 = 397, + F32MAT2X4 = 398, + F32MAT3X2 = 399, + F32MAT3X3 = 400, + F32MAT3X4 = 401, + F32MAT4X2 = 402, + F32MAT4X3 = 403, + F32MAT4X4 = 404, + F64MAT2X2 = 405, + F64MAT2X3 = 406, + F64MAT2X4 = 407, + F64MAT3X2 = 408, + F64MAT3X3 = 409, + F64MAT3X4 = 410, + F64MAT4X2 = 411, + F64MAT4X3 = 412, + F64MAT4X4 = 413, + ATOMIC_UINT = 414, + ACCSTRUCTNV = 415, + ACCSTRUCTEXT = 416, + RAYQUERYEXT = 417, + FCOOPMATNV = 418, + ICOOPMATNV = 419, + UCOOPMATNV = 420, + SAMPLERCUBEARRAY = 421, + SAMPLERCUBEARRAYSHADOW = 422, + ISAMPLERCUBEARRAY = 423, + USAMPLERCUBEARRAY = 424, + SAMPLER1D = 425, + SAMPLER1DARRAY = 426, + SAMPLER1DARRAYSHADOW = 427, + ISAMPLER1D = 428, + SAMPLER1DSHADOW = 429, + SAMPLER2DRECT = 430, + SAMPLER2DRECTSHADOW = 431, + ISAMPLER2DRECT = 432, + USAMPLER2DRECT = 433, + SAMPLERBUFFER = 434, + ISAMPLERBUFFER = 435, + USAMPLERBUFFER = 436, + SAMPLER2DMS = 437, + ISAMPLER2DMS = 438, + USAMPLER2DMS = 439, + SAMPLER2DMSARRAY = 440, + ISAMPLER2DMSARRAY = 441, + USAMPLER2DMSARRAY = 442, + 
SAMPLEREXTERNALOES = 443, + SAMPLEREXTERNAL2DY2YEXT = 444, + ISAMPLER1DARRAY = 445, + USAMPLER1D = 446, + USAMPLER1DARRAY = 447, + F16SAMPLER1D = 448, + F16SAMPLER2D = 449, + F16SAMPLER3D = 450, + F16SAMPLER2DRECT = 451, + F16SAMPLERCUBE = 452, + F16SAMPLER1DARRAY = 453, + F16SAMPLER2DARRAY = 454, + F16SAMPLERCUBEARRAY = 455, + F16SAMPLERBUFFER = 456, + F16SAMPLER2DMS = 457, + F16SAMPLER2DMSARRAY = 458, + F16SAMPLER1DSHADOW = 459, + F16SAMPLER2DSHADOW = 460, + F16SAMPLER1DARRAYSHADOW = 461, + F16SAMPLER2DARRAYSHADOW = 462, + F16SAMPLER2DRECTSHADOW = 463, + F16SAMPLERCUBESHADOW = 464, + F16SAMPLERCUBEARRAYSHADOW = 465, + IMAGE1D = 466, + IIMAGE1D = 467, + UIMAGE1D = 468, + IMAGE2D = 469, + IIMAGE2D = 470, + UIMAGE2D = 471, + IMAGE3D = 472, + IIMAGE3D = 473, + UIMAGE3D = 474, + IMAGE2DRECT = 475, + IIMAGE2DRECT = 476, + UIMAGE2DRECT = 477, + IMAGECUBE = 478, + IIMAGECUBE = 479, + UIMAGECUBE = 480, + IMAGEBUFFER = 481, + IIMAGEBUFFER = 482, + UIMAGEBUFFER = 483, + IMAGE1DARRAY = 484, + IIMAGE1DARRAY = 485, + UIMAGE1DARRAY = 486, + IMAGE2DARRAY = 487, + IIMAGE2DARRAY = 488, + UIMAGE2DARRAY = 489, + IMAGECUBEARRAY = 490, + IIMAGECUBEARRAY = 491, + UIMAGECUBEARRAY = 492, + IMAGE2DMS = 493, + IIMAGE2DMS = 494, + UIMAGE2DMS = 495, + IMAGE2DMSARRAY = 496, + IIMAGE2DMSARRAY = 497, + UIMAGE2DMSARRAY = 498, + F16IMAGE1D = 499, + F16IMAGE2D = 500, + F16IMAGE3D = 501, + F16IMAGE2DRECT = 502, + F16IMAGECUBE = 503, + F16IMAGE1DARRAY = 504, + F16IMAGE2DARRAY = 505, + F16IMAGECUBEARRAY = 506, + F16IMAGEBUFFER = 507, + F16IMAGE2DMS = 508, + F16IMAGE2DMSARRAY = 509, + TEXTURECUBEARRAY = 510, + ITEXTURECUBEARRAY = 511, + UTEXTURECUBEARRAY = 512, + TEXTURE1D = 513, + ITEXTURE1D = 514, + UTEXTURE1D = 515, + TEXTURE1DARRAY = 516, + ITEXTURE1DARRAY = 517, + UTEXTURE1DARRAY = 518, + TEXTURE2DRECT = 519, + ITEXTURE2DRECT = 520, + UTEXTURE2DRECT = 521, + TEXTUREBUFFER = 522, + ITEXTUREBUFFER = 523, + UTEXTUREBUFFER = 524, + TEXTURE2DMS = 525, + ITEXTURE2DMS = 526, + UTEXTURE2DMS = 527, + TEXTURE2DMSARRAY = 528, + ITEXTURE2DMSARRAY = 529, + UTEXTURE2DMSARRAY = 530, + F16TEXTURE1D = 531, + F16TEXTURE2D = 532, + F16TEXTURE3D = 533, + F16TEXTURE2DRECT = 534, + F16TEXTURECUBE = 535, + F16TEXTURE1DARRAY = 536, + F16TEXTURE2DARRAY = 537, + F16TEXTURECUBEARRAY = 538, + F16TEXTUREBUFFER = 539, + F16TEXTURE2DMS = 540, + F16TEXTURE2DMSARRAY = 541, + SUBPASSINPUT = 542, + SUBPASSINPUTMS = 543, + ISUBPASSINPUT = 544, + ISUBPASSINPUTMS = 545, + USUBPASSINPUT = 546, + USUBPASSINPUTMS = 547, + F16SUBPASSINPUT = 548, + F16SUBPASSINPUTMS = 549, + LEFT_OP = 550, + RIGHT_OP = 551, + INC_OP = 552, + DEC_OP = 553, + LE_OP = 554, + GE_OP = 555, + EQ_OP = 556, + NE_OP = 557, + AND_OP = 558, + OR_OP = 559, + XOR_OP = 560, + MUL_ASSIGN = 561, + DIV_ASSIGN = 562, + ADD_ASSIGN = 563, + MOD_ASSIGN = 564, + LEFT_ASSIGN = 565, + RIGHT_ASSIGN = 566, + AND_ASSIGN = 567, + XOR_ASSIGN = 568, + OR_ASSIGN = 569, + SUB_ASSIGN = 570, + STRING_LITERAL = 571, + LEFT_PAREN = 572, + RIGHT_PAREN = 573, + LEFT_BRACKET = 574, + RIGHT_BRACKET = 575, + LEFT_BRACE = 576, + RIGHT_BRACE = 577, + DOT = 578, + COMMA = 579, + COLON = 580, + EQUAL = 581, + SEMICOLON = 582, + BANG = 583, + DASH = 584, + TILDE = 585, + PLUS = 586, + STAR = 587, + SLASH = 588, + PERCENT = 589, + LEFT_ANGLE = 590, + RIGHT_ANGLE = 591, + VERTICAL_BAR = 592, + CARET = 593, + AMPERSAND = 594, + QUESTION = 595, + INVARIANT = 596, + HIGH_PRECISION = 597, + MEDIUM_PRECISION = 598, + LOW_PRECISION = 599, + PRECISION = 600, + PACKED = 601, + RESOURCE = 602, + SUPERP = 603, + FLOATCONSTANT = 604, + 
INTCONSTANT = 605, + UINTCONSTANT = 606, + BOOLCONSTANT = 607, + IDENTIFIER = 608, + TYPE_NAME = 609, + CENTROID = 610, + IN = 611, + OUT = 612, + INOUT = 613, + STRUCT = 614, + VOID = 615, + WHILE = 616, + BREAK = 617, + CONTINUE = 618, + DO = 619, + ELSE = 620, + FOR = 621, + IF = 622, + DISCARD = 623, + RETURN = 624, + SWITCH = 625, + CASE = 626, + DEFAULT = 627, + UNIFORM = 628, + SHARED = 629, + BUFFER = 630, + FLAT = 631, + SMOOTH = 632, + LAYOUT = 633, + DOUBLECONSTANT = 634, + INT16CONSTANT = 635, + UINT16CONSTANT = 636, + FLOAT16CONSTANT = 637, + INT32CONSTANT = 638, + UINT32CONSTANT = 639, + INT64CONSTANT = 640, + UINT64CONSTANT = 641, + SUBROUTINE = 642, + DEMOTE = 643, + PAYLOADNV = 644, + PAYLOADINNV = 645, + HITATTRNV = 646, + CALLDATANV = 647, + CALLDATAINNV = 648, + PAYLOADEXT = 649, + PAYLOADINEXT = 650, + HITATTREXT = 651, + CALLDATAEXT = 652, + CALLDATAINEXT = 653, + PATCH = 654, + SAMPLE = 655, + NONUNIFORM = 656, + COHERENT = 657, + VOLATILE = 658, + RESTRICT = 659, + READONLY = 660, + WRITEONLY = 661, + DEVICECOHERENT = 662, + QUEUEFAMILYCOHERENT = 663, + WORKGROUPCOHERENT = 664, + SUBGROUPCOHERENT = 665, + NONPRIVATE = 666, + SHADERCALLCOHERENT = 667, + NOPERSPECTIVE = 668, + EXPLICITINTERPAMD = 669, + PERVERTEXNV = 670, + PERPRIMITIVENV = 671, + PERVIEWNV = 672, + PERTASKNV = 673, + PRECISE = 674 + }; +#endif + +/* Value type. */ +#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED + +union YYSTYPE +{ +#line 97 "MachineIndependent/glslang.y" /* yacc.c:355 */ + + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; + +#line 588 "MachineIndependent/glslang_tab.cpp" /* yacc.c:355 */ +}; + +typedef union YYSTYPE YYSTYPE; +# define YYSTYPE_IS_TRIVIAL 1 +# define YYSTYPE_IS_DECLARED 1 +#endif + + + +int yyparse (glslang::TParseContext* pParseContext); + +#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */ + +/* Copy the second part of user declarations. 
*/
+#line 133 "MachineIndependent/glslang.y" /* yacc.c:358 */
+
+
+/* windows only pragma */
+#ifdef _MSC_VER
+ #pragma warning(disable : 4065)
+ #pragma warning(disable : 4127)
+ #pragma warning(disable : 4244)
+#endif
+
+#define parseContext (*pParseContext)
+#define yyerror(context, msg) context->parserError(msg)
+
+extern int yylex(YYSTYPE*, TParseContext&);
+
+
+#line 619 "MachineIndependent/glslang_tab.cpp" /* yacc.c:358 */
+
+#ifdef short
+# undef short
+#endif
+
+#ifdef YYTYPE_UINT8
+typedef YYTYPE_UINT8 yytype_uint8;
+#else
+typedef unsigned char yytype_uint8;
+#endif
+
+#ifdef YYTYPE_INT8
+typedef YYTYPE_INT8 yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef YYTYPE_UINT16
+typedef YYTYPE_UINT16 yytype_uint16;
+#else
+typedef unsigned short int yytype_uint16;
+#endif
+
+#ifdef YYTYPE_INT16
+typedef YYTYPE_INT16 yytype_int16;
+#else
+typedef short int yytype_int16;
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif ! defined YYSIZE_T
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned int
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__ \
+ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
+ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+# define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+#if !defined _Noreturn \
+ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
+# if defined _MSC_VER && 1200 <= _MSC_VER
+# define _Noreturn __declspec (noreturn)
+# else
+# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yytype_int16 yyss_alloc;
+ YYSTYPE yyvs_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
+
+/* The size of an array large to enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
+ + YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap.
*/ +# ifndef YYCOPY +# if defined __GNUC__ && 1 < __GNUC__ +# define YYCOPY(Dst, Src, Count) \ + __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) +# else +# define YYCOPY(Dst, Src, Count) \ + do \ + { \ + YYSIZE_T yyi; \ + for (yyi = 0; yyi < (Count); yyi++) \ + (Dst)[yyi] = (Src)[yyi]; \ + } \ + while (0) +# endif +# endif +#endif /* !YYCOPY_NEEDED */ + +/* YYFINAL -- State number of the termination state. */ +#define YYFINAL 394 +/* YYLAST -- Last index in YYTABLE. */ +#define YYLAST 9550 + +/* YYNTOKENS -- Number of terminals. */ +#define YYNTOKENS 420 +/* YYNNTS -- Number of nonterminals. */ +#define YYNNTS 111 +/* YYNRULES -- Number of rules. */ +#define YYNRULES 591 +/* YYNSTATES -- Number of states. */ +#define YYNSTATES 736 + +/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned + by yylex, with out-of-bounds checking. */ +#define YYUNDEFTOK 2 +#define YYMAXUTOK 674 + +#define YYTRANSLATE(YYX) \ + ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) + +/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, without out-of-bounds checking. */ +static const yytype_uint16 yytranslate[] = +{ + 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 318, 
319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, + 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, + 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, + 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, + 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419 +}; + +#if YYDEBUG + /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ +static const yytype_uint16 yyrline[] = +{ + 0, 357, 357, 363, 366, 371, 374, 377, 381, 385, + 388, 392, 396, 400, 404, 408, 412, 418, 426, 429, + 432, 435, 438, 443, 451, 458, 465, 471, 475, 482, + 485, 491, 498, 508, 516, 521, 549, 558, 564, 568, + 572, 592, 593, 594, 595, 601, 602, 607, 612, 621, + 622, 627, 635, 636, 642, 651, 652, 657, 662, 667, + 675, 676, 685, 697, 698, 707, 708, 717, 718, 727, + 728, 736, 737, 745, 746, 754, 755, 755, 773, 774, + 790, 794, 798, 802, 807, 811, 815, 819, 823, 827, + 831, 838, 841, 852, 859, 864, 869, 876, 880, 884, + 888, 893, 898, 907, 907, 918, 922, 929, 936, 939, + 946, 954, 974, 997, 1012, 1037, 1048, 1058, 1068, 1078, + 1087, 1090, 1094, 1098, 1103, 1111, 1118, 1123, 1128, 1133, + 1142, 1152, 1179, 1188, 1195, 1203, 1210, 1217, 1225, 1235, + 1242, 1253, 1259, 1262, 1269, 1273, 1277, 1286, 1296, 1299, + 1310, 1313, 1316, 1320, 1324, 1329, 1333, 1340, 1344, 1349, + 1355, 1361, 1368, 1373, 1381, 1387, 1399, 1413, 1419, 1424, + 1432, 1440, 1448, 1456, 1464, 1472, 1480, 1488, 1495, 1502, + 1506, 1511, 1516, 1521, 1526, 1531, 1536, 1540, 1544, 1548, + 1552, 1558, 1569, 1576, 1579, 1588, 1593, 1603, 1608, 1616, + 1620, 1630, 1633, 1639, 1645, 1652, 1662, 1666, 1670, 1674, + 1679, 1683, 1688, 1693, 1698, 1703, 1708, 1713, 1718, 1723, + 1728, 1734, 1740, 1746, 1751, 1756, 1761, 1766, 1771, 1776, + 1781, 1786, 1791, 1796, 1801, 1807, 1814, 1819, 1824, 1829, + 1834, 1839, 1844, 1849, 1854, 1859, 1864, 1869, 1877, 1885, + 1893, 1899, 1905, 1911, 1917, 1923, 1929, 1935, 1941, 1947, + 1953, 1959, 1965, 1971, 1977, 1983, 1989, 1995, 2001, 2007, + 2013, 2019, 2025, 2031, 2037, 2043, 2049, 2055, 2061, 2067, + 2073, 2079, 2085, 2091, 2099, 2107, 2115, 2123, 2131, 2139, + 2147, 2155, 2163, 2171, 2179, 2187, 2193, 2199, 2205, 2211, + 2217, 2223, 2229, 2235, 2241, 2247, 2253, 2259, 2265, 2271, + 2277, 2283, 2289, 2295, 2301, 2307, 2313, 2319, 2325, 2331, + 2337, 2343, 2349, 2355, 2361, 2367, 2373, 2379, 2385, 2391, + 2397, 2403, 2407, 2411, 2415, 2420, 2426, 2431, 2436, 2441, + 2446, 2451, 2456, 2462, 2467, 2472, 2477, 2482, 2487, 2493, + 2499, 2505, 2511, 2517, 2523, 2529, 2535, 2541, 2547, 2553, + 2559, 2565, 2571, 2576, 2581, 2586, 2591, 2596, 2601, 2607, + 2612, 2617, 2622, 2627, 2632, 2637, 2642, 2648, 2653, 2658, + 2663, 2668, 2673, 2678, 2683, 2688, 2693, 2698, 2703, 2708, + 2713, 2718, 2724, 2729, 2734, 2740, 2746, 2751, 2756, 2761, + 2767, 2772, 2777, 2782, 2788, 2793, 2798, 2803, 2809, 2814, + 2819, 2824, 2830, 2836, 2842, 2848, 2853, 2859, 2865, 2871, + 2876, 2881, 2886, 2891, 2896, 2902, 2907, 2912, 2917, 2923, + 2928, 2933, 2938, 2944, 2949, 2954, 2959, 2965, 2970, 2975, + 2980, 2986, 2991, 2996, 3001, 3007, 3012, 3017, 3022, 3028, + 3033, 3038, 3043, 3049, 3054, 3059, 3064, 3070, 3075, 3080, + 3085, 3091, 3096, 3101, 3106, 3112, 3117, 3122, 3127, 3133, + 3138, 3143, 3148, 3154, 3159, 3164, 3169, 3175, 3180, 3185, + 3190, 3196, 3201, 3206, 3212, 3218, 
3224, 3230, 3237, 3244, + 3250, 3256, 3262, 3268, 3274, 3280, 3287, 3292, 3308, 3313, + 3318, 3326, 3326, 3337, 3337, 3347, 3350, 3363, 3385, 3412, + 3416, 3422, 3427, 3438, 3442, 3448, 3459, 3462, 3469, 3473, + 3474, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3488, 3494, + 3503, 3504, 3508, 3504, 3520, 3521, 3525, 3525, 3532, 3532, + 3546, 3549, 3557, 3565, 3576, 3577, 3581, 3585, 3592, 3599, + 3603, 3611, 3615, 3628, 3632, 3639, 3639, 3659, 3662, 3668, + 3680, 3692, 3696, 3703, 3703, 3718, 3718, 3734, 3734, 3755, + 3758, 3764, 3767, 3773, 3777, 3784, 3789, 3794, 3801, 3804, + 3813, 3817, 3826, 3829, 3833, 3842, 3842, 3884, 3890, 3893, + 3898, 3901 +}; +#endif + +#if YYDEBUG || YYERROR_VERBOSE || 1 +/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. + First, the terminals, then, starting at YYNTOKENS, nonterminals. */ +static const char *const yytname[] = +{ + "$end", "error", "$undefined", "CONST", "BOOL", "INT", "UINT", "FLOAT", + "BVEC2", "BVEC3", "BVEC4", "IVEC2", "IVEC3", "IVEC4", "UVEC2", "UVEC3", + "UVEC4", "VEC2", "VEC3", "VEC4", "MAT2", "MAT3", "MAT4", "MAT2X2", + "MAT2X3", "MAT2X4", "MAT3X2", "MAT3X3", "MAT3X4", "MAT4X2", "MAT4X3", + "MAT4X4", "SAMPLER2D", "SAMPLER3D", "SAMPLERCUBE", "SAMPLER2DSHADOW", + "SAMPLERCUBESHADOW", "SAMPLER2DARRAY", "SAMPLER2DARRAYSHADOW", + "ISAMPLER2D", "ISAMPLER3D", "ISAMPLERCUBE", "ISAMPLER2DARRAY", + "USAMPLER2D", "USAMPLER3D", "USAMPLERCUBE", "USAMPLER2DARRAY", "SAMPLER", + "SAMPLERSHADOW", "TEXTURE2D", "TEXTURE3D", "TEXTURECUBE", + "TEXTURE2DARRAY", "ITEXTURE2D", "ITEXTURE3D", "ITEXTURECUBE", + "ITEXTURE2DARRAY", "UTEXTURE2D", "UTEXTURE3D", "UTEXTURECUBE", + "UTEXTURE2DARRAY", "ATTRIBUTE", "VARYING", "FLOAT16_T", "FLOAT32_T", + "DOUBLE", "FLOAT64_T", "INT64_T", "UINT64_T", "INT32_T", "UINT32_T", + "INT16_T", "UINT16_T", "INT8_T", "UINT8_T", "I64VEC2", "I64VEC3", + "I64VEC4", "U64VEC2", "U64VEC3", "U64VEC4", "I32VEC2", "I32VEC3", + "I32VEC4", "U32VEC2", "U32VEC3", "U32VEC4", "I16VEC2", "I16VEC3", + "I16VEC4", "U16VEC2", "U16VEC3", "U16VEC4", "I8VEC2", "I8VEC3", "I8VEC4", + "U8VEC2", "U8VEC3", "U8VEC4", "DVEC2", "DVEC3", "DVEC4", "DMAT2", + "DMAT3", "DMAT4", "F16VEC2", "F16VEC3", "F16VEC4", "F16MAT2", "F16MAT3", + "F16MAT4", "F32VEC2", "F32VEC3", "F32VEC4", "F32MAT2", "F32MAT3", + "F32MAT4", "F64VEC2", "F64VEC3", "F64VEC4", "F64MAT2", "F64MAT3", + "F64MAT4", "DMAT2X2", "DMAT2X3", "DMAT2X4", "DMAT3X2", "DMAT3X3", + "DMAT3X4", "DMAT4X2", "DMAT4X3", "DMAT4X4", "F16MAT2X2", "F16MAT2X3", + "F16MAT2X4", "F16MAT3X2", "F16MAT3X3", "F16MAT3X4", "F16MAT4X2", + "F16MAT4X3", "F16MAT4X4", "F32MAT2X2", "F32MAT2X3", "F32MAT2X4", + "F32MAT3X2", "F32MAT3X3", "F32MAT3X4", "F32MAT4X2", "F32MAT4X3", + "F32MAT4X4", "F64MAT2X2", "F64MAT2X3", "F64MAT2X4", "F64MAT3X2", + "F64MAT3X3", "F64MAT3X4", "F64MAT4X2", "F64MAT4X3", "F64MAT4X4", + "ATOMIC_UINT", "ACCSTRUCTNV", "ACCSTRUCTEXT", "RAYQUERYEXT", + "FCOOPMATNV", "ICOOPMATNV", "UCOOPMATNV", "SAMPLERCUBEARRAY", + "SAMPLERCUBEARRAYSHADOW", "ISAMPLERCUBEARRAY", "USAMPLERCUBEARRAY", + "SAMPLER1D", "SAMPLER1DARRAY", "SAMPLER1DARRAYSHADOW", "ISAMPLER1D", + "SAMPLER1DSHADOW", "SAMPLER2DRECT", "SAMPLER2DRECTSHADOW", + "ISAMPLER2DRECT", "USAMPLER2DRECT", "SAMPLERBUFFER", "ISAMPLERBUFFER", + "USAMPLERBUFFER", "SAMPLER2DMS", "ISAMPLER2DMS", "USAMPLER2DMS", + "SAMPLER2DMSARRAY", "ISAMPLER2DMSARRAY", "USAMPLER2DMSARRAY", + "SAMPLEREXTERNALOES", "SAMPLEREXTERNAL2DY2YEXT", "ISAMPLER1DARRAY", + "USAMPLER1D", "USAMPLER1DARRAY", "F16SAMPLER1D", "F16SAMPLER2D", + "F16SAMPLER3D", "F16SAMPLER2DRECT", "F16SAMPLERCUBE", + 
"F16SAMPLER1DARRAY", "F16SAMPLER2DARRAY", "F16SAMPLERCUBEARRAY", + "F16SAMPLERBUFFER", "F16SAMPLER2DMS", "F16SAMPLER2DMSARRAY", + "F16SAMPLER1DSHADOW", "F16SAMPLER2DSHADOW", "F16SAMPLER1DARRAYSHADOW", + "F16SAMPLER2DARRAYSHADOW", "F16SAMPLER2DRECTSHADOW", + "F16SAMPLERCUBESHADOW", "F16SAMPLERCUBEARRAYSHADOW", "IMAGE1D", + "IIMAGE1D", "UIMAGE1D", "IMAGE2D", "IIMAGE2D", "UIMAGE2D", "IMAGE3D", + "IIMAGE3D", "UIMAGE3D", "IMAGE2DRECT", "IIMAGE2DRECT", "UIMAGE2DRECT", + "IMAGECUBE", "IIMAGECUBE", "UIMAGECUBE", "IMAGEBUFFER", "IIMAGEBUFFER", + "UIMAGEBUFFER", "IMAGE1DARRAY", "IIMAGE1DARRAY", "UIMAGE1DARRAY", + "IMAGE2DARRAY", "IIMAGE2DARRAY", "UIMAGE2DARRAY", "IMAGECUBEARRAY", + "IIMAGECUBEARRAY", "UIMAGECUBEARRAY", "IMAGE2DMS", "IIMAGE2DMS", + "UIMAGE2DMS", "IMAGE2DMSARRAY", "IIMAGE2DMSARRAY", "UIMAGE2DMSARRAY", + "F16IMAGE1D", "F16IMAGE2D", "F16IMAGE3D", "F16IMAGE2DRECT", + "F16IMAGECUBE", "F16IMAGE1DARRAY", "F16IMAGE2DARRAY", + "F16IMAGECUBEARRAY", "F16IMAGEBUFFER", "F16IMAGE2DMS", + "F16IMAGE2DMSARRAY", "TEXTURECUBEARRAY", "ITEXTURECUBEARRAY", + "UTEXTURECUBEARRAY", "TEXTURE1D", "ITEXTURE1D", "UTEXTURE1D", + "TEXTURE1DARRAY", "ITEXTURE1DARRAY", "UTEXTURE1DARRAY", "TEXTURE2DRECT", + "ITEXTURE2DRECT", "UTEXTURE2DRECT", "TEXTUREBUFFER", "ITEXTUREBUFFER", + "UTEXTUREBUFFER", "TEXTURE2DMS", "ITEXTURE2DMS", "UTEXTURE2DMS", + "TEXTURE2DMSARRAY", "ITEXTURE2DMSARRAY", "UTEXTURE2DMSARRAY", + "F16TEXTURE1D", "F16TEXTURE2D", "F16TEXTURE3D", "F16TEXTURE2DRECT", + "F16TEXTURECUBE", "F16TEXTURE1DARRAY", "F16TEXTURE2DARRAY", + "F16TEXTURECUBEARRAY", "F16TEXTUREBUFFER", "F16TEXTURE2DMS", + "F16TEXTURE2DMSARRAY", "SUBPASSINPUT", "SUBPASSINPUTMS", "ISUBPASSINPUT", + "ISUBPASSINPUTMS", "USUBPASSINPUT", "USUBPASSINPUTMS", "F16SUBPASSINPUT", + "F16SUBPASSINPUTMS", "LEFT_OP", "RIGHT_OP", "INC_OP", "DEC_OP", "LE_OP", + "GE_OP", "EQ_OP", "NE_OP", "AND_OP", "OR_OP", "XOR_OP", "MUL_ASSIGN", + "DIV_ASSIGN", "ADD_ASSIGN", "MOD_ASSIGN", "LEFT_ASSIGN", "RIGHT_ASSIGN", + "AND_ASSIGN", "XOR_ASSIGN", "OR_ASSIGN", "SUB_ASSIGN", "STRING_LITERAL", + "LEFT_PAREN", "RIGHT_PAREN", "LEFT_BRACKET", "RIGHT_BRACKET", + "LEFT_BRACE", "RIGHT_BRACE", "DOT", "COMMA", "COLON", "EQUAL", + "SEMICOLON", "BANG", "DASH", "TILDE", "PLUS", "STAR", "SLASH", "PERCENT", + "LEFT_ANGLE", "RIGHT_ANGLE", "VERTICAL_BAR", "CARET", "AMPERSAND", + "QUESTION", "INVARIANT", "HIGH_PRECISION", "MEDIUM_PRECISION", + "LOW_PRECISION", "PRECISION", "PACKED", "RESOURCE", "SUPERP", + "FLOATCONSTANT", "INTCONSTANT", "UINTCONSTANT", "BOOLCONSTANT", + "IDENTIFIER", "TYPE_NAME", "CENTROID", "IN", "OUT", "INOUT", "STRUCT", + "VOID", "WHILE", "BREAK", "CONTINUE", "DO", "ELSE", "FOR", "IF", + "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT", "UNIFORM", "SHARED", + "BUFFER", "FLAT", "SMOOTH", "LAYOUT", "DOUBLECONSTANT", "INT16CONSTANT", + "UINT16CONSTANT", "FLOAT16CONSTANT", "INT32CONSTANT", "UINT32CONSTANT", + "INT64CONSTANT", "UINT64CONSTANT", "SUBROUTINE", "DEMOTE", "PAYLOADNV", + "PAYLOADINNV", "HITATTRNV", "CALLDATANV", "CALLDATAINNV", "PAYLOADEXT", + "PAYLOADINEXT", "HITATTREXT", "CALLDATAEXT", "CALLDATAINEXT", "PATCH", + "SAMPLE", "NONUNIFORM", "COHERENT", "VOLATILE", "RESTRICT", "READONLY", + "WRITEONLY", "DEVICECOHERENT", "QUEUEFAMILYCOHERENT", + "WORKGROUPCOHERENT", "SUBGROUPCOHERENT", "NONPRIVATE", + "SHADERCALLCOHERENT", "NOPERSPECTIVE", "EXPLICITINTERPAMD", + "PERVERTEXNV", "PERPRIMITIVENV", "PERVIEWNV", "PERTASKNV", "PRECISE", + "$accept", "variable_identifier", "primary_expression", + "postfix_expression", "integer_expression", "function_call", + 
"function_call_or_method", "function_call_generic", + "function_call_header_no_parameters", + "function_call_header_with_parameters", "function_call_header", + "function_identifier", "unary_expression", "unary_operator", + "multiplicative_expression", "additive_expression", "shift_expression", + "relational_expression", "equality_expression", "and_expression", + "exclusive_or_expression", "inclusive_or_expression", + "logical_and_expression", "logical_xor_expression", + "logical_or_expression", "conditional_expression", "$@1", + "assignment_expression", "assignment_operator", "expression", + "constant_expression", "declaration", "block_structure", "$@2", + "identifier_list", "function_prototype", "function_declarator", + "function_header_with_parameters", "function_header", + "parameter_declarator", "parameter_declaration", + "parameter_type_specifier", "init_declarator_list", "single_declaration", + "fully_specified_type", "invariant_qualifier", "interpolation_qualifier", + "layout_qualifier", "layout_qualifier_id_list", "layout_qualifier_id", + "precise_qualifier", "type_qualifier", "single_type_qualifier", + "storage_qualifier", "non_uniform_qualifier", "type_name_list", + "type_specifier", "array_specifier", "type_parameter_specifier_opt", + "type_parameter_specifier", "type_parameter_specifier_list", + "type_specifier_nonarray", "precision_qualifier", "struct_specifier", + "$@3", "$@4", "struct_declaration_list", "struct_declaration", + "struct_declarator_list", "struct_declarator", "initializer", + "initializer_list", "declaration_statement", "statement", + "simple_statement", "demote_statement", "compound_statement", "$@5", + "$@6", "statement_no_new_scope", "statement_scoped", "$@7", "$@8", + "compound_statement_no_new_scope", "statement_list", + "expression_statement", "selection_statement", + "selection_statement_nonattributed", "selection_rest_statement", + "condition", "switch_statement", "switch_statement_nonattributed", "$@9", + "switch_statement_list", "case_label", "iteration_statement", + "iteration_statement_nonattributed", "$@10", "$@11", "$@12", + "for_init_statement", "conditionopt", "for_rest_statement", + "jump_statement", "translation_unit", "external_declaration", + "function_definition", "$@13", "attribute", "attribute_list", + "single_attribute", YY_NULLPTR +}; +#endif + +# ifdef YYPRINT +/* YYTOKNUM[NUM] -- (External) token number corresponding to the + (internal) symbol number NUM (which must be that of a token). 
*/ +static const yytype_uint16 yytoknum[] = +{ + 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, + 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, + 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, + 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, + 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, + 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, + 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, + 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, + 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, + 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, + 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, + 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, + 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, + 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, + 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, + 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, + 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, + 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, + 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, + 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, + 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, + 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, + 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, + 665, 666, 667, 668, 669, 670, 671, 672, 673, 674 +}; +# endif + +#define YYPACT_NINF -457 + +#define yypact_value_is_default(Yystate) \ + (!!((Yystate) == (-457))) + +#define YYTABLE_NINF -537 + +#define yytable_value_is_error(Yytable_value) \ + 0 + + /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing + STATE-NUM. 
*/ +static const yytype_int16 yypact[] = +{ + 4075, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, 132, -457, + -457, -457, -457, -457, -1, -457, -457, -457, -457, -457, + -457, -301, -298, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, 11, -249, 17, 30, 6160, + 20, -457, 28, -457, -457, -457, -457, 4492, -457, -457, + -457, -457, 50, -457, -457, 739, -457, -457, 16, -457, + 81, -29, 69, -457, -313, -457, 111, -457, 6160, -457, + -457, -457, 6160, 103, 106, -457, -314, -457, 72, -457, + -457, 8566, 142, -457, -457, -457, 136, 6160, -457, 144, + -457, 53, -457, -457, 76, 6974, -457, -312, 1156, -457, + -457, -457, -457, 142, -309, -457, 7372, -308, -457, 119, + -457, 65, 8566, 8566, -457, 8566, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, -457, 36, -457, -457, -457, 171, + 85, 8964, 173, -457, 8566, -457, -457, -323, 174, -457, + 6160, 139, 4909, -457, 6160, 8566, -457, -29, -457, 141, + -457, -457, 145, 99, 35, 26, 71, 156, 159, 161, + 196, 195, 23, 181, 7770, -457, 183, 182, -457, -457, + 186, 179, 180, -457, 191, 192, 187, 8168, 193, 8566, + 188, 189, 127, -457, -457, 96, -457, -249, 200, 201, + -457, -457, -457, -457, -457, 1573, -457, -457, -457, -457, + -457, -457, -457, -457, -457, -24, 174, 7372, 13, 7372, + -457, -457, 7372, 6160, -457, 166, -457, -457, -457, 86, + -457, -457, 8566, 168, -457, -457, 8566, 205, -457, -457, + -457, 8566, -457, 139, 142, 124, -457, -457, -457, 5326, + -457, -457, -457, -457, 8566, 8566, 8566, 8566, 8566, 8566, + 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, 8566, + 8566, 
8566, 8566, -457, -457, -457, 206, 172, -457, 1990, + -457, -457, -457, 1990, -457, 8566, -457, -457, 130, 8566, + 125, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, -457, -457, 8566, 8566, -457, -457, -457, -457, + -457, -457, -457, 7372, -457, 94, -457, 5743, -457, -457, + 207, 204, -457, -457, -457, 131, 174, 139, -457, -457, + -457, -457, -457, 145, 145, 99, 99, 35, 35, 35, + 35, 26, 26, 71, 156, 159, 161, 196, 195, 8566, + -457, 212, 60, -457, 1990, 3658, 169, 3241, 87, -457, + 89, -457, -457, -457, -457, -457, 6576, -457, -457, -457, + -457, 143, 8566, 211, 172, 210, 204, 184, 6160, 217, + 219, -457, -457, 3658, 218, -457, -457, -457, 8566, 220, + -457, -457, -457, 214, 2407, 8566, -457, 216, 223, 185, + 224, 2824, -457, 225, -457, -457, 7372, -457, -457, -457, + 97, 8566, 2407, 218, -457, -457, 1990, -457, 222, 204, + -457, -457, 1990, 229, -457, -457 +}; + + /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. + Performed when YYTABLE does not specify something else to do. Zero + means the default is an error. */ +static const yytype_uint16 yydefact[] = +{ + 0, 157, 210, 208, 209, 207, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 211, 212, 213, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 336, 337, 338, 339, 340, 341, 342, 362, 363, 364, + 365, 366, 367, 368, 377, 390, 391, 378, 379, 381, + 380, 382, 383, 384, 385, 386, 387, 388, 389, 165, + 166, 236, 237, 235, 238, 245, 246, 243, 244, 241, + 242, 239, 240, 268, 269, 270, 280, 281, 282, 265, + 266, 267, 277, 278, 279, 262, 263, 264, 274, 275, + 276, 259, 260, 261, 271, 272, 273, 247, 248, 249, + 283, 284, 285, 250, 251, 252, 295, 296, 297, 253, + 254, 255, 307, 308, 309, 256, 257, 258, 319, 320, + 321, 286, 287, 288, 289, 290, 291, 292, 293, 294, + 298, 299, 300, 301, 302, 303, 304, 305, 306, 310, + 311, 312, 313, 314, 315, 316, 317, 318, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 334, 331, 332, + 333, 493, 494, 495, 346, 347, 370, 373, 335, 344, + 345, 361, 343, 392, 393, 396, 397, 398, 400, 401, + 402, 404, 405, 406, 408, 409, 483, 484, 369, 371, + 372, 348, 349, 350, 394, 351, 355, 356, 359, 399, + 403, 407, 352, 353, 357, 358, 395, 354, 360, 439, + 441, 442, 443, 445, 446, 447, 449, 450, 451, 453, + 454, 455, 457, 458, 459, 461, 462, 463, 465, 466, + 467, 469, 470, 471, 473, 474, 475, 477, 478, 479, + 481, 482, 440, 444, 448, 452, 456, 464, 468, 472, + 460, 476, 480, 374, 375, 376, 410, 419, 421, 415, + 420, 422, 423, 425, 426, 427, 429, 430, 431, 433, + 434, 435, 437, 438, 411, 412, 413, 424, 414, 416, + 417, 418, 428, 432, 436, 485, 486, 489, 490, 491, + 492, 487, 488, 584, 132, 498, 499, 500, 0, 497, + 161, 159, 160, 158, 0, 206, 162, 163, 164, 134, + 133, 0, 190, 171, 173, 169, 175, 177, 172, 174, + 170, 176, 178, 167, 168, 192, 179, 186, 187, 188, + 189, 180, 181, 182, 183, 184, 185, 135, 136, 137, + 138, 139, 140, 147, 583, 0, 585, 0, 109, 108, + 0, 120, 125, 154, 153, 151, 155, 0, 148, 150, + 156, 130, 202, 152, 496, 0, 580, 582, 0, 503, + 0, 0, 0, 97, 0, 94, 0, 107, 0, 116, + 110, 118, 0, 119, 0, 95, 126, 100, 0, 149, + 131, 0, 195, 201, 1, 581, 0, 0, 501, 144, + 146, 0, 142, 193, 0, 0, 98, 0, 0, 586, + 111, 115, 117, 113, 121, 112, 0, 127, 103, 0, + 101, 0, 0, 0, 9, 0, 43, 42, 44, 41, + 5, 6, 7, 8, 2, 16, 14, 15, 17, 10, + 11, 12, 13, 3, 18, 37, 20, 25, 26, 0, + 0, 30, 0, 204, 0, 36, 34, 0, 196, 96, + 0, 0, 0, 505, 0, 0, 141, 0, 191, 0, + 197, 45, 49, 52, 55, 60, 63, 65, 67, 69, + 71, 73, 75, 0, 0, 99, 0, 531, 540, 544, + 0, 0, 0, 565, 0, 0, 0, 0, 0, 0, + 
0, 0, 45, 78, 91, 0, 518, 0, 156, 130, + 521, 542, 520, 528, 519, 0, 522, 523, 546, 524, + 553, 525, 526, 561, 527, 0, 114, 0, 122, 0, + 513, 129, 0, 0, 105, 0, 102, 38, 39, 0, + 22, 23, 0, 0, 28, 27, 0, 206, 31, 33, + 40, 0, 203, 0, 511, 0, 509, 504, 506, 0, + 93, 145, 143, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 76, 198, 199, 0, 0, 530, 0, + 563, 576, 575, 0, 567, 0, 579, 577, 0, 0, + 0, 560, 529, 81, 82, 84, 83, 86, 87, 88, + 89, 90, 85, 80, 0, 0, 545, 541, 543, 547, + 554, 562, 124, 0, 516, 0, 128, 0, 106, 4, + 0, 24, 21, 32, 205, 0, 512, 0, 507, 502, + 46, 47, 48, 51, 50, 53, 54, 58, 59, 56, + 57, 61, 62, 64, 66, 68, 70, 72, 74, 0, + 200, 590, 0, 588, 532, 0, 0, 0, 0, 578, + 0, 559, 79, 92, 123, 514, 0, 104, 19, 508, + 510, 0, 0, 0, 0, 0, 551, 0, 0, 0, + 0, 570, 569, 572, 538, 555, 515, 517, 0, 0, + 587, 589, 533, 0, 0, 0, 571, 0, 0, 550, + 0, 0, 548, 0, 77, 591, 0, 535, 564, 534, + 0, 573, 0, 538, 537, 539, 557, 552, 0, 574, + 568, 549, 558, 0, 566, 556 +}; + + /* YYPGOTO[NTERM-NUM]. */ +static const yytype_int16 yypgoto[] = +{ + -457, -457, -457, -457, -457, -457, -457, -457, -457, -457, + -457, -457, 8868, -457, -87, -84, -127, -93, -33, -31, + -27, -25, -28, -26, -457, -86, -457, -103, -457, -111, + -125, 2, -457, -457, -457, 4, -457, -457, -457, 176, + 194, 178, -457, -457, -337, -457, -457, -457, -457, 95, + -457, -37, -46, -457, 9, -457, 0, -63, -457, -457, + -457, -457, 263, -457, -457, -457, -456, -140, 10, -73, + -211, -457, -102, -198, -321, -457, -144, -457, -457, -155, + -154, -457, -457, 198, -274, -97, -457, 46, -457, -118, + -457, 51, -457, -457, -457, -457, 52, -457, -457, -457, + -457, -457, -457, -457, -457, 213, -457, -457, -457, -457, + -105 +}; + + /* YYDEFGOTO[NTERM-NUM]. */ +static const yytype_int16 yydefgoto[] = +{ + -1, 443, 444, 445, 630, 446, 447, 448, 449, 450, + 451, 452, 502, 454, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 503, 659, 504, 614, 505, + 561, 506, 345, 533, 421, 507, 347, 348, 349, 379, + 380, 381, 350, 351, 352, 353, 354, 355, 401, 402, + 356, 357, 358, 359, 455, 404, 456, 407, 392, 393, + 457, 362, 363, 364, 464, 397, 462, 463, 555, 556, + 531, 625, 510, 511, 512, 513, 514, 589, 685, 718, + 709, 710, 711, 719, 515, 516, 517, 518, 712, 689, + 519, 520, 713, 733, 521, 522, 523, 665, 593, 667, + 693, 707, 708, 524, 365, 366, 367, 376, 525, 662, + 663 +}; + + /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If + positive, shift that token. If negative, reduce the rule whose + number is the opposite. If YYTABLE_NINF, syntax error. 
*/ +static const yytype_int16 yytable[] = +{ + 361, 551, 344, 415, 346, 405, 405, 484, 559, 360, + 405, 484, 416, 552, 406, 485, 371, 527, 532, 372, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 627, 375, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 389, 382, 530, 539, 664, 622, 618, 624, 483, + 369, 626, 558, 417, 399, 571, 572, 582, 687, 458, + 569, 570, 484, 540, 541, 377, 389, 490, 373, 623, + 493, 382, 494, 495, 384, 400, 498, 385, 548, 383, + 526, 528, 370, -35, 378, 542, 687, 390, 360, 543, + 460, 573, 574, 583, 374, 361, 360, 344, 396, 346, + 299, 466, 575, 576, 360, 304, 305, 467, 383, 560, + 683, 386, 383, 717, 684, 391, 598, 360, 600, 535, + 725, 360, 536, 418, 468, 666, 419, 461, 586, 420, + 469, 717, 398, 545, 629, 694, 360, 695, 509, 546, + 615, 615, 674, 615, 389, 728, 675, 508, 676, 558, + 615, 615, 403, 616, 530, 460, 530, 460, 567, 530, + 568, 631, 408, 603, 604, 605, 606, 607, 608, 609, + 610, 611, 612, 633, 647, 648, 649, 650, 637, 615, + 671, 638, 732, 613, 615, 637, 413, 669, 679, 414, + 553, 405, 461, 459, 461, 697, 618, 615, 698, 360, + 465, 360, 534, 360, 295, 296, 297, 564, 565, 566, + 643, 644, 651, 652, 668, 645, 646, 558, 670, 544, + 549, 636, 554, 484, 563, 577, 460, 578, 579, 580, + 581, 584, 587, 590, 588, 727, 591, 592, 594, 595, + 599, 672, 673, 601, 596, 509, 602, -36, -34, 628, + 530, 632, 460, -29, 508, 661, 660, 678, 615, 682, + 690, 700, 702, 461, 618, 704, 705, 703, 715, -536, + 716, 722, 360, 721, 653, 487, 726, 654, 681, 734, + 723, 735, 655, 657, 686, 656, 658, 699, 411, 461, + 412, 368, 562, 635, 680, 691, 724, 730, 360, 731, + 692, 619, 410, 530, 409, 706, 620, 621, 395, 701, + 0, 0, 686, 0, 0, 0, 0, 0, 0, 509, + 460, 0, 0, 509, 720, 714, 560, 0, 508, 0, + 0, 0, 508, 0, 0, 0, 0, 0, 0, 0, + 729, 0, 0, 530, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 461, 688, 0, + 0, 0, 0, 0, 0, 0, 360, 0, 0, 0, + 0, 0, 389, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 688, 0, 0, 0, + 0, 0, 0, 0, 509, 509, 0, 509, 0, 0, + 0, 0, 0, 508, 508, 0, 508, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 390, 0, + 0, 0, 0, 509, 0, 0, 0, 360, 0, 0, + 0, 0, 508, 0, 509, 0, 0, 0, 0, 0, + 0, 509, 0, 508, 0, 0, 0, 0, 0, 0, + 508, 0, 509, 0, 0, 0, 509, 0, 0, 0, + 0, 508, 509, 0, 0, 508, 0, 0, 0, 394, + 0, 508, 1, 
2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 293, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 294, 295, 296, 297, 298, 0, 0, 0, 0, 0, + 0, 0, 0, 299, 300, 301, 302, 303, 304, 305, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 306, 307, 308, 309, 310, 311, 0, 0, + 0, 0, 0, 0, 0, 0, 312, 0, 313, 314, + 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 1, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 424, 425, 0, 486, 0, 487, 488, 0, + 0, 0, 0, 489, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 294, 295, 296, + 297, 298, 0, 0, 0, 430, 431, 432, 433, 434, + 
299, 300, 301, 302, 303, 304, 305, 490, 491, 492, + 493, 0, 494, 495, 496, 497, 498, 499, 500, 306, + 307, 308, 309, 310, 311, 435, 436, 437, 438, 439, + 440, 441, 442, 312, 501, 313, 314, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 0, 0, + 422, 423, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 424, + 425, 0, 486, 0, 487, 617, 0, 0, 0, 0, + 489, 426, 427, 428, 429, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 294, 295, 296, 297, 298, 0, + 0, 0, 430, 431, 432, 433, 434, 299, 300, 301, + 302, 303, 304, 305, 490, 491, 492, 493, 0, 494, + 495, 496, 497, 498, 499, 500, 306, 307, 308, 309, + 310, 311, 435, 436, 437, 438, 439, 440, 441, 442, + 312, 501, 313, 314, 315, 316, 317, 318, 319, 320, + 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, + 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, + 341, 342, 343, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 
258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 0, 0, 422, 423, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 424, 425, 0, 486, + 0, 487, 0, 0, 0, 0, 0, 489, 426, 427, + 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 294, 295, 296, 297, 298, 0, 0, 0, 430, + 431, 432, 433, 434, 299, 300, 301, 302, 303, 304, + 305, 490, 491, 492, 493, 0, 494, 495, 496, 497, + 498, 499, 500, 306, 307, 308, 309, 310, 311, 435, + 436, 437, 438, 439, 440, 441, 442, 312, 501, 313, + 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 0, 0, 422, 423, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 424, 425, 0, 486, 0, 408, 0, + 0, 0, 0, 0, 489, 426, 427, 428, 429, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 294, 295, + 296, 297, 298, 0, 0, 0, 430, 431, 432, 433, + 434, 299, 300, 301, 302, 303, 304, 305, 490, 491, + 492, 493, 0, 494, 495, 496, 497, 498, 499, 500, + 306, 307, 308, 309, 310, 311, 435, 436, 437, 438, + 439, 440, 441, 442, 312, 501, 313, 314, 315, 316, + 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, + 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, + 337, 338, 339, 340, 341, 342, 343, 1, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 
179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 0, + 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 424, 425, 0, 486, 0, 0, 0, 0, 0, 0, + 0, 489, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 294, 295, 296, 297, 298, + 0, 0, 0, 430, 431, 432, 433, 434, 299, 300, + 301, 302, 303, 304, 305, 490, 491, 492, 493, 0, + 494, 495, 496, 497, 498, 499, 500, 306, 307, 308, + 309, 310, 311, 435, 436, 437, 438, 439, 440, 441, + 442, 312, 501, 313, 314, 315, 316, 317, 318, 319, + 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, + 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, + 340, 341, 342, 343, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 0, 0, 422, 423, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 424, 425, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 489, 426, + 427, 428, 429, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 294, 295, 296, 297, 298, 0, 0, 0, + 430, 431, 432, 433, 434, 299, 300, 301, 302, 303, + 304, 305, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 306, 307, 308, 309, 310, 311, + 435, 436, 437, 438, 439, 440, 441, 442, 312, 0, + 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, + 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, + 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, + 343, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 0, 0, 422, 423, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 424, 425, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 426, 427, 428, 429, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 294, + 295, 296, 297, 0, 0, 0, 0, 430, 431, 432, + 433, 434, 299, 300, 301, 302, 303, 304, 305, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 306, 307, 308, 309, 310, 311, 435, 436, 437, + 438, 439, 440, 441, 442, 312, 0, 313, 314, 315, + 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 293, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 294, 295, 296, 297, + 298, 0, 0, 0, 0, 0, 0, 0, 0, 299, + 300, 301, 302, 303, 304, 305, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 306, 307, + 308, 309, 310, 311, 0, 0, 0, 0, 0, 0, + 0, 0, 312, 0, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, + 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, + 339, 340, 341, 342, 343, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 
34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 387, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 294, 295, 296, 297, 0, 0, 0, + 0, 0, 0, 0, 0, 388, 299, 300, 301, 302, + 303, 304, 305, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 306, 307, 308, 309, 310, + 311, 0, 0, 0, 0, 0, 0, 0, 0, 312, + 0, 313, 314, 315, 316, 317, 318, 319, 320, 321, + 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, + 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 557, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 294, 295, 296, 297, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 299, 300, 301, 302, 303, 304, 305, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 306, 307, 308, 309, 310, 311, 0, 0, + 0, 0, 0, 0, 0, 0, 312, 0, 313, 314, + 315, 316, 
317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, + 335, 336, 337, 338, 339, 340, 341, 342, 343, 1, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 639, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 294, 295, 296, + 297, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 299, 300, 301, 302, 303, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 306, + 307, 308, 309, 310, 311, 0, 0, 0, 0, 0, + 0, 0, 0, 312, 0, 313, 314, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, + 338, 339, 340, 341, 342, 343, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 677, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 294, 295, 296, 297, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 299, 300, 301, + 302, 303, 304, 305, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 306, 307, 308, 309, + 310, 311, 0, 0, 0, 0, 0, 0, 0, 0, + 312, 0, 313, 314, 315, 316, 317, 318, 319, 320, + 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, + 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, + 341, 342, 343, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 294, 295, 296, 297, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 299, 300, 301, 302, 303, 304, + 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 306, 307, 308, 309, 310, 311, 0, + 0, 0, 0, 0, 0, 0, 0, 312, 0, 313, + 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, + 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 0, 0, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 
261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 424, 425, 0, 0, 0, 529, 696, 0, + 0, 0, 0, 0, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 430, 431, 432, 433, 434, + 299, 0, 0, 0, 0, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 435, 436, 437, 438, 439, + 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 325, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 0, 0, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 0, + 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 424, 425, 0, 0, 470, 0, 0, 0, 0, 0, + 0, 0, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 430, 431, 432, 433, 434, 299, 0, + 0, 0, 0, 304, 305, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 435, 436, 437, 438, 439, 440, 441, + 442, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 325, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 0, 0, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 
252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 0, 0, 422, + 423, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 424, 425, + 0, 0, 0, 529, 0, 0, 0, 0, 0, 0, + 426, 427, 428, 429, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 430, 431, 432, 433, 434, 299, 0, 0, 0, + 0, 304, 305, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 435, 436, 437, 438, 439, 440, 441, 442, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 325, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 0, 0, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 0, 0, 422, 423, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 424, 425, 0, 0, + 585, 0, 0, 0, 0, 0, 0, 0, 426, 427, + 428, 429, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 430, + 431, 432, 433, 434, 299, 0, 0, 0, 0, 304, + 305, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 435, + 436, 437, 438, 439, 440, 441, 442, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 325, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 0, + 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 
243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 0, 0, 422, 423, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 424, 425, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 597, 426, 427, 428, 429, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 430, 431, 432, + 433, 434, 299, 0, 0, 0, 0, 304, 305, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 435, 436, 437, + 438, 439, 440, 441, 442, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 325, + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 0, 0, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 0, 0, 422, 423, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 424, 425, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 426, 427, 428, 429, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 430, 431, 432, 433, 434, + 299, 0, 0, 0, 0, 304, 305, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 435, 436, 437, 438, 439, + 440, 441, 442, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 325, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 0, 0, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 453, + 0, 422, 423, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 471, 0, 0, 0, 0, 0, 0, + 424, 425, 0, 0, 0, 0, 0, 0, 0, 0, + 537, 538, 426, 427, 428, 429, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 430, 431, 432, 433, 434, 299, 0, + 0, 0, 550, 304, 547, 0, 0, 0, 0, 0, + 0, 0, 0, 471, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 435, 436, 437, 438, 439, 440, 441, + 442, 0, 471, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 325, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 634, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 640, 641, 642, 471, 471, 471, 471, 471, + 471, 471, 471, 471, 471, 471, 471, 471, 471, 471, + 471, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 471 +}; + +static const yytype_int16 yycheck[] = +{ + 0, 324, 0, 317, 0, 319, 319, 319, 464, 0, + 319, 319, 326, 336, 327, 327, 317, 326, 326, 317, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 533, 327, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, 357, 349, 416, 425, 589, 527, 515, 529, 405, + 321, 532, 462, 386, 353, 299, 300, 304, 665, 392, + 295, 296, 319, 297, 298, 318, 382, 361, 327, 326, + 364, 378, 366, 367, 324, 374, 370, 327, 451, 349, + 413, 414, 353, 317, 324, 319, 693, 357, 349, 323, + 397, 335, 336, 340, 353, 365, 357, 365, 368, 365, + 354, 318, 301, 302, 365, 359, 360, 324, 378, 465, + 320, 353, 382, 704, 324, 335, 497, 378, 499, 324, + 711, 382, 327, 321, 318, 593, 324, 397, 484, 327, + 324, 722, 321, 318, 318, 318, 397, 318, 408, 324, + 324, 324, 623, 324, 460, 318, 322, 408, 324, 559, + 324, 324, 353, 327, 527, 462, 529, 464, 329, 532, + 331, 542, 321, 306, 307, 308, 
309, 310, 311, 312, + 313, 314, 315, 546, 571, 572, 573, 574, 324, 324, + 325, 327, 726, 326, 324, 324, 353, 327, 327, 353, + 460, 319, 462, 327, 464, 676, 664, 324, 325, 460, + 326, 462, 353, 464, 342, 343, 344, 332, 333, 334, + 567, 568, 575, 576, 595, 569, 570, 627, 599, 318, + 317, 554, 353, 319, 353, 339, 533, 338, 337, 303, + 305, 320, 319, 317, 322, 716, 327, 327, 317, 317, + 317, 614, 615, 325, 327, 515, 327, 317, 317, 353, + 623, 353, 559, 318, 515, 353, 320, 320, 324, 317, + 361, 320, 322, 533, 732, 318, 317, 353, 318, 321, + 326, 318, 533, 327, 577, 321, 321, 578, 659, 327, + 365, 322, 579, 581, 665, 580, 582, 682, 382, 559, + 382, 298, 467, 553, 637, 667, 710, 722, 559, 723, + 667, 525, 378, 676, 376, 693, 525, 525, 365, 684, + -1, -1, 693, -1, -1, -1, -1, -1, -1, 589, + 627, -1, -1, 593, 705, 698, 682, -1, 589, -1, + -1, -1, 593, -1, -1, -1, -1, -1, -1, -1, + 721, -1, -1, 716, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 627, 665, -1, + -1, -1, -1, -1, -1, -1, 627, -1, -1, -1, + -1, -1, 688, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 693, -1, -1, -1, + -1, -1, -1, -1, 664, 665, -1, 667, -1, -1, + -1, -1, -1, 664, 665, -1, 667, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 688, -1, + -1, -1, -1, 693, -1, -1, -1, 688, -1, -1, + -1, -1, 693, -1, 704, -1, -1, -1, -1, -1, + -1, 711, -1, 704, -1, -1, -1, -1, -1, -1, + 711, -1, 722, -1, -1, -1, 726, -1, -1, -1, + -1, 722, 732, -1, -1, 726, -1, -1, -1, 0, + -1, 732, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 327, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 341, 342, 343, 344, 345, -1, -1, -1, -1, -1, + -1, -1, -1, 354, 355, 356, 357, 358, 359, 360, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 373, 374, 375, 376, 377, 378, -1, -1, + -1, -1, -1, -1, -1, -1, 387, -1, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 416, 417, 418, 419, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 316, 317, -1, 319, -1, 321, 322, -1, + -1, -1, -1, 327, 328, 329, 330, 331, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 341, 342, 343, + 344, 345, -1, -1, -1, 349, 350, 351, 352, 353, + 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, + 364, -1, 366, 367, 368, 369, 370, 371, 372, 373, + 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, + 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, + 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, + 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, + 414, 415, 416, 417, 418, 419, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, -1, -1, + 297, 298, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 316, + 317, -1, 319, -1, 321, 322, -1, -1, -1, -1, + 327, 328, 329, 330, 331, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 341, 
342, 343, 344, 345, -1, + -1, -1, 349, 350, 351, 352, 353, 354, 355, 356, + 357, 358, 359, 360, 361, 362, 363, 364, -1, 366, + 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, + 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, + 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, + 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, + 417, 418, 419, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, -1, -1, 297, 298, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 316, 317, -1, 319, + -1, 321, -1, -1, -1, -1, -1, 327, 328, 329, + 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 341, 342, 343, 344, 345, -1, -1, -1, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 363, 364, -1, 366, 367, 368, 369, + 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, + 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, + 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, + 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, + 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, + 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 
240, 241, 242, + 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, + 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, + 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, + 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, + 293, 294, -1, -1, 297, 298, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 316, 317, -1, 319, -1, 321, -1, + -1, -1, -1, -1, 327, 328, 329, 330, 331, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 341, 342, + 343, 344, 345, -1, -1, -1, 349, 350, 351, 352, + 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, + 363, 364, -1, 366, 367, 368, 369, 370, 371, 372, + 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, + 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, + 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, + 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, + 413, 414, 415, 416, 417, 418, 419, 3, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, -1, + -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 316, 317, -1, 319, -1, -1, -1, -1, -1, -1, + -1, 327, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 341, 342, 343, 344, 345, + -1, -1, -1, 349, 350, 351, 352, 353, 354, 355, + 356, 357, 358, 359, 360, 361, 362, 363, 364, -1, + 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, + 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, + 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, + 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, + 416, 417, 418, 419, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 
144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, + 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, + 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, + 289, 290, 291, 292, 293, 294, -1, -1, 297, 298, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 316, 317, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 327, 328, + 329, 330, 331, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 341, 342, 343, 344, 345, -1, -1, -1, + 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 373, 374, 375, 376, 377, 378, + 379, 380, 381, 382, 383, 384, 385, 386, 387, -1, + 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, + 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, + 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, + 419, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, -1, -1, 297, 298, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 316, 317, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 328, 329, 330, 331, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 341, + 342, 343, 344, -1, -1, -1, -1, 349, 350, 351, + 352, 353, 354, 355, 356, 357, 358, 359, 360, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 373, 374, 375, 376, 377, 378, 379, 380, 381, + 382, 383, 384, 385, 386, 387, -1, 389, 390, 391, + 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, + 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, + 412, 413, 414, 415, 416, 417, 418, 419, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 
43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, + 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, + 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, + 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, + 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 327, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 341, 342, 343, 344, + 345, -1, -1, -1, -1, -1, -1, -1, -1, 354, + 355, 356, 357, 358, 359, 360, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 373, 374, + 375, 376, 377, 378, -1, -1, -1, -1, -1, -1, + -1, -1, 387, -1, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 293, 294, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 327, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 341, 342, 343, 344, -1, -1, -1, + -1, -1, -1, -1, -1, 353, 354, 355, 356, 357, + 358, 359, 360, -1, -1, -1, -1, -1, 
-1, -1, + -1, -1, -1, -1, -1, 373, 374, 375, 376, 377, + 378, -1, -1, -1, -1, -1, -1, -1, -1, 387, + -1, 389, 390, 391, 392, 393, 394, 395, 396, 397, + 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, + 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, + 418, 419, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, + 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 291, 292, 293, 294, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 322, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 341, 342, 343, 344, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 354, 355, 356, 357, 358, 359, 360, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 373, 374, 375, 376, 377, 378, -1, -1, + -1, -1, -1, -1, -1, -1, 387, -1, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 416, 417, 418, 419, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 
273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 322, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 341, 342, 343, + 344, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 354, 355, 356, 357, 358, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 373, + 374, 375, 376, 377, 378, -1, -1, -1, -1, -1, + -1, -1, -1, 387, -1, 389, 390, 391, 392, 393, + 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, + 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, + 414, 415, 416, 417, 418, 419, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, + 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, + 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, + 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, + 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, + 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 322, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 341, 342, 343, 344, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 354, 355, 356, + 357, 358, 359, 360, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 373, 374, 375, 376, + 377, 378, -1, -1, -1, -1, -1, -1, -1, -1, + 387, -1, 389, 390, 391, 392, 393, 394, 395, 396, + 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, + 417, 418, 419, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 
191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 341, 342, 343, 344, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 354, 355, 356, 357, 358, 359, + 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 373, 374, 375, 376, 377, 378, -1, + -1, -1, -1, -1, -1, -1, -1, 387, -1, 389, + 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, + 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, + 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, -1, -1, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 316, 317, -1, -1, -1, 321, 322, -1, + -1, -1, -1, -1, 328, 329, 330, 331, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 349, 350, 351, 352, 353, + 354, -1, -1, -1, -1, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 379, 380, 381, 382, 383, + 384, 385, 386, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 401, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, -1, -1, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 
130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, -1, + -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 316, 317, -1, -1, 320, -1, -1, -1, -1, -1, + -1, -1, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 349, 350, 351, 352, 353, 354, -1, + -1, -1, -1, 359, 360, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 379, 380, 381, 382, 383, 384, 385, + 386, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 401, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, -1, -1, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, + 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, + 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, + 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, + 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, + 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, + 288, 289, 290, 291, 292, 293, 294, -1, -1, 297, + 298, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, 316, 317, + -1, -1, -1, 321, -1, -1, -1, -1, -1, -1, + 328, 329, 330, 331, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 349, 350, 351, 352, 353, 354, -1, -1, -1, + -1, 359, 360, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 379, 380, 381, 382, 383, 384, 385, 386, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 401, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, -1, -1, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 
85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, + 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, + 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 293, 294, -1, -1, 297, 298, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, 316, 317, -1, -1, + 320, -1, -1, -1, -1, -1, -1, -1, 328, 329, + 330, 331, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 349, + 350, 351, 352, 353, 354, -1, -1, -1, -1, 359, + 360, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 379, + 380, 381, 382, 383, 384, 385, 386, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 401, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, -1, + -1, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, -1, -1, 297, 298, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 316, 317, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 327, 328, 329, 330, 331, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 349, 350, 351, + 352, 353, 354, -1, -1, -1, -1, 359, 360, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 379, 380, 381, + 382, 383, 384, 385, 386, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 401, + 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, -1, -1, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, + 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, + 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, + 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, + 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, + 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, + 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, + 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, + 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, + 294, -1, -1, 297, 298, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 316, 317, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 328, 329, 330, 331, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 349, 350, 351, 352, 353, + 354, -1, -1, -1, -1, 359, 360, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 379, 380, 381, 382, 383, + 384, 385, 386, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, 401, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, -1, -1, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, + 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, + 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, + 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, + 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, + 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, + 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, + 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, + 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, 294, 391, + -1, 297, 298, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 405, -1, -1, -1, -1, -1, -1, + 316, 317, -1, -1, -1, -1, -1, -1, -1, -1, + 422, 423, 328, 329, 330, 331, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 349, 350, 351, 352, 353, 354, -1, + -1, -1, 454, 359, 360, -1, -1, -1, -1, -1, + -1, -1, -1, 465, -1, -1, -1, -1, -1, -1, + -1, -1, 
-1, 379, 380, 381, 382, 383, 384, 385, + 386, -1, 484, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, 401, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, 551, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, 564, 565, 566, 567, 568, 569, 570, 571, + 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, + 582, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 682 +}; + + /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing + symbol of state STATE-NUM. */ +static const yytype_uint16 yystos[] = +{ + 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, + 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, + 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, + 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, + 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, 327, 341, 342, 343, 344, 345, 354, + 355, 356, 357, 358, 359, 360, 373, 374, 375, 376, + 377, 378, 387, 389, 390, 391, 392, 393, 394, 395, + 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, + 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, + 416, 417, 418, 419, 451, 452, 455, 456, 457, 458, + 462, 463, 464, 465, 466, 467, 470, 471, 472, 473, + 474, 476, 481, 482, 483, 524, 525, 526, 482, 321, + 353, 317, 317, 327, 353, 327, 527, 318, 324, 459, + 460, 461, 471, 476, 324, 327, 353, 327, 353, 472, + 476, 335, 478, 479, 0, 525, 476, 485, 321, 353, + 374, 468, 469, 353, 475, 319, 327, 477, 321, 503, + 460, 459, 461, 353, 353, 317, 326, 477, 321, 324, + 327, 454, 297, 298, 316, 317, 328, 329, 330, 331, + 349, 350, 351, 352, 353, 379, 380, 381, 382, 383, + 384, 385, 386, 421, 422, 423, 425, 426, 427, 428, + 429, 430, 431, 432, 433, 474, 476, 480, 477, 327, + 471, 476, 486, 487, 484, 326, 318, 324, 318, 324, + 320, 432, 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 319, 327, 319, 321, 322, 327, + 361, 362, 363, 364, 366, 367, 368, 369, 370, 371, + 372, 
388, 432, 445, 447, 449, 451, 455, 474, 476, + 492, 493, 494, 495, 496, 504, 505, 506, 507, 510, + 511, 514, 515, 516, 523, 528, 477, 326, 477, 321, + 447, 490, 326, 453, 353, 324, 327, 432, 432, 449, + 297, 298, 319, 323, 318, 318, 324, 360, 447, 317, + 432, 324, 336, 476, 353, 488, 489, 322, 487, 486, + 445, 450, 469, 353, 332, 333, 334, 329, 331, 295, + 296, 299, 300, 335, 336, 301, 302, 339, 338, 337, + 303, 305, 304, 340, 320, 320, 445, 319, 322, 497, + 317, 327, 327, 518, 317, 317, 327, 327, 449, 317, + 449, 325, 327, 306, 307, 308, 309, 310, 311, 312, + 313, 314, 315, 326, 448, 324, 327, 322, 493, 507, + 511, 516, 490, 326, 490, 491, 490, 486, 353, 318, + 424, 449, 353, 447, 432, 488, 477, 324, 327, 322, + 432, 432, 432, 434, 434, 435, 435, 436, 436, 436, + 436, 437, 437, 438, 439, 440, 441, 442, 443, 446, + 320, 353, 529, 530, 504, 517, 493, 519, 449, 327, + 449, 325, 447, 447, 490, 322, 324, 322, 320, 327, + 489, 449, 317, 320, 324, 498, 449, 464, 471, 509, + 361, 492, 505, 520, 318, 318, 322, 490, 325, 450, + 320, 530, 322, 353, 318, 317, 509, 521, 522, 500, + 501, 502, 508, 512, 447, 318, 326, 494, 499, 503, + 449, 327, 318, 365, 496, 494, 321, 490, 318, 449, + 499, 500, 504, 513, 327, 322 +}; + + /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ +static const yytype_uint16 yyr1[] = +{ + 0, 420, 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, 423, 423, + 423, 423, 423, 423, 424, 425, 426, 427, 427, 428, + 428, 429, 429, 430, 431, 431, 431, 432, 432, 432, + 432, 433, 433, 433, 433, 434, 434, 434, 434, 435, + 435, 435, 436, 436, 436, 437, 437, 437, 437, 437, + 438, 438, 438, 439, 439, 440, 440, 441, 441, 442, + 442, 443, 443, 444, 444, 445, 446, 445, 447, 447, + 448, 448, 448, 448, 448, 448, 448, 448, 448, 448, + 448, 449, 449, 450, 451, 451, 451, 451, 451, 451, + 451, 451, 451, 453, 452, 454, 454, 455, 456, 456, + 457, 457, 458, 459, 459, 460, 460, 460, 460, 461, + 462, 462, 462, 462, 462, 463, 463, 463, 463, 463, + 464, 464, 465, 466, 466, 466, 466, 466, 466, 466, + 466, 467, 468, 468, 469, 469, 469, 470, 471, 471, + 472, 472, 472, 472, 472, 472, 472, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 473, 473, 473, 473, 473, 473, 473, 473, + 473, 473, 474, 475, 475, 476, 476, 477, 477, 477, + 477, 478, 478, 479, 480, 480, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 
481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 481, 481, + 481, 481, 481, 481, 481, 481, 481, 481, 482, 482, + 482, 484, 483, 485, 483, 486, 486, 487, 487, 488, + 488, 489, 489, 490, 490, 490, 491, 491, 492, 493, + 493, 494, 494, 494, 494, 494, 494, 494, 494, 495, + 496, 497, 498, 496, 499, 499, 501, 500, 502, 500, + 503, 503, 504, 504, 505, 505, 506, 506, 507, 508, + 508, 509, 509, 510, 510, 512, 511, 513, 513, 514, + 514, 515, 515, 517, 516, 518, 516, 519, 516, 520, + 520, 521, 521, 522, 522, 523, 523, 523, 523, 523, + 524, 524, 525, 525, 525, 527, 526, 528, 529, 529, + 530, 530 +}; + + /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ +static const yytype_uint8 yyr2[] = +{ + 0, 2, 1, 1, 3, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, + 1, 3, 2, 2, 1, 1, 1, 2, 2, 2, + 1, 2, 3, 2, 1, 1, 1, 1, 2, 2, + 2, 1, 1, 1, 1, 1, 3, 3, 3, 1, + 3, 3, 1, 3, 3, 1, 3, 3, 3, 3, + 1, 3, 3, 1, 3, 1, 3, 1, 3, 1, + 3, 1, 3, 1, 3, 1, 0, 6, 1, 3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 3, 1, 2, 2, 4, 2, 3, 4, + 2, 3, 4, 0, 6, 2, 3, 2, 1, 1, + 2, 3, 3, 2, 3, 2, 1, 2, 1, 1, + 1, 3, 4, 6, 5, 1, 2, 3, 5, 4, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 4, 1, 3, 1, 3, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 4, 1, 1, 3, 2, 3, 2, 3, 3, + 4, 1, 0, 3, 1, 3, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 0, 6, 0, 5, 1, 2, 3, 4, 1, + 3, 1, 2, 1, 3, 4, 1, 3, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 2, 0, 0, 5, 1, 1, 0, 2, 0, 2, + 2, 3, 1, 2, 1, 2, 1, 2, 5, 3, + 1, 1, 4, 1, 2, 0, 8, 0, 1, 3, + 2, 1, 2, 0, 6, 0, 8, 0, 7, 1, + 1, 1, 0, 2, 3, 2, 2, 2, 3, 2, + 1, 2, 1, 1, 1, 0, 3, 5, 1, 3, + 1, 4 +}; + + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY (-2) +#define YYEOF 0 + +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab + + +#define YYRECOVERING() (!!yyerrstatus) + +#define YYBACKUP(Token, Value) \ +do \ + if (yychar == YYEMPTY) \ + { \ + yychar = (Token); \ + yylval = (Value); \ + YYPOPSTACK (yylen); \ + yystate = *yyssp; \ + goto yybackup; \ + } \ + else \ + { \ + yyerror (pParseContext, YY_("syntax error: cannot back up")); \ + YYERROR; \ + } \ +while (0) + +/* Error token number */ +#define YYTERROR 1 +#define YYERRCODE 256 + + + +/* Enable debugging if requested. 
*/ +#if YYDEBUG + +# ifndef YYFPRINTF +# include <stdio.h> /* INFRINGES ON USER NAME SPACE */ +# define YYFPRINTF fprintf +# endif + +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ +} while (0) + +/* This macro is provided for backward compatibility. */ +#ifndef YY_LOCATION_PRINT +# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +#endif + + +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Type, Value, pParseContext); \ + YYFPRINTF (stderr, "\n"); \ + } \ +} while (0) + + +/*----------------------------------------. +| Print this symbol's value on YYOUTPUT. | +`----------------------------------------*/ + +static void +yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) +{ + FILE *yyo = yyoutput; + YYUSE (yyo); + YYUSE (pParseContext); + if (!yyvaluep) + return; +# ifdef YYPRINT + if (yytype < YYNTOKENS) + YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); +# endif + YYUSE (yytype); +} + + +/*--------------------------------. +| Print this symbol on YYOUTPUT. | +`--------------------------------*/ + +static void +yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, glslang::TParseContext* pParseContext) +{ + YYFPRINTF (yyoutput, "%s %s (", + yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); + + yy_symbol_value_print (yyoutput, yytype, yyvaluep, pParseContext); + YYFPRINTF (yyoutput, ")"); +} + +/*------------------------------------------------------------------. +| yy_stack_print -- Print the state stack from its BOTTOM up to its | +| TOP (included). | +`------------------------------------------------------------------*/ + +static void +yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) +{ + YYFPRINTF (stderr, "Stack now"); + for (; yybottom <= yytop; yybottom++) + { + int yybot = *yybottom; + YYFPRINTF (stderr, " %d", yybot); + } + YYFPRINTF (stderr, "\n"); +} + +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ +} while (0) + + +/*------------------------------------------------. +| Report that the YYRULE is going to be reduced. | +`------------------------------------------------*/ + +static void +yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule, glslang::TParseContext* pParseContext) +{ + unsigned long int yylno = yyrline[yyrule]; + int yynrhs = yyr2[yyrule]; + int yyi; + YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", + yyrule - 1, yylno); + /* The symbols being reduced. */ + for (yyi = 0; yyi < yynrhs; yyi++) + { + YYFPRINTF (stderr, " $%d = ", yyi + 1); + yy_symbol_print (stderr, + yystos[yyssp[yyi + 1 - yynrhs]], + &(yyvsp[(yyi + 1) - (yynrhs)]) + , pParseContext); + YYFPRINTF (stderr, "\n"); + } +} + +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ + yy_reduce_print (yyssp, yyvsp, Rule, pParseContext); \ +} while (0) + +/* Nonzero means print parse trace. It is left uninitialized so that + multiple parsers can coexist. */ +int yydebug; +#else /* !YYDEBUG */ +# define YYDPRINTF(Args) +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) +# define YY_STACK_PRINT(Bottom, Top) +# define YY_REDUCE_PRINT(Rule) +#endif /* !YYDEBUG */ + + +/* YYINITDEPTH -- initial size of the parser's stacks. 
*/ +#ifndef YYINITDEPTH +# define YYINITDEPTH 200 +#endif + +/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only + if the built-in stack extension method is used). + + Do not make this value too large; the results are undefined if + YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) + evaluated with infinite-precision integer arithmetic. */ + +#ifndef YYMAXDEPTH +# define YYMAXDEPTH 10000 +#endif + + +#if YYERROR_VERBOSE + +# ifndef yystrlen +# if defined __GLIBC__ && defined _STRING_H +# define yystrlen strlen +# else +/* Return the length of YYSTR. */ +static YYSIZE_T +yystrlen (const char *yystr) +{ + YYSIZE_T yylen; + for (yylen = 0; yystr[yylen]; yylen++) + continue; + return yylen; +} +# endif +# endif + +# ifndef yystpcpy +# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE +# define yystpcpy stpcpy +# else +/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in + YYDEST. */ +static char * +yystpcpy (char *yydest, const char *yysrc) +{ + char *yyd = yydest; + const char *yys = yysrc; + + while ((*yyd++ = *yys++) != '\0') + continue; + + return yyd - 1; +} +# endif +# endif + +# ifndef yytnamerr +/* Copy to YYRES the contents of YYSTR after stripping away unnecessary + quotes and backslashes, so that it's suitable for yyerror. The + heuristic is that double-quoting is unnecessary unless the string + contains an apostrophe, a comma, or backslash (other than + backslash-backslash). YYSTR is taken from yytname. If YYRES is + null, do not copy; instead, return the length of what the result + would have been. */ +static YYSIZE_T +yytnamerr (char *yyres, const char *yystr) +{ + if (*yystr == '"') + { + YYSIZE_T yyn = 0; + char const *yyp = yystr; + + for (;;) + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + /* Fall through. */ + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } + do_not_strip_quotes: ; + } + + if (! yyres) + return yystrlen (yystr); + + return yystpcpy (yyres, yystr) - yyres; +} +# endif + +/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message + about the unexpected token YYTOKEN for the state stack whose top is + YYSSP. + + Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is + not large enough to hold the message. In that case, also set + *YYMSG_ALLOC to the required number of bytes. Return 2 if the + required number of bytes is too large to store. */ +static int +yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, + yytype_int16 *yyssp, int yytoken) +{ + YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); + YYSIZE_T yysize = yysize0; + enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; + /* Internationalized format string. */ + const char *yyformat = YY_NULLPTR; + /* Arguments of yyformat. */ + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + /* Number of reported tokens (one for the "unexpected", one per + "expected"). */ + int yycount = 0; + + /* There are many possibilities here to consider: + - If this state is a consistent state with a default action, then + the only way this function was invoked is if the default action + is an error action. In that case, don't check for expected + tokens because there are none. + - The only way there can be no lookahead present (in yychar) is if + this state is a consistent state with a default action. 
Thus, + detecting the absence of a lookahead is sufficient to determine + that there is no unexpected or expected token to report. In that + case, just report a simple "syntax error". + - Don't assume there isn't a lookahead just because this state is a + consistent state with a default action. There might have been a + previous inconsistent state, consistent state with a non-default + action, or user semantic action that manipulated yychar. + - Of course, the expected token list depends on states to have + correct lookahead information, and it depends on the parser not + to perform extra reductions after fetching a lookahead from the + scanner and before detecting a syntax error. Thus, state merging + (from LALR or IELR) and default reductions corrupt the expected + token list. However, the list is correct for canonical LR with + one exception: it will still contain any token that will not be + accepted due to an error action in a later state. + */ + if (yytoken != YYEMPTY) + { + int yyn = yypact[*yyssp]; + yyarg[yycount++] = yytname[yytoken]; + if (!yypact_value_is_default (yyn)) + { + /* Start YYX at -YYN if negative to avoid negative indexes in + YYCHECK. In other words, skip the first -YYN actions for + this state because they are default actions. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yyx; + + for (yyx = yyxbegin; yyx < yyxend; ++yyx) + if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR + && !yytable_value_is_error (yytable[yyx + yyn])) + { + if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) + { + yycount = 1; + yysize = yysize0; + break; + } + yyarg[yycount++] = yytname[yyx]; + { + YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); + if (! (yysize <= yysize1 + && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) + return 2; + yysize = yysize1; + } + } + } + } + + switch (yycount) + { +# define YYCASE_(N, S) \ + case N: \ + yyformat = S; \ + break + YYCASE_(0, YY_("syntax error")); + YYCASE_(1, YY_("syntax error, unexpected %s")); + YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); + YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); + YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); + YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); +# undef YYCASE_ + } + + { + YYSIZE_T yysize1 = yysize + yystrlen (yyformat); + if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) + return 2; + yysize = yysize1; + } + + if (*yymsg_alloc < yysize) + { + *yymsg_alloc = 2 * yysize; + if (! (yysize <= *yymsg_alloc + && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) + *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; + return 1; + } + + /* Avoid sprintf, as that infringes on the user's name space. + Don't have undefined behavior even if the translation + produced a string with the wrong number of "%s"s. */ + { + char *yyp = *yymsg; + int yyi = 0; + while ((*yyp = *yyformat) != '\0') + if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) + { + yyp += yytnamerr (yyp, yyarg[yyi++]); + yyformat += 2; + } + else + { + yyp++; + yyformat++; + } + } + return 0; +} +#endif /* YYERROR_VERBOSE */ + +/*-----------------------------------------------. +| Release the memory associated to this symbol. 
| +`-----------------------------------------------*/ + +static void +yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, glslang::TParseContext* pParseContext) +{ + YYUSE (yyvaluep); + YYUSE (pParseContext); + if (!yymsg) + yymsg = "Deleting"; + YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + YYUSE (yytype); + YY_IGNORE_MAYBE_UNINITIALIZED_END +} + + + + +/*----------. +| yyparse. | +`----------*/ + +int +yyparse (glslang::TParseContext* pParseContext) +{ +/* The lookahead symbol. */ +int yychar; + + +/* The semantic value of the lookahead symbol. */ +/* Default value used for initialization, for pacifying older GCCs + or non-GCC compilers. */ +YY_INITIAL_VALUE (static YYSTYPE yyval_default;) +YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); + + /* Number of syntax errors so far. */ + int yynerrs; + + int yystate; + /* Number of tokens to shift before error messages enabled. */ + int yyerrstatus; + + /* The stacks and their tools: + 'yyss': related to states. + 'yyvs': related to semantic values. + + Refer to the stacks through separate pointers, to allow yyoverflow + to reallocate them elsewhere. */ + + /* The state stack. */ + yytype_int16 yyssa[YYINITDEPTH]; + yytype_int16 *yyss; + yytype_int16 *yyssp; + + /* The semantic value stack. */ + YYSTYPE yyvsa[YYINITDEPTH]; + YYSTYPE *yyvs; + YYSTYPE *yyvsp; + + YYSIZE_T yystacksize; + + int yyn; + int yyresult; + /* Lookahead token as an internal (translated) token number. */ + int yytoken = 0; + /* The variables used to return semantic value and location from the + action routines. */ + YYSTYPE yyval; + +#if YYERROR_VERBOSE + /* Buffer for error messages, and its allocated size. */ + char yymsgbuf[128]; + char *yymsg = yymsgbuf; + YYSIZE_T yymsg_alloc = sizeof yymsgbuf; +#endif + +#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) + + /* The number of symbols on the RHS of the reduced rule. + Keep to zero when no symbol should be popped. */ + int yylen = 0; + + yyssp = yyss = yyssa; + yyvsp = yyvs = yyvsa; + yystacksize = YYINITDEPTH; + + YYDPRINTF ((stderr, "Starting parse\n")); + + yystate = 0; + yyerrstatus = 0; + yynerrs = 0; + yychar = YYEMPTY; /* Cause a token to be read. */ + goto yysetstate; + +/*------------------------------------------------------------. +| yynewstate -- Push a new state, which is found in yystate. | +`------------------------------------------------------------*/ + yynewstate: + /* In all cases, when you get here, the value and location stacks + have just been pushed. So pushing a state here evens the stacks. */ + yyssp++; + + yysetstate: + *yyssp = yystate; + + if (yyss + yystacksize - 1 <= yyssp) + { + /* Get the current used size of the three stacks, in elements. */ + YYSIZE_T yysize = yyssp - yyss + 1; + +#ifdef yyoverflow + { + /* Give user a chance to reallocate the stack. Use copies of + these so that the &'s don't force the real ones into + memory. */ + YYSTYPE *yyvs1 = yyvs; + yytype_int16 *yyss1 = yyss; + + /* Each stack pointer address is followed by the size of the + data in use in that stack, in bytes. This used to be a + conditional around just the two extra args, but that might + be undefined if yyoverflow is a macro. */ + yyoverflow (YY_("memory exhausted"), + &yyss1, yysize * sizeof (*yyssp), + &yyvs1, yysize * sizeof (*yyvsp), + &yystacksize); + + yyss = yyss1; + yyvs = yyvs1; + } +#else /* no yyoverflow */ +# ifndef YYSTACK_RELOCATE + goto yyexhaustedlab; +# else + /* Extend the stack our own way. 
*/ + if (YYMAXDEPTH <= yystacksize) + goto yyexhaustedlab; + yystacksize *= 2; + if (YYMAXDEPTH < yystacksize) + yystacksize = YYMAXDEPTH; + + { + yytype_int16 *yyss1 = yyss; + union yyalloc *yyptr = + (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); + if (! yyptr) + goto yyexhaustedlab; + YYSTACK_RELOCATE (yyss_alloc, yyss); + YYSTACK_RELOCATE (yyvs_alloc, yyvs); +# undef YYSTACK_RELOCATE + if (yyss1 != yyssa) + YYSTACK_FREE (yyss1); + } +# endif +#endif /* no yyoverflow */ + + yyssp = yyss + yysize - 1; + yyvsp = yyvs + yysize - 1; + + YYDPRINTF ((stderr, "Stack size increased to %lu\n", + (unsigned long int) yystacksize)); + + if (yyss + yystacksize - 1 <= yyssp) + YYABORT; + } + + YYDPRINTF ((stderr, "Entering state %d\n", yystate)); + + if (yystate == YYFINAL) + YYACCEPT; + + goto yybackup; + +/*-----------. +| yybackup. | +`-----------*/ +yybackup: + + /* Do appropriate processing given the current state. Read a + lookahead token if we need one and don't already have one. */ + + /* First try to decide what to do without reference to lookahead token. */ + yyn = yypact[yystate]; + if (yypact_value_is_default (yyn)) + goto yydefault; + + /* Not known => get a lookahead token if don't already have one. */ + + /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ + if (yychar == YYEMPTY) + { + YYDPRINTF ((stderr, "Reading a token: ")); + yychar = yylex (&yylval, parseContext); + } + + if (yychar <= YYEOF) + { + yychar = yytoken = YYEOF; + YYDPRINTF ((stderr, "Now at end of input.\n")); + } + else + { + yytoken = YYTRANSLATE (yychar); + YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); + } + + /* If the proper action on seeing token YYTOKEN is to reduce or to + detect an error, take that action. */ + yyn += yytoken; + if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) + goto yydefault; + yyn = yytable[yyn]; + if (yyn <= 0) + { + if (yytable_value_is_error (yyn)) + goto yyerrlab; + yyn = -yyn; + goto yyreduce; + } + + /* Count tokens shifted since error; after three, turn off error + status. */ + if (yyerrstatus) + yyerrstatus--; + + /* Shift the lookahead token. */ + YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); + + /* Discard the shifted token. */ + yychar = YYEMPTY; + + yystate = yyn; + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END + + goto yynewstate; + + +/*-----------------------------------------------------------. +| yydefault -- do the default action for the current state. | +`-----------------------------------------------------------*/ +yydefault: + yyn = yydefact[yystate]; + if (yyn == 0) + goto yyerrlab; + goto yyreduce; + + +/*-----------------------------. +| yyreduce -- Do a reduction. | +`-----------------------------*/ +yyreduce: + /* yyn is the number of a rule to reduce with. */ + yylen = yyr2[yyn]; + + /* If YYLEN is nonzero, implement the default value of the action: + '$$ = $1'. + + Otherwise, the following line sets YYVAL to garbage. + This behavior is undocumented and Bison + users should not rely upon it. Assigning to YYVAL + unconditionally makes the parser a bit smaller, and it avoids a + GCC warning that YYVAL may be used uninitialized. 
*/ + yyval = yyvsp[1-yylen]; + + + YY_REDUCE_PRINT (yyn); + switch (yyn) + { + case 2: +#line 357 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleVariable((yyvsp[0].lex).loc, (yyvsp[0].lex).symbol, (yyvsp[0].lex).string); + } +#line 4229 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 3: +#line 363 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4237 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 4: +#line 366 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode); + if ((yyval.interm.intermTypedNode)->getAsConstantUnion()) + (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression(); + } +#line 4247 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 5: +#line 371 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat, (yyvsp[0].lex).loc, true); + } +#line 4255 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 6: +#line 374 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); + } +#line 4263 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 7: +#line 377 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); + } +#line 4272 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 8: +#line 381 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).b, (yyvsp[0].lex).loc, true); + } +#line 4280 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 9: +#line 385 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).string, (yyvsp[0].lex).loc, true); + } +#line 4288 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 10: +#line 388 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); + } +#line 4297 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 11: +#line 392 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); + } +#line 4306 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 12: +#line 396 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).i64, (yyvsp[0].lex).loc, true); + } +#line 4315 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 13: +#line 400 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).u64, (yyvsp[0].lex).loc, true); + } +#line 4324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 14: +#line 404 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((short)(yyvsp[0].lex).i, (yyvsp[0].lex).loc, true); + } +#line 4333 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 15: +#line 408 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt16Check((yyvsp[0].lex).loc, "16-bit unsigned integer literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((unsigned short)(yyvsp[0].lex).u, (yyvsp[0].lex).loc, true); + } +#line 4342 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 16: +#line 412 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double literal"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtDouble, (yyvsp[0].lex).loc, true); + } +#line 4353 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 17: +#line 418 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float literal"); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion((yyvsp[0].lex).d, EbtFloat16, (yyvsp[0].lex).loc, true); + } +#line 4362 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 18: +#line 426 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4370 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 19: +#line 429 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBracketDereference((yyvsp[-2].lex).loc, (yyvsp[-3].interm.intermTypedNode), (yyvsp[-1].interm.intermTypedNode)); + } +#line 4378 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 20: +#line 432 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4386 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 21: +#line 435 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleDotDereference((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode), *(yyvsp[0].lex).string); + } +#line 4394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 22: +#line 438 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode)); + parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "++", (yyvsp[-1].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "++", EOpPostIncrement, (yyvsp[-1].interm.intermTypedNode)); + } +#line 4404 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 23: +#line 443 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.variableCheck((yyvsp[-1].interm.intermTypedNode)); + parseContext.lValueErrorCheck((yyvsp[0].lex).loc, "--", (yyvsp[-1].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[0].lex).loc, "--", EOpPostDecrement, (yyvsp[-1].interm.intermTypedNode)); + } +#line 4414 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 24: +#line 451 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.integerCheck((yyvsp[0].interm.intermTypedNode), "[]"); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 4423 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 25: +#line 458 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleFunctionCall((yyvsp[0].interm).loc, (yyvsp[0].interm).function, (yyvsp[0].interm).intermNode); + delete (yyvsp[0].interm).function; + } +#line 4432 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 26: +#line 465 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + } +#line 4440 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 27: +#line 471 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + (yyval.interm).loc = (yyvsp[0].lex).loc; + } +#line 4449 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 28: +#line 475 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + (yyval.interm).loc = (yyvsp[0].lex).loc; + } +#line 4458 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 29: +#line 482 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + } +#line 4466 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 30: +#line 485 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + } +#line 4474 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 31: +#line 491 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + TParameter param = { 0, new TType }; + param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType()); + (yyvsp[-1].interm).function->addParameter(param); + (yyval.interm).function = (yyvsp[-1].interm).function; + (yyval.interm).intermNode = (yyvsp[0].interm.intermTypedNode); + } +#line 4486 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 32: +#line 498 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + TParameter param = { 0, new TType }; + param.type->shallowCopy((yyvsp[0].interm.intermTypedNode)->getType()); + (yyvsp[-2].interm).function->addParameter(param); + (yyval.interm).function = (yyvsp[-2].interm).function; + (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc); + } +#line 4498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 33: +#line 508 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-1].interm); + } +#line 4506 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 34: +#line 516 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // Constructor + (yyval.interm).intermNode = 0; + (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, 
(yyvsp[0].interm.type)); + } +#line 4516 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 35: +#line 521 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // + // Should be a method or subroutine call, but we haven't recognized the arguments yet. + // + (yyval.interm).function = 0; + (yyval.interm).intermNode = 0; + + TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode(); + if (method) { + (yyval.interm).function = new TFunction(&method->getMethodName(), TType(EbtInt), EOpArrayLength); + (yyval.interm).intermNode = method->getObject(); + } else { + TIntermSymbol* symbol = (yyvsp[0].interm.intermTypedNode)->getAsSymbolNode(); + if (symbol) { + parseContext.reservedErrorCheck(symbol->getLoc(), symbol->getName()); + TFunction *function = new TFunction(&symbol->getName(), TType(EbtVoid)); + (yyval.interm).function = function; + } else + parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "function call, method, or subroutine call expected", "", ""); + } + + if ((yyval.interm).function == 0) { + // error recover + TString* empty = NewPoolTString(""); + (yyval.interm).function = new TFunction(empty, TType(EbtVoid), EOpNull); + } + } +#line 4548 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 36: +#line 549 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // Constructor + (yyval.interm).intermNode = 0; + (yyval.interm).function = parseContext.handleConstructorCall((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type)); + } +#line 4558 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 37: +#line 558 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.variableCheck((yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + if (TIntermMethod* method = (yyvsp[0].interm.intermTypedNode)->getAsMethodNode()) + parseContext.error((yyvsp[0].interm.intermTypedNode)->getLoc(), "incomplete method syntax", method->getMethodName().c_str(), ""); + } +#line 4569 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 38: +#line 564 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "++", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "++", EOpPreIncrement, (yyvsp[0].interm.intermTypedNode)); + } +#line 4578 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 39: +#line 568 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.lValueErrorCheck((yyvsp[-1].lex).loc, "--", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].lex).loc, "--", EOpPreDecrement, (yyvsp[0].interm.intermTypedNode)); + } +#line 4587 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 40: +#line 572 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm).op != EOpNull) { + char errorOp[2] = {0, 0}; + switch((yyvsp[-1].interm).op) { + case EOpNegative: errorOp[0] = '-'; break; + case EOpLogicalNot: errorOp[0] = '!'; break; + case EOpBitwiseNot: errorOp[0] = '~'; break; + default: break; // some compilers want this + } + (yyval.interm.intermTypedNode) = parseContext.handleUnaryMath((yyvsp[-1].interm).loc, errorOp, (yyvsp[-1].interm).op, (yyvsp[0].interm.intermTypedNode)); + } else { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + if 
((yyval.interm.intermTypedNode)->getAsConstantUnion()) + (yyval.interm.intermTypedNode)->getAsConstantUnion()->setExpression(); + } + } +#line 4608 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 41: +#line 592 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNull; } +#line 4614 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 42: +#line 593 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpNegative; } +#line 4620 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 43: +#line 594 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLogicalNot; } +#line 4626 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 44: +#line 595 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpBitwiseNot; + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise not"); } +#line 4633 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 45: +#line 601 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4639 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 46: +#line 602 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "*", EOpMul, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4649 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 47: +#line 607 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "/", EOpDiv, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4659 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 48: +#line 612 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "%"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "%", EOpMod, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 49: +#line 621 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 50: +#line 622 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "+", EOpAdd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4686 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 51: +#line 627 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "-", EOpSub, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 52: +#line 635 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4702 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 53: +#line 636 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift left"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<<", EOpLeftShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4713 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 54: +#line 642 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bit shift right"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">>", EOpRightShift, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4724 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 55: +#line 651 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4730 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 56: +#line 652 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<", EOpLessThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4740 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 57: +#line 657 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">", EOpGreaterThan, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4750 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 58: +#line 662 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "<=", EOpLessThanEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 59: +#line 667 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, ">=", EOpGreaterThanEqual, (yyvsp[-2].interm.intermTypedNode), 
(yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4770 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 60: +#line 675 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4776 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 61: +#line 676 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison"); + parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); + parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); + parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "=="); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "==", EOpEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4790 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 62: +#line 685 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayObjectCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array comparison"); + parseContext.opaqueCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); + parseContext.specializationCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); + parseContext.referenceCheck((yyvsp[-1].lex).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "!="); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "!=", EOpNotEqual, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4804 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 63: +#line 697 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 64: +#line 698 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise and"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&", EOpAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4821 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 65: +#line 707 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4827 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 66: +#line 708 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise exclusive or"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^", EOpExclusiveOr, 
(yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4838 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 67: +#line 717 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4844 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 68: +#line 718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[-1].lex).loc, "bitwise inclusive or"); + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "|", EOpInclusiveOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 4855 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 69: +#line 727 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4861 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 70: +#line 728 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "&&", EOpLogicalAnd, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4871 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 71: +#line 736 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4877 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 72: +#line 737 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "^^", EOpLogicalXor, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4887 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 73: +#line 745 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4893 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 74: +#line 746 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.handleBinaryMath((yyvsp[-1].lex).loc, "||", EOpLogicalOr, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) + (yyval.interm.intermTypedNode) = parseContext.intermediate.addConstantUnion(false, (yyvsp[-1].lex).loc); + } +#line 4903 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 75: +#line 754 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4909 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 76: +#line 755 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + ++parseContext.controlFlowNestingLevel; + } +#line 4917 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 
*/ + break; + + case 77: +#line 758 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + --parseContext.controlFlowNestingLevel; + parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-5].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-4].lex).loc, "?", (yyvsp[-5].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-1].lex).loc, ":", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addSelection((yyvsp[-5].interm.intermTypedNode), (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-4].lex).loc); + if ((yyval.interm.intermTypedNode) == 0) { + parseContext.binaryOpError((yyvsp[-4].lex).loc, ":", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString()); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } + } +#line 4934 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 78: +#line 773 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); } +#line 4940 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 79: +#line 774 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayObjectCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "array assignment"); + parseContext.opaqueCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); + parseContext.storage16BitAssignmentCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); + parseContext.specializationCheck((yyvsp[-1].interm).loc, (yyvsp[-2].interm.intermTypedNode)->getType(), "="); + parseContext.lValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode)); + parseContext.rValueErrorCheck((yyvsp[-1].interm).loc, "assign", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.addAssign((yyvsp[-1].interm).loc, (yyvsp[-1].interm).op, (yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + if ((yyval.interm.intermTypedNode) == 0) { + parseContext.assignError((yyvsp[-1].interm).loc, "assign", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString()); + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } + } +#line 4958 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 80: +#line 790 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpAssign; + } +#line 4967 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 81: +#line 794 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpMulAssign; + } +#line 4976 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 82: +#line 798 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpDivAssign; + } +#line 4985 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 83: +#line 802 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "%="); + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpModAssign; + } +#line 4995 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 84: +#line 807 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpAddAssign; + } +#line 5004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 85: +#line 811 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).op = EOpSubAssign; + } +#line 5013 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 86: +#line 815 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift left assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpLeftShiftAssign; + } +#line 5022 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 87: +#line 819 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bit-shift right assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpRightShiftAssign; + } +#line 5031 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 88: +#line 823 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-and assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpAndAssign; + } +#line 5040 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 89: +#line 827 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-xor assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpExclusiveOrAssign; + } +#line 5049 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 90: +#line 831 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "bitwise-or assign"); + (yyval.interm).loc = (yyvsp[0].lex).loc; (yyval.interm).op = EOpInclusiveOrAssign; + } +#line 5058 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 91: +#line 838 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 5066 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 92: +#line 841 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.samplerConstructorLocationCheck((yyvsp[-1].lex).loc, ",", (yyvsp[0].interm.intermTypedNode)); + (yyval.interm.intermTypedNode) = parseContext.intermediate.addComma((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode), (yyvsp[-1].lex).loc); + if ((yyval.interm.intermTypedNode) == 0) { + parseContext.binaryOpError((yyvsp[-1].lex).loc, ",", (yyvsp[-2].interm.intermTypedNode)->getCompleteString(), (yyvsp[0].interm.intermTypedNode)->getCompleteString()); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } + } +#line 5079 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 93: +#line 852 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.constantValueCheck((yyvsp[0].interm.intermTypedNode), ""); + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 5088 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 94: +#line 859 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.handleFunctionDeclarator((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).function, true /* prototype */); 
+ (yyval.interm.intermNode) = 0; + // TODO: 4.0 functionality: subroutines: make the identifier a user type for this signature + } +#line 5098 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 95: +#line 864 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm).intermNode && (yyvsp[-1].interm).intermNode->getAsAggregate()) + (yyvsp[-1].interm).intermNode->getAsAggregate()->setOperator(EOpSequence); + (yyval.interm.intermNode) = (yyvsp[-1].interm).intermNode; + } +#line 5108 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 96: +#line 869 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[-3].lex).loc, ENoProfile, 130, 0, "precision statement"); + // lazy setting of the previous scope's defaults, has effect only the first time it is called in a particular scope + parseContext.symbolTable.setPreviousDefaultPrecisions(&parseContext.defaultPrecision[0]); + parseContext.setDefaultPrecision((yyvsp[-3].lex).loc, (yyvsp[-1].interm.type), (yyvsp[-2].interm.type).qualifier.precision); + (yyval.interm.intermNode) = 0; + } +#line 5120 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 97: +#line 876 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.declareBlock((yyvsp[-1].interm).loc, *(yyvsp[-1].interm).typeList); + (yyval.interm.intermNode) = 0; + } +#line 5129 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 98: +#line 880 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.declareBlock((yyvsp[-2].interm).loc, *(yyvsp[-2].interm).typeList, (yyvsp[-1].lex).string); + (yyval.interm.intermNode) = 0; + } +#line 5138 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 99: +#line 884 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.declareBlock((yyvsp[-3].interm).loc, *(yyvsp[-3].interm).typeList, (yyvsp[-2].lex).string, (yyvsp[-1].interm).arraySizes); + (yyval.interm.intermNode) = 0; + } +#line 5147 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 100: +#line 888 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier); + parseContext.updateStandaloneQualifierDefaults((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type)); + (yyval.interm.intermNode) = 0; + } +#line 5157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 101: +#line 893 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.checkNoShaderLayouts((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).shaderQualifiers); + parseContext.addQualifierToExisting((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, *(yyvsp[-1].lex).string); + (yyval.interm.intermNode) = 0; + } +#line 5167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 102: +#line 898 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.checkNoShaderLayouts((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).shaderQualifiers); + (yyvsp[-1].interm.identifierList)->push_back((yyvsp[-2].lex).string); + parseContext.addQualifierToExisting((yyvsp[-3].interm.type).loc, (yyvsp[-3].interm.type).qualifier, *(yyvsp[-1].interm.identifierList)); + (yyval.interm.intermNode) = 0; + } +#line 5178 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 103: +#line 907 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { 
parseContext.nestedBlockCheck((yyvsp[-2].interm.type).loc); } +#line 5184 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 104: +#line 907 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + --parseContext.structNestingLevel; + parseContext.blockName = (yyvsp[-4].lex).string; + parseContext.globalQualifierFixCheck((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).qualifier); + parseContext.checkNoShaderLayouts((yyvsp[-5].interm.type).loc, (yyvsp[-5].interm.type).shaderQualifiers); + parseContext.currentBlockQualifier = (yyvsp[-5].interm.type).qualifier; + (yyval.interm).loc = (yyvsp[-5].interm.type).loc; + (yyval.interm).typeList = (yyvsp[-1].interm.typeList); + } +#line 5198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 105: +#line 918 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.identifierList) = new TIdentifierList; + (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string); + } +#line 5207 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 106: +#line 922 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.identifierList) = (yyvsp[-2].interm.identifierList); + (yyval.interm.identifierList)->push_back((yyvsp[0].lex).string); + } +#line 5216 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 107: +#line 929 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).function = (yyvsp[-1].interm.function); + (yyval.interm).loc = (yyvsp[0].lex).loc; + } +#line 5225 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 108: +#line 936 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.function) = (yyvsp[0].interm.function); + } +#line 5233 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 109: +#line 939 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.function) = (yyvsp[0].interm.function); + } +#line 5241 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 110: +#line 946 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // Add the parameter + (yyval.interm.function) = (yyvsp[-1].interm.function); + if ((yyvsp[0].interm).param.type->getBasicType() != EbtVoid) + (yyvsp[-1].interm.function)->addParameter((yyvsp[0].interm).param); + else + delete (yyvsp[0].interm).param.type; + } +#line 5254 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 111: +#line 954 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // + // Only first parameter of one-parameter functions can be void + // The check for named parameters not being void is done in parameter_declarator + // + if ((yyvsp[0].interm).param.type->getBasicType() == EbtVoid) { + // + // This parameter > first is void + // + parseContext.error((yyvsp[-1].lex).loc, "cannot be an argument type except for '(void)'", "void", ""); + delete (yyvsp[0].interm).param.type; + } else { + // Add the parameter + (yyval.interm.function) = (yyvsp[-2].interm.function); + (yyvsp[-2].interm.function)->addParameter((yyvsp[0].interm).param); + } + } +#line 5276 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 112: +#line 974 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).qualifier.storage != EvqGlobal && (yyvsp[-2].interm.type).qualifier.storage != EvqTemporary) { + parseContext.error((yyvsp[-1].lex).loc, "no qualifiers allowed for function return", + GetStorageQualifierString((yyvsp[-2].interm.type).qualifier.storage), 
""); + } + if ((yyvsp[-2].interm.type).arraySizes) + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + + // Add the function as a prototype after parsing it (we do not support recursion) + TFunction *function; + TType type((yyvsp[-2].interm.type)); + + // Potentially rename shader entry point function. No-op most of the time. + parseContext.renameShaderFunction((yyvsp[-1].lex).string); + + // Make the function + function = new TFunction((yyvsp[-1].lex).string, type); + (yyval.interm.function) = function; + } +#line 5300 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 113: +#line 997 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-1].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-1].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck((yyvsp[-1].interm.type).loc, *(yyvsp[-1].interm.type).arraySizes); + } + if ((yyvsp[-1].interm.type).basicType == EbtVoid) { + parseContext.error((yyvsp[0].lex).loc, "illegal use of type 'void'", (yyvsp[0].lex).string->c_str(), ""); + } + parseContext.reservedErrorCheck((yyvsp[0].lex).loc, *(yyvsp[0].lex).string); + + TParameter param = {(yyvsp[0].lex).string, new TType((yyvsp[-1].interm.type))}; + (yyval.interm).loc = (yyvsp[0].lex).loc; + (yyval.interm).param = param; + } +#line 5320 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 114: +#line 1012 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + } + TType* type = new TType((yyvsp[-2].interm.type)); + type->transferArraySizes((yyvsp[0].interm).arraySizes); + type->copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes); + + parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, type->getArraySizes()); + parseContext.arraySizeRequiredCheck((yyvsp[0].interm).loc, *(yyvsp[0].interm).arraySizes); + parseContext.reservedErrorCheck((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string); + + TParameter param = { (yyvsp[-1].lex).string, type }; + + (yyval.interm).loc = (yyvsp[-1].lex).loc; + (yyval.interm).param = param; + } +#line 5344 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 115: +#line 1037 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone) + (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision; + parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + + parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers); + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type); + parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type); + + } +#line 5360 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 
116: +#line 1048 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type); + parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type); + parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + } +#line 5372 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 117: +#line 1058 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + if ((yyvsp[-1].interm.type).qualifier.precision != EpqNone) + (yyval.interm).param.type->getQualifier().precision = (yyvsp[-1].interm.type).qualifier.precision; + parseContext.precisionQualifierCheck((yyvsp[-1].interm.type).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + + parseContext.checkNoShaderLayouts((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers); + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, (yyvsp[-1].interm.type).qualifier.storage, *(yyval.interm).param.type); + parseContext.paramCheckFix((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, *(yyval.interm).param.type); + } +#line 5387 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 118: +#line 1068 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + + parseContext.parameterTypeCheck((yyvsp[0].interm).loc, EvqIn, *(yyvsp[0].interm).param.type); + parseContext.paramCheckFixStorage((yyvsp[0].interm).loc, EvqTemporary, *(yyval.interm).param.type); + parseContext.precisionQualifierCheck((yyval.interm).loc, (yyval.interm).param.type->getBasicType(), (yyval.interm).param.type->getQualifier()); + } +#line 5399 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 119: +#line 1078 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + TParameter param = { 0, new TType((yyvsp[0].interm.type)) }; + (yyval.interm).param = param; + if ((yyvsp[0].interm.type).arraySizes) + parseContext.arraySizeRequiredCheck((yyvsp[0].interm.type).loc, *(yyvsp[0].interm.type).arraySizes); + } +#line 5410 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 120: +#line 1087 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[0].interm); + } +#line 5418 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 121: +#line 1090 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-2].interm); + parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-2].interm).type); + } +#line 5427 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 122: +#line 1094 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-3].interm); + parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-3].interm).type, (yyvsp[0].interm).arraySizes); + } +#line 5436 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 123: +#line 1098 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-5].interm).type; + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-5].interm).type, (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = 
parseContext.intermediate.growAggregate((yyvsp[-5].interm).intermNode, initNode, (yyvsp[-1].lex).loc); + } +#line 5446 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 124: +#line 1103 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-4].interm).type; + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-4].interm).type, 0, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate((yyvsp[-4].interm).intermNode, initNode, (yyvsp[-1].lex).loc); + } +#line 5456 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 125: +#line 1111 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[0].interm.type); + (yyval.interm).intermNode = 0; + + parseContext.declareTypeDefaults((yyval.interm).loc, (yyval.interm).type); + + } +#line 5468 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 126: +#line 1118 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-1].interm.type); + (yyval.interm).intermNode = 0; + parseContext.declareVariable((yyvsp[0].lex).loc, *(yyvsp[0].lex).string, (yyvsp[-1].interm.type)); + } +#line 5478 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 127: +#line 1123 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-2].interm.type); + (yyval.interm).intermNode = 0; + parseContext.declareVariable((yyvsp[-1].lex).loc, *(yyvsp[-1].lex).string, (yyvsp[-2].interm.type), (yyvsp[0].interm).arraySizes); + } +#line 5488 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 128: +#line 1128 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-4].interm.type); + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-3].lex).loc, *(yyvsp[-3].lex).string, (yyvsp[-4].interm.type), (yyvsp[-2].interm).arraySizes, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc); + } +#line 5498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 129: +#line 1133 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).type = (yyvsp[-3].interm.type); + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode)); + (yyval.interm).intermNode = parseContext.intermediate.growAggregate(0, initNode, (yyvsp[-1].lex).loc); + } +#line 5508 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 130: +#line 1142 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + + parseContext.globalQualifierTypeCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyval.interm.type)); + if ((yyvsp[0].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + } + parseContext.precisionQualifierCheck((yyval.interm.type).loc, (yyval.interm.type).basicType, (yyval.interm.type).qualifier); + } +#line 5523 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 131: +#line 1152 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
parseContext.globalQualifierFixCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier); + parseContext.globalQualifierTypeCheck((yyvsp[-1].interm.type).loc, (yyvsp[-1].interm.type).qualifier, (yyvsp[0].interm.type)); + + if ((yyvsp[0].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[0].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[0].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + } + + if ((yyvsp[0].interm.type).arraySizes && parseContext.arrayQualifierError((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).qualifier)) + (yyvsp[0].interm.type).arraySizes = nullptr; + + parseContext.checkNoShaderLayouts((yyvsp[0].interm.type).loc, (yyvsp[-1].interm.type).shaderQualifiers); + (yyvsp[0].interm.type).shaderQualifiers.merge((yyvsp[-1].interm.type).shaderQualifiers); + parseContext.mergeQualifiers((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier, (yyvsp[-1].interm.type).qualifier, true); + parseContext.precisionQualifierCheck((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).basicType, (yyvsp[0].interm.type).qualifier); + + (yyval.interm.type) = (yyvsp[0].interm.type); + + if (! (yyval.interm.type).qualifier.isInterpolation() && + ((parseContext.language == EShLangVertex && (yyval.interm.type).qualifier.storage == EvqVaryingOut) || + (parseContext.language == EShLangFragment && (yyval.interm.type).qualifier.storage == EvqVaryingIn))) + (yyval.interm.type).qualifier.smooth = true; + } +#line 5552 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 132: +#line 1179 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "invariant"); + parseContext.profileRequires((yyval.interm.type).loc, ENoProfile, 120, 0, "invariant"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.invariant = true; + } +#line 5563 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 133: +#line 1188 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "smooth"); + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "smooth"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "smooth"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.smooth = true; + } +#line 5575 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 134: +#line 1195 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "flat"); + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "flat"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "flat"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.flat = true; + } +#line 5587 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 135: +#line 1203 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "noperspective"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective"); + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "noperspective"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.nopersp = true; + } +#line 5599 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 136: +#line 1210 "MachineIndependent/glslang.y" /* 
yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "__explicitInterpAMD"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 450, E_GL_AMD_shader_explicit_vertex_parameter, "explicit interpolation"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.explicitInterp = true; + } +#line 5611 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 137: +#line 1217 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "pervertexNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECompatibilityProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 0, E_GL_NV_fragment_shader_barycentric, "fragment shader barycentric"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.pervertexNV = true; + } +#line 5624 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 138: +#line 1225 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck((yyvsp[0].lex).loc, "perprimitiveNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangFragmentMask | EShLangMeshNVMask), "perprimitiveNV"); + // Fragment shader stage doesn't check for extension. So we explicitly add below extension check. + if (parseContext.language == EShLangFragment) + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_NV_mesh_shader, "perprimitiveNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.perPrimitiveNV = true; + } +#line 5639 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 139: +#line 1235 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // No need for profile version or extension check. Shader stage already checks both. + parseContext.globalCheck((yyvsp[0].lex).loc, "perviewNV"); + parseContext.requireStage((yyvsp[0].lex).loc, EShLangMeshNV, "perviewNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.perViewNV = true; + } +#line 5651 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 140: +#line 1242 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // No need for profile version or extension check. Shader stage already checks both. 
+ parseContext.globalCheck((yyvsp[0].lex).loc, "taskNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask), "taskNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.perTaskNV = true; + } +#line 5663 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 141: +#line 1253 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-1].interm.type); + } +#line 5671 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 142: +#line 1259 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5679 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 143: +#line 1262 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-2].interm.type); + (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers); + parseContext.mergeObjectLayoutQualifiers((yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false); + } +#line 5689 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 144: +#line 1269 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), *(yyvsp[0].lex).string); + } +#line 5698 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 145: +#line 1273 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[-2].lex).loc); + parseContext.setLayoutQualifier((yyvsp[-2].lex).loc, (yyval.interm.type), *(yyvsp[-2].lex).string, (yyvsp[0].interm.intermTypedNode)); + } +#line 5707 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 146: +#line 1277 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { // because "shared" is both an identifier and a keyword + (yyval.interm.type).init((yyvsp[0].lex).loc); + TString strShared("shared"); + parseContext.setLayoutQualifier((yyvsp[0].lex).loc, (yyval.interm.type), strShared); + } +#line 5717 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 147: +#line 1286 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyval.interm.type).loc, ECoreProfile | ECompatibilityProfile, 400, E_GL_ARB_gpu_shader5, "precise"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, "precise"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.noContraction = true; + } +#line 5728 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 148: +#line 1296 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5736 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 149: +#line 1299 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-1].interm.type); + if ((yyval.interm.type).basicType == EbtVoid) + (yyval.interm.type).basicType = (yyvsp[0].interm.type).basicType; + + (yyval.interm.type).shaderQualifiers.merge((yyvsp[0].interm.type).shaderQualifiers); + parseContext.mergeQualifiers((yyval.interm.type).loc, (yyval.interm.type).qualifier, (yyvsp[0].interm.type).qualifier, false); + } +#line 5749 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 150: +#line 1310 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5757 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 151: +#line 1313 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5765 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 152: +#line 1316 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.checkPrecisionQualifier((yyvsp[0].interm.type).loc, (yyvsp[0].interm.type).qualifier.precision); + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5774 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 153: +#line 1320 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // allow inheritance of storage qualifier from block declaration + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5783 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 154: +#line 1324 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // allow inheritance of storage qualifier from block declaration + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5792 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 155: +#line 1329 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // allow inheritance of storage qualifier from block declaration + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5801 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 156: +#line 1333 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + } +#line 5809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 157: +#line 1340 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqConst; // will later turn into EvqConstReadOnly, if the initializer is not constant + } +#line 5818 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 158: +#line 1344 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "inout"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqInOut; + } +#line 5828 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 159: +#line 1349 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "in"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + // whether this is a parameter "in" or a pipeline "in" will get sorted out a bit later + (yyval.interm.type).qualifier.storage = EvqIn; + } +#line 5839 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 160: +#line 1355 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "out"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + // whether this is a parameter "out" or a pipeline "out" will get sorted out a bit later + (yyval.interm.type).qualifier.storage = EvqOut; + } +#line 5850 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 161: +#line 1361 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 120, 0, "centroid"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 300, 0, "centroid"); + parseContext.globalCheck((yyvsp[0].lex).loc, "centroid"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.centroid = true; + } +#line 5862 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 162: +#line 1368 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "uniform"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqUniform; + } +#line 5872 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 163: +#line 1373 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "shared"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, 430, E_GL_ARB_compute_shader, "shared"); + parseContext.profileRequires((yyvsp[0].lex).loc, EEsProfile, 310, 0, "shared"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangComputeMask | EShLangMeshNVMask | EShLangTaskNVMask), "shared"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqShared; + } +#line 5885 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 164: +#line 1381 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "buffer"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqBuffer; + } +#line 5895 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 165: +#line 1387 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangVertex, "attribute"); + parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "attribute"); + parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "attribute"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "attribute"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "attribute"); + + parseContext.globalCheck((yyvsp[0].lex).loc, "attribute"); + + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqVaryingIn; + } +#line 5912 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 166: +#line 1399 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.checkDeprecated((yyvsp[0].lex).loc, ENoProfile, 130, "varying"); + parseContext.checkDeprecated((yyvsp[0].lex).loc, ECoreProfile, 130, "varying"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, ECoreProfile, 420, "varying"); + parseContext.requireNotRemoved((yyvsp[0].lex).loc, EEsProfile, 300, "varying"); + + parseContext.globalCheck((yyvsp[0].lex).loc, "varying"); + + (yyval.interm.type).init((yyvsp[0].lex).loc); + if (parseContext.language == EShLangVertex) + (yyval.interm.type).qualifier.storage = EvqVaryingOut; + else + (yyval.interm.type).qualifier.storage = EvqVaryingIn; + } +#line 5931 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 167: +#line 1413 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "patch"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangTessControlMask | EShLangTessEvaluationMask), "patch"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.patch = true; + } +#line 5942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 168: +#line 1419 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "sample"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.sample = true; + } 
+#line 5952 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 169: +#line 1424 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "hitAttributeNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqHitAttr; + } +#line 5965 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 170: +#line 1432 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "hitAttributeEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangIntersectMask | EShLangClosestHitMask + | EShLangAnyHitMask), "hitAttributeEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "hitAttributeNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqHitAttr; + } +#line 5978 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 171: +#line 1440 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayload; + } +#line 5991 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 172: +#line 1448 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayload; + } +#line 6004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 173: +#line 1456 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "rayPayloadInNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqPayloadIn; + } +#line 6017 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 174: +#line 1464 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "rayPayloadInEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangClosestHitMask | + EShLangAnyHitMask | EShLangMissMask), "rayPayloadInEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "rayPayloadInEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = 
EvqPayloadIn; + } +#line 6030 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 175: +#line 1472 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableData; + } +#line 6043 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 176: +#line 1480 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangRayGenMask | + EShLangClosestHitMask | EShLangMissMask | EShLangCallableMask), "callableDataEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableData; + } +#line 6056 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 177: +#line 1488 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInNV"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInNV"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_NV_ray_tracing, "callableDataInNV"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableDataIn; + } +#line 6068 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 178: +#line 1495 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.globalCheck((yyvsp[0].lex).loc, "callableDataInEXT"); + parseContext.requireStage((yyvsp[0].lex).loc, (EShLanguageMask)(EShLangCallableMask), "callableDataInEXT"); + parseContext.profileRequires((yyvsp[0].lex).loc, ECoreProfile, 460, E_GL_EXT_ray_tracing, "callableDataInEXT"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.storage = EvqCallableDataIn; + } +#line 6080 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 179: +#line 1502 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.coherent = true; + } +#line 6089 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 180: +#line 1506 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "devicecoherent"); + (yyval.interm.type).qualifier.devicecoherent = true; + } +#line 6099 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 181: +#line 1511 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "queuefamilycoherent"); + (yyval.interm.type).qualifier.queuefamilycoherent = true; + } +#line 6109 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 182: +#line 1516 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "workgroupcoherent"); + (yyval.interm.type).qualifier.workgroupcoherent = true; + } +#line 6119 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 183: +#line 1521 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "subgroupcoherent"); + (yyval.interm.type).qualifier.subgroupcoherent = true; + } +#line 6129 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 184: +#line 1526 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_KHR_memory_scope_semantics, "nonprivate"); + (yyval.interm.type).qualifier.nonprivate = true; + } +#line 6139 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 185: +#line 1531 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + parseContext.requireExtensions((yyvsp[0].lex).loc, 1, &E_GL_EXT_ray_tracing, "shadercallcoherent"); + (yyval.interm.type).qualifier.shadercallcoherent = true; + } +#line 6149 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 186: +#line 1536 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.volatil = true; + } +#line 6158 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 187: +#line 1540 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.restrict = true; + } +#line 6167 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 188: +#line 1544 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.readonly = true; + } +#line 6176 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 189: +#line 1548 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.writeonly = true; + } +#line 6185 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 190: +#line 1552 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.spvRemoved((yyvsp[0].lex).loc, "subroutine"); + parseContext.globalCheck((yyvsp[0].lex).loc, "subroutine"); + parseContext.unimplemented((yyvsp[0].lex).loc, "subroutine"); + (yyval.interm.type).init((yyvsp[0].lex).loc); + } +#line 6196 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 191: +#line 1558 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.spvRemoved((yyvsp[-3].lex).loc, "subroutine"); + parseContext.globalCheck((yyvsp[-3].lex).loc, "subroutine"); + parseContext.unimplemented((yyvsp[-3].lex).loc, "subroutine"); + (yyval.interm.type).init((yyvsp[-3].lex).loc); + } +#line 6207 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 192: +#line 1569 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc); + (yyval.interm.type).qualifier.nonUniform = true; + } +#line 6216 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 193: +#line 1576 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { 
+ // TODO + } +#line 6224 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 194: +#line 1579 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // TODO: 4.0 semantics: subroutines + // 1) make sure each identifier is a type declared earlier with SUBROUTINE + // 2) save all of the identifiers for future comparison with the declared function + } +#line 6234 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 195: +#line 1588 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[-1].interm.type); + (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type)); + (yyval.interm.type).typeParameters = (yyvsp[0].interm.typeParameters); + } +#line 6244 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 196: +#line 1593 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayOfArrayVersionCheck((yyvsp[0].interm).loc, (yyvsp[0].interm).arraySizes); + (yyval.interm.type) = (yyvsp[-2].interm.type); + (yyval.interm.type).qualifier.precision = parseContext.getDefaultPrecision((yyval.interm.type)); + (yyval.interm.type).typeParameters = (yyvsp[-1].interm.typeParameters); + (yyval.interm.type).arraySizes = (yyvsp[0].interm).arraySizes; + } +#line 6256 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 197: +#line 1603 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[-1].lex).loc; + (yyval.interm).arraySizes = new TArraySizes; + (yyval.interm).arraySizes->addInnerSize(); + } +#line 6266 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 198: +#line 1608 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm).loc = (yyvsp[-2].lex).loc; + (yyval.interm).arraySizes = new TArraySizes; + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size"); + (yyval.interm).arraySizes->addInnerSize(size); + } +#line 6279 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 199: +#line 1616 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-2].interm); + (yyval.interm).arraySizes->addInnerSize(); + } +#line 6288 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 200: +#line 1620 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm) = (yyvsp[-3].interm); + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[-1].interm.intermTypedNode)->getLoc(), (yyvsp[-1].interm.intermTypedNode), size, "array size"); + (yyval.interm).arraySizes->addInnerSize(size); + } +#line 6300 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 201: +#line 1630 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = (yyvsp[0].interm.typeParameters); + } +#line 6308 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 202: +#line 1633 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = 0; + } +#line 6316 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 203: +#line 1639 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = (yyvsp[-1].interm.typeParameters); + } +#line 6324 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 204: +#line 1645 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = new TArraySizes; + + 
TArraySize size; + parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter"); + (yyval.interm.typeParameters)->addInnerSize(size); + } +#line 6336 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 205: +#line 1652 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeParameters) = (yyvsp[-2].interm.typeParameters); + + TArraySize size; + parseContext.arraySizeCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode), size, "type parameter"); + (yyval.interm.typeParameters)->addInnerSize(size); + } +#line 6348 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 206: +#line 1662 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtVoid; + } +#line 6357 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 207: +#line 1666 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + } +#line 6366 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 208: +#line 1670 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + } +#line 6375 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 209: +#line 1674 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + } +#line 6385 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 210: +#line 1679 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + } +#line 6394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 211: +#line 1683 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(2); + } +#line 6404 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 212: +#line 1688 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(3); + } +#line 6414 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 213: +#line 1693 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(4); + } +#line 6424 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 214: +#line 1698 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + (yyval.interm.type).setVector(2); + } +#line 6434 "MachineIndependent/glslang_tab.cpp" /* 
yacc.c:1646 */ + break; + + case 215: +#line 1703 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + (yyval.interm.type).setVector(3); + } +#line 6444 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 216: +#line 1708 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtBool; + (yyval.interm.type).setVector(4); + } +#line 6454 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 217: +#line 1713 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(2); + } +#line 6464 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 218: +#line 1718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(3); + } +#line 6474 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 219: +#line 1723 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(4); + } +#line 6484 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 220: +#line 1728 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(2); + } +#line 6495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 221: +#line 1734 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(3); + } +#line 6506 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 222: +#line 1740 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fullIntegerCheck((yyvsp[0].lex).loc, "unsigned integer vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(4); + } +#line 6517 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 223: +#line 1746 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 6527 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 224: +#line 1751 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 6537 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + 
case 225: +#line 1756 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 6547 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 226: +#line 1761 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 6557 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 227: +#line 1766 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 3); + } +#line 6567 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 228: +#line 1771 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 4); + } +#line 6577 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 229: +#line 1776 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 2); + } +#line 6587 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 230: +#line 1781 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 6597 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 231: +#line 1786 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 4); + } +#line 6607 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 232: +#line 1791 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 2); + } +#line 6617 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 233: +#line 1796 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 3); + } +#line 6627 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 234: +#line 1801 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 6637 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 235: +#line 1807 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + } +#line 6649 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 236: +#line 1814 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "float16_t", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + } +#line 6659 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 237: +#line 1819 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + } +#line 6669 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 238: +#line 1824 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + } +#line 6679 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 239: +#line 1829 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + } +#line 6689 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 240: +#line 1834 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + } +#line 6699 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 241: +#line 1839 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + } +#line 6709 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 242: +#line 1844 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + } +#line 6719 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 243: +#line 1849 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; 
+ } +#line 6729 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 244: +#line 1854 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + } +#line 6739 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 245: +#line 1859 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + } +#line 6749 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 246: +#line 1864 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + } +#line 6759 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 247: +#line 1869 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(2); + } +#line 6772 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 248: +#line 1877 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(3); + } +#line 6785 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 249: +#line 1885 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double vector"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double vector"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(4); + } +#line 6798 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 250: +#line 1893 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setVector(2); + } +#line 6809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 251: +#line 1899 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setVector(3); + } +#line 6820 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 252: +#line 1905 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16ScalarVectorCheck((yyvsp[0].lex).loc, "half float vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setVector(4); + } +#line 6831 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 253: +#line 1911 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(2); + } +#line 6842 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 254: +#line 1917 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(3); + } +#line 6853 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 255: +#line 1923 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setVector(4); + } +#line 6864 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 256: +#line 1929 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(2); + } +#line 6875 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 257: +#line 1935 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(3); + } +#line 6886 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 258: +#line 1941 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setVector(4); + } +#line 6897 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 259: +#line 1947 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + (yyval.interm.type).setVector(2); + } +#line 6908 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 260: +#line 1953 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + (yyval.interm.type).setVector(3); + } +#line 6919 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 261: +#line 1959 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt8; + (yyval.interm.type).setVector(4); + } +#line 6930 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 262: +#line 1965 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + (yyval.interm.type).setVector(2); + } +#line 6941 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 263: +#line 1971 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + (yyval.interm.type).setVector(3); + } +#line 6952 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 264: +#line 1977 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt16; + 
(yyval.interm.type).setVector(4); + } +#line 6963 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 265: +#line 1983 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(2); + } +#line 6974 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 266: +#line 1989 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(3); + } +#line 6985 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 267: +#line 1995 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).setVector(4); + } +#line 6996 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 268: +#line 2001 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + (yyval.interm.type).setVector(2); + } +#line 7007 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 269: +#line 2007 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + (yyval.interm.type).setVector(3); + } +#line 7018 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 270: +#line 2013 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt64; + (yyval.interm.type).setVector(4); + } +#line 7029 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 271: +#line 2019 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + (yyval.interm.type).setVector(2); + } +#line 7040 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 272: +#line 2025 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + (yyval.interm.type).setVector(3); + } +#line 7051 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 273: +#line 2031 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int8ScalarVectorCheck((yyvsp[0].lex).loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint8; + (yyval.interm.type).setVector(4); + } +#line 7062 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 274: +#line 2037 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + (yyval.interm.type).setVector(2); + } +#line 7073 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 275: +#line 2043 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + (yyval.interm.type).setVector(3); + } +#line 7084 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 276: +#line 2049 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int16ScalarVectorCheck((yyvsp[0].lex).loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint16; + (yyval.interm.type).setVector(4); + } +#line 7095 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 277: +#line 2055 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(2); + } +#line 7106 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 278: +#line 2061 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(3); + } +#line 7117 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 279: +#line 2067 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitInt32Check((yyvsp[0].lex).loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).setVector(4); + } +#line 7128 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 280: +#line 2073 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + (yyval.interm.type).setVector(2); + } +#line 7139 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 281: +#line 2079 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + (yyval.interm.type).setVector(3); + } +#line 7150 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 282: +#line 2085 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.int64Check((yyvsp[0].lex).loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint64; + (yyval.interm.type).setVector(4); + } +#line 7161 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 283: +#line 2091 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7174 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 284: +#line 2099 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7187 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 285: +#line 2107 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7200 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 286: +#line 2115 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7213 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 287: +#line 2123 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7226 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 288: +#line 2131 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7239 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 289: +#line 2139 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7252 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 290: +#line 2147 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7265 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 291: +#line 2155 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7278 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 292: +#line 2163 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! 
parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7291 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 293: +#line 2171 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7304 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 294: +#line 2179 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ECoreProfile | ECompatibilityProfile, "double matrix"); + if (! parseContext.symbolTable.atBuiltInLevel()) + parseContext.doubleCheck((yyvsp[0].lex).loc, "double matrix"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7317 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 295: +#line 2187 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7328 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 296: +#line 2193 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7339 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 297: +#line 2199 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7350 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 298: +#line 2205 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7361 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 299: +#line 2211 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + 
(yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7372 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 300: +#line 2217 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7383 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 301: +#line 2223 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7394 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 302: +#line 2229 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7405 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 303: +#line 2235 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7416 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 304: +#line 2241 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7427 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 305: +#line 2247 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7438 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 306: +#line 2253 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16Check((yyvsp[0].lex).loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat16; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7449 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 307: +#line 2259 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7460 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 308: +#line 2265 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7471 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 309: +#line 2271 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7482 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 310: +#line 2277 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7493 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 311: +#line 2283 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7504 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 312: +#line 2289 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7515 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 313: +#line 2295 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7526 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 314: +#line 2301 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7537 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 315: +#line 2307 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", 
parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7548 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 316: +#line 2313 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7559 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 317: +#line 2319 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7570 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 318: +#line 2325 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat32Check((yyvsp[0].lex).loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7581 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 319: +#line 2331 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7592 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 320: +#line 2337 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7603 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 321: +#line 2343 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7614 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 322: +#line 2349 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 2); + } +#line 7625 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 323: +#line 2355 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 3); + } +#line 7636 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 324: +#line 2361 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(2, 4); + } +#line 7647 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 325: +#line 2367 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 2); + } +#line 7658 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 326: +#line 2373 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 3); + } +#line 7669 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 327: +#line 2379 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(3, 4); + } +#line 7680 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 328: +#line 2385 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 2); + } +#line 7691 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 329: +#line 2391 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 3); + } +#line 7702 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 330: +#line 2397 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.explicitFloat64Check((yyvsp[0].lex).loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtDouble; + (yyval.interm.type).setMatrix(4, 4); + } +#line 7713 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 
331: +#line 2403 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtAccStruct; + } +#line 7722 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 332: +#line 2407 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtAccStruct; + } +#line 7731 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 333: +#line 2411 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtRayQuery; + } +#line 7740 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 334: +#line 2415 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.vulkanRemoved((yyvsp[0].lex).loc, "atomic counter types"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtAtomicUint; + } +#line 7750 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 335: +#line 2420 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D); + } +#line 7760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 336: +#line 2426 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D); + } +#line 7770 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 337: +#line 2431 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd3D); + } +#line 7780 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 338: +#line 2436 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube); + } +#line 7790 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 339: +#line 2441 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, true); + } +#line 7800 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 340: +#line 2446 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube, false, true); + } +#line 7810 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 341: +#line 2451 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + 
(yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true); + } +#line 7820 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 342: +#line 2456 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, true); + } +#line 7830 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 343: +#line 2462 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D, false, true); + } +#line 7840 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 344: +#line 2467 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true); + } +#line 7850 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 345: +#line 2472 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd1D, true, true); + } +#line 7860 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 346: +#line 2477 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true); + } +#line 7870 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 347: +#line 2482 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdCube, true, true); + } +#line 7880 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 348: +#line 2487 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D); + } +#line 7891 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 349: +#line 2493 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D); + } +#line 7902 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 350: +#line 2499 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd3D); + } +#line 7913 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 351: +#line 2505 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube); + } +#line 7924 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 352: +#line 2511 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, false, true); + } +#line 7935 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 353: +#line 2517 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, true); + } +#line 7946 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 354: +#line 2523 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, false, true); + } +#line 7957 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 355: +#line 2529 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true); + } +#line 7968 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 356: +#line 2535 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true); + } +#line 7979 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 357: +#line 2541 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd1D, true, true); + } +#line 7990 "MachineIndependent/glslang_tab.cpp" /* 
yacc.c:1646 */ + break; + + case 358: +#line 2547 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, true); + } +#line 8001 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 359: +#line 2553 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true); + } +#line 8012 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 360: +#line 2559 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdCube, true, true); + } +#line 8023 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 361: +#line 2565 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd1D); + } +#line 8033 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 362: +#line 2571 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D); + } +#line 8043 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 363: +#line 2576 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd3D); + } +#line 8053 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 364: +#line 2581 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdCube); + } +#line 8063 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 365: +#line 2586 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D, true); + } +#line 8073 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 366: +#line 2591 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D); + } +#line 8083 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 
367: +#line 2596 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd3D); + } +#line 8093 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 368: +#line 2601 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdCube); + } +#line 8103 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 369: +#line 2607 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd1D, true); + } +#line 8113 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 370: +#line 2612 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdCube, true); + } +#line 8123 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 371: +#line 2617 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd1D); + } +#line 8133 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 372: +#line 2622 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd1D, true); + } +#line 8143 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 373: +#line 2627 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdCube, true); + } +#line 8153 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 374: +#line 2632 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube, true); + } +#line 8163 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 375: +#line 2637 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube, true); + } +#line 8173 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 376: +#line 2642 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube, true); + } +#line 8183 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 377: +#line 2648 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D, true); + } +#line 8193 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 378: +#line 2653 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D); + } +#line 8203 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 379: +#line 2658 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd3D); + } +#line 8213 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 380: +#line 2663 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true); + } +#line 8223 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 381: +#line 2668 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdCube); + } +#line 8233 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 382: +#line 2673 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D); + } +#line 8243 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 383: +#line 2678 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd3D); + } +#line 8253 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 384: +#line 2683 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdCube); + } +#line 8263 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 385: +#line 2688 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true); + } +#line 8273 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 386: +#line 2693 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D); + } +#line 8283 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 387: +#line 2698 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd3D); + } +#line 8293 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 388: +#line 2703 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdCube); + } +#line 8303 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 389: +#line 2708 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true); + } +#line 8313 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 390: +#line 2713 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setPureSampler(false); + } +#line 8323 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 391: +#line 2718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setPureSampler(true); + } +#line 8333 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 392: +#line 2724 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdRect); + } +#line 8343 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 393: +#line 2729 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdRect, false, true); + } +#line 8353 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 394: +#line 2734 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdRect); + } +#line 8364 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 395: +#line 2740 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdRect, false, true); + } +#line 8375 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 396: +#line 2746 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, 
parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdRect); + } +#line 8385 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 397: +#line 2751 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdRect); + } +#line 8395 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 398: +#line 2756 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, EsdBuffer); + } +#line 8405 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 399: +#line 2761 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, EsdBuffer); + } +#line 8416 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 400: +#line 2767 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, EsdBuffer); + } +#line 8426 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 401: +#line 2772 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, EsdBuffer); + } +#line 8436 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 402: +#line 2777 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, false, false, true); + } +#line 8446 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 403: +#line 2782 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, false, false, true); + } +#line 8457 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 404: +#line 2788 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D, false, false, true); + } +#line 8467 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 405: +#line 2793 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + 
(yyval.interm.type).sampler.set(EbtUint, Esd2D, false, false, true); + } +#line 8477 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 406: +#line 2798 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D, true, false, true); + } +#line 8487 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 407: +#line 2803 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float sampler", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat16, Esd2D, true, false, true); + } +#line 8498 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 408: +#line 2809 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtInt, Esd2D, true, false, true); + } +#line 8508 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 409: +#line 2814 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtUint, Esd2D, true, false, true); + } +#line 8518 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 410: +#line 2819 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D); + } +#line 8528 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 411: +#line 2824 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D); + } +#line 8539 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 412: +#line 2830 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D); + } +#line 8550 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 413: +#line 2836 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd3D); + } +#line 8561 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 414: +#line 2842 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube); + } +#line 8572 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 415: +#line 2848 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd1D, true); + } +#line 8582 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 416: +#line 2853 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd1D, true); + } +#line 8593 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 417: +#line 2859 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true); + } +#line 8604 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 418: +#line 2865 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdCube, true); + } +#line 8615 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 419: +#line 2871 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D); + } +#line 8625 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 420: +#line 2876 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd1D, true); + } +#line 8635 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 421: +#line 2881 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd1D); + } +#line 8645 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 422: +#line 2886 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + 
(yyval.interm.type).sampler.setTexture(EbtUint, Esd1D, true); + } +#line 8655 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 423: +#line 2891 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdRect); + } +#line 8665 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 424: +#line 2896 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdRect); + } +#line 8676 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 425: +#line 2902 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdRect); + } +#line 8686 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 426: +#line 2907 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdRect); + } +#line 8696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 427: +#line 2912 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, EsdBuffer); + } +#line 8706 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 428: +#line 2917 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, EsdBuffer); + } +#line 8717 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 429: +#line 2923 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, EsdBuffer); + } +#line 8727 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 430: +#line 2928 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, EsdBuffer); + } +#line 8737 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 431: +#line 2933 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, false, false, true); + } +#line 8747 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 432: +#line 2938 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, false, false, true); + } +#line 8758 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 433: +#line 2944 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, false, false, true); + } +#line 8768 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 434: +#line 2949 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, false, false, true); + } +#line 8778 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 435: +#line 2954 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat, Esd2D, true, false, true); + } +#line 8788 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 436: +#line 2959 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float texture", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtFloat16, Esd2D, true, false, true); + } +#line 8799 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 437: +#line 2965 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtInt, Esd2D, true, false, true); + } +#line 8809 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 438: +#line 2970 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setTexture(EbtUint, Esd2D, true, false, true); + } +#line 8819 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 439: +#line 2975 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D); + } +#line 8829 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 440: +#line 2980 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); 
+ (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D); + } +#line 8840 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 441: +#line 2986 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd1D); + } +#line 8850 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 442: +#line 2991 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd1D); + } +#line 8860 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 443: +#line 2996 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D); + } +#line 8870 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 444: +#line 3001 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D); + } +#line 8881 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 445: +#line 3007 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D); + } +#line 8891 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 446: +#line 3012 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D); + } +#line 8901 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 447: +#line 3017 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd3D); + } +#line 8911 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 448: +#line 3022 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd3D); + } +#line 8922 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 449: +#line 3028 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd3D); + } +#line 8932 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 450: +#line 3033 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd3D); + } +#line 8942 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 451: +#line 3038 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdRect); + } +#line 8952 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 452: +#line 3043 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdRect); + } +#line 8963 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 453: +#line 3049 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdRect); + } +#line 8973 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 454: +#line 3054 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdRect); + } +#line 8983 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 455: +#line 3059 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube); + } +#line 8993 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 456: +#line 3064 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube); + } +#line 9004 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 457: +#line 3070 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdCube); + } +#line 9014 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 458: +#line 3075 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdCube); + } +#line 9024 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 459: +#line 3080 "MachineIndependent/glslang.y" /* yacc.c:1646 
*/ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdBuffer); + } +#line 9034 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 460: +#line 3085 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdBuffer); + } +#line 9045 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 461: +#line 3091 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdBuffer); + } +#line 9055 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 462: +#line 3096 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdBuffer); + } +#line 9065 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 463: +#line 3101 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd1D, true); + } +#line 9075 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 464: +#line 3106 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd1D, true); + } +#line 9086 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 465: +#line 3112 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd1D, true); + } +#line 9096 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 466: +#line 3117 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd1D, true); + } +#line 9106 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 467: +#line 3122 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true); + } +#line 9116 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 468: +#line 3127 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", 
parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true); + } +#line 9127 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 469: +#line 3133 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true); + } +#line 9137 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 470: +#line 3138 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true); + } +#line 9147 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 471: +#line 3143 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, EsdCube, true); + } +#line 9157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 472: +#line 3148 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, EsdCube, true); + } +#line 9168 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 473: +#line 3154 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, EsdCube, true); + } +#line 9178 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 474: +#line 3159 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, EsdCube, true); + } +#line 9188 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 475: +#line 3164 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, false, false, true); + } +#line 9198 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 476: +#line 3169 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, false, false, true); + } +#line 9209 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 477: +#line 3175 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + 
(yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, false, false, true); + } +#line 9219 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 478: +#line 3180 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, false, false, true); + } +#line 9229 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 479: +#line 3185 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat, Esd2D, true, false, true); + } +#line 9239 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 480: +#line 3190 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float image", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtFloat16, Esd2D, true, false, true); + } +#line 9250 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 481: +#line 3196 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtInt, Esd2D, true, false, true); + } +#line 9260 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 482: +#line 3201 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setImage(EbtUint, Esd2D, true, false, true); + } +#line 9270 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 483: +#line 3206 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { // GL_OES_EGL_image_external + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D); + (yyval.interm.type).sampler.external = true; + } +#line 9281 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 484: +#line 3212 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { // GL_EXT_YUV_target + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.set(EbtFloat, Esd2D); + (yyval.interm.type).sampler.yuv = true; + } +#line 9292 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 485: +#line 3218 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat); + } +#line 9303 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 486: +#line 3224 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat, true); + } +#line 9314 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 487: +#line 3230 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat16); + } +#line 9326 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 488: +#line 3237 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.float16OpaqueCheck((yyvsp[0].lex).loc, "half float subpass input", parseContext.symbolTable.atBuiltInLevel()); + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtFloat16, true); + } +#line 9338 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 489: +#line 3244 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtInt); + } +#line 9349 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 490: +#line 3250 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtInt, true); + } +#line 9360 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 491: +#line 3256 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtUint); + } +#line 9371 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 492: +#line 3262 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[0].lex).loc, EShLangFragment, "subpass input"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtSampler; + (yyval.interm.type).sampler.setSubpass(EbtUint, true); + } +#line 9382 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 493: +#line 3268 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.fcoopmatCheck((yyvsp[0].lex).loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + 
(yyval.interm.type).basicType = EbtFloat; + (yyval.interm.type).coopmat = true; + } +#line 9393 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 494: +#line 3274 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.intcoopmatCheck((yyvsp[0].lex).loc, "icoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtInt; + (yyval.interm.type).coopmat = true; + } +#line 9404 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 495: +#line 3280 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.intcoopmatCheck((yyvsp[0].lex).loc, "ucoopmatNV", parseContext.symbolTable.atBuiltInLevel()); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtUint; + (yyval.interm.type).coopmat = true; + } +#line 9415 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 496: +#line 3287 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.type) = (yyvsp[0].interm.type); + (yyval.interm.type).qualifier.storage = parseContext.symbolTable.atGlobalLevel() ? EvqGlobal : EvqTemporary; + parseContext.structTypeCheck((yyval.interm.type).loc, (yyval.interm.type)); + } +#line 9425 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 497: +#line 3292 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // + // This is for user defined type names. The lexical phase looked up the + // type. + // + if (const TVariable* variable = ((yyvsp[0].lex).symbol)->getAsVariable()) { + const TType& structure = variable->getType(); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + (yyval.interm.type).basicType = EbtStruct; + (yyval.interm.type).userDef = &structure; + } else + parseContext.error((yyvsp[0].lex).loc, "expected type name", (yyvsp[0].lex).string->c_str(), ""); + } +#line 9443 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 498: +#line 3308 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "highp precision qualifier"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqHigh); + } +#line 9453 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 499: +#line 3313 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "mediump precision qualifier"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqMedium); + } +#line 9463 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 500: +#line 3318 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.profileRequires((yyvsp[0].lex).loc, ENoProfile, 130, 0, "lowp precision qualifier"); + (yyval.interm.type).init((yyvsp[0].lex).loc, parseContext.symbolTable.atGlobalLevel()); + parseContext.handlePrecisionQualifier((yyvsp[0].lex).loc, (yyval.interm.type).qualifier, EpqLow); + } +#line 9473 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 501: +#line 3326 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { 
parseContext.nestedStructCheck((yyvsp[-2].lex).loc); } +#line 9479 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 502: +#line 3326 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + TType* structure = new TType((yyvsp[-1].interm.typeList), *(yyvsp[-4].lex).string); + parseContext.structArrayCheck((yyvsp[-4].lex).loc, *structure); + TVariable* userTypeDef = new TVariable((yyvsp[-4].lex).string, *structure, true); + if (! parseContext.symbolTable.insert(*userTypeDef)) + parseContext.error((yyvsp[-4].lex).loc, "redefinition", (yyvsp[-4].lex).string->c_str(), "struct"); + (yyval.interm.type).init((yyvsp[-5].lex).loc); + (yyval.interm.type).basicType = EbtStruct; + (yyval.interm.type).userDef = structure; + --parseContext.structNestingLevel; + } +#line 9495 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 503: +#line 3337 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { parseContext.nestedStructCheck((yyvsp[-1].lex).loc); } +#line 9501 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 504: +#line 3337 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + TType* structure = new TType((yyvsp[-1].interm.typeList), TString("")); + (yyval.interm.type).init((yyvsp[-4].lex).loc); + (yyval.interm.type).basicType = EbtStruct; + (yyval.interm.type).userDef = structure; + --parseContext.structNestingLevel; + } +#line 9513 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 505: +#line 3347 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList) = (yyvsp[0].interm.typeList); + } +#line 9521 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 506: +#line 3350 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); + for (unsigned int i = 0; i < (yyvsp[0].interm.typeList)->size(); ++i) { + for (unsigned int j = 0; j < (yyval.interm.typeList)->size(); ++j) { + if ((*(yyval.interm.typeList))[j].type->getFieldName() == (*(yyvsp[0].interm.typeList))[i].type->getFieldName()) + parseContext.error((*(yyvsp[0].interm.typeList))[i].loc, "duplicate member name:", "", (*(yyvsp[0].interm.typeList))[i].type->getFieldName().c_str()); + } + (yyval.interm.typeList)->push_back((*(yyvsp[0].interm.typeList))[i]); + } + } +#line 9536 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 507: +#line 3363 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + } + + (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); + + parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType); + parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier); + + for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) { + TType type((yyvsp[-2].interm.type)); + type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName()); + type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes()); + 
type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes); + parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes()); + (*(yyval.interm.typeList))[i].type->shallowCopy(type); + } + } +#line 9563 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 508: +#line 3385 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.type).arraySizes) { + parseContext.profileRequires((yyvsp[-2].interm.type).loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed type"); + parseContext.profileRequires((yyvsp[-2].interm.type).loc, EEsProfile, 300, 0, "arrayed type"); + if (parseContext.isEsProfile()) + parseContext.arraySizeRequiredCheck((yyvsp[-2].interm.type).loc, *(yyvsp[-2].interm.type).arraySizes); + } + + (yyval.interm.typeList) = (yyvsp[-1].interm.typeList); + + parseContext.memberQualifierCheck((yyvsp[-3].interm.type)); + parseContext.voidErrorCheck((yyvsp[-2].interm.type).loc, (*(yyvsp[-1].interm.typeList))[0].type->getFieldName(), (yyvsp[-2].interm.type).basicType); + parseContext.mergeQualifiers((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).qualifier, (yyvsp[-3].interm.type).qualifier, true); + parseContext.precisionQualifierCheck((yyvsp[-2].interm.type).loc, (yyvsp[-2].interm.type).basicType, (yyvsp[-2].interm.type).qualifier); + + for (unsigned int i = 0; i < (yyval.interm.typeList)->size(); ++i) { + TType type((yyvsp[-2].interm.type)); + type.setFieldName((*(yyval.interm.typeList))[i].type->getFieldName()); + type.transferArraySizes((*(yyval.interm.typeList))[i].type->getArraySizes()); + type.copyArrayInnerSizes((yyvsp[-2].interm.type).arraySizes); + parseContext.arrayOfArrayVersionCheck((*(yyval.interm.typeList))[i].loc, type.getArraySizes()); + (*(yyval.interm.typeList))[i].type->shallowCopy(type); + } + } +#line 9592 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 509: +#line 3412 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList) = new TTypeList; + (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine)); + } +#line 9601 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 510: +#line 3416 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeList)->push_back((yyvsp[0].interm.typeLine)); + } +#line 9609 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 511: +#line 3422 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.typeLine).type = new TType(EbtVoid); + (yyval.interm.typeLine).loc = (yyvsp[0].lex).loc; + (yyval.interm.typeLine).type->setFieldName(*(yyvsp[0].lex).string); + } +#line 9619 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 512: +#line 3427 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.arrayOfArrayVersionCheck((yyvsp[-1].lex).loc, (yyvsp[0].interm).arraySizes); + + (yyval.interm.typeLine).type = new TType(EbtVoid); + (yyval.interm.typeLine).loc = (yyvsp[-1].lex).loc; + (yyval.interm.typeLine).type->setFieldName(*(yyvsp[-1].lex).string); + (yyval.interm.typeLine).type->transferArraySizes((yyvsp[0].interm).arraySizes); + } +#line 9632 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 513: +#line 3438 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 9640 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 514: +#line 3442 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + const 
char* initFeature = "{ } style initializers"; + parseContext.requireProfile((yyvsp[-2].lex).loc, ~EEsProfile, initFeature); + parseContext.profileRequires((yyvsp[-2].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + (yyval.interm.intermTypedNode) = (yyvsp[-1].interm.intermTypedNode); + } +#line 9651 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 515: +#line 3448 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + const char* initFeature = "{ } style initializers"; + parseContext.requireProfile((yyvsp[-3].lex).loc, ~EEsProfile, initFeature); + parseContext.profileRequires((yyvsp[-3].lex).loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, initFeature); + (yyval.interm.intermTypedNode) = (yyvsp[-2].interm.intermTypedNode); + } +#line 9662 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 516: +#line 3459 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate(0, (yyvsp[0].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)->getLoc()); + } +#line 9670 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 517: +#line 3462 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.intermTypedNode)); + } +#line 9678 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 518: +#line 3469 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9684 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 519: +#line 3473 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9690 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 520: +#line 3474 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9696 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 521: +#line 3480 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9702 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 522: +#line 3481 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9708 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 523: +#line 3482 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9714 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 524: +#line 3483 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9720 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 525: +#line 3484 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9726 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 526: +#line 3485 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9732 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 527: +#line 3486 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = 
(yyvsp[0].interm.intermNode); } +#line 9738 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 528: +#line 3488 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9744 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 529: +#line 3494 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "demote"); + parseContext.requireExtensions((yyvsp[-1].lex).loc, 1, &E_GL_EXT_demote_to_helper_invocation, "demote"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDemote, (yyvsp[-1].lex).loc); + } +#line 9754 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 530: +#line 3503 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = 0; } +#line 9760 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 531: +#line 3504 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } +#line 9769 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 532: +#line 3508 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } +#line 9778 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 533: +#line 3512 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-2].interm.intermNode) && (yyvsp[-2].interm.intermNode)->getAsAggregate()) + (yyvsp[-2].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); + (yyval.interm.intermNode) = (yyvsp[-2].interm.intermNode); + } +#line 9788 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 534: +#line 3520 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9794 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 535: +#line 3521 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); } +#line 9800 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 536: +#line 3525 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + ++parseContext.controlFlowNestingLevel; + } +#line 9808 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 537: +#line 3528 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + --parseContext.controlFlowNestingLevel; + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9817 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 538: +#line 3532 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } +#line 9827 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 539: +#line 3537 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9838 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 540: +#line 3546 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + } +#line 9846 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 
*/ + break; + + case 541: +#line 3549 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[-1].interm.intermNode) && (yyvsp[-1].interm.intermNode)->getAsAggregate()) + (yyvsp[-1].interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); + (yyval.interm.intermNode) = (yyvsp[-1].interm.intermNode); + } +#line 9856 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 542: +#line 3557 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[0].interm.intermNode)); + if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase || + (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence(0, (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case + } + } +#line 9869 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 543: +#line 3565 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[0].interm.intermNode) && (yyvsp[0].interm.intermNode)->getAsBranchNode() && ((yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpCase || + (yyvsp[0].interm.intermNode)->getAsBranchNode()->getFlowOp() == EOpDefault)) { + parseContext.wrapupSwitchSubsequence((yyvsp[-1].interm.intermNode) ? (yyvsp[-1].interm.intermNode)->getAsAggregate() : 0, (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = 0; // start a fresh subsequence for what's after this case + } else + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode)); + } +#line 9882 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 544: +#line 3576 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = 0; } +#line 9888 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 545: +#line 3577 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { (yyval.interm.intermNode) = static_cast<TIntermNode*>((yyvsp[-1].interm.intermTypedNode)); } +#line 9894 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 546: +#line 3581 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9902 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 547: +#line 3585 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.handleSelectionAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9911 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 548: +#line 3592 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.boolCheck((yyvsp[-4].lex).loc, (yyvsp[-2].interm.intermTypedNode)); + (yyval.interm.intermNode) = parseContext.intermediate.addSelection((yyvsp[-2].interm.intermTypedNode), (yyvsp[0].interm.nodePair), (yyvsp[-4].lex).loc); + } +#line 9920 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 549: +#line 3599 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermNode); + (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermNode); + } +#line 9929 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 550: +#line 3603 
"MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.nodePair).node1 = (yyvsp[0].interm.intermNode); + (yyval.interm.nodePair).node2 = 0; + } +#line 9938 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 551: +#line 3611 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + parseContext.boolCheck((yyvsp[0].interm.intermTypedNode)->getLoc(), (yyvsp[0].interm.intermTypedNode)); + } +#line 9947 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 552: +#line 3615 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.boolCheck((yyvsp[-2].lex).loc, (yyvsp[-3].interm.type)); + + TType type((yyvsp[-3].interm.type)); + TIntermNode* initNode = parseContext.declareVariable((yyvsp[-2].lex).loc, *(yyvsp[-2].lex).string, (yyvsp[-3].interm.type), 0, (yyvsp[0].interm.intermTypedNode)); + if (initNode) + (yyval.interm.intermTypedNode) = initNode->getAsTyped(); + else + (yyval.interm.intermTypedNode) = 0; + } +#line 9962 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 553: +#line 3628 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9970 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 554: +#line 3632 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.handleSwitchAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 9979 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 555: +#line 3639 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // start new switch sequence on the switch stack + ++parseContext.controlFlowNestingLevel; + ++parseContext.statementNestingLevel; + parseContext.switchSequenceStack.push_back(new TIntermSequence); + parseContext.switchLevel.push_back(parseContext.statementNestingLevel); + parseContext.symbolTable.push(); + } +#line 9992 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 556: +#line 3647 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.addSwitch((yyvsp[-7].lex).loc, (yyvsp[-5].interm.intermTypedNode), (yyvsp[-1].interm.intermNode) ? 
(yyvsp[-1].interm.intermNode)->getAsAggregate() : 0); + delete parseContext.switchSequenceStack.back(); + parseContext.switchSequenceStack.pop_back(); + parseContext.switchLevel.pop_back(); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } +#line 10006 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 557: +#line 3659 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + } +#line 10014 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 558: +#line 3662 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10022 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 559: +#line 3668 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error((yyvsp[-2].lex).loc, "cannot appear outside switch statement", "case", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error((yyvsp[-2].lex).loc, "cannot be nested inside control flow", "case", ""); + else { + parseContext.constantValueCheck((yyvsp[-1].interm.intermTypedNode), "case"); + parseContext.integerCheck((yyvsp[-1].interm.intermTypedNode), "case"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpCase, (yyvsp[-1].interm.intermTypedNode), (yyvsp[-2].lex).loc); + } + } +#line 10039 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 560: +#line 3680 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = 0; + if (parseContext.switchLevel.size() == 0) + parseContext.error((yyvsp[-1].lex).loc, "cannot appear outside switch statement", "default", ""); + else if (parseContext.switchLevel.back() != parseContext.statementNestingLevel) + parseContext.error((yyvsp[-1].lex).loc, "cannot be nested inside control flow", "default", ""); + else + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpDefault, (yyvsp[-1].lex).loc); + } +#line 10053 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 561: +#line 3692 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10061 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 562: +#line 3696 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.handleLoopAttributes(*(yyvsp[-1].interm.attributes), (yyvsp[0].interm.intermNode)); + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10070 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 563: +#line 3703 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if (! 
parseContext.limits.whileLoops) + parseContext.error((yyvsp[-1].lex).loc, "while loops not available", "limitation", ""); + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } +#line 10083 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 564: +#line 3711 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, true, (yyvsp[-5].lex).loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } +#line 10095 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 565: +#line 3718 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } +#line 10105 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 566: +#line 3723 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if (! parseContext.limits.whileLoops) + parseContext.error((yyvsp[-7].lex).loc, "do-while loops not available", "limitation", ""); + + parseContext.boolCheck((yyvsp[0].lex).loc, (yyvsp[-2].interm.intermTypedNode)); + + (yyval.interm.intermNode) = parseContext.intermediate.addLoop((yyvsp[-5].interm.intermNode), (yyvsp[-2].interm.intermTypedNode), 0, false, (yyvsp[-4].lex).loc); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } +#line 10121 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 567: +#line 3734 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.push(); + ++parseContext.loopNestingLevel; + ++parseContext.statementNestingLevel; + ++parseContext.controlFlowNestingLevel; + } +#line 10132 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 568: +#line 3740 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + (yyval.interm.intermNode) = parseContext.intermediate.makeAggregate((yyvsp[-3].interm.intermNode), (yyvsp[-5].lex).loc); + TIntermLoop* forLoop = parseContext.intermediate.addLoop((yyvsp[0].interm.intermNode), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node1), reinterpret_cast<TIntermTyped*>((yyvsp[-2].interm.nodePair).node2), true, (yyvsp[-6].lex).loc); + if (! 
parseContext.limits.nonInductiveForLoops) + parseContext.inductiveLoopCheck((yyvsp[-6].lex).loc, (yyvsp[-3].interm.intermNode), forLoop); + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyval.interm.intermNode), forLoop, (yyvsp[-6].lex).loc); + (yyval.interm.intermNode)->getAsAggregate()->setOperator(EOpSequence); + --parseContext.loopNestingLevel; + --parseContext.statementNestingLevel; + --parseContext.controlFlowNestingLevel; + } +#line 10149 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 569: +#line 3755 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10157 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 570: +#line 3758 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10165 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 571: +#line 3764 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = (yyvsp[0].interm.intermTypedNode); + } +#line 10173 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 572: +#line 3767 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermTypedNode) = 0; + } +#line 10181 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 573: +#line 3773 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.nodePair).node1 = (yyvsp[-1].interm.intermTypedNode); + (yyval.interm.nodePair).node2 = 0; + } +#line 10190 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 574: +#line 3777 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.nodePair).node1 = (yyvsp[-2].interm.intermTypedNode); + (yyval.interm.nodePair).node2 = (yyvsp[0].interm.intermTypedNode); + } +#line 10199 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 575: +#line 3784 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if (parseContext.loopNestingLevel <= 0) + parseContext.error((yyvsp[-1].lex).loc, "continue statement only allowed in loops", "", ""); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpContinue, (yyvsp[-1].lex).loc); + } +#line 10209 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 576: +#line 3789 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if (parseContext.loopNestingLevel + parseContext.switchSequenceStack.size() <= 0) + parseContext.error((yyvsp[-1].lex).loc, "break statement only allowed in switch and loops", "", ""); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpBreak, (yyvsp[-1].lex).loc); + } +#line 10219 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 577: +#line 3794 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpReturn, (yyvsp[-1].lex).loc); + if (parseContext.currentFunctionType->getBasicType() != EbtVoid) + parseContext.error((yyvsp[-1].lex).loc, "non-void function must return a value", "return", ""); + if (parseContext.inMain) + parseContext.postEntryPointReturn = true; + } +#line 10231 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 578: +#line 3801 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = parseContext.handleReturnValue((yyvsp[-2].lex).loc, (yyvsp[-1].interm.intermTypedNode)); + } +#line 10239 
"MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 579: +#line 3804 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireStage((yyvsp[-1].lex).loc, EShLangFragment, "discard"); + (yyval.interm.intermNode) = parseContext.intermediate.addBranch(EOpKill, (yyvsp[-1].lex).loc); + } +#line 10248 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 580: +#line 3813 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + parseContext.intermediate.setTreeRoot((yyval.interm.intermNode)); + } +#line 10257 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 581: +#line 3817 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + if ((yyvsp[0].interm.intermNode) != nullptr) { + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-1].interm.intermNode), (yyvsp[0].interm.intermNode)); + parseContext.intermediate.setTreeRoot((yyval.interm.intermNode)); + } + } +#line 10268 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 582: +#line 3826 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10276 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 583: +#line 3829 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.intermNode) = (yyvsp[0].interm.intermNode); + } +#line 10284 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 584: +#line 3833 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + parseContext.requireProfile((yyvsp[0].lex).loc, ~EEsProfile, "extraneous semicolon"); + parseContext.profileRequires((yyvsp[0].lex).loc, ~EEsProfile, 460, nullptr, "extraneous semicolon"); + (yyval.interm.intermNode) = nullptr; + } +#line 10294 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 585: +#line 3842 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyvsp[0].interm).function = parseContext.handleFunctionDeclarator((yyvsp[0].interm).loc, *(yyvsp[0].interm).function, false /* not prototype */); + (yyvsp[0].interm).intermNode = parseContext.handleFunctionDefinition((yyvsp[0].interm).loc, *(yyvsp[0].interm).function); + + // For ES 100 only, according to ES shading language 100 spec: A function + // body has a scope nested inside the function's definition. + if (parseContext.profile == EEsProfile && parseContext.version == 100) + { + parseContext.symbolTable.push(); + ++parseContext.statementNestingLevel; + } + } +#line 10311 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 586: +#line 3854 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + // May be best done as post process phase on intermediate code + if (parseContext.currentFunctionType->getBasicType() != EbtVoid && ! 
parseContext.functionReturnsValue) + parseContext.error((yyvsp[-2].interm).loc, "function does not return a value:", "", (yyvsp[-2].interm).function->getName().c_str()); + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + (yyval.interm.intermNode) = parseContext.intermediate.growAggregate((yyvsp[-2].interm).intermNode, (yyvsp[0].interm.intermNode)); + parseContext.intermediate.setAggregateOperator((yyval.interm.intermNode), EOpFunction, (yyvsp[-2].interm).function->getType(), (yyvsp[-2].interm).loc); + (yyval.interm.intermNode)->getAsAggregate()->setName((yyvsp[-2].interm).function->getMangledName().c_str()); + + // store the pragma information for debug and optimize and other vendor specific + // information. This information can be queried from the parse tree + (yyval.interm.intermNode)->getAsAggregate()->setOptimize(parseContext.contextPragma.optimize); + (yyval.interm.intermNode)->getAsAggregate()->setDebug(parseContext.contextPragma.debug); + (yyval.interm.intermNode)->getAsAggregate()->setPragmaTable(parseContext.contextPragma.pragmaTable); + + // Set currentFunctionType to empty pointer when goes outside of the function + parseContext.currentFunctionType = nullptr; + + // For ES 100 only, according to ES shading language 100 spec: A function + // body has a scope nested inside the function's definition. + if (parseContext.profile == EEsProfile && parseContext.version == 100) + { + parseContext.symbolTable.pop(&parseContext.defaultPrecision[0]); + --parseContext.statementNestingLevel; + } + } +#line 10342 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 587: +#line 3884 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = (yyvsp[-2].interm.attributes); + parseContext.requireExtensions((yyvsp[-4].lex).loc, 1, &E_GL_EXT_control_flow_attributes, "attribute"); + } +#line 10351 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 588: +#line 3890 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = (yyvsp[0].interm.attributes); + } +#line 10359 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 589: +#line 3893 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = parseContext.mergeAttributes((yyvsp[-2].interm.attributes), (yyvsp[0].interm.attributes)); + } +#line 10367 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 590: +#line 3898 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[0].lex).string); + } +#line 10375 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + case 591: +#line 3901 "MachineIndependent/glslang.y" /* yacc.c:1646 */ + { + (yyval.interm.attributes) = parseContext.makeAttributes(*(yyvsp[-3].lex).string, (yyvsp[-1].interm.intermTypedNode)); + } +#line 10383 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + break; + + +#line 10387 "MachineIndependent/glslang_tab.cpp" /* yacc.c:1646 */ + default: break; + } + /* User semantic actions sometimes alter yychar, and that requires + that yytoken be updated with the new translation. We take the + approach of translating immediately before every use of yytoken. + One alternative is translating here after every semantic action, + but that translation would be missed if the semantic action invokes + YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or + if it invokes YYBACKUP. 
In the case of YYABORT or YYACCEPT, an + incorrect destructor might then be invoked immediately. In the + case of YYERROR or YYBACKUP, subsequent parser actions might lead + to an incorrect destructor call or verbose syntax error message + before the lookahead is translated. */ + YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); + + YYPOPSTACK (yylen); + yylen = 0; + YY_STACK_PRINT (yyss, yyssp); + + *++yyvsp = yyval; + + /* Now 'shift' the result of the reduction. Determine what state + that goes to, based on the state we popped back to and the rule + number reduced by. */ + + yyn = yyr1[yyn]; + + yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; + if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) + yystate = yytable[yystate]; + else + yystate = yydefgoto[yyn - YYNTOKENS]; + + goto yynewstate; + + +/*--------------------------------------. +| yyerrlab -- here on detecting error. | +`--------------------------------------*/ +yyerrlab: + /* Make sure we have latest lookahead translation. See comments at + user semantic actions for why this is necessary. */ + yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); + + /* If not already recovering from an error, report this error. */ + if (!yyerrstatus) + { + ++yynerrs; +#if ! YYERROR_VERBOSE + yyerror (pParseContext, YY_("syntax error")); +#else +# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ + yyssp, yytoken) + { + char const *yymsgp = YY_("syntax error"); + int yysyntax_error_status; + yysyntax_error_status = YYSYNTAX_ERROR; + if (yysyntax_error_status == 0) + yymsgp = yymsg; + else if (yysyntax_error_status == 1) + { + if (yymsg != yymsgbuf) + YYSTACK_FREE (yymsg); + yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); + if (!yymsg) + { + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + yysyntax_error_status = 2; + } + else + { + yysyntax_error_status = YYSYNTAX_ERROR; + yymsgp = yymsg; + } + } + yyerror (pParseContext, yymsgp); + if (yysyntax_error_status == 2) + goto yyexhaustedlab; + } +# undef YYSYNTAX_ERROR +#endif + } + + + + if (yyerrstatus == 3) + { + /* If just tried and failed to reuse lookahead token after an + error, discard it. */ + + if (yychar <= YYEOF) + { + /* Return failure if at end of input. */ + if (yychar == YYEOF) + YYABORT; + } + else + { + yydestruct ("Error: discarding", + yytoken, &yylval, pParseContext); + yychar = YYEMPTY; + } + } + + /* Else will try to reuse lookahead token after shifting the error + token. */ + goto yyerrlab1; + + +/*---------------------------------------------------. +| yyerrorlab -- error raised explicitly by YYERROR. | +`---------------------------------------------------*/ +yyerrorlab: + + /* Pacify compilers like GCC when the user code never invokes + YYERROR and the label yyerrorlab therefore never appears in user + code. */ + if (/*CONSTCOND*/ 0) + goto yyerrorlab; + + /* Do not reclaim the symbols of the rule whose action triggered + this YYERROR. */ + YYPOPSTACK (yylen); + yylen = 0; + YY_STACK_PRINT (yyss, yyssp); + yystate = *yyssp; + goto yyerrlab1; + + +/*-------------------------------------------------------------. +| yyerrlab1 -- common code for both syntax error and YYERROR. | +`-------------------------------------------------------------*/ +yyerrlab1: + yyerrstatus = 3; /* Each real token shifted decrements this. 
*/ + + for (;;) + { + yyn = yypact[yystate]; + if (!yypact_value_is_default (yyn)) + { + yyn += YYTERROR; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) + { + yyn = yytable[yyn]; + if (0 < yyn) + break; + } + } + + /* Pop the current state because it cannot handle the error token. */ + if (yyssp == yyss) + YYABORT; + + + yydestruct ("Error: popping", + yystos[yystate], yyvsp, pParseContext); + YYPOPSTACK (1); + yystate = *yyssp; + YY_STACK_PRINT (yyss, yyssp); + } + + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN + *++yyvsp = yylval; + YY_IGNORE_MAYBE_UNINITIALIZED_END + + + /* Shift the error token. */ + YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); + + yystate = yyn; + goto yynewstate; + + +/*-------------------------------------. +| yyacceptlab -- YYACCEPT comes here. | +`-------------------------------------*/ +yyacceptlab: + yyresult = 0; + goto yyreturn; + +/*-----------------------------------. +| yyabortlab -- YYABORT comes here. | +`-----------------------------------*/ +yyabortlab: + yyresult = 1; + goto yyreturn; + +#if !defined yyoverflow || YYERROR_VERBOSE +/*-------------------------------------------------. +| yyexhaustedlab -- memory exhaustion comes here. | +`-------------------------------------------------*/ +yyexhaustedlab: + yyerror (pParseContext, YY_("memory exhausted")); + yyresult = 2; + /* Fall through. */ +#endif + +yyreturn: + if (yychar != YYEMPTY) + { + /* Make sure we have latest lookahead translation. See comments at + user semantic actions for why this is necessary. */ + yytoken = YYTRANSLATE (yychar); + yydestruct ("Cleanup: discarding lookahead", + yytoken, &yylval, pParseContext); + } + /* Do not reclaim the symbols of the rule whose action triggered + this YYABORT or YYACCEPT. */ + YYPOPSTACK (yylen); + YY_STACK_PRINT (yyss, yyssp); + while (yyssp != yyss) + { + yydestruct ("Cleanup: popping", + yystos[*yyssp], yyvsp, pParseContext); + YYPOPSTACK (1); + } +#ifndef yyoverflow + if (yyss != yyssa) + YYSTACK_FREE (yyss); +#endif +#if YYERROR_VERBOSE + if (yymsg != yymsgbuf) + YYSTACK_FREE (yymsg); +#endif + return yyresult; +} +#line 3906 "MachineIndependent/glslang.y" /* yacc.c:1906 */ + diff --git a/src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp.h b/src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp.h new file mode 100644 index 0000000..78b72a2 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/glslang_tab.cpp.h @@ -0,0 +1,521 @@ +/* A Bison parser, made by GNU Bison 3.0.4. */ + +/* Bison interface for Yacc-like parsers in C + + Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . 
*/ + +/* As a special exception, you may create a larger work that contains + part or all of the Bison parser skeleton and distribute that work + under terms of your choice, so long as that work isn't itself a + parser generator using the skeleton or a modified version thereof + as a parser skeleton. Alternatively, if you modify or redistribute + the parser skeleton itself, you may (at your option) remove this + special exception, which will cause the skeleton and the resulting + Bison output files to be licensed under the GNU General Public + License without this special exception. + + This special exception was added by the Free Software Foundation in + version 2.2 of Bison. */ + +#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED +# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED +/* Debug traces. */ +#ifndef YYDEBUG +# define YYDEBUG 1 +#endif +#if YYDEBUG +extern int yydebug; +#endif + +/* Token type. */ +#ifndef YYTOKENTYPE +# define YYTOKENTYPE + enum yytokentype + { + CONST = 258, + BOOL = 259, + INT = 260, + UINT = 261, + FLOAT = 262, + BVEC2 = 263, + BVEC3 = 264, + BVEC4 = 265, + IVEC2 = 266, + IVEC3 = 267, + IVEC4 = 268, + UVEC2 = 269, + UVEC3 = 270, + UVEC4 = 271, + VEC2 = 272, + VEC3 = 273, + VEC4 = 274, + MAT2 = 275, + MAT3 = 276, + MAT4 = 277, + MAT2X2 = 278, + MAT2X3 = 279, + MAT2X4 = 280, + MAT3X2 = 281, + MAT3X3 = 282, + MAT3X4 = 283, + MAT4X2 = 284, + MAT4X3 = 285, + MAT4X4 = 286, + SAMPLER2D = 287, + SAMPLER3D = 288, + SAMPLERCUBE = 289, + SAMPLER2DSHADOW = 290, + SAMPLERCUBESHADOW = 291, + SAMPLER2DARRAY = 292, + SAMPLER2DARRAYSHADOW = 293, + ISAMPLER2D = 294, + ISAMPLER3D = 295, + ISAMPLERCUBE = 296, + ISAMPLER2DARRAY = 297, + USAMPLER2D = 298, + USAMPLER3D = 299, + USAMPLERCUBE = 300, + USAMPLER2DARRAY = 301, + SAMPLER = 302, + SAMPLERSHADOW = 303, + TEXTURE2D = 304, + TEXTURE3D = 305, + TEXTURECUBE = 306, + TEXTURE2DARRAY = 307, + ITEXTURE2D = 308, + ITEXTURE3D = 309, + ITEXTURECUBE = 310, + ITEXTURE2DARRAY = 311, + UTEXTURE2D = 312, + UTEXTURE3D = 313, + UTEXTURECUBE = 314, + UTEXTURE2DARRAY = 315, + ATTRIBUTE = 316, + VARYING = 317, + FLOAT16_T = 318, + FLOAT32_T = 319, + DOUBLE = 320, + FLOAT64_T = 321, + INT64_T = 322, + UINT64_T = 323, + INT32_T = 324, + UINT32_T = 325, + INT16_T = 326, + UINT16_T = 327, + INT8_T = 328, + UINT8_T = 329, + I64VEC2 = 330, + I64VEC3 = 331, + I64VEC4 = 332, + U64VEC2 = 333, + U64VEC3 = 334, + U64VEC4 = 335, + I32VEC2 = 336, + I32VEC3 = 337, + I32VEC4 = 338, + U32VEC2 = 339, + U32VEC3 = 340, + U32VEC4 = 341, + I16VEC2 = 342, + I16VEC3 = 343, + I16VEC4 = 344, + U16VEC2 = 345, + U16VEC3 = 346, + U16VEC4 = 347, + I8VEC2 = 348, + I8VEC3 = 349, + I8VEC4 = 350, + U8VEC2 = 351, + U8VEC3 = 352, + U8VEC4 = 353, + DVEC2 = 354, + DVEC3 = 355, + DVEC4 = 356, + DMAT2 = 357, + DMAT3 = 358, + DMAT4 = 359, + F16VEC2 = 360, + F16VEC3 = 361, + F16VEC4 = 362, + F16MAT2 = 363, + F16MAT3 = 364, + F16MAT4 = 365, + F32VEC2 = 366, + F32VEC3 = 367, + F32VEC4 = 368, + F32MAT2 = 369, + F32MAT3 = 370, + F32MAT4 = 371, + F64VEC2 = 372, + F64VEC3 = 373, + F64VEC4 = 374, + F64MAT2 = 375, + F64MAT3 = 376, + F64MAT4 = 377, + DMAT2X2 = 378, + DMAT2X3 = 379, + DMAT2X4 = 380, + DMAT3X2 = 381, + DMAT3X3 = 382, + DMAT3X4 = 383, + DMAT4X2 = 384, + DMAT4X3 = 385, + DMAT4X4 = 386, + F16MAT2X2 = 387, + F16MAT2X3 = 388, + F16MAT2X4 = 389, + F16MAT3X2 = 390, + F16MAT3X3 = 391, + F16MAT3X4 = 392, + F16MAT4X2 = 393, + F16MAT4X3 = 394, + F16MAT4X4 = 395, + F32MAT2X2 = 396, + F32MAT2X3 = 397, + F32MAT2X4 = 398, + F32MAT3X2 = 399, + F32MAT3X3 = 400, + F32MAT3X4 
= 401, + F32MAT4X2 = 402, + F32MAT4X3 = 403, + F32MAT4X4 = 404, + F64MAT2X2 = 405, + F64MAT2X3 = 406, + F64MAT2X4 = 407, + F64MAT3X2 = 408, + F64MAT3X3 = 409, + F64MAT3X4 = 410, + F64MAT4X2 = 411, + F64MAT4X3 = 412, + F64MAT4X4 = 413, + ATOMIC_UINT = 414, + ACCSTRUCTNV = 415, + ACCSTRUCTEXT = 416, + RAYQUERYEXT = 417, + FCOOPMATNV = 418, + ICOOPMATNV = 419, + UCOOPMATNV = 420, + SAMPLERCUBEARRAY = 421, + SAMPLERCUBEARRAYSHADOW = 422, + ISAMPLERCUBEARRAY = 423, + USAMPLERCUBEARRAY = 424, + SAMPLER1D = 425, + SAMPLER1DARRAY = 426, + SAMPLER1DARRAYSHADOW = 427, + ISAMPLER1D = 428, + SAMPLER1DSHADOW = 429, + SAMPLER2DRECT = 430, + SAMPLER2DRECTSHADOW = 431, + ISAMPLER2DRECT = 432, + USAMPLER2DRECT = 433, + SAMPLERBUFFER = 434, + ISAMPLERBUFFER = 435, + USAMPLERBUFFER = 436, + SAMPLER2DMS = 437, + ISAMPLER2DMS = 438, + USAMPLER2DMS = 439, + SAMPLER2DMSARRAY = 440, + ISAMPLER2DMSARRAY = 441, + USAMPLER2DMSARRAY = 442, + SAMPLEREXTERNALOES = 443, + SAMPLEREXTERNAL2DY2YEXT = 444, + ISAMPLER1DARRAY = 445, + USAMPLER1D = 446, + USAMPLER1DARRAY = 447, + F16SAMPLER1D = 448, + F16SAMPLER2D = 449, + F16SAMPLER3D = 450, + F16SAMPLER2DRECT = 451, + F16SAMPLERCUBE = 452, + F16SAMPLER1DARRAY = 453, + F16SAMPLER2DARRAY = 454, + F16SAMPLERCUBEARRAY = 455, + F16SAMPLERBUFFER = 456, + F16SAMPLER2DMS = 457, + F16SAMPLER2DMSARRAY = 458, + F16SAMPLER1DSHADOW = 459, + F16SAMPLER2DSHADOW = 460, + F16SAMPLER1DARRAYSHADOW = 461, + F16SAMPLER2DARRAYSHADOW = 462, + F16SAMPLER2DRECTSHADOW = 463, + F16SAMPLERCUBESHADOW = 464, + F16SAMPLERCUBEARRAYSHADOW = 465, + IMAGE1D = 466, + IIMAGE1D = 467, + UIMAGE1D = 468, + IMAGE2D = 469, + IIMAGE2D = 470, + UIMAGE2D = 471, + IMAGE3D = 472, + IIMAGE3D = 473, + UIMAGE3D = 474, + IMAGE2DRECT = 475, + IIMAGE2DRECT = 476, + UIMAGE2DRECT = 477, + IMAGECUBE = 478, + IIMAGECUBE = 479, + UIMAGECUBE = 480, + IMAGEBUFFER = 481, + IIMAGEBUFFER = 482, + UIMAGEBUFFER = 483, + IMAGE1DARRAY = 484, + IIMAGE1DARRAY = 485, + UIMAGE1DARRAY = 486, + IMAGE2DARRAY = 487, + IIMAGE2DARRAY = 488, + UIMAGE2DARRAY = 489, + IMAGECUBEARRAY = 490, + IIMAGECUBEARRAY = 491, + UIMAGECUBEARRAY = 492, + IMAGE2DMS = 493, + IIMAGE2DMS = 494, + UIMAGE2DMS = 495, + IMAGE2DMSARRAY = 496, + IIMAGE2DMSARRAY = 497, + UIMAGE2DMSARRAY = 498, + F16IMAGE1D = 499, + F16IMAGE2D = 500, + F16IMAGE3D = 501, + F16IMAGE2DRECT = 502, + F16IMAGECUBE = 503, + F16IMAGE1DARRAY = 504, + F16IMAGE2DARRAY = 505, + F16IMAGECUBEARRAY = 506, + F16IMAGEBUFFER = 507, + F16IMAGE2DMS = 508, + F16IMAGE2DMSARRAY = 509, + TEXTURECUBEARRAY = 510, + ITEXTURECUBEARRAY = 511, + UTEXTURECUBEARRAY = 512, + TEXTURE1D = 513, + ITEXTURE1D = 514, + UTEXTURE1D = 515, + TEXTURE1DARRAY = 516, + ITEXTURE1DARRAY = 517, + UTEXTURE1DARRAY = 518, + TEXTURE2DRECT = 519, + ITEXTURE2DRECT = 520, + UTEXTURE2DRECT = 521, + TEXTUREBUFFER = 522, + ITEXTUREBUFFER = 523, + UTEXTUREBUFFER = 524, + TEXTURE2DMS = 525, + ITEXTURE2DMS = 526, + UTEXTURE2DMS = 527, + TEXTURE2DMSARRAY = 528, + ITEXTURE2DMSARRAY = 529, + UTEXTURE2DMSARRAY = 530, + F16TEXTURE1D = 531, + F16TEXTURE2D = 532, + F16TEXTURE3D = 533, + F16TEXTURE2DRECT = 534, + F16TEXTURECUBE = 535, + F16TEXTURE1DARRAY = 536, + F16TEXTURE2DARRAY = 537, + F16TEXTURECUBEARRAY = 538, + F16TEXTUREBUFFER = 539, + F16TEXTURE2DMS = 540, + F16TEXTURE2DMSARRAY = 541, + SUBPASSINPUT = 542, + SUBPASSINPUTMS = 543, + ISUBPASSINPUT = 544, + ISUBPASSINPUTMS = 545, + USUBPASSINPUT = 546, + USUBPASSINPUTMS = 547, + F16SUBPASSINPUT = 548, + F16SUBPASSINPUTMS = 549, + LEFT_OP = 550, + RIGHT_OP = 551, + INC_OP = 552, + DEC_OP = 553, + LE_OP = 554, 
+ GE_OP = 555, + EQ_OP = 556, + NE_OP = 557, + AND_OP = 558, + OR_OP = 559, + XOR_OP = 560, + MUL_ASSIGN = 561, + DIV_ASSIGN = 562, + ADD_ASSIGN = 563, + MOD_ASSIGN = 564, + LEFT_ASSIGN = 565, + RIGHT_ASSIGN = 566, + AND_ASSIGN = 567, + XOR_ASSIGN = 568, + OR_ASSIGN = 569, + SUB_ASSIGN = 570, + STRING_LITERAL = 571, + LEFT_PAREN = 572, + RIGHT_PAREN = 573, + LEFT_BRACKET = 574, + RIGHT_BRACKET = 575, + LEFT_BRACE = 576, + RIGHT_BRACE = 577, + DOT = 578, + COMMA = 579, + COLON = 580, + EQUAL = 581, + SEMICOLON = 582, + BANG = 583, + DASH = 584, + TILDE = 585, + PLUS = 586, + STAR = 587, + SLASH = 588, + PERCENT = 589, + LEFT_ANGLE = 590, + RIGHT_ANGLE = 591, + VERTICAL_BAR = 592, + CARET = 593, + AMPERSAND = 594, + QUESTION = 595, + INVARIANT = 596, + HIGH_PRECISION = 597, + MEDIUM_PRECISION = 598, + LOW_PRECISION = 599, + PRECISION = 600, + PACKED = 601, + RESOURCE = 602, + SUPERP = 603, + FLOATCONSTANT = 604, + INTCONSTANT = 605, + UINTCONSTANT = 606, + BOOLCONSTANT = 607, + IDENTIFIER = 608, + TYPE_NAME = 609, + CENTROID = 610, + IN = 611, + OUT = 612, + INOUT = 613, + STRUCT = 614, + VOID = 615, + WHILE = 616, + BREAK = 617, + CONTINUE = 618, + DO = 619, + ELSE = 620, + FOR = 621, + IF = 622, + DISCARD = 623, + RETURN = 624, + SWITCH = 625, + CASE = 626, + DEFAULT = 627, + UNIFORM = 628, + SHARED = 629, + BUFFER = 630, + FLAT = 631, + SMOOTH = 632, + LAYOUT = 633, + DOUBLECONSTANT = 634, + INT16CONSTANT = 635, + UINT16CONSTANT = 636, + FLOAT16CONSTANT = 637, + INT32CONSTANT = 638, + UINT32CONSTANT = 639, + INT64CONSTANT = 640, + UINT64CONSTANT = 641, + SUBROUTINE = 642, + DEMOTE = 643, + PAYLOADNV = 644, + PAYLOADINNV = 645, + HITATTRNV = 646, + CALLDATANV = 647, + CALLDATAINNV = 648, + PAYLOADEXT = 649, + PAYLOADINEXT = 650, + HITATTREXT = 651, + CALLDATAEXT = 652, + CALLDATAINEXT = 653, + PATCH = 654, + SAMPLE = 655, + NONUNIFORM = 656, + COHERENT = 657, + VOLATILE = 658, + RESTRICT = 659, + READONLY = 660, + WRITEONLY = 661, + DEVICECOHERENT = 662, + QUEUEFAMILYCOHERENT = 663, + WORKGROUPCOHERENT = 664, + SUBGROUPCOHERENT = 665, + NONPRIVATE = 666, + SHADERCALLCOHERENT = 667, + NOPERSPECTIVE = 668, + EXPLICITINTERPAMD = 669, + PERVERTEXNV = 670, + PERPRIMITIVENV = 671, + PERVIEWNV = 672, + PERTASKNV = 673, + PRECISE = 674 + }; +#endif + +/* Value type. */ +#if ! defined YYSTYPE && ! 
defined YYSTYPE_IS_DECLARED + +union YYSTYPE +{ +#line 97 "MachineIndependent/glslang.y" /* yacc.c:1909 */ + + struct { + glslang::TSourceLoc loc; + union { + glslang::TString *string; + int i; + unsigned int u; + long long i64; + unsigned long long u64; + bool b; + double d; + }; + glslang::TSymbol* symbol; + } lex; + struct { + glslang::TSourceLoc loc; + glslang::TOperator op; + union { + TIntermNode* intermNode; + glslang::TIntermNodePair nodePair; + glslang::TIntermTyped* intermTypedNode; + glslang::TAttributes* attributes; + }; + union { + glslang::TPublicType type; + glslang::TFunction* function; + glslang::TParameter param; + glslang::TTypeLoc typeLine; + glslang::TTypeList* typeList; + glslang::TArraySizes* arraySizes; + glslang::TIdentifierList* identifierList; + }; + glslang::TArraySizes* typeParameters; + } interm; + +#line 510 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */ +}; + +typedef union YYSTYPE YYSTYPE; +# define YYSTYPE_IS_TRIVIAL 1 +# define YYSTYPE_IS_DECLARED 1 +#endif + + + +int yyparse (glslang::TParseContext* pParseContext); + +#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */ diff --git a/src/third_party/glslang/glslang/MachineIndependent/intermOut.cpp b/src/third_party/glslang/glslang/MachineIndependent/intermOut.cpp new file mode 100644 index 0000000..f23a705 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/intermOut.cpp @@ -0,0 +1,1565 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2012-2016 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +#include "localintermediate.h" +#include "../Include/InfoSink.h" + +#ifdef _MSC_VER +#include <cfloat> +#else +#include <cmath> +#endif +#include <cstdint> + +namespace { + +bool IsInfinity(double x) { +#ifdef _MSC_VER + switch (_fpclass(x)) { + case _FPCLASS_NINF: + case _FPCLASS_PINF: + return true; + default: + return false; + } +#else + return std::isinf(x); +#endif +} + +bool IsNan(double x) { +#ifdef _MSC_VER + switch (_fpclass(x)) { + case _FPCLASS_SNAN: + case _FPCLASS_QNAN: + return true; + default: + return false; + } +#else + return std::isnan(x); +#endif +} + +} + +namespace glslang { + +// +// Two purposes: +// 1. Show an example of how to iterate tree. Functions can +// also directly call Traverse() on children themselves to +// have finer grained control over the process than shown here. +// See the last function for how to get started. +// 2. Print out a text based description of the tree. +// + +// +// Use this class to carry along data from node to node in +// the traversal +// +class TOutputTraverser : public TIntermTraverser { +public: + TOutputTraverser(TInfoSink& i) : infoSink(i), extraOutput(NoExtraOutput) { } + + enum EExtraOutput { + NoExtraOutput, + BinaryDoubleOutput + }; + void setDoubleOutput(EExtraOutput extra) { extraOutput = extra; } + + virtual bool visitBinary(TVisit, TIntermBinary* node); + virtual bool visitUnary(TVisit, TIntermUnary* node); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + virtual bool visitSelection(TVisit, TIntermSelection* node); + virtual void visitConstantUnion(TIntermConstantUnion* node); + virtual void visitSymbol(TIntermSymbol* node); + virtual bool visitLoop(TVisit, TIntermLoop* node); + virtual bool visitBranch(TVisit, TIntermBranch* node); + virtual bool visitSwitch(TVisit, TIntermSwitch* node); + + TInfoSink& infoSink; +protected: + TOutputTraverser(TOutputTraverser&); + TOutputTraverser& operator=(TOutputTraverser&); + + EExtraOutput extraOutput; +}; + +// +// Helper functions for printing, not part of traversing. +// + +static void OutputTreeText(TInfoSink& infoSink, const TIntermNode* node, const int depth) +{ + int i; + + infoSink.debug << node->getLoc().string << ":"; + if (node->getLoc().line) + infoSink.debug << node->getLoc().line; + else + infoSink.debug << "? "; + + for (i = 0; i < depth; ++i) + infoSink.debug << " "; +} + +// +// The rest of the file are the traversal functions. The last one +// is the one that starts the traversal. +// +// Return true from interior nodes to have the external traversal +// continue on to children. If you process children yourself, +// return false.
+// + +bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + switch (node->getOp()) { + case EOpAssign: out.debug << "move second child to first child"; break; + case EOpAddAssign: out.debug << "add second child into first child"; break; + case EOpSubAssign: out.debug << "subtract second child into first child"; break; + case EOpMulAssign: out.debug << "multiply second child into first child"; break; + case EOpVectorTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break; + case EOpVectorTimesScalarAssign: out.debug << "vector scale second child into first child"; break; + case EOpMatrixTimesScalarAssign: out.debug << "matrix scale second child into first child"; break; + case EOpMatrixTimesMatrixAssign: out.debug << "matrix mult second child into first child"; break; + case EOpDivAssign: out.debug << "divide second child into first child"; break; + case EOpModAssign: out.debug << "mod second child into first child"; break; + case EOpAndAssign: out.debug << "and second child into first child"; break; + case EOpInclusiveOrAssign: out.debug << "or second child into first child"; break; + case EOpExclusiveOrAssign: out.debug << "exclusive or second child into first child"; break; + case EOpLeftShiftAssign: out.debug << "left shift second child into first child"; break; + case EOpRightShiftAssign: out.debug << "right shift second child into first child"; break; + + case EOpIndexDirect: out.debug << "direct index"; break; + case EOpIndexIndirect: out.debug << "indirect index"; break; + case EOpIndexDirectStruct: + { + bool reference = node->getLeft()->getType().isReference(); + const TTypeList *members = reference ? node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct(); + out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName(); + out.debug << ": direct index for structure"; break; + } + case EOpVectorSwizzle: out.debug << "vector swizzle"; break; + case EOpMatrixSwizzle: out.debug << "matrix swizzle"; break; + + case EOpAdd: out.debug << "add"; break; + case EOpSub: out.debug << "subtract"; break; + case EOpMul: out.debug << "component-wise multiply"; break; + case EOpDiv: out.debug << "divide"; break; + case EOpMod: out.debug << "mod"; break; + case EOpRightShift: out.debug << "right-shift"; break; + case EOpLeftShift: out.debug << "left-shift"; break; + case EOpAnd: out.debug << "bitwise and"; break; + case EOpInclusiveOr: out.debug << "inclusive-or"; break; + case EOpExclusiveOr: out.debug << "exclusive-or"; break; + case EOpEqual: out.debug << "Compare Equal"; break; + case EOpNotEqual: out.debug << "Compare Not Equal"; break; + case EOpLessThan: out.debug << "Compare Less Than"; break; + case EOpGreaterThan: out.debug << "Compare Greater Than"; break; + case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break; + case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break; + case EOpVectorEqual: out.debug << "Equal"; break; + case EOpVectorNotEqual: out.debug << "NotEqual"; break; + + case EOpVectorTimesScalar: out.debug << "vector-scale"; break; + case EOpVectorTimesMatrix: out.debug << "vector-times-matrix"; break; + case EOpMatrixTimesVector: out.debug << "matrix-times-vector"; break; + case EOpMatrixTimesScalar: out.debug << "matrix-scale"; break; + case EOpMatrixTimesMatrix: out.debug << "matrix-multiply"; break; + + case 
EOpLogicalOr: out.debug << "logical-or"; break; + case EOpLogicalXor: out.debug << "logical-xor"; break; + case EOpLogicalAnd: out.debug << "logical-and"; break; + + case EOpAbsDifference: out.debug << "absoluteDifference"; break; + case EOpAddSaturate: out.debug << "addSaturate"; break; + case EOpSubSaturate: out.debug << "subtractSaturate"; break; + case EOpAverage: out.debug << "average"; break; + case EOpAverageRounded: out.debug << "averageRounded"; break; + case EOpMul32x16: out.debug << "multiply32x16"; break; + + default: out.debug << "<unknown op>"; + } + + out.debug << " (" << node->getCompleteString() << ")"; + + out.debug << "\n"; + + return true; +} + +bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + switch (node->getOp()) { + case EOpNegative: out.debug << "Negate value"; break; + case EOpVectorLogicalNot: + case EOpLogicalNot: out.debug << "Negate conditional"; break; + case EOpBitwiseNot: out.debug << "Bitwise not"; break; + + case EOpPostIncrement: out.debug << "Post-Increment"; break; + case EOpPostDecrement: out.debug << "Post-Decrement"; break; + case EOpPreIncrement: out.debug << "Pre-Increment"; break; + case EOpPreDecrement: out.debug << "Pre-Decrement"; break; + case EOpCopyObject: out.debug << "copy object"; break; + + // * -> bool + case EOpConvInt8ToBool: out.debug << "Convert int8_t to bool"; break; + case EOpConvUint8ToBool: out.debug << "Convert uint8_t to bool"; break; + case EOpConvInt16ToBool: out.debug << "Convert int16_t to bool"; break; + case EOpConvUint16ToBool: out.debug << "Convert uint16_t to bool";break; + case EOpConvIntToBool: out.debug << "Convert int to bool"; break; + case EOpConvUintToBool: out.debug << "Convert uint to bool"; break; + case EOpConvInt64ToBool: out.debug << "Convert int64 to bool"; break; + case EOpConvUint64ToBool: out.debug << "Convert uint64 to bool"; break; + case EOpConvFloat16ToBool: out.debug << "Convert float16_t to bool"; break; + case EOpConvFloatToBool: out.debug << "Convert float to bool"; break; + case EOpConvDoubleToBool: out.debug << "Convert double to bool"; break; + + // bool -> * + case EOpConvBoolToInt8: out.debug << "Convert bool to int8_t"; break; + case EOpConvBoolToUint8: out.debug << "Convert bool to uint8_t"; break; + case EOpConvBoolToInt16: out.debug << "Convert bool to int16_t"; break; + case EOpConvBoolToUint16: out.debug << "Convert bool to uint16_t";break; + case EOpConvBoolToInt: out.debug << "Convert bool to int" ; break; + case EOpConvBoolToUint: out.debug << "Convert bool to uint"; break; + case EOpConvBoolToInt64: out.debug << "Convert bool to int64"; break; + case EOpConvBoolToUint64: out.debug << "Convert bool to uint64";break; + case EOpConvBoolToFloat16: out.debug << "Convert bool to float16_t"; break; + case EOpConvBoolToFloat: out.debug << "Convert bool to float"; break; + case EOpConvBoolToDouble: out.debug << "Convert bool to double"; break; + + // int8_t -> (u)int* + case EOpConvInt8ToInt16: out.debug << "Convert int8_t to int16_t";break; + case EOpConvInt8ToInt: out.debug << "Convert int8_t to int"; break; + case EOpConvInt8ToInt64: out.debug << "Convert int8_t to int64"; break; + case EOpConvInt8ToUint8: out.debug << "Convert int8_t to uint8_t";break; + case EOpConvInt8ToUint16: out.debug << "Convert int8_t to uint16_t";break; + case EOpConvInt8ToUint: out.debug << "Convert int8_t to uint"; break; + case EOpConvInt8ToUint64: out.debug << "Convert int8_t to uint64"; break; + + // uint8_t -> 
(u)int* + case EOpConvUint8ToInt8: out.debug << "Convert uint8_t to int8_t";break; + case EOpConvUint8ToInt16: out.debug << "Convert uint8_t to int16_t";break; + case EOpConvUint8ToInt: out.debug << "Convert uint8_t to int"; break; + case EOpConvUint8ToInt64: out.debug << "Convert uint8_t to int64"; break; + case EOpConvUint8ToUint16: out.debug << "Convert uint8_t to uint16_t";break; + case EOpConvUint8ToUint: out.debug << "Convert uint8_t to uint"; break; + case EOpConvUint8ToUint64: out.debug << "Convert uint8_t to uint64"; break; + + // int8_t -> float* + case EOpConvInt8ToFloat16: out.debug << "Convert int8_t to float16_t";break; + case EOpConvInt8ToFloat: out.debug << "Convert int8_t to float"; break; + case EOpConvInt8ToDouble: out.debug << "Convert int8_t to double"; break; + + // uint8_t -> float* + case EOpConvUint8ToFloat16: out.debug << "Convert uint8_t to float16_t";break; + case EOpConvUint8ToFloat: out.debug << "Convert uint8_t to float"; break; + case EOpConvUint8ToDouble: out.debug << "Convert uint8_t to double"; break; + + // int16_t -> (u)int* + case EOpConvInt16ToInt8: out.debug << "Convert int16_t to int8_t";break; + case EOpConvInt16ToInt: out.debug << "Convert int16_t to int"; break; + case EOpConvInt16ToInt64: out.debug << "Convert int16_t to int64"; break; + case EOpConvInt16ToUint8: out.debug << "Convert int16_t to uint8_t";break; + case EOpConvInt16ToUint16: out.debug << "Convert int16_t to uint16_t";break; + case EOpConvInt16ToUint: out.debug << "Convert int16_t to uint"; break; + case EOpConvInt16ToUint64: out.debug << "Convert int16_t to uint64"; break; + + // int16_t -> float* + case EOpConvInt16ToFloat16: out.debug << "Convert int16_t to float16_t";break; + case EOpConvInt16ToFloat: out.debug << "Convert int16_t to float"; break; + case EOpConvInt16ToDouble: out.debug << "Convert int16_t to double"; break; + + // uint16_t -> (u)int* + case EOpConvUint16ToInt8: out.debug << "Convert uint16_t to int8_t";break; + case EOpConvUint16ToInt16: out.debug << "Convert uint16_t to int16_t";break; + case EOpConvUint16ToInt: out.debug << "Convert uint16_t to int"; break; + case EOpConvUint16ToInt64: out.debug << "Convert uint16_t to int64"; break; + case EOpConvUint16ToUint8: out.debug << "Convert uint16_t to uint8_t";break; + case EOpConvUint16ToUint: out.debug << "Convert uint16_t to uint"; break; + case EOpConvUint16ToUint64: out.debug << "Convert uint16_t to uint64"; break; + + // uint16_t -> float* + case EOpConvUint16ToFloat16: out.debug << "Convert uint16_t to float16_t";break; + case EOpConvUint16ToFloat: out.debug << "Convert uint16_t to float"; break; + case EOpConvUint16ToDouble: out.debug << "Convert uint16_t to double"; break; + + // int32_t -> (u)int* + case EOpConvIntToInt8: out.debug << "Convert int to int8_t";break; + case EOpConvIntToInt16: out.debug << "Convert int to int16_t";break; + case EOpConvIntToInt64: out.debug << "Convert int to int64"; break; + case EOpConvIntToUint8: out.debug << "Convert int to uint8_t";break; + case EOpConvIntToUint16: out.debug << "Convert int to uint16_t";break; + case EOpConvIntToUint: out.debug << "Convert int to uint"; break; + case EOpConvIntToUint64: out.debug << "Convert int to uint64"; break; + + // int32_t -> float* + case EOpConvIntToFloat16: out.debug << "Convert int to float16_t";break; + case EOpConvIntToFloat: out.debug << "Convert int to float"; break; + case EOpConvIntToDouble: out.debug << "Convert int to double"; break; + + // uint32_t -> (u)int* + case EOpConvUintToInt8: out.debug << "Convert uint to 
int8_t";break; + case EOpConvUintToInt16: out.debug << "Convert uint to int16_t";break; + case EOpConvUintToInt: out.debug << "Convert uint to int";break; + case EOpConvUintToInt64: out.debug << "Convert uint to int64"; break; + case EOpConvUintToUint8: out.debug << "Convert uint to uint8_t";break; + case EOpConvUintToUint16: out.debug << "Convert uint to uint16_t";break; + case EOpConvUintToUint64: out.debug << "Convert uint to uint64"; break; + + // uint32_t -> float* + case EOpConvUintToFloat16: out.debug << "Convert uint to float16_t";break; + case EOpConvUintToFloat: out.debug << "Convert uint to float"; break; + case EOpConvUintToDouble: out.debug << "Convert uint to double"; break; + + // int64 -> (u)int* + case EOpConvInt64ToInt8: out.debug << "Convert int64 to int8_t"; break; + case EOpConvInt64ToInt16: out.debug << "Convert int64 to int16_t"; break; + case EOpConvInt64ToInt: out.debug << "Convert int64 to int"; break; + case EOpConvInt64ToUint8: out.debug << "Convert int64 to uint8_t";break; + case EOpConvInt64ToUint16: out.debug << "Convert int64 to uint16_t";break; + case EOpConvInt64ToUint: out.debug << "Convert int64 to uint"; break; + case EOpConvInt64ToUint64: out.debug << "Convert int64 to uint64"; break; + + // int64 -> float* + case EOpConvInt64ToFloat16: out.debug << "Convert int64 to float16_t";break; + case EOpConvInt64ToFloat: out.debug << "Convert int64 to float"; break; + case EOpConvInt64ToDouble: out.debug << "Convert int64 to double"; break; + + // uint64 -> (u)int* + case EOpConvUint64ToInt8: out.debug << "Convert uint64 to int8_t";break; + case EOpConvUint64ToInt16: out.debug << "Convert uint64 to int16_t";break; + case EOpConvUint64ToInt: out.debug << "Convert uint64 to int"; break; + case EOpConvUint64ToInt64: out.debug << "Convert uint64 to int64"; break; + case EOpConvUint64ToUint8: out.debug << "Convert uint64 to uint8_t";break; + case EOpConvUint64ToUint16: out.debug << "Convert uint64 to uint16"; break; + case EOpConvUint64ToUint: out.debug << "Convert uint64 to uint"; break; + + // uint64 -> float* + case EOpConvUint64ToFloat16: out.debug << "Convert uint64 to float16_t";break; + case EOpConvUint64ToFloat: out.debug << "Convert uint64 to float"; break; + case EOpConvUint64ToDouble: out.debug << "Convert uint64 to double"; break; + + // float16_t -> int* + case EOpConvFloat16ToInt8: out.debug << "Convert float16_t to int8_t"; break; + case EOpConvFloat16ToInt16: out.debug << "Convert float16_t to int16_t"; break; + case EOpConvFloat16ToInt: out.debug << "Convert float16_t to int"; break; + case EOpConvFloat16ToInt64: out.debug << "Convert float16_t to int64"; break; + + // float16_t -> uint* + case EOpConvFloat16ToUint8: out.debug << "Convert float16_t to uint8_t"; break; + case EOpConvFloat16ToUint16: out.debug << "Convert float16_t to uint16_t"; break; + case EOpConvFloat16ToUint: out.debug << "Convert float16_t to uint"; break; + case EOpConvFloat16ToUint64: out.debug << "Convert float16_t to uint64"; break; + + // float16_t -> float* + case EOpConvFloat16ToFloat: out.debug << "Convert float16_t to float"; break; + case EOpConvFloat16ToDouble: out.debug << "Convert float16_t to double"; break; + + // float32 -> float* + case EOpConvFloatToFloat16: out.debug << "Convert float to float16_t"; break; + case EOpConvFloatToDouble: out.debug << "Convert float to double"; break; + + // float32_t -> int* + case EOpConvFloatToInt8: out.debug << "Convert float to int8_t"; break; + case EOpConvFloatToInt16: out.debug << "Convert float to int16_t"; break; + case 
EOpConvFloatToInt: out.debug << "Convert float to int"; break; + case EOpConvFloatToInt64: out.debug << "Convert float to int64"; break; + + // float32_t -> uint* + case EOpConvFloatToUint8: out.debug << "Convert float to uint8_t"; break; + case EOpConvFloatToUint16: out.debug << "Convert float to uint16_t"; break; + case EOpConvFloatToUint: out.debug << "Convert float to uint"; break; + case EOpConvFloatToUint64: out.debug << "Convert float to uint64"; break; + + // double -> float* + case EOpConvDoubleToFloat16: out.debug << "Convert double to float16_t"; break; + case EOpConvDoubleToFloat: out.debug << "Convert double to float"; break; + + // double -> int* + case EOpConvDoubleToInt8: out.debug << "Convert double to int8_t"; break; + case EOpConvDoubleToInt16: out.debug << "Convert double to int16_t"; break; + case EOpConvDoubleToInt: out.debug << "Convert double to int"; break; + case EOpConvDoubleToInt64: out.debug << "Convert double to int64"; break; + + // float32_t -> uint* + case EOpConvDoubleToUint8: out.debug << "Convert double to uint8_t"; break; + case EOpConvDoubleToUint16: out.debug << "Convert double to uint16_t"; break; + case EOpConvDoubleToUint: out.debug << "Convert double to uint"; break; + case EOpConvDoubleToUint64: out.debug << "Convert double to uint64"; break; + + case EOpConvUint64ToPtr: out.debug << "Convert uint64_t to pointer"; break; + case EOpConvPtrToUint64: out.debug << "Convert pointer to uint64_t"; break; + + case EOpRadians: out.debug << "radians"; break; + case EOpDegrees: out.debug << "degrees"; break; + case EOpSin: out.debug << "sine"; break; + case EOpCos: out.debug << "cosine"; break; + case EOpTan: out.debug << "tangent"; break; + case EOpAsin: out.debug << "arc sine"; break; + case EOpAcos: out.debug << "arc cosine"; break; + case EOpAtan: out.debug << "arc tangent"; break; + case EOpSinh: out.debug << "hyp. sine"; break; + case EOpCosh: out.debug << "hyp. cosine"; break; + case EOpTanh: out.debug << "hyp. tangent"; break; + case EOpAsinh: out.debug << "arc hyp. sine"; break; + case EOpAcosh: out.debug << "arc hyp. cosine"; break; + case EOpAtanh: out.debug << "arc hyp. 
tangent"; break; + + case EOpExp: out.debug << "exp"; break; + case EOpLog: out.debug << "log"; break; + case EOpExp2: out.debug << "exp2"; break; + case EOpLog2: out.debug << "log2"; break; + case EOpSqrt: out.debug << "sqrt"; break; + case EOpInverseSqrt: out.debug << "inverse sqrt"; break; + + case EOpAbs: out.debug << "Absolute value"; break; + case EOpSign: out.debug << "Sign"; break; + case EOpFloor: out.debug << "Floor"; break; + case EOpTrunc: out.debug << "trunc"; break; + case EOpRound: out.debug << "round"; break; + case EOpRoundEven: out.debug << "roundEven"; break; + case EOpCeil: out.debug << "Ceiling"; break; + case EOpFract: out.debug << "Fraction"; break; + + case EOpIsNan: out.debug << "isnan"; break; + case EOpIsInf: out.debug << "isinf"; break; + + case EOpFloatBitsToInt: out.debug << "floatBitsToInt"; break; + case EOpFloatBitsToUint:out.debug << "floatBitsToUint"; break; + case EOpIntBitsToFloat: out.debug << "intBitsToFloat"; break; + case EOpUintBitsToFloat:out.debug << "uintBitsToFloat"; break; + case EOpDoubleBitsToInt64: out.debug << "doubleBitsToInt64"; break; + case EOpDoubleBitsToUint64: out.debug << "doubleBitsToUint64"; break; + case EOpInt64BitsToDouble: out.debug << "int64BitsToDouble"; break; + case EOpUint64BitsToDouble: out.debug << "uint64BitsToDouble"; break; + case EOpFloat16BitsToInt16: out.debug << "float16BitsToInt16"; break; + case EOpFloat16BitsToUint16: out.debug << "float16BitsToUint16"; break; + case EOpInt16BitsToFloat16: out.debug << "int16BitsToFloat16"; break; + case EOpUint16BitsToFloat16: out.debug << "uint16BitsToFloat16"; break; + + case EOpPackSnorm2x16: out.debug << "packSnorm2x16"; break; + case EOpUnpackSnorm2x16:out.debug << "unpackSnorm2x16"; break; + case EOpPackUnorm2x16: out.debug << "packUnorm2x16"; break; + case EOpUnpackUnorm2x16:out.debug << "unpackUnorm2x16"; break; + case EOpPackHalf2x16: out.debug << "packHalf2x16"; break; + case EOpUnpackHalf2x16: out.debug << "unpackHalf2x16"; break; + case EOpPack16: out.debug << "pack16"; break; + case EOpPack32: out.debug << "pack32"; break; + case EOpPack64: out.debug << "pack64"; break; + case EOpUnpack32: out.debug << "unpack32"; break; + case EOpUnpack16: out.debug << "unpack16"; break; + case EOpUnpack8: out.debug << "unpack8"; break; + + case EOpPackSnorm4x8: out.debug << "PackSnorm4x8"; break; + case EOpUnpackSnorm4x8: out.debug << "UnpackSnorm4x8"; break; + case EOpPackUnorm4x8: out.debug << "PackUnorm4x8"; break; + case EOpUnpackUnorm4x8: out.debug << "UnpackUnorm4x8"; break; + case EOpPackDouble2x32: out.debug << "PackDouble2x32"; break; + case EOpUnpackDouble2x32: out.debug << "UnpackDouble2x32"; break; + + case EOpPackInt2x32: out.debug << "packInt2x32"; break; + case EOpUnpackInt2x32: out.debug << "unpackInt2x32"; break; + case EOpPackUint2x32: out.debug << "packUint2x32"; break; + case EOpUnpackUint2x32: out.debug << "unpackUint2x32"; break; + + case EOpPackInt2x16: out.debug << "packInt2x16"; break; + case EOpUnpackInt2x16: out.debug << "unpackInt2x16"; break; + case EOpPackUint2x16: out.debug << "packUint2x16"; break; + case EOpUnpackUint2x16: out.debug << "unpackUint2x16"; break; + + case EOpPackInt4x16: out.debug << "packInt4x16"; break; + case EOpUnpackInt4x16: out.debug << "unpackInt4x16"; break; + case EOpPackUint4x16: out.debug << "packUint4x16"; break; + case EOpUnpackUint4x16: out.debug << "unpackUint4x16"; break; + case EOpPackFloat2x16: out.debug << "packFloat2x16"; break; + case EOpUnpackFloat2x16: out.debug << "unpackFloat2x16"; break; + + case 
EOpLength: out.debug << "length"; break; + case EOpNormalize: out.debug << "normalize"; break; + case EOpDPdx: out.debug << "dPdx"; break; + case EOpDPdy: out.debug << "dPdy"; break; + case EOpFwidth: out.debug << "fwidth"; break; + case EOpDPdxFine: out.debug << "dPdxFine"; break; + case EOpDPdyFine: out.debug << "dPdyFine"; break; + case EOpFwidthFine: out.debug << "fwidthFine"; break; + case EOpDPdxCoarse: out.debug << "dPdxCoarse"; break; + case EOpDPdyCoarse: out.debug << "dPdyCoarse"; break; + case EOpFwidthCoarse: out.debug << "fwidthCoarse"; break; + + case EOpInterpolateAtCentroid: out.debug << "interpolateAtCentroid"; break; + + case EOpDeterminant: out.debug << "determinant"; break; + case EOpMatrixInverse: out.debug << "inverse"; break; + case EOpTranspose: out.debug << "transpose"; break; + + case EOpAny: out.debug << "any"; break; + case EOpAll: out.debug << "all"; break; + + case EOpArrayLength: out.debug << "array length"; break; + + case EOpEmitStreamVertex: out.debug << "EmitStreamVertex"; break; + case EOpEndStreamPrimitive: out.debug << "EndStreamPrimitive"; break; + + case EOpAtomicCounterIncrement: out.debug << "AtomicCounterIncrement";break; + case EOpAtomicCounterDecrement: out.debug << "AtomicCounterDecrement";break; + case EOpAtomicCounter: out.debug << "AtomicCounter"; break; + + case EOpTextureQuerySize: out.debug << "textureSize"; break; + case EOpTextureQueryLod: out.debug << "textureQueryLod"; break; + case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break; + case EOpTextureQuerySamples: out.debug << "textureSamples"; break; + case EOpImageQuerySize: out.debug << "imageQuerySize"; break; + case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break; + case EOpImageLoad: out.debug << "imageLoad"; break; + + case EOpBitFieldReverse: out.debug << "bitFieldReverse"; break; + case EOpBitCount: out.debug << "bitCount"; break; + case EOpFindLSB: out.debug << "findLSB"; break; + case EOpFindMSB: out.debug << "findMSB"; break; + + case EOpCountLeadingZeros: out.debug << "countLeadingZeros"; break; + case EOpCountTrailingZeros: out.debug << "countTrailingZeros"; break; + + case EOpNoise: out.debug << "noise"; break; + + case EOpBallot: out.debug << "ballot"; break; + case EOpReadFirstInvocation: out.debug << "readFirstInvocation"; break; + + case EOpAnyInvocation: out.debug << "anyInvocation"; break; + case EOpAllInvocations: out.debug << "allInvocations"; break; + case EOpAllInvocationsEqual: out.debug << "allInvocationsEqual"; break; + + case EOpSubgroupElect: out.debug << "subgroupElect"; break; + case EOpSubgroupAll: out.debug << "subgroupAll"; break; + case EOpSubgroupAny: out.debug << "subgroupAny"; break; + case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break; + case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break; + case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break; + case EOpSubgroupBallot: out.debug << "subgroupBallot"; break; + case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break; + case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break; + case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break; + case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break; + case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break; + case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break; + case EOpSubgroupBallotFindMSB: out.debug << 
"subgroupBallotFindMSB"; break; + case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break; + case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break; + case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break; + case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break; + case EOpSubgroupAdd: out.debug << "subgroupAdd"; break; + case EOpSubgroupMul: out.debug << "subgroupMul"; break; + case EOpSubgroupMin: out.debug << "subgroupMin"; break; + case EOpSubgroupMax: out.debug << "subgroupMax"; break; + case EOpSubgroupAnd: out.debug << "subgroupAnd"; break; + case EOpSubgroupOr: out.debug << "subgroupOr"; break; + case EOpSubgroupXor: out.debug << "subgroupXor"; break; + case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break; + case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break; + case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break; + case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break; + case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break; + case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break; + case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break; + case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break; + case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break; + case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break; + case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break; + case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break; + case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break; + case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break; + case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break; + case EOpSubgroupClusteredMul: out.debug << "subgroupClusteredMul"; break; + case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break; + case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break; + case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break; + case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break; + case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break; + case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break; + case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break; + case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break; + case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break; + + case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break; + case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break; + case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break; + case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break; + case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break; + case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break; + case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break; + case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break; + case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break; + case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break; + case EOpSubgroupPartitionedInclusiveMin: out.debug << 
"subgroupPartitionedInclusiveMinNV"; break; + case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break; + case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break; + case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break; + case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break; + case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break; + case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break; + case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break; + case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break; + case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break; + case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break; + case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break; + + case EOpClip: out.debug << "clip"; break; + case EOpIsFinite: out.debug << "isfinite"; break; + case EOpLog10: out.debug << "log10"; break; + case EOpRcp: out.debug << "rcp"; break; + case EOpSaturate: out.debug << "saturate"; break; + + case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break; + + case EOpMinInvocations: out.debug << "minInvocations"; break; + case EOpMaxInvocations: out.debug << "maxInvocations"; break; + case EOpAddInvocations: out.debug << "addInvocations"; break; + case EOpMinInvocationsNonUniform: out.debug << "minInvocationsNonUniform"; break; + case EOpMaxInvocationsNonUniform: out.debug << "maxInvocationsNonUniform"; break; + case EOpAddInvocationsNonUniform: out.debug << "addInvocationsNonUniform"; break; + + case EOpMinInvocationsInclusiveScan: out.debug << "minInvocationsInclusiveScan"; break; + case EOpMaxInvocationsInclusiveScan: out.debug << "maxInvocationsInclusiveScan"; break; + case EOpAddInvocationsInclusiveScan: out.debug << "addInvocationsInclusiveScan"; break; + case EOpMinInvocationsInclusiveScanNonUniform: out.debug << "minInvocationsInclusiveScanNonUniform"; break; + case EOpMaxInvocationsInclusiveScanNonUniform: out.debug << "maxInvocationsInclusiveScanNonUniform"; break; + case EOpAddInvocationsInclusiveScanNonUniform: out.debug << "addInvocationsInclusiveScanNonUniform"; break; + + case EOpMinInvocationsExclusiveScan: out.debug << "minInvocationsExclusiveScan"; break; + case EOpMaxInvocationsExclusiveScan: out.debug << "maxInvocationsExclusiveScan"; break; + case EOpAddInvocationsExclusiveScan: out.debug << "addInvocationsExclusiveScan"; break; + case EOpMinInvocationsExclusiveScanNonUniform: out.debug << "minInvocationsExclusiveScanNonUniform"; break; + case EOpMaxInvocationsExclusiveScanNonUniform: out.debug << "maxInvocationsExclusiveScanNonUniform"; break; + case EOpAddInvocationsExclusiveScanNonUniform: out.debug << "addInvocationsExclusiveScanNonUniform"; break; + + case EOpMbcnt: out.debug << "mbcnt"; break; + + case EOpFragmentMaskFetch: out.debug << "fragmentMaskFetchAMD"; break; + case EOpFragmentFetch: out.debug << "fragmentFetchAMD"; break; + + case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break; + case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break; + + case EOpSubpassLoad: out.debug << "subpassLoad"; break; + case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break; + + case 
EOpConstructReference: out.debug << "Construct reference type"; break; + + default: out.debug.message(EPrefixError, "Bad unary op"); + } + + out.debug << " (" << node->getCompleteString() << ")"; + + out.debug << "\n"; + + return true; +} + +bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + TInfoSink& out = infoSink; + + if (node->getOp() == EOpNull) { + out.debug.message(EPrefixError, "node is still EOpNull!"); + return true; + } + + OutputTreeText(out, node, depth); + + switch (node->getOp()) { + case EOpSequence: out.debug << "Sequence\n"; return true; + case EOpLinkerObjects: out.debug << "Linker Objects\n"; return true; + case EOpComma: out.debug << "Comma"; break; + case EOpFunction: out.debug << "Function Definition: " << node->getName(); break; + case EOpFunctionCall: out.debug << "Function Call: " << node->getName(); break; + case EOpParameters: out.debug << "Function Parameters: "; break; + + case EOpConstructFloat: out.debug << "Construct float"; break; + case EOpConstructDouble:out.debug << "Construct double"; break; + + case EOpConstructVec2: out.debug << "Construct vec2"; break; + case EOpConstructVec3: out.debug << "Construct vec3"; break; + case EOpConstructVec4: out.debug << "Construct vec4"; break; + case EOpConstructDVec2: out.debug << "Construct dvec2"; break; + case EOpConstructDVec3: out.debug << "Construct dvec3"; break; + case EOpConstructDVec4: out.debug << "Construct dvec4"; break; + case EOpConstructBool: out.debug << "Construct bool"; break; + case EOpConstructBVec2: out.debug << "Construct bvec2"; break; + case EOpConstructBVec3: out.debug << "Construct bvec3"; break; + case EOpConstructBVec4: out.debug << "Construct bvec4"; break; + case EOpConstructInt8: out.debug << "Construct int8_t"; break; + case EOpConstructI8Vec2: out.debug << "Construct i8vec2"; break; + case EOpConstructI8Vec3: out.debug << "Construct i8vec3"; break; + case EOpConstructI8Vec4: out.debug << "Construct i8vec4"; break; + case EOpConstructInt: out.debug << "Construct int"; break; + case EOpConstructIVec2: out.debug << "Construct ivec2"; break; + case EOpConstructIVec3: out.debug << "Construct ivec3"; break; + case EOpConstructIVec4: out.debug << "Construct ivec4"; break; + case EOpConstructUint8: out.debug << "Construct uint8_t"; break; + case EOpConstructU8Vec2: out.debug << "Construct u8vec2"; break; + case EOpConstructU8Vec3: out.debug << "Construct u8vec3"; break; + case EOpConstructU8Vec4: out.debug << "Construct u8vec4"; break; + case EOpConstructUint: out.debug << "Construct uint"; break; + case EOpConstructUVec2: out.debug << "Construct uvec2"; break; + case EOpConstructUVec3: out.debug << "Construct uvec3"; break; + case EOpConstructUVec4: out.debug << "Construct uvec4"; break; + case EOpConstructInt64: out.debug << "Construct int64"; break; + case EOpConstructI64Vec2: out.debug << "Construct i64vec2"; break; + case EOpConstructI64Vec3: out.debug << "Construct i64vec3"; break; + case EOpConstructI64Vec4: out.debug << "Construct i64vec4"; break; + case EOpConstructUint64: out.debug << "Construct uint64"; break; + case EOpConstructU64Vec2: out.debug << "Construct u64vec2"; break; + case EOpConstructU64Vec3: out.debug << "Construct u64vec3"; break; + case EOpConstructU64Vec4: out.debug << "Construct u64vec4"; break; + case EOpConstructInt16: out.debug << "Construct int16_t"; break; + case EOpConstructI16Vec2: out.debug << "Construct i16vec2"; break; + case EOpConstructI16Vec3: out.debug << "Construct i16vec3"; break; + case EOpConstructI16Vec4: 
out.debug << "Construct i16vec4"; break; + case EOpConstructUint16: out.debug << "Construct uint16_t"; break; + case EOpConstructU16Vec2: out.debug << "Construct u16vec2"; break; + case EOpConstructU16Vec3: out.debug << "Construct u16vec3"; break; + case EOpConstructU16Vec4: out.debug << "Construct u16vec4"; break; + case EOpConstructMat2x2: out.debug << "Construct mat2"; break; + case EOpConstructMat2x3: out.debug << "Construct mat2x3"; break; + case EOpConstructMat2x4: out.debug << "Construct mat2x4"; break; + case EOpConstructMat3x2: out.debug << "Construct mat3x2"; break; + case EOpConstructMat3x3: out.debug << "Construct mat3"; break; + case EOpConstructMat3x4: out.debug << "Construct mat3x4"; break; + case EOpConstructMat4x2: out.debug << "Construct mat4x2"; break; + case EOpConstructMat4x3: out.debug << "Construct mat4x3"; break; + case EOpConstructMat4x4: out.debug << "Construct mat4"; break; + case EOpConstructDMat2x2: out.debug << "Construct dmat2"; break; + case EOpConstructDMat2x3: out.debug << "Construct dmat2x3"; break; + case EOpConstructDMat2x4: out.debug << "Construct dmat2x4"; break; + case EOpConstructDMat3x2: out.debug << "Construct dmat3x2"; break; + case EOpConstructDMat3x3: out.debug << "Construct dmat3"; break; + case EOpConstructDMat3x4: out.debug << "Construct dmat3x4"; break; + case EOpConstructDMat4x2: out.debug << "Construct dmat4x2"; break; + case EOpConstructDMat4x3: out.debug << "Construct dmat4x3"; break; + case EOpConstructDMat4x4: out.debug << "Construct dmat4"; break; + case EOpConstructIMat2x2: out.debug << "Construct imat2"; break; + case EOpConstructIMat2x3: out.debug << "Construct imat2x3"; break; + case EOpConstructIMat2x4: out.debug << "Construct imat2x4"; break; + case EOpConstructIMat3x2: out.debug << "Construct imat3x2"; break; + case EOpConstructIMat3x3: out.debug << "Construct imat3"; break; + case EOpConstructIMat3x4: out.debug << "Construct imat3x4"; break; + case EOpConstructIMat4x2: out.debug << "Construct imat4x2"; break; + case EOpConstructIMat4x3: out.debug << "Construct imat4x3"; break; + case EOpConstructIMat4x4: out.debug << "Construct imat4"; break; + case EOpConstructUMat2x2: out.debug << "Construct umat2"; break; + case EOpConstructUMat2x3: out.debug << "Construct umat2x3"; break; + case EOpConstructUMat2x4: out.debug << "Construct umat2x4"; break; + case EOpConstructUMat3x2: out.debug << "Construct umat3x2"; break; + case EOpConstructUMat3x3: out.debug << "Construct umat3"; break; + case EOpConstructUMat3x4: out.debug << "Construct umat3x4"; break; + case EOpConstructUMat4x2: out.debug << "Construct umat4x2"; break; + case EOpConstructUMat4x3: out.debug << "Construct umat4x3"; break; + case EOpConstructUMat4x4: out.debug << "Construct umat4"; break; + case EOpConstructBMat2x2: out.debug << "Construct bmat2"; break; + case EOpConstructBMat2x3: out.debug << "Construct bmat2x3"; break; + case EOpConstructBMat2x4: out.debug << "Construct bmat2x4"; break; + case EOpConstructBMat3x2: out.debug << "Construct bmat3x2"; break; + case EOpConstructBMat3x3: out.debug << "Construct bmat3"; break; + case EOpConstructBMat3x4: out.debug << "Construct bmat3x4"; break; + case EOpConstructBMat4x2: out.debug << "Construct bmat4x2"; break; + case EOpConstructBMat4x3: out.debug << "Construct bmat4x3"; break; + case EOpConstructBMat4x4: out.debug << "Construct bmat4"; break; + case EOpConstructFloat16: out.debug << "Construct float16_t"; break; + case EOpConstructF16Vec2: out.debug << "Construct f16vec2"; break; + case EOpConstructF16Vec3: out.debug << 
"Construct f16vec3"; break; + case EOpConstructF16Vec4: out.debug << "Construct f16vec4"; break; + case EOpConstructF16Mat2x2: out.debug << "Construct f16mat2"; break; + case EOpConstructF16Mat2x3: out.debug << "Construct f16mat2x3"; break; + case EOpConstructF16Mat2x4: out.debug << "Construct f16mat2x4"; break; + case EOpConstructF16Mat3x2: out.debug << "Construct f16mat3x2"; break; + case EOpConstructF16Mat3x3: out.debug << "Construct f16mat3"; break; + case EOpConstructF16Mat3x4: out.debug << "Construct f16mat3x4"; break; + case EOpConstructF16Mat4x2: out.debug << "Construct f16mat4x2"; break; + case EOpConstructF16Mat4x3: out.debug << "Construct f16mat4x3"; break; + case EOpConstructF16Mat4x4: out.debug << "Construct f16mat4"; break; + case EOpConstructStruct: out.debug << "Construct structure"; break; + case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break; + case EOpConstructReference: out.debug << "Construct reference"; break; + case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break; + + case EOpLessThan: out.debug << "Compare Less Than"; break; + case EOpGreaterThan: out.debug << "Compare Greater Than"; break; + case EOpLessThanEqual: out.debug << "Compare Less Than or Equal"; break; + case EOpGreaterThanEqual: out.debug << "Compare Greater Than or Equal"; break; + case EOpVectorEqual: out.debug << "Equal"; break; + case EOpVectorNotEqual: out.debug << "NotEqual"; break; + + case EOpMod: out.debug << "mod"; break; + case EOpModf: out.debug << "modf"; break; + case EOpPow: out.debug << "pow"; break; + + case EOpAtan: out.debug << "arc tangent"; break; + + case EOpMin: out.debug << "min"; break; + case EOpMax: out.debug << "max"; break; + case EOpClamp: out.debug << "clamp"; break; + case EOpMix: out.debug << "mix"; break; + case EOpStep: out.debug << "step"; break; + case EOpSmoothStep: out.debug << "smoothstep"; break; + + case EOpDistance: out.debug << "distance"; break; + case EOpDot: out.debug << "dot-product"; break; + case EOpCross: out.debug << "cross-product"; break; + case EOpFaceForward: out.debug << "face-forward"; break; + case EOpReflect: out.debug << "reflect"; break; + case EOpRefract: out.debug << "refract"; break; + case EOpMul: out.debug << "component-wise multiply"; break; + case EOpOuterProduct: out.debug << "outer product"; break; + + case EOpEmitVertex: out.debug << "EmitVertex"; break; + case EOpEndPrimitive: out.debug << "EndPrimitive"; break; + + case EOpBarrier: out.debug << "Barrier"; break; + case EOpMemoryBarrier: out.debug << "MemoryBarrier"; break; + case EOpMemoryBarrierAtomicCounter: out.debug << "MemoryBarrierAtomicCounter"; break; + case EOpMemoryBarrierBuffer: out.debug << "MemoryBarrierBuffer"; break; + case EOpMemoryBarrierImage: out.debug << "MemoryBarrierImage"; break; + case EOpMemoryBarrierShared: out.debug << "MemoryBarrierShared"; break; + case EOpGroupMemoryBarrier: out.debug << "GroupMemoryBarrier"; break; + + case EOpReadInvocation: out.debug << "readInvocation"; break; + + case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break; + case EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break; + case EOpWriteInvocation: out.debug << "writeInvocation"; break; + + case EOpMin3: out.debug << "min3"; break; + case EOpMax3: out.debug << "max3"; break; + case EOpMid3: out.debug << "mid3"; break; + case EOpTime: out.debug << "time"; break; + + case EOpAtomicAdd: out.debug << "AtomicAdd"; break; + case EOpAtomicMin: out.debug << "AtomicMin"; 
break; + case EOpAtomicMax: out.debug << "AtomicMax"; break; + case EOpAtomicAnd: out.debug << "AtomicAnd"; break; + case EOpAtomicOr: out.debug << "AtomicOr"; break; + case EOpAtomicXor: out.debug << "AtomicXor"; break; + case EOpAtomicExchange: out.debug << "AtomicExchange"; break; + case EOpAtomicCompSwap: out.debug << "AtomicCompSwap"; break; + case EOpAtomicLoad: out.debug << "AtomicLoad"; break; + case EOpAtomicStore: out.debug << "AtomicStore"; break; + + case EOpAtomicCounterAdd: out.debug << "AtomicCounterAdd"; break; + case EOpAtomicCounterSubtract: out.debug << "AtomicCounterSubtract"; break; + case EOpAtomicCounterMin: out.debug << "AtomicCounterMin"; break; + case EOpAtomicCounterMax: out.debug << "AtomicCounterMax"; break; + case EOpAtomicCounterAnd: out.debug << "AtomicCounterAnd"; break; + case EOpAtomicCounterOr: out.debug << "AtomicCounterOr"; break; + case EOpAtomicCounterXor: out.debug << "AtomicCounterXor"; break; + case EOpAtomicCounterExchange: out.debug << "AtomicCounterExchange"; break; + case EOpAtomicCounterCompSwap: out.debug << "AtomicCounterCompSwap"; break; + + case EOpImageQuerySize: out.debug << "imageQuerySize"; break; + case EOpImageQuerySamples: out.debug << "imageQuerySamples"; break; + case EOpImageLoad: out.debug << "imageLoad"; break; + case EOpImageStore: out.debug << "imageStore"; break; + case EOpImageAtomicAdd: out.debug << "imageAtomicAdd"; break; + case EOpImageAtomicMin: out.debug << "imageAtomicMin"; break; + case EOpImageAtomicMax: out.debug << "imageAtomicMax"; break; + case EOpImageAtomicAnd: out.debug << "imageAtomicAnd"; break; + case EOpImageAtomicOr: out.debug << "imageAtomicOr"; break; + case EOpImageAtomicXor: out.debug << "imageAtomicXor"; break; + case EOpImageAtomicExchange: out.debug << "imageAtomicExchange"; break; + case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break; + case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break; + case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break; + case EOpImageLoadLod: out.debug << "imageLoadLod"; break; + case EOpImageStoreLod: out.debug << "imageStoreLod"; break; + + case EOpTextureQuerySize: out.debug << "textureSize"; break; + case EOpTextureQueryLod: out.debug << "textureQueryLod"; break; + case EOpTextureQueryLevels: out.debug << "textureQueryLevels"; break; + case EOpTextureQuerySamples: out.debug << "textureSamples"; break; + case EOpTexture: out.debug << "texture"; break; + case EOpTextureProj: out.debug << "textureProj"; break; + case EOpTextureLod: out.debug << "textureLod"; break; + case EOpTextureOffset: out.debug << "textureOffset"; break; + case EOpTextureFetch: out.debug << "textureFetch"; break; + case EOpTextureFetchOffset: out.debug << "textureFetchOffset"; break; + case EOpTextureProjOffset: out.debug << "textureProjOffset"; break; + case EOpTextureLodOffset: out.debug << "textureLodOffset"; break; + case EOpTextureProjLod: out.debug << "textureProjLod"; break; + case EOpTextureProjLodOffset: out.debug << "textureProjLodOffset"; break; + case EOpTextureGrad: out.debug << "textureGrad"; break; + case EOpTextureGradOffset: out.debug << "textureGradOffset"; break; + case EOpTextureProjGrad: out.debug << "textureProjGrad"; break; + case EOpTextureProjGradOffset: out.debug << "textureProjGradOffset"; break; + case EOpTextureGather: out.debug << "textureGather"; break; + case EOpTextureGatherOffset: out.debug << "textureGatherOffset"; break; + case EOpTextureGatherOffsets: out.debug << "textureGatherOffsets"; break; + case EOpTextureClamp: 
out.debug << "textureClamp"; break; + case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break; + case EOpTextureGradClamp: out.debug << "textureGradClamp"; break; + case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break; + case EOpTextureGatherLod: out.debug << "textureGatherLod"; break; + case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break; + case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break; + + case EOpSparseTexture: out.debug << "sparseTexture"; break; + case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break; + case EOpSparseTextureLod: out.debug << "sparseTextureLod"; break; + case EOpSparseTextureLodOffset: out.debug << "sparseTextureLodOffset"; break; + case EOpSparseTextureFetch: out.debug << "sparseTexelFetch"; break; + case EOpSparseTextureFetchOffset: out.debug << "sparseTexelFetchOffset"; break; + case EOpSparseTextureGrad: out.debug << "sparseTextureGrad"; break; + case EOpSparseTextureGradOffset: out.debug << "sparseTextureGradOffset"; break; + case EOpSparseTextureGather: out.debug << "sparseTextureGather"; break; + case EOpSparseTextureGatherOffset: out.debug << "sparseTextureGatherOffset"; break; + case EOpSparseTextureGatherOffsets: out.debug << "sparseTextureGatherOffsets"; break; + case EOpSparseImageLoad: out.debug << "sparseImageLoad"; break; + case EOpSparseTextureClamp: out.debug << "sparseTextureClamp"; break; + case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break; + case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break; + case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClam"; break; + case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break; + case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break; + case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break; + case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break; + case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break; + case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break; + case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break; + case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break; + case EOpImageSampleFootprintGradClampNV: out.debug << "mageSampleFootprintGradClampNV"; break; + case EOpAddCarry: out.debug << "addCarry"; break; + case EOpSubBorrow: out.debug << "subBorrow"; break; + case EOpUMulExtended: out.debug << "uMulExtended"; break; + case EOpIMulExtended: out.debug << "iMulExtended"; break; + case EOpBitfieldExtract: out.debug << "bitfieldExtract"; break; + case EOpBitfieldInsert: out.debug << "bitfieldInsert"; break; + + case EOpFma: out.debug << "fma"; break; + case EOpFrexp: out.debug << "frexp"; break; + case EOpLdexp: out.debug << "ldexp"; break; + + case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break; + case EOpInterpolateAtOffset: out.debug << "interpolateAtOffset"; break; + case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break; + + case EOpSinCos: out.debug << "sincos"; break; + case EOpGenMul: out.debug << "mul"; break; + + case EOpAllMemoryBarrierWithGroupSync: out.debug << "AllMemoryBarrierWithGroupSync"; break; + case EOpDeviceMemoryBarrier: out.debug << "DeviceMemoryBarrier"; break; + case EOpDeviceMemoryBarrierWithGroupSync: out.debug << 
"DeviceMemoryBarrierWithGroupSync"; break; + case EOpWorkgroupMemoryBarrier: out.debug << "WorkgroupMemoryBarrier"; break; + case EOpWorkgroupMemoryBarrierWithGroupSync: out.debug << "WorkgroupMemoryBarrierWithGroupSync"; break; + + case EOpSubgroupBarrier: out.debug << "subgroupBarrier"; break; + case EOpSubgroupMemoryBarrier: out.debug << "subgroupMemoryBarrier"; break; + case EOpSubgroupMemoryBarrierBuffer: out.debug << "subgroupMemoryBarrierBuffer"; break; + case EOpSubgroupMemoryBarrierImage: out.debug << "subgroupMemoryBarrierImage"; break; + case EOpSubgroupMemoryBarrierShared: out.debug << "subgroupMemoryBarrierShared"; break; + case EOpSubgroupElect: out.debug << "subgroupElect"; break; + case EOpSubgroupAll: out.debug << "subgroupAll"; break; + case EOpSubgroupAny: out.debug << "subgroupAny"; break; + case EOpSubgroupAllEqual: out.debug << "subgroupAllEqual"; break; + case EOpSubgroupBroadcast: out.debug << "subgroupBroadcast"; break; + case EOpSubgroupBroadcastFirst: out.debug << "subgroupBroadcastFirst"; break; + case EOpSubgroupBallot: out.debug << "subgroupBallot"; break; + case EOpSubgroupInverseBallot: out.debug << "subgroupInverseBallot"; break; + case EOpSubgroupBallotBitExtract: out.debug << "subgroupBallotBitExtract"; break; + case EOpSubgroupBallotBitCount: out.debug << "subgroupBallotBitCount"; break; + case EOpSubgroupBallotInclusiveBitCount: out.debug << "subgroupBallotInclusiveBitCount"; break; + case EOpSubgroupBallotExclusiveBitCount: out.debug << "subgroupBallotExclusiveBitCount"; break; + case EOpSubgroupBallotFindLSB: out.debug << "subgroupBallotFindLSB"; break; + case EOpSubgroupBallotFindMSB: out.debug << "subgroupBallotFindMSB"; break; + case EOpSubgroupShuffle: out.debug << "subgroupShuffle"; break; + case EOpSubgroupShuffleXor: out.debug << "subgroupShuffleXor"; break; + case EOpSubgroupShuffleUp: out.debug << "subgroupShuffleUp"; break; + case EOpSubgroupShuffleDown: out.debug << "subgroupShuffleDown"; break; + case EOpSubgroupAdd: out.debug << "subgroupAdd"; break; + case EOpSubgroupMul: out.debug << "subgroupMul"; break; + case EOpSubgroupMin: out.debug << "subgroupMin"; break; + case EOpSubgroupMax: out.debug << "subgroupMax"; break; + case EOpSubgroupAnd: out.debug << "subgroupAnd"; break; + case EOpSubgroupOr: out.debug << "subgroupOr"; break; + case EOpSubgroupXor: out.debug << "subgroupXor"; break; + case EOpSubgroupInclusiveAdd: out.debug << "subgroupInclusiveAdd"; break; + case EOpSubgroupInclusiveMul: out.debug << "subgroupInclusiveMul"; break; + case EOpSubgroupInclusiveMin: out.debug << "subgroupInclusiveMin"; break; + case EOpSubgroupInclusiveMax: out.debug << "subgroupInclusiveMax"; break; + case EOpSubgroupInclusiveAnd: out.debug << "subgroupInclusiveAnd"; break; + case EOpSubgroupInclusiveOr: out.debug << "subgroupInclusiveOr"; break; + case EOpSubgroupInclusiveXor: out.debug << "subgroupInclusiveXor"; break; + case EOpSubgroupExclusiveAdd: out.debug << "subgroupExclusiveAdd"; break; + case EOpSubgroupExclusiveMul: out.debug << "subgroupExclusiveMul"; break; + case EOpSubgroupExclusiveMin: out.debug << "subgroupExclusiveMin"; break; + case EOpSubgroupExclusiveMax: out.debug << "subgroupExclusiveMax"; break; + case EOpSubgroupExclusiveAnd: out.debug << "subgroupExclusiveAnd"; break; + case EOpSubgroupExclusiveOr: out.debug << "subgroupExclusiveOr"; break; + case EOpSubgroupExclusiveXor: out.debug << "subgroupExclusiveXor"; break; + case EOpSubgroupClusteredAdd: out.debug << "subgroupClusteredAdd"; break; + case EOpSubgroupClusteredMul: 
out.debug << "subgroupClusteredMul"; break; + case EOpSubgroupClusteredMin: out.debug << "subgroupClusteredMin"; break; + case EOpSubgroupClusteredMax: out.debug << "subgroupClusteredMax"; break; + case EOpSubgroupClusteredAnd: out.debug << "subgroupClusteredAnd"; break; + case EOpSubgroupClusteredOr: out.debug << "subgroupClusteredOr"; break; + case EOpSubgroupClusteredXor: out.debug << "subgroupClusteredXor"; break; + case EOpSubgroupQuadBroadcast: out.debug << "subgroupQuadBroadcast"; break; + case EOpSubgroupQuadSwapHorizontal: out.debug << "subgroupQuadSwapHorizontal"; break; + case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break; + case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break; + + case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break; + case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break; + case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break; + case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break; + case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break; + case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break; + case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break; + case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break; + case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break; + case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break; + case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break; + case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break; + case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break; + case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break; + case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break; + case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break; + case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break; + case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break; + case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break; + case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break; + case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break; + case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break; + + case EOpSubpassLoad: out.debug << "subpassLoad"; break; + case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break; + + case EOpTrace: out.debug << "traceNV"; break; + case EOpReportIntersection: out.debug << "reportIntersectionNV"; break; + case EOpIgnoreIntersection: out.debug << "ignoreIntersectionNV"; break; + case EOpTerminateRay: out.debug << "terminateRayNV"; break; + case EOpExecuteCallable: out.debug << "executeCallableNV"; break; + case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break; + + case EOpRayQueryInitialize: out.debug << "rayQueryInitializeEXT"; break; + case EOpRayQueryTerminate: out.debug << "rayQueryTerminateEXT"; break; + case EOpRayQueryGenerateIntersection: out.debug << 
"rayQueryGenerateIntersectionEXT"; break; + case EOpRayQueryConfirmIntersection: out.debug << "rayQueryConfirmIntersectionEXT"; break; + case EOpRayQueryProceed: out.debug << "rayQueryProceedEXT"; break; + case EOpRayQueryGetIntersectionType: out.debug << "rayQueryGetIntersectionTypeEXT"; break; + case EOpRayQueryGetRayTMin: out.debug << "rayQueryGetRayTMinEXT"; break; + case EOpRayQueryGetRayFlags: out.debug << "rayQueryGetRayFlagsEXT"; break; + case EOpRayQueryGetIntersectionT: out.debug << "rayQueryGetIntersectionTEXT"; break; + case EOpRayQueryGetIntersectionInstanceCustomIndex: out.debug << "rayQueryGetIntersectionInstanceCustomIndexEXT"; break; + case EOpRayQueryGetIntersectionInstanceId: out.debug << "rayQueryGetIntersectionInstanceIdEXT"; break; + case EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset: out.debug << "rayQueryGetIntersectionInstanceShaderBindingTableRecordOffsetEXT"; break; + case EOpRayQueryGetIntersectionGeometryIndex: out.debug << "rayQueryGetIntersectionGeometryIndexEXT"; break; + case EOpRayQueryGetIntersectionPrimitiveIndex: out.debug << "rayQueryGetIntersectionPrimitiveIndexEXT"; break; + case EOpRayQueryGetIntersectionBarycentrics: out.debug << "rayQueryGetIntersectionBarycentricsEXT"; break; + case EOpRayQueryGetIntersectionFrontFace: out.debug << "rayQueryGetIntersectionFrontFaceEXT"; break; + case EOpRayQueryGetIntersectionCandidateAABBOpaque: out.debug << "rayQueryGetIntersectionCandidateAABBOpaqueEXT"; break; + case EOpRayQueryGetIntersectionObjectRayDirection: out.debug << "rayQueryGetIntersectionObjectRayDirectionEXT"; break; + case EOpRayQueryGetIntersectionObjectRayOrigin: out.debug << "rayQueryGetIntersectionObjectRayOriginEXT"; break; + case EOpRayQueryGetWorldRayDirection: out.debug << "rayQueryGetWorldRayDirectionEXT"; break; + case EOpRayQueryGetWorldRayOrigin: out.debug << "rayQueryGetWorldRayOriginEXT"; break; + case EOpRayQueryGetIntersectionObjectToWorld: out.debug << "rayQueryGetIntersectionObjectToWorldEXT"; break; + case EOpRayQueryGetIntersectionWorldToObject: out.debug << "rayQueryGetIntersectionWorldToObjectEXT"; break; + + case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break; + case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break; + case EOpCooperativeMatrixMulAdd: out.debug << "MulAdd cooperative matrices"; break; + + case EOpIsHelperInvocation: out.debug << "IsHelperInvocation"; break; + case EOpDebugPrintf: out.debug << "Debug printf"; break; + + default: out.debug.message(EPrefixError, "Bad aggregation op"); + } + + if (node->getOp() != EOpSequence && node->getOp() != EOpParameters) + out.debug << " (" << node->getCompleteString() << ")"; + + out.debug << "\n"; + + return true; +} + +bool TOutputTraverser::visitSelection(TVisit /* visit */, TIntermSelection* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + out.debug << "Test condition and select"; + out.debug << " (" << node->getCompleteString() << ")"; + + if (node->getShortCircuit() == false) + out.debug << ": no shortcircuit"; + if (node->getFlatten()) + out.debug << ": Flatten"; + if (node->getDontFlatten()) + out.debug << ": DontFlatten"; + out.debug << "\n"; + + ++depth; + + OutputTreeText(out, node, depth); + out.debug << "Condition\n"; + node->getCondition()->traverse(this); + + OutputTreeText(out, node, depth); + if (node->getTrueBlock()) { + out.debug << "true case\n"; + node->getTrueBlock()->traverse(this); + } else + out.debug << "true case is null\n"; + + if 
(node->getFalseBlock()) { + OutputTreeText(out, node, depth); + out.debug << "false case\n"; + node->getFalseBlock()->traverse(this); + } + + --depth; + + return false; +} + +// Print infinities and NaNs, and numbers in a portable way. +// Goals: +// - portable (across IEEE 754 platforms) +// - shows all possible IEEE values +// - shows simple numbers in a simple way, e.g., no leading/trailing 0s +// - shows all digits, no premature rounding +static void OutputDouble(TInfoSink& out, double value, TOutputTraverser::EExtraOutput extra) +{ + if (IsInfinity(value)) { + if (value < 0) + out.debug << "-1.#INF"; + else + out.debug << "+1.#INF"; + } else if (IsNan(value)) + out.debug << "1.#IND"; + else { + const int maxSize = 340; + char buf[maxSize]; + const char* format = "%f"; + if (fabs(value) > 0.0 && (fabs(value) < 1e-5 || fabs(value) > 1e12)) + format = "%-.13e"; + int len = snprintf(buf, maxSize, format, value); + assert(len < maxSize); + + // remove a leading zero in the 100s slot in exponent; it is not portable + // pattern: XX...XXXe+0XX or XX...XXXe-0XX + if (len > 5) { + if (buf[len-5] == 'e' && (buf[len-4] == '+' || buf[len-4] == '-') && buf[len-3] == '0') { + buf[len-3] = buf[len-2]; + buf[len-2] = buf[len-1]; + buf[len-1] = '\0'; + } + } + + out.debug << buf; + + switch (extra) { + case TOutputTraverser::BinaryDoubleOutput: + { + uint64_t b; + static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)"); + memcpy(&b, &value, sizeof(b)); + + out.debug << " : "; + for (size_t i = 0; i < 8 * sizeof(value); ++i, ++b) { + out.debug << ((b & 0x8000000000000000) != 0 ? "1" : "0"); + b <<= 1; + } + break; + } + default: + break; + } + } +} + +static void OutputConstantUnion(TInfoSink& out, const TIntermTyped* node, const TConstUnionArray& constUnion, + TOutputTraverser::EExtraOutput extra, int depth) +{ + int size = node->getType().computeNumComponents(); + + for (int i = 0; i < size; i++) { + OutputTreeText(out, node, depth); + switch (constUnion[i].getType()) { + case EbtBool: + if (constUnion[i].getBConst()) + out.debug << "true"; + else + out.debug << "false"; + + out.debug << " (" << "const bool" << ")"; + + out.debug << "\n"; + break; + case EbtFloat: + case EbtDouble: + case EbtFloat16: + OutputDouble(out, constUnion[i].getDConst(), extra); + out.debug << "\n"; + break; + case EbtInt8: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI8Const(), "const int8_t"); + + out.debug << buf << "\n"; + } + break; + case EbtUint8: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU8Const(), "const uint8_t"); + + out.debug << buf << "\n"; + } + break; + case EbtInt16: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%d (%s)", constUnion[i].getI16Const(), "const int16_t"); + + out.debug << buf << "\n"; + } + break; + case EbtUint16: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%u (%s)", constUnion[i].getU16Const(), "const uint16_t"); + + out.debug << buf << "\n"; + } + break; + case EbtInt: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%d (%s)", constUnion[i].getIConst(), "const int"); + + out.debug << buf << "\n"; + } + break; + case EbtUint: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%u (%s)", constUnion[i].getUConst(), "const uint"); + + out.debug << buf << "\n"; + } + break; + case EbtInt64: + { + const int maxSize = 300; + char 
buf[maxSize]; + snprintf(buf, maxSize, "%lld (%s)", constUnion[i].getI64Const(), "const int64_t"); + + out.debug << buf << "\n"; + } + break; + case EbtUint64: + { + const int maxSize = 300; + char buf[maxSize]; + snprintf(buf, maxSize, "%llu (%s)", constUnion[i].getU64Const(), "const uint64_t"); + + out.debug << buf << "\n"; + } + break; + default: + out.info.message(EPrefixInternalError, "Unknown constant", node->getLoc()); + break; + } + } +} + +void TOutputTraverser::visitConstantUnion(TIntermConstantUnion* node) +{ + OutputTreeText(infoSink, node, depth); + infoSink.debug << "Constant:\n"; + + OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1); +} + +void TOutputTraverser::visitSymbol(TIntermSymbol* node) +{ + OutputTreeText(infoSink, node, depth); + + infoSink.debug << "'" << node->getName() << "' (" << node->getCompleteString() << ")\n"; + + if (! node->getConstArray().empty()) + OutputConstantUnion(infoSink, node, node->getConstArray(), extraOutput, depth + 1); + else if (node->getConstSubtree()) { + incrementDepth(node); + node->getConstSubtree()->traverse(this); + decrementDepth(); + } +} + +bool TOutputTraverser::visitLoop(TVisit /* visit */, TIntermLoop* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + out.debug << "Loop with condition "; + if (! node->testFirst()) + out.debug << "not "; + out.debug << "tested first"; + + if (node->getUnroll()) + out.debug << ": Unroll"; + if (node->getDontUnroll()) + out.debug << ": DontUnroll"; + if (node->getLoopDependency()) { + out.debug << ": Dependency "; + out.debug << node->getLoopDependency(); + } + out.debug << "\n"; + + ++depth; + + OutputTreeText(infoSink, node, depth); + if (node->getTest()) { + out.debug << "Loop Condition\n"; + node->getTest()->traverse(this); + } else + out.debug << "No loop condition\n"; + + OutputTreeText(infoSink, node, depth); + if (node->getBody()) { + out.debug << "Loop Body\n"; + node->getBody()->traverse(this); + } else + out.debug << "No loop body\n"; + + if (node->getTerminal()) { + OutputTreeText(infoSink, node, depth); + out.debug << "Loop Terminal Expression\n"; + node->getTerminal()->traverse(this); + } + + --depth; + + return false; +} + +bool TOutputTraverser::visitBranch(TVisit /* visit*/, TIntermBranch* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + + switch (node->getFlowOp()) { + case EOpKill: out.debug << "Branch: Kill"; break; + case EOpBreak: out.debug << "Branch: Break"; break; + case EOpContinue: out.debug << "Branch: Continue"; break; + case EOpReturn: out.debug << "Branch: Return"; break; + case EOpCase: out.debug << "case: "; break; + case EOpDemote: out.debug << "Demote"; break; + case EOpDefault: out.debug << "default: "; break; + default: out.debug << "Branch: Unknown Branch"; break; + } + + if (node->getExpression()) { + out.debug << " with expression\n"; + ++depth; + node->getExpression()->traverse(this); + --depth; + } else + out.debug << "\n"; + + return false; +} + +bool TOutputTraverser::visitSwitch(TVisit /* visit */, TIntermSwitch* node) +{ + TInfoSink& out = infoSink; + + OutputTreeText(out, node, depth); + out.debug << "switch"; + + if (node->getFlatten()) + out.debug << ": Flatten"; + if (node->getDontFlatten()) + out.debug << ": DontFlatten"; + out.debug << "\n"; + + OutputTreeText(out, node, depth); + out.debug << "condition\n"; + ++depth; + node->getCondition()->traverse(this); + + --depth; + OutputTreeText(out, node, depth); + out.debug << "body\n"; + ++depth; + 
node->getBody()->traverse(this); + + --depth; + + return false; +} + +// +// This function is the one to call externally to start the traversal. +// Individual functions can be initialized to 0 to skip processing of that +// type of node. It's children will still be processed. +// +void TIntermediate::output(TInfoSink& infoSink, bool tree) +{ + infoSink.debug << "Shader version: " << version << "\n"; + if (requestedExtensions.size() > 0) { + for (auto extIt = requestedExtensions.begin(); extIt != requestedExtensions.end(); ++extIt) + infoSink.debug << "Requested " << *extIt << "\n"; + } + + if (xfbMode) + infoSink.debug << "in xfb mode\n"; + + switch (language) { + case EShLangVertex: + break; + + case EShLangTessControl: + infoSink.debug << "vertices = " << vertices << "\n"; + + if (inputPrimitive != ElgNone) + infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n"; + if (vertexSpacing != EvsNone) + infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n"; + if (vertexOrder != EvoNone) + infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n"; + break; + + case EShLangTessEvaluation: + infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n"; + infoSink.debug << "vertex spacing = " << TQualifier::getVertexSpacingString(vertexSpacing) << "\n"; + infoSink.debug << "triangle order = " << TQualifier::getVertexOrderString(vertexOrder) << "\n"; + if (pointMode) + infoSink.debug << "using point mode\n"; + break; + + case EShLangGeometry: + infoSink.debug << "invocations = " << invocations << "\n"; + infoSink.debug << "max_vertices = " << vertices << "\n"; + infoSink.debug << "input primitive = " << TQualifier::getGeometryString(inputPrimitive) << "\n"; + infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n"; + break; + + case EShLangFragment: + if (pixelCenterInteger) + infoSink.debug << "gl_FragCoord pixel center is integer\n"; + if (originUpperLeft) + infoSink.debug << "gl_FragCoord origin is upper left\n"; + if (earlyFragmentTests) + infoSink.debug << "using early_fragment_tests\n"; + if (postDepthCoverage) + infoSink.debug << "using post_depth_coverage\n"; + if (depthLayout != EldNone) + infoSink.debug << "using " << TQualifier::getLayoutDepthString(depthLayout) << "\n"; + if (blendEquations != 0) { + infoSink.debug << "using"; + // blendEquations is a mask, decode it + for (TBlendEquationShift be = (TBlendEquationShift)0; be < EBlendCount; be = (TBlendEquationShift)(be + 1)) { + if (blendEquations & (1 << be)) + infoSink.debug << " " << TQualifier::getBlendEquationString(be); + } + infoSink.debug << "\n"; + } + if (interlockOrdering != EioNone) + infoSink.debug << "interlock ordering = " << TQualifier::getInterlockOrderingString(interlockOrdering) << "\n"; + break; + + case EShLangMeshNV: + infoSink.debug << "max_vertices = " << vertices << "\n"; + infoSink.debug << "max_primitives = " << primitives << "\n"; + infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n"; + // Fall through + case EShLangTaskNV: + // Fall through + case EShLangCompute: + infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n"; + { + if (localSizeSpecId[0] != TQualifier::layoutNotSet || + localSizeSpecId[1] != TQualifier::layoutNotSet || + localSizeSpecId[2] != TQualifier::layoutNotSet) { + infoSink.debug << "local_size 
ids = (" << + localSizeSpecId[0] << ", " << + localSizeSpecId[1] << ", " << + localSizeSpecId[2] << ")\n"; + } + } + break; + + default: + break; + } + + if (treeRoot == 0 || ! tree) + return; + + TOutputTraverser it(infoSink); + if (getBinaryDoubleOutput()) + it.setDoubleOutput(TOutputTraverser::BinaryDoubleOutput); + treeRoot->traverse(&it); +} + +} // end namespace glslang + +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE diff --git a/src/third_party/glslang/glslang/MachineIndependent/iomapper.cpp b/src/third_party/glslang/glslang/MachineIndependent/iomapper.cpp new file mode 100644 index 0000000..905cf65 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/iomapper.cpp @@ -0,0 +1,1291 @@ +// +// Copyright (C) 2016-2017 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +#include "../Include/Common.h" +#include "../Include/InfoSink.h" + +#include "gl_types.h" +#include "iomapper.h" + +// +// Map IO bindings. +// +// High-level algorithm for one stage: +// +// 1. Traverse all code (live+dead) to find the explicitly provided bindings. +// +// 2. Traverse (just) the live code to determine which non-provided bindings +// require auto-numbering. We do not auto-number dead ones. +// +// 3. Traverse all the code to apply the bindings: +// a. explicitly given bindings are offset according to their type +// b. implicit live bindings are auto-numbered into the holes, using +// any open binding slot. +// c. implicit dead bindings are left un-bound. 
+// + +namespace glslang { + +class TVarGatherTraverser : public TLiveTraverser {public: + TVarGatherTraverser(const TIntermediate& i, bool traverseDeadCode, TVarLiveMap& inList, TVarLiveMap& outList, TVarLiveMap& uniformList) + : TLiveTraverser(i, traverseDeadCode, true, true, false) + , inputList(inList) + , outputList(outList) + , uniformList(uniformList) + { + } + + virtual void visitSymbol(TIntermSymbol* base) + { + TVarLiveMap* target = nullptr; + if (base->getQualifier().storage == EvqVaryingIn) + target = &inputList; + else if (base->getQualifier().storage == EvqVaryingOut) + target = &outputList; + else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().isPushConstant()) + target = &uniformList; + // If a global is being visited, then we should also traverse it in case its evaluation + // ends up visiting inputs we want to tag as live + else if (base->getQualifier().storage == EvqGlobal) + addGlobalReference(base->getName()); + + if (target) { + TVarEntryInfo ent = {base->getId(), base, ! traverseAll}; + ent.stage = intermediate.getStage(); + TVarLiveMap::iterator at = target->find( + ent.symbol->getName()); // std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById()); + if (at != target->end() && at->second.id == ent.id) + at->second.live = at->second.live || ! traverseAll; // update live state + else + (*target)[ent.symbol->getName()] = ent; + } + } + +private: + TVarLiveMap& inputList; + TVarLiveMap& outputList; + TVarLiveMap& uniformList; +}; + +class TVarSetTraverser : public TLiveTraverser +{public: + TVarSetTraverser(const TIntermediate& i, const TVarLiveMap& inList, const TVarLiveMap& outList, const TVarLiveMap& uniformList) + : TLiveTraverser(i, true, true, true, false) + , inputList(inList) + , outputList(outList) + , uniformList(uniformList) + { + } + + virtual void visitSymbol(TIntermSymbol* base) { + const TVarLiveMap* source; + if (base->getQualifier().storage == EvqVaryingIn) + source = &inputList; + else if (base->getQualifier().storage == EvqVaryingOut) + source = &outputList; + else if (base->getQualifier().isUniformOrBuffer()) + source = &uniformList; + else + return; + + TVarEntryInfo ent = { base->getId() }; + TVarLiveMap::const_iterator at = source->find(base->getName()); + if (at == source->end()) + return; + + if (at->second.id != ent.id) + return; + + if (at->second.newBinding != -1) + base->getWritableType().getQualifier().layoutBinding = at->second.newBinding; + if (at->second.newSet != -1) + base->getWritableType().getQualifier().layoutSet = at->second.newSet; + if (at->second.newLocation != -1) + base->getWritableType().getQualifier().layoutLocation = at->second.newLocation; + if (at->second.newComponent != -1) + base->getWritableType().getQualifier().layoutComponent = at->second.newComponent; + if (at->second.newIndex != -1) + base->getWritableType().getQualifier().layoutIndex = at->second.newIndex; + } + + private: + const TVarLiveMap& inputList; + const TVarLiveMap& outputList; + const TVarLiveMap& uniformList; +}; + +struct TNotifyUniformAdaptor +{ + EShLanguage stage; + TIoMapResolver& resolver; + inline TNotifyUniformAdaptor(EShLanguage s, TIoMapResolver& r) + : stage(s) + , resolver(r) + { + } + + inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) + { + resolver.notifyBinding(stage, entKey.second); + } + +private: + TNotifyUniformAdaptor& operator=(TNotifyUniformAdaptor&) = delete; +}; + +struct TNotifyInOutAdaptor +{ + EShLanguage stage; + TIoMapResolver& resolver; + inline TNotifyInOutAdaptor(EShLanguage
s, TIoMapResolver& r) + : stage(s) + , resolver(r) + { + } + + inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) + { + resolver.notifyInOut(stage, entKey.second); + } + +private: + TNotifyInOutAdaptor& operator=(TNotifyInOutAdaptor&) = delete; +}; + +struct TResolverUniformAdaptor { + TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e) + : stage(s) + , resolver(r) + , infoSink(i) + , error(e) + { + } + + inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) { + TVarEntryInfo& ent = entKey.second; + ent.newLocation = -1; + ent.newComponent = -1; + ent.newBinding = -1; + ent.newSet = -1; + ent.newIndex = -1; + const bool isValid = resolver.validateBinding(stage, ent); + if (isValid) { + resolver.resolveBinding(stage, ent); + resolver.resolveSet(stage, ent); + resolver.resolveUniformLocation(stage, ent); + + if (ent.newBinding != -1) { + if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) { + TString err = "mapped binding out of range: " + entKey.first; + + infoSink.info.message(EPrefixInternalError, err.c_str()); + error = true; + } + } + if (ent.newSet != -1) { + if (ent.newSet >= int(TQualifier::layoutSetEnd)) { + TString err = "mapped set out of range: " + entKey.first; + + infoSink.info.message(EPrefixInternalError, err.c_str()); + error = true; + } + } + } else { + TString errorMsg = "Invalid binding: " + entKey.first; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + error = true; + } + } + + inline void setStage(EShLanguage s) { stage = s; } + + EShLanguage stage; + TIoMapResolver& resolver; + TInfoSink& infoSink; + bool& error; + +private: + TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&) = delete; +}; + +struct TResolverInOutAdaptor { + TResolverInOutAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e) + : stage(s) + , resolver(r) + , infoSink(i) + , error(e) + { + } + + inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) + { + TVarEntryInfo& ent = entKey.second; + ent.newLocation = -1; + ent.newComponent = -1; + ent.newBinding = -1; + ent.newSet = -1; + ent.newIndex = -1; + const bool isValid = resolver.validateInOut(stage, ent); + if (isValid) { + resolver.resolveInOutLocation(stage, ent); + resolver.resolveInOutComponent(stage, ent); + resolver.resolveInOutIndex(stage, ent); + } else { + TString errorMsg; + if (ent.symbol->getType().getQualifier().semanticName != nullptr) { + errorMsg = "Invalid shader In/Out variable semantic: "; + errorMsg += ent.symbol->getType().getQualifier().semanticName; + } else { + errorMsg = "Invalid shader In/Out variable: "; + errorMsg += ent.symbol->getName(); + } + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + error = true; + } + } + + inline void setStage(EShLanguage s) { stage = s; } + + EShLanguage stage; + TIoMapResolver& resolver; + TInfoSink& infoSink; + bool& error; + +private: + TResolverInOutAdaptor& operator=(TResolverInOutAdaptor&) = delete; +}; + +// The class is used for reserving explicit uniform locations and ubo/ssbo/opaque bindings + +struct TSymbolValidater +{ + TSymbolValidater(TIoMapResolver& r, TInfoSink& i, TVarLiveMap* in[EShLangCount], TVarLiveMap* out[EShLangCount], + TVarLiveMap* uniform[EShLangCount], bool& hadError) + : preStage(EShLangCount) + , currentStage(EShLangCount) + , nextStage(EShLangCount) + , resolver(r) + , infoSink(i) + , hadError(hadError) + { + memcpy(inVarMaps, in, EShLangCount * (sizeof(TVarLiveMap*))); + memcpy(outVarMaps, out, EShLangCount * (sizeof(TVarLiveMap*))); + memcpy(uniformVarMap, uniform, EShLangCount * (sizeof(TVarLiveMap*)));
+ } + + inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) { + TVarEntryInfo& ent1 = entKey.second; + TIntermSymbol* base = ent1.symbol; + const TType& type = ent1.symbol->getType(); + const TString& name = entKey.first; + EShLanguage stage = ent1.stage; + TString mangleName1, mangleName2; + if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + nextStage = EShLangCount; + for (int i = currentStage + 1; i < EShLangCount; i++) { + if (inVarMaps[i] != nullptr) { + nextStage = static_cast<EShLanguage>(i); + break; + } + } + } + + if (type.getQualifier().isArrayedIo(stage)) { + TType subType(type, 0); + subType.appendMangledName(mangleName1); + } else { + type.appendMangledName(mangleName1); + } + + if (base->getQualifier().storage == EvqVaryingIn) { + // validate stage in; + if (preStage == EShLangCount) + return; + if (name == "gl_PerVertex") + return; + if (outVarMaps[preStage] != nullptr) { + auto ent2 = outVarMaps[preStage]->find(name); + if (ent2 != outVarMaps[preStage]->end()) { + if (ent2->second.symbol->getType().getQualifier().isArrayedIo(preStage)) { + TType subType(ent2->second.symbol->getType(), 0); + subType.appendMangledName(mangleName2); + } + else { + ent2->second.symbol->getType().appendMangledName(mangleName2); + } + if (mangleName1 == mangleName2) + return; + else { + TString err = "Invalid In/Out variable type : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + } + return; + } + } else if (base->getQualifier().storage == EvqVaryingOut) { + // validate stage out; + if (nextStage == EShLangCount) + return; + if (name == "gl_PerVertex") + return; + if (outVarMaps[nextStage] != nullptr) { + auto ent2 = inVarMaps[nextStage]->find(name); + if (ent2 != inVarMaps[nextStage]->end()) { + if (ent2->second.symbol->getType().getQualifier().isArrayedIo(nextStage)) { + TType subType(ent2->second.symbol->getType(), 0); + subType.appendMangledName(mangleName2); + } + else { + ent2->second.symbol->getType().appendMangledName(mangleName2); + } + if (mangleName1 == mangleName2) + return; + else { + TString err = "Invalid In/Out variable type : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + } + return; + } + } else if (base->getQualifier().isUniformOrBuffer() && ! base->getQualifier().isPushConstant()) { + // validate uniform type; + for (int i = 0; i < EShLangCount; i++) { + if (i != currentStage && outVarMaps[i] != nullptr) { + auto ent2 = uniformVarMap[i]->find(name); + if (ent2 != uniformVarMap[i]->end()) { + ent2->second.symbol->getType().appendMangledName(mangleName2); + if (mangleName1 != mangleName2) { + TString err = "Invalid Uniform variable type : " + entKey.first; + infoSink.info.message(EPrefixInternalError, err.c_str()); + hadError = true; + } + mangleName2.clear(); + } + } + } + } + } + TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount], *uniformVarMap[EShLangCount]; + // Used to mark the previous stage, to get more interface symbol information.
+ EShLanguage preStage, currentStage, nextStage; + // Used to mark the current shader stage for the resolver + TIoMapResolver& resolver; + TInfoSink& infoSink; + bool& hadError; + +private: + TSymbolValidater& operator=(TSymbolValidater&) = delete; +}; + +struct TSlotCollector { + TSlotCollector(TIoMapResolver& r, TInfoSink& i) : resolver(r), infoSink(i) { } + + inline void operator()(std::pair<const TString, TVarEntryInfo>& entKey) { + resolver.reserverStorageSlot(entKey.second, infoSink); + resolver.reserverResourceSlot(entKey.second, infoSink); + } + TIoMapResolver& resolver; + TInfoSink& infoSink; + +private: + TSlotCollector& operator=(TSlotCollector&) = delete; +}; + +TDefaultIoResolverBase::TDefaultIoResolverBase(const TIntermediate& intermediate) + : intermediate(intermediate) + , nextUniformLocation(intermediate.getUniformLocationBase()) + , nextInputLocation(0) + , nextOutputLocation(0) +{ + memset(stageMask, false, sizeof(bool) * (EShLangCount + 1)); +} + +int TDefaultIoResolverBase::getBaseBinding(TResourceType res, unsigned int set) const { + return selectBaseBinding(intermediate.getShiftBinding(res), intermediate.getShiftBindingForSet(res, set)); +} + +const std::vector<std::string>& TDefaultIoResolverBase::getResourceSetBinding() const { + return intermediate.getResourceSetBinding(); +} + +bool TDefaultIoResolverBase::doAutoBindingMapping() const { return intermediate.getAutoMapBindings(); } + +bool TDefaultIoResolverBase::doAutoLocationMapping() const { return intermediate.getAutoMapLocations(); } + +TDefaultIoResolverBase::TSlotSet::iterator TDefaultIoResolverBase::findSlot(int set, int slot) { + return std::lower_bound(slots[set].begin(), slots[set].end(), slot); +} + +bool TDefaultIoResolverBase::checkEmpty(int set, int slot) { + TSlotSet::iterator at = findSlot(set, slot); + return ! (at != slots[set].end() && *at == slot); +} + +int TDefaultIoResolverBase::reserveSlot(int set, int slot, int size) { + TSlotSet::iterator at = findSlot(set, slot); + // tolerate aliasing, by not double-recording aliases + // (policy about appropriateness of the alias is higher up) + for (int i = 0; i < size; i++) { + if (at == slots[set].end() || *at != slot + i) + at = slots[set].insert(at, slot + i); + ++at; + } + return slot; +} + +int TDefaultIoResolverBase::getFreeSlot(int set, int base, int size) { + TSlotSet::iterator at = findSlot(set, base); + if (at == slots[set].end()) + return reserveSlot(set, base, size); + // look for a big enough gap + for (; at != slots[set].end(); ++at) { + if (*at - base >= size) + break; + base = *at + 1; + } + return reserveSlot(set, base, size); +} + +int TDefaultIoResolverBase::resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + if (type.getQualifier().hasSet()) { + return ent.newSet = type.getQualifier().layoutSet; + } + // If a command line or API option requested a single descriptor set, use that (if not overridden by spaceN) + if (getResourceSetBinding().size() == 1) { + return ent.newSet = atoi(getResourceSetBinding()[0].c_str()); + } + return ent.newSet = 0; +} + +int TDefaultIoResolverBase::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + const char* name = ent.symbol->getName().c_str(); + // kick out of not doing this + if (!
doAutoLocationMapping()) { + return ent.newLocation = -1; + } + // no locations added if already present, a built-in variable, a block, or an opaque + if (type.getQualifier().hasLocation() || type.isBuiltIn() || type.getBasicType() == EbtBlock || + type.isAtomic() || (type.containsOpaque() && intermediate.getSpv().openGl == 0)) { + return ent.newLocation = -1; + } + // no locations on blocks of built-in variables + if (type.isStruct()) { + if (type.getStruct()->size() < 1) { + return ent.newLocation = -1; + } + if ((*type.getStruct())[0].type->isBuiltIn()) { + return ent.newLocation = -1; + } + } + int location = intermediate.getUniformLocationOverride(name); + if (location != -1) { + return ent.newLocation = location; + } + location = nextUniformLocation; + nextUniformLocation += TIntermediate::computeTypeUniformLocationSize(type); + return ent.newLocation = location; +} + +int TDefaultIoResolverBase::resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + // kick out of not doing this + if (! doAutoLocationMapping()) { + return ent.newLocation = -1; + } + + // no locations added if already present, or a built-in variable + if (type.getQualifier().hasLocation() || type.isBuiltIn()) { + return ent.newLocation = -1; + } + + // no locations on blocks of built-in variables + if (type.isStruct()) { + if (type.getStruct()->size() < 1) { + return ent.newLocation = -1; + } + if ((*type.getStruct())[0].type->isBuiltIn()) { + return ent.newLocation = -1; + } + } + // point to the right input or output location counter + int& nextLocation = type.getQualifier().isPipeInput() ? nextInputLocation : nextOutputLocation; + // Placeholder. This does not do proper cross-stage lining up, nor + // work with mixed location/no-location declarations. + int location = nextLocation; + int typeLocationSize; + // Don’t take into account the outer-most array if the stage’s + // interface is automatically an array. + typeLocationSize = computeTypeLocationSize(type, stage); + nextLocation += typeLocationSize; + return ent.newLocation = location; +} + +int TDefaultIoResolverBase::resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) { + return ent.newComponent = -1; +} + +int TDefaultIoResolverBase::resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) { return ent.newIndex = -1; } + +uint32_t TDefaultIoResolverBase::computeTypeLocationSize(const TType& type, EShLanguage stage) { + int typeLocationSize; + // Don’t take into account the outer-most array if the stage’s + // interface is automatically an array. 
+ if (type.getQualifier().isArrayedIo(stage)) { + TType elementType(type, 0); + typeLocationSize = TIntermediate::computeTypeLocationSize(elementType, stage); + } else { + typeLocationSize = TIntermediate::computeTypeLocationSize(type, stage); + } + return typeLocationSize; +} + +//TDefaultGlslIoResolver +TResourceType TDefaultGlslIoResolver::getResourceType(const glslang::TType& type) { + if (isImageType(type)) { + return EResImage; + } + if (isTextureType(type)) { + return EResTexture; + } + if (isSsboType(type)) { + return EResSsbo; + } + if (isSamplerType(type)) { + return EResSampler; + } + if (isUboType(type)) { + return EResUbo; + } + return EResCount; +} + +TDefaultGlslIoResolver::TDefaultGlslIoResolver(const TIntermediate& intermediate) + : TDefaultIoResolverBase(intermediate) + , preStage(EShLangCount) + , currentStage(EShLangCount) +{ } + +int TDefaultGlslIoResolver::resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + } + // kick out of not doing this + if (! doAutoLocationMapping()) { + return ent.newLocation = -1; + } + // expand the location to each element if the symbol is a struct or array + if (type.getQualifier().hasLocation()) { + return ent.newLocation = type.getQualifier().layoutLocation; + } + // no locations added if already present, or a built-in variable + if (type.isBuiltIn()) { + return ent.newLocation = -1; + } + // no locations on blocks of built-in variables + if (type.isStruct()) { + if (type.getStruct()->size() < 1) { + return ent.newLocation = -1; + } + if ((*type.getStruct())[0].type->isBuiltIn()) { + return ent.newLocation = -1; + } + } + int typeLocationSize = computeTypeLocationSize(type, stage); + int location = type.getQualifier().layoutLocation; + bool hasLocation = false; + EShLanguage keyStage(EShLangCount); + TStorageQualifier storage; + storage = EvqInOut; + if (type.getQualifier().isPipeInput()) { + // If this symbol is a input, search pre stage's out + keyStage = preStage; + } + if (type.getQualifier().isPipeOutput()) { + // If this symbol is a output, search next stage's in + keyStage = currentStage; + } + // The in/out in current stage is not declared with location, but it is possible declared + // with explicit location in other stages, find the storageSlotMap firstly to check whether + // the in/out has location + int resourceKey = buildStorageKey(keyStage, storage); + if (! storageSlotMap[resourceKey].empty()) { + TVarSlotMap::iterator iter = storageSlotMap[resourceKey].find(name); + if (iter != storageSlotMap[resourceKey].end()) { + // If interface resource be found, set it has location and this symbol's new location + // equal the symbol's explicit location declaration in pre or next stage. + // + // vs: out vec4 a; + // fs: layout(..., location = 3,...) in vec4 a; + hasLocation = true; + location = iter->second; + // if we want deal like that: + // vs: layout(location=4) out vec4 a; + // out vec4 b; + // + // fs: in vec4 a; + // layout(location = 4) in vec4 b; + // we need retraverse the map. + } + if (! hasLocation) { + // If interface resource note found, It's mean the location in two stage are both implicit declarat. + // So we should find a new slot for this interface. 
+ // + // vs: out vec4 a; + // fs: in vec4 a; + location = getFreeSlot(resourceKey, 0, typeLocationSize); + storageSlotMap[resourceKey][name] = location; + } + } else { + // the first interface declarated in a program. + TVarSlotMap varSlotMap; + location = getFreeSlot(resourceKey, 0, typeLocationSize); + varSlotMap[name] = location; + storageSlotMap[resourceKey] = varSlotMap; + } + //Update location + return ent.newLocation = location; +} + +int TDefaultGlslIoResolver::resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + // kick out of not doing this + if (! doAutoLocationMapping()) { + return ent.newLocation = -1; + } + // expand the location to each element if the symbol is a struct or array + if (type.getQualifier().hasLocation() && (type.isStruct() || type.isArray())) { + return ent.newLocation = type.getQualifier().layoutLocation; + } else { + // no locations added if already present, a built-in variable, a block, or an opaque + if (type.getQualifier().hasLocation() || type.isBuiltIn() || type.getBasicType() == EbtBlock || + type.isAtomic() || (type.containsOpaque() && intermediate.getSpv().openGl == 0)) { + return ent.newLocation = -1; + } + // no locations on blocks of built-in variables + if (type.isStruct()) { + if (type.getStruct()->size() < 1) { + return ent.newLocation = -1; + } + if ((*type.getStruct())[0].type->isBuiltIn()) { + return ent.newLocation = -1; + } + } + } + int location = intermediate.getUniformLocationOverride(name.c_str()); + if (location != -1) { + return ent.newLocation = location; + } + + int size = TIntermediate::computeTypeUniformLocationSize(type); + + // The uniform in current stage is not declared with location, but it is possible declared + // with explicit location in other stages, find the storageSlotMap firstly to check whether + // the uniform has location + bool hasLocation = false; + int resourceKey = buildStorageKey(EShLangCount, EvqUniform); + TVarSlotMap& slotMap = storageSlotMap[resourceKey]; + // Check dose shader program has uniform resource + if (! slotMap.empty()) { + // If uniform resource not empty, try find a same name uniform + TVarSlotMap::iterator iter = slotMap.find(name); + if (iter != slotMap.end()) { + // If uniform resource be found, set it has location and this symbol's new location + // equal the uniform's explicit location declaration in other stage. + // + // vs: uniform vec4 a; + // fs: layout(..., location = 3,...) uniform vec4 a; + hasLocation = true; + location = iter->second; + } + if (! hasLocation) { + // No explicit location declaration in other stage. + // So we should find a new slot for this uniform. + // + // vs: uniform vec4 a; + // fs: uniform vec4 a; + location = getFreeSlot(resourceKey, 0, computeTypeLocationSize(type, currentStage)); + storageSlotMap[resourceKey][name] = location; + } + } else { + // the first uniform declaration in a program. + TVarSlotMap varSlotMap; + location = getFreeSlot(resourceKey, 0, size); + varSlotMap[name] = location; + storageSlotMap[resourceKey] = varSlotMap; + } + return ent.newLocation = location; +} + +int TDefaultGlslIoResolver::resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + // On OpenGL arrays of opaque types take a separate binding for each element + int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? 
type.getCumulativeArraySize() : 1; + TResourceType resource = getResourceType(type); + // don't need to handle uniform symbol, it will be handled in resolveUniformLocation + if (resource == EResUbo && type.getBasicType() != EbtBlock) { + return ent.newBinding = -1; + } + // There is no 'set' qualifier in OpenGL shading language, each resource has its own + // binding name space, so remap the 'set' to resource type which make each resource + // binding is valid from 0 to MAX_XXRESOURCE_BINDINGS + int set = resource; + if (resource < EResCount) { + if (type.getQualifier().hasBinding()) { + ent.newBinding = reserveSlot(set, getBaseBinding(resource, set) + type.getQualifier().layoutBinding, numBindings); + return ent.newBinding; + } else if (ent.live && doAutoBindingMapping()) { + // The resource in current stage is not declared with binding, but it is possible declared + // with explicit binding in other stages, find the resourceSlotMap firstly to check whether + // the resource has binding, don't need to allocate if it already has a binding + bool hasBinding = false; + if (! resourceSlotMap[resource].empty()) { + TVarSlotMap::iterator iter = resourceSlotMap[resource].find(name); + if (iter != resourceSlotMap[resource].end()) { + hasBinding = true; + ent.newBinding = iter->second; + } + } + if (! hasBinding) { + TVarSlotMap varSlotMap; + // find free slot, the caller did make sure it passes all vars with binding + // first and now all are passed that do not have a binding and needs one + int binding = getFreeSlot(resource, getBaseBinding(resource, set), numBindings); + varSlotMap[name] = binding; + resourceSlotMap[resource] = varSlotMap; + ent.newBinding = binding; + } + return ent.newBinding; + } + } + return ent.newBinding = -1; +} + +void TDefaultGlslIoResolver::beginResolve(EShLanguage stage) { + // reset stage state + if (stage == EShLangCount) + preStage = currentStage = stage; + // update stage state + else if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + } +} + +void TDefaultGlslIoResolver::endResolve(EShLanguage /*stage*/) { + // TODO nothing +} + +void TDefaultGlslIoResolver::beginCollect(EShLanguage stage) { + // reset stage state + if (stage == EShLangCount) + preStage = currentStage = stage; + // update stage state + else if (currentStage != stage) { + preStage = currentStage; + currentStage = stage; + } +} + +void TDefaultGlslIoResolver::endCollect(EShLanguage /*stage*/) { + // TODO nothing +} + +void TDefaultGlslIoResolver::reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + TStorageQualifier storage = type.getQualifier().storage; + EShLanguage stage(EShLangCount); + switch (storage) { + case EvqUniform: + if (type.getBasicType() != EbtBlock && type.getQualifier().hasLocation()) { + // + // Reserve the slots for the uniforms who has explicit location + int storageKey = buildStorageKey(EShLangCount, EvqUniform); + int location = type.getQualifier().layoutLocation; + TVarSlotMap& varSlotMap = storageSlotMap[storageKey]; + TVarSlotMap::iterator iter = varSlotMap.find(name); + if (iter == varSlotMap.end()) { + int numLocations = TIntermediate::computeTypeUniformLocationSize(type); + reserveSlot(storageKey, location, numLocations); + varSlotMap[name] = location; + } else { + // Allocate location by name for OpenGL driver, so the uniform in different + // stages should be declared with the same location + if (iter->second != location) { + 
TString errorMsg = "Invalid location: " + name; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + hasError = true; + } + } + } + break; + case EvqVaryingIn: + case EvqVaryingOut: + // + // Reserve the slots for the inout who has explicit location + if (type.getQualifier().hasLocation()) { + stage = storage == EvqVaryingIn ? preStage : stage; + stage = storage == EvqVaryingOut ? currentStage : stage; + int storageKey = buildStorageKey(stage, EvqInOut); + int location = type.getQualifier().layoutLocation; + TVarSlotMap& varSlotMap = storageSlotMap[storageKey]; + TVarSlotMap::iterator iter = varSlotMap.find(name); + if (iter == varSlotMap.end()) { + int numLocations = TIntermediate::computeTypeUniformLocationSize(type); + reserveSlot(storageKey, location, numLocations); + varSlotMap[name] = location; + } else { + // Allocate location by name for OpenGL driver, so the uniform in different + // stages should be declared with the same location + if (iter->second != location) { + TString errorMsg = "Invalid location: " + name; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + hasError = true; + } + } + } + break; + default: + break; + } +} + +void TDefaultGlslIoResolver::reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) { + const TType& type = ent.symbol->getType(); + const TString& name = getAccessName(ent.symbol); + int resource = getResourceType(type); + if (type.getQualifier().hasBinding()) { + TVarSlotMap& varSlotMap = resourceSlotMap[resource]; + TVarSlotMap::iterator iter = varSlotMap.find(name); + int binding = type.getQualifier().layoutBinding; + if (iter == varSlotMap.end()) { + // Reserve the slots for the ubo, ssbo and opaques who has explicit binding + int numBindings = type.isSizedArray() ? type.getCumulativeArraySize() : 1; + varSlotMap[name] = binding; + reserveSlot(resource, binding, numBindings); + } else { + // Allocate binding by name for OpenGL driver, so the resource in different + // stages should be declared with the same binding + if (iter->second != binding) { + TString errorMsg = "Invalid binding: " + name; + infoSink.info.message(EPrefixInternalError, errorMsg.c_str()); + hasError = true; + } + } + } +} + +const TString& TDefaultGlslIoResolver::getAccessName(const TIntermSymbol* symbol) +{ + return symbol->getBasicType() == EbtBlock ? + symbol->getType().getTypeName() : + symbol->getName(); +} + +//TDefaultGlslIoResolver end + +/* + * Basic implementation of glslang::TIoMapResolver that replaces the + * previous offset behavior. + * It does the same, uses the offsets for the corresponding uniform + * types. Also respects the EOptionAutoMapBindings flag and binds + * them if needed. 
+ */ +/* + * Default resolver + */ +struct TDefaultIoResolver : public TDefaultIoResolverBase { + TDefaultIoResolver(const TIntermediate& intermediate) : TDefaultIoResolverBase(intermediate) { } + + bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; } + + TResourceType getResourceType(const glslang::TType& type) override { + if (isImageType(type)) { + return EResImage; + } + if (isTextureType(type)) { + return EResTexture; + } + if (isSsboType(type)) { + return EResSsbo; + } + if (isSamplerType(type)) { + return EResSampler; + } + if (isUboType(type)) { + return EResUbo; + } + return EResCount; + } + + int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override { + const TType& type = ent.symbol->getType(); + const int set = getLayoutSet(type); + // On OpenGL arrays of opaque types take a seperate binding for each element + int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1; + TResourceType resource = getResourceType(type); + if (resource < EResCount) { + if (type.getQualifier().hasBinding()) { + return ent.newBinding = reserveSlot( + set, getBaseBinding(resource, set) + type.getQualifier().layoutBinding, numBindings); + } else if (ent.live && doAutoBindingMapping()) { + // find free slot, the caller did make sure it passes all vars with binding + // first and now all are passed that do not have a binding and needs one + return ent.newBinding = getFreeSlot(set, getBaseBinding(resource, set), numBindings); + } + } + return ent.newBinding = -1; + } +}; + +#ifdef ENABLE_HLSL +/******************************************************************************** +The following IO resolver maps types in HLSL register space, as follows: + +t - for shader resource views (SRV) + TEXTURE1D + TEXTURE1DARRAY + TEXTURE2D + TEXTURE2DARRAY + TEXTURE3D + TEXTURECUBE + TEXTURECUBEARRAY + TEXTURE2DMS + TEXTURE2DMSARRAY + STRUCTUREDBUFFER + BYTEADDRESSBUFFER + BUFFER + TBUFFER + +s - for samplers + SAMPLER + SAMPLER1D + SAMPLER2D + SAMPLER3D + SAMPLERCUBE + SAMPLERSTATE + SAMPLERCOMPARISONSTATE + +u - for unordered access views (UAV) + RWBYTEADDRESSBUFFER + RWSTRUCTUREDBUFFER + APPENDSTRUCTUREDBUFFER + CONSUMESTRUCTUREDBUFFER + RWBUFFER + RWTEXTURE1D + RWTEXTURE1DARRAY + RWTEXTURE2D + RWTEXTURE2DARRAY + RWTEXTURE3D + +b - for constant buffer views (CBV) + CBUFFER + CONSTANTBUFFER + ********************************************************************************/ +struct TDefaultHlslIoResolver : public TDefaultIoResolverBase { + TDefaultHlslIoResolver(const TIntermediate& intermediate) : TDefaultIoResolverBase(intermediate) { } + + bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; } + + TResourceType getResourceType(const glslang::TType& type) override { + if (isUavType(type)) { + return EResUav; + } + if (isSrvType(type)) { + return EResTexture; + } + if (isSamplerType(type)) { + return EResSampler; + } + if (isUboType(type)) { + return EResUbo; + } + return EResCount; + } + + int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override { + const TType& type = ent.symbol->getType(); + const int set = getLayoutSet(type); + TResourceType resource = getResourceType(type); + if (resource < EResCount) { + if (type.getQualifier().hasBinding()) { + return ent.newBinding = reserveSlot(set, getBaseBinding(resource, set) + type.getQualifier().layoutBinding); + } else if (ent.live && doAutoBindingMapping()) { + // find free slot, the caller did make sure it 
passes all vars with binding + // first and now all are passed that do not have a binding and needs one + return ent.newBinding = getFreeSlot(set, getBaseBinding(resource, set)); + } + } + return ent.newBinding = -1; + } +}; +#endif + +// Map I/O variables to provided offsets, and make bindings for +// unbound but live variables. +// +// Returns false if the input is too malformed to do this. +bool TIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSink& infoSink, TIoMapResolver* resolver) { + bool somethingToDo = ! intermediate.getResourceSetBinding().empty() || intermediate.getAutoMapBindings() || + intermediate.getAutoMapLocations(); + // Restrict the stricter condition to further check 'somethingToDo' only if 'somethingToDo' has not been set, reduce + // unnecessary or insignificant for-loop operation after 'somethingToDo' have been true. + for (int res = 0; (res < EResCount && !somethingToDo); ++res) { + somethingToDo = somethingToDo || (intermediate.getShiftBinding(TResourceType(res)) != 0) || + intermediate.hasShiftBindingForSet(TResourceType(res)); + } + if (! somethingToDo && resolver == nullptr) + return true; + if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive()) + return false; + TIntermNode* root = intermediate.getTreeRoot(); + if (root == nullptr) + return false; + // if no resolver is provided, use the default resolver with the given shifts and auto map settings + TDefaultIoResolver defaultResolver(intermediate); +#ifdef ENABLE_HLSL + TDefaultHlslIoResolver defaultHlslResolver(intermediate); + if (resolver == nullptr) { + // TODO: use a passed in IO mapper for this + if (intermediate.usingHlslIoMapping()) + resolver = &defaultHlslResolver; + else + resolver = &defaultResolver; + } + resolver->addStage(stage); +#else + resolver = &defaultResolver; +#endif + + TVarLiveMap inVarMap, outVarMap, uniformVarMap; + TVarLiveVector inVector, outVector, uniformVector; + TVarGatherTraverser iter_binding_all(intermediate, true, inVarMap, outVarMap, uniformVarMap); + TVarGatherTraverser iter_binding_live(intermediate, false, inVarMap, outVarMap, uniformVarMap); + root->traverse(&iter_binding_all); + iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str()); + while (! iter_binding_live.destinations.empty()) { + TIntermNode* destination = iter_binding_live.destinations.back(); + iter_binding_live.destinations.pop_back(); + destination->traverse(&iter_binding_live); + } + + // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info. 
+ std::for_each(inVarMap.begin(), inVarMap.end(), + [&inVector](TVarLivePair p) { inVector.push_back(p); }); + std::sort(inVector.begin(), inVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + std::for_each(outVarMap.begin(), outVarMap.end(), + [&outVector](TVarLivePair p) { outVector.push_back(p); }); + std::sort(outVector.begin(), outVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + std::for_each(uniformVarMap.begin(), uniformVarMap.end(), + [&uniformVector](TVarLivePair p) { uniformVector.push_back(p); }); + std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + bool hadError = false; + TNotifyInOutAdaptor inOutNotify(stage, *resolver); + TNotifyUniformAdaptor uniformNotify(stage, *resolver); + TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError); + TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError); + resolver->beginNotifications(stage); + std::for_each(inVector.begin(), inVector.end(), inOutNotify); + std::for_each(outVector.begin(), outVector.end(), inOutNotify); + std::for_each(uniformVector.begin(), uniformVector.end(), uniformNotify); + resolver->endNotifications(stage); + resolver->beginResolve(stage); + std::for_each(inVector.begin(), inVector.end(), inOutResolve); + std::for_each(inVector.begin(), inVector.end(), [&inVarMap](TVarLivePair p) { + auto at = inVarMap.find(p.second.symbol->getName()); + if (at != inVarMap.end()) + at->second = p.second; + }); + std::for_each(outVector.begin(), outVector.end(), inOutResolve); + std::for_each(outVector.begin(), outVector.end(), [&outVarMap](TVarLivePair p) { + auto at = outVarMap.find(p.second.symbol->getName()); + if (at != outVarMap.end()) + at->second = p.second; + }); + std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve); + std::for_each(uniformVector.begin(), uniformVector.end(), [&uniformVarMap](TVarLivePair p) { + auto at = uniformVarMap.find(p.second.symbol->getName()); + if (at != uniformVarMap.end()) + at->second = p.second; + }); + resolver->endResolve(stage); + if (!hadError) { + TVarSetTraverser iter_iomap(intermediate, inVarMap, outVarMap, uniformVarMap); + root->traverse(&iter_iomap); + } + return !hadError; +} + +// Map I/O variables to provided offsets, and make bindings for +// unbound but live variables. +// +// Returns false if the input is too malformed to do this. +bool TGlslIoMapper::addStage(EShLanguage stage, TIntermediate& intermediate, TInfoSink& infoSink, TIoMapResolver* resolver) { + + bool somethingToDo = ! intermediate.getResourceSetBinding().empty() || intermediate.getAutoMapBindings() || + intermediate.getAutoMapLocations(); + // Restrict the stricter condition to further check 'somethingToDo' only if 'somethingToDo' has not been set, reduce + // unnecessary or insignificant for-loop operation after 'somethingToDo' have been true. + for (int res = 0; (res < EResCount && !somethingToDo); ++res) { + somethingToDo = somethingToDo || (intermediate.getShiftBinding(TResourceType(res)) != 0) || + intermediate.hasShiftBindingForSet(TResourceType(res)); + } + if (! 
somethingToDo && resolver == nullptr) { + return true; + } + if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive()) { + return false; + } + TIntermNode* root = intermediate.getTreeRoot(); + if (root == nullptr) { + return false; + } + // if no resolver is provided, use the default resolver with the given shifts and auto map settings + TDefaultGlslIoResolver defaultResolver(intermediate); + if (resolver == nullptr) { + resolver = &defaultResolver; + } + resolver->addStage(stage); + inVarMaps[stage] = new TVarLiveMap(); outVarMaps[stage] = new TVarLiveMap(); uniformVarMap[stage] = new TVarLiveMap(); + TVarGatherTraverser iter_binding_all(intermediate, true, *inVarMaps[stage], *outVarMaps[stage], + *uniformVarMap[stage]); + TVarGatherTraverser iter_binding_live(intermediate, false, *inVarMaps[stage], *outVarMaps[stage], + *uniformVarMap[stage]); + root->traverse(&iter_binding_all); + iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str()); + while (! iter_binding_live.destinations.empty()) { + TIntermNode* destination = iter_binding_live.destinations.back(); + iter_binding_live.destinations.pop_back(); + destination->traverse(&iter_binding_live); + } + + TNotifyInOutAdaptor inOutNotify(stage, *resolver); + TNotifyUniformAdaptor uniformNotify(stage, *resolver); + // Resolve current stage input symbol location with previous stage output here, + // uniform symbol, ubo, ssbo and opaque symbols are per-program resource, + // will resolve uniform symbol location and ubo/ssbo/opaque binding in doMap() + resolver->beginNotifications(stage); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), inOutNotify); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), inOutNotify); + std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), uniformNotify); + resolver->endNotifications(stage); + TSlotCollector slotCollector(*resolver, infoSink); + resolver->beginCollect(stage); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), slotCollector); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), slotCollector); + std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), slotCollector); + resolver->endCollect(stage); + intermediates[stage] = &intermediate; + return !hadError; +} + +bool TGlslIoMapper::doMap(TIoMapResolver* resolver, TInfoSink& infoSink) { + resolver->endResolve(EShLangCount); + if (!hadError) { + //Resolve uniform location, ubo/ssbo/opaque bindings across stages + TResolverUniformAdaptor uniformResolve(EShLangCount, *resolver, infoSink, hadError); + TResolverInOutAdaptor inOutResolve(EShLangCount, *resolver, infoSink, hadError); + TSymbolValidater symbolValidater(*resolver, infoSink, inVarMaps, outVarMaps, uniformVarMap, hadError); + TVarLiveVector uniformVector; + resolver->beginResolve(EShLangCount); + for (int stage = EShLangVertex; stage < EShLangCount; stage++) { + if (inVarMaps[stage] != nullptr) { + inOutResolve.setStage(EShLanguage(stage)); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), symbolValidater); + std::for_each(inVarMaps[stage]->begin(), inVarMaps[stage]->end(), inOutResolve); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), symbolValidater); + std::for_each(outVarMaps[stage]->begin(), outVarMaps[stage]->end(), inOutResolve); + } + if (uniformVarMap[stage] != nullptr) { + uniformResolve.setStage(EShLanguage(stage)); + // sort entries by priority. see TVarEntryInfo::TOrderByPriority for info. 
+ std::for_each(uniformVarMap[stage]->begin(), uniformVarMap[stage]->end(), + [&uniformVector](TVarLivePair p) { uniformVector.push_back(p); }); + } + } + std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + std::for_each(uniformVector.begin(), uniformVector.end(), symbolValidater); + std::for_each(uniformVector.begin(), uniformVector.end(), uniformResolve); + std::sort(uniformVector.begin(), uniformVector.end(), [](const TVarLivePair& p1, const TVarLivePair& p2) -> bool { + return TVarEntryInfo::TOrderByPriority()(p1.second, p2.second); + }); + resolver->endResolve(EShLangCount); + for (size_t stage = 0; stage < EShLangCount; stage++) { + if (intermediates[stage] != nullptr) { + // traverse each stage, set new location to each input/output and unifom symbol, set new binding to + // ubo, ssbo and opaque symbols + TVarLiveMap** pUniformVarMap = uniformVarMap; + std::for_each(uniformVector.begin(), uniformVector.end(), [pUniformVarMap, stage](TVarLivePair p) { + auto at = pUniformVarMap[stage]->find(p.second.symbol->getName()); + if (at != pUniformVarMap[stage]->end()) + at->second = p.second; + }); + TVarSetTraverser iter_iomap(*intermediates[stage], *inVarMaps[stage], *outVarMaps[stage], + *uniformVarMap[stage]); + intermediates[stage]->getTreeRoot()->traverse(&iter_iomap); + } + } + return !hadError; + } else { + return false; + } +} + +} // end namespace glslang + +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE diff --git a/src/third_party/glslang/glslang/MachineIndependent/iomapper.h b/src/third_party/glslang/glslang/MachineIndependent/iomapper.h new file mode 100644 index 0000000..6741327 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/iomapper.h @@ -0,0 +1,302 @@ +// +// Copyright (C) 2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+//
+
+#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE)
+
+#ifndef _IOMAPPER_INCLUDED
+#define _IOMAPPER_INCLUDED
+
+#include <cstdint>
+#include "LiveTraverser.h"
+#include <unordered_map>
+#include <unordered_set>
+//
+// A reflection database and its interface, consistent with the OpenGL API reflection queries.
+//
+
+class TInfoSink;
+
+namespace glslang {
+
+class TIntermediate;
+struct TVarEntryInfo {
+    int id;
+    TIntermSymbol* symbol;
+    bool live;
+    int newBinding;
+    int newSet;
+    int newLocation;
+    int newComponent;
+    int newIndex;
+    EShLanguage stage;
+    struct TOrderById {
+        inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) { return l.id < r.id; }
+    };
+
+    struct TOrderByPriority {
+        // ordering:
+        // 1) has both binding and set
+        // 2) has binding but no set
+        // 3) has no binding but set
+        // 4) has no binding and no set
+        inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) {
+            const TQualifier& lq = l.symbol->getQualifier();
+            const TQualifier& rq = r.symbol->getQualifier();
+
+            // simple rules:
+            // has binding gives 2 points
+            // has set gives 1 point
+            // who has the most points is more important.
+            int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
+            int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
+
+            if (lPoints == rPoints)
+                return l.id < r.id;
+            return lPoints > rPoints;
+        }
+    };
+};
+
+// Base class for shared TIoMapResolver services, used by several derivations.
+struct TDefaultIoResolverBase : public glslang::TIoMapResolver {
+public:
+    TDefaultIoResolverBase(const TIntermediate& intermediate);
+    typedef std::vector<int> TSlotSet;
+    typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
+
+    // grow the reflection stage by stage
+    void notifyBinding(EShLanguage, TVarEntryInfo& /*ent*/) override {}
+    void notifyInOut(EShLanguage, TVarEntryInfo& /*ent*/) override {}
+    void beginNotifications(EShLanguage) override {}
+    void endNotifications(EShLanguage) override {}
+    void beginResolve(EShLanguage) override {}
+    void endResolve(EShLanguage) override {}
+    void beginCollect(EShLanguage) override {}
+    void endCollect(EShLanguage) override {}
+    void reserverResourceSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
+    void reserverStorageSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
+    int getBaseBinding(TResourceType res, unsigned int set) const;
+    const std::vector<std::string>& getResourceSetBinding() const;
+    virtual TResourceType getResourceType(const glslang::TType& type) = 0;
+    bool doAutoBindingMapping() const;
+    bool doAutoLocationMapping() const;
+    TSlotSet::iterator findSlot(int set, int slot);
+    bool checkEmpty(int set, int slot);
+    bool validateInOut(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
+    int reserveSlot(int set, int slot, int size = 1);
+    int getFreeSlot(int set, int base, int size = 1);
+    int resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
+    int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
+    int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
+    int resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
+    int resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
+    void addStage(EShLanguage stage) override {
+        if (stage < EShLangCount)
+            stageMask[stage] = true;
+    }
+    uint32_t computeTypeLocationSize(const TType& type, EShLanguage stage);
+
+    TSlotSetMap slots;
+    bool hasError = false;
+
+protected:
+    TDefaultIoResolverBase(TDefaultIoResolverBase&);
+    TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
+    const TIntermediate& intermediate;
+    int nextUniformLocation;
+    int nextInputLocation;
+    int nextOutputLocation;
+    bool stageMask[EShLangCount + 1];
+    // Return descriptor set specific base if there is one, and the generic base otherwise.
+    int selectBaseBinding(int base, int descriptorSetBase) const {
+        return descriptorSetBase != -1 ? descriptorSetBase : base;
+    }
+
+    static int getLayoutSet(const glslang::TType& type) {
+        if (type.getQualifier().hasSet())
+            return type.getQualifier().layoutSet;
+        else
+            return 0;
+    }
+
+    static bool isSamplerType(const glslang::TType& type) {
+        return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
+    }
+
+    static bool isTextureType(const glslang::TType& type) {
+        return (type.getBasicType() == glslang::EbtSampler &&
+                (type.getSampler().isTexture() || type.getSampler().isSubpass()));
+    }
+
+    static bool isUboType(const glslang::TType& type) {
+        return type.getQualifier().storage == EvqUniform;
+    }
+
+    static bool isImageType(const glslang::TType& type) {
+        return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
+    }
+
+    static bool isSsboType(const glslang::TType& type) {
+        return type.getQualifier().storage == EvqBuffer;
+    }
+
+    // Return true if this is a SRV (shader resource view) type:
+    static bool isSrvType(const glslang::TType& type) {
+        return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
+    }
+
+    // Return true if this is a UAV (unordered access view) type:
+    static bool isUavType(const glslang::TType& type) {
+        if (type.getQualifier().isReadOnly())
+            return false;
+        return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
+               (type.getQualifier().storage == EvqBuffer);
+    }
+};
+
+// Default I/O resolver for OpenGL
+struct TDefaultGlslIoResolver : public TDefaultIoResolverBase {
+public:
+    typedef std::map<TString, int> TVarSlotMap;  // <resourceName, location/binding>
+    typedef std::map<int, TVarSlotMap> TSlotMap; // <resourceKey, TVarSlotMap>
+    TDefaultGlslIoResolver(const TIntermediate& intermediate);
+    bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
+    TResourceType getResourceType(const glslang::TType& type) override;
+    int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
+    int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
+    int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
+    void beginResolve(EShLanguage /*stage*/) override;
+    void endResolve(EShLanguage stage) override;
+    void beginCollect(EShLanguage) override;
+    void endCollect(EShLanguage) override;
+    void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
+    void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
+    const TString& getAccessName(const TIntermSymbol*);
+    // In/out symbols and uniform symbols are stored in the same resourceSlotMap; the storage key
+    // identifies which kind of symbol an entry is. The key is built from the stage and the storage
+    // qualifier, which also lets the same storage resource be recognized across different stages.
+    // For per-program resources whose usage stage does not matter, the same stage value can be used
+    // to build the key.
+    // Note: both stage and type must be less than 0xffff.
+    int buildStorageKey(EShLanguage stage, TStorageQualifier type) {
+        assert(static_cast<uint32_t>(stage) <= 0x0000ffff && static_cast<uint32_t>(type) <= 0x0000ffff);
+        return (stage << 16) | type;
+    }
+
+protected:
+    // Marks the previous stage, used to gather more interface-symbol information.
+    EShLanguage preStage;
+    // Marks the current shader stage for the resolver.
+    EShLanguage currentStage;
+    // Slot map for storage resources (locations of uniform and interface symbols); shared program-wide.
+    TSlotMap resourceSlotMap;
+    // Slot map for other resources (images, UBOs, SSBOs); shared program-wide.
+    TSlotMap storageSlotMap;
+};
+
+typedef std::map<TString, TVarEntryInfo> TVarLiveMap;
+
+// Override "operator=": when a vector of these pairs is sorted, VC++'s sort calls
+//     pair& operator=(const pair<_Other1, _Other2>& _Right)
+//     {
+//         first = _Right.first;
+//         second = _Right.second;
+//         return (*this);
+//     }
+// which ends up assigning through the const 'first' member on the left-hand side.
+// Overriding the operator here avoids that compiler error.
+// If a future VC++ compiler handles this situation, this code can be removed.
+struct TVarLivePair : std::pair<const TString, TVarEntryInfo> {
+    TVarLivePair(const std::pair<const TString, TVarEntryInfo>& _Right) : pair(_Right.first, _Right.second) {}
+    TVarLivePair& operator=(const TVarLivePair& _Right) {
+        const_cast<TString&>(first) = _Right.first;
+        second = _Right.second;
+        return (*this);
+    }
+    TVarLivePair(const TVarLivePair& src) : pair(src) { }
+};
+typedef std::vector<TVarLivePair> TVarLiveVector;
+
+// I/O mapper
+class TIoMapper {
+public:
+    TIoMapper() {}
+    virtual ~TIoMapper() {}
+    // grow the reflection stage by stage
+    bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
+    bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; }
+};
+
+// I/O mapper for OpenGL
+class TGlslIoMapper : public TIoMapper {
+public:
+    TGlslIoMapper() {
+        memset(inVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount);
+        memset(outVarMaps, 0, sizeof(TVarLiveMap*) * EShLangCount);
+        memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * EShLangCount);
+        memset(intermediates, 0, sizeof(TIntermediate*) * EShLangCount);
+    }
+    virtual ~TGlslIoMapper() {
+        for (size_t stage = 0; stage < EShLangCount; stage++) {
+            if (inVarMaps[stage] != nullptr) {
+                delete inVarMaps[stage];
+                inVarMaps[stage] = nullptr;
+            }
+            if (outVarMaps[stage] != nullptr) {
+                delete outVarMaps[stage];
+                outVarMaps[stage] = nullptr;
+            }
+            if (uniformVarMap[stage] != nullptr) {
+                delete uniformVarMap[stage];
+                uniformVarMap[stage] = nullptr;
+            }
+            if (intermediates[stage] != nullptr)
+                intermediates[stage] = nullptr;
+        }
+    }
+    // grow the reflection stage by stage
+    bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*) override;
+    bool doMap(TIoMapResolver*, TInfoSink&) override;
+    TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount],
+                *uniformVarMap[EShLangCount];
+    TIntermediate* intermediates[EShLangCount];
+    bool hadError = false;
+};
+
+} // end namespace glslang
+
+#endif // _IOMAPPER_INCLUDED
+
+#endif // !GLSLANG_WEB && !GLSLANG_ANGLE
diff --git a/src/third_party/glslang/glslang/MachineIndependent/limits.cpp b/src/third_party/glslang/glslang/MachineIndependent/limits.cpp
new file mode 100644
index 0000000..51d9300
--- /dev/null
+++ b/src/third_party/glslang/glslang/MachineIndependent/limits.cpp
@@ -0,0 +1,200 @@
+//
+// Copyright (C) 2013 LunarG, Inc.
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do sub tree walks for +// 1) inductive loop bodies to see if the inductive variable is modified +// 2) array-index expressions to see if they are "constant-index-expression" +// +// These are per Appendix A of ES 2.0: +// +// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the +// argument to a function out or inout parameter." +// +// "The following are constant-index-expressions: +// - Constant expressions +// - Loop indices as defined in section 4 +// - Expressions composed of both of the above" +// +// N.B.: assuming the last rule excludes function calls +// + +#include "ParseHelper.h" + +namespace glslang { + +// +// The inductive loop-body traverser. +// +// Just look at things that might modify the loop index. 
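// Editorial examples (assumed GLSL ES fragments, not from the glslang sources) of what this traverser
// is meant to catch inside an inductive loop body:
//
//     for (int i = 0; i < 4; ++i) { v[i] = 0.0; }   // fine: 'i' is only read
//     for (int i = 0; i < 4; ++i) { i += 2; }       // flagged: the loop index is assigned in the body
//     for (int i = 0; i < 4; ++i) { f(i); }         // flagged only if 'i' binds to an out/inout parameter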
+// + +class TInductiveTraverser : public TIntermTraverser { +public: + TInductiveTraverser(int id, TSymbolTable& st) + : loopId(id), symbolTable(st), bad(false) { } + + virtual bool visitBinary(TVisit, TIntermBinary* node); + virtual bool visitUnary(TVisit, TIntermUnary* node); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + + int loopId; // unique ID of the symbol that's the loop inductive variable + TSymbolTable& symbolTable; + bool bad; + TSourceLoc badLoc; + +protected: + TInductiveTraverser(TInductiveTraverser&); + TInductiveTraverser& operator=(TInductiveTraverser&); +}; + +// check binary operations for those modifying the loop index +bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) +{ + if (node->modifiesState() && node->getLeft()->getAsSymbolNode() && + node->getLeft()->getAsSymbolNode()->getId() == loopId) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// check unary operations for those modifying the loop index +bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) +{ + if (node->modifiesState() && node->getOperand()->getAsSymbolNode() && + node->getOperand()->getAsSymbolNode()->getId() == loopId) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// check function calls for arguments modifying the loop index +bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (node->getOp() == EOpFunctionCall) { + // see if an out or inout argument is the loop index + const TIntermSequence& args = node->getSequence(); + for (int i = 0; i < (int)args.size(); ++i) { + if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) { + TSymbol* function = symbolTable.find(node->getName()); + const TType* type = (*function->getAsFunction())[i].type; + if (type->getQualifier().storage == EvqOut || + type->getQualifier().storage == EvqInOut) { + bad = true; + badLoc = node->getLoc(); + } + } + } + } + + return true; +} + +// +// External function to call for loop check. +// +void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable) +{ + TInductiveTraverser it(loopId, symbolTable); + + if (body == nullptr) + return; + + body->traverse(&it); + + if (it.bad) + error(it.badLoc, "inductive loop index modified", "limitations", ""); +} + +// +// The "constant-index-expression" tranverser. +// +// Just look at things that can form an index. +// + +class TIndexTraverser : public TIntermTraverser { +public: + TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { } + virtual void visitSymbol(TIntermSymbol* symbol); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + const TIdSetType& inductiveLoopIds; + bool bad; + TSourceLoc badLoc; + +protected: + TIndexTraverser(TIndexTraverser&); + TIndexTraverser& operator=(TIndexTraverser&); +}; + +// make sure symbols are inductive-loop indexes +void TIndexTraverser::visitSymbol(TIntermSymbol* symbol) +{ + if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) { + bad = true; + badLoc = symbol->getLoc(); + } +} + +// check for function calls, assuming they are bad; spec. doesn't really say +bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (node->getOp() == EOpFunctionCall) { + bad = true; + badLoc = node->getLoc(); + } + + return true; +} + +// +// External function to call for loop check. 
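// Editorial examples (assumed GLSL ES fragments, not from the glslang sources) of the
// "constant-index-expression" rule enforced below:
//
//     uniform float u[4];
//     u[2]            // fine: constant expression
//     u[i]            // fine: 'i' is an inductive loop index
//     u[i + 1]        // fine: composed of a loop index and a constant
//     u[someFunc()]   // flagged: function calls are assumed non-constant (see visitAggregate above)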
+// +void TParseContext::constantIndexExpressionCheck(TIntermNode* index) +{ +#ifndef GLSLANG_WEB + TIndexTraverser it(inductiveLoopIds); + + index->traverse(&it); + + if (it.bad) + error(it.badLoc, "Non-constant-index-expression", "limitations", ""); +#endif +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/linkValidate.cpp b/src/third_party/glslang/glslang/MachineIndependent/linkValidate.cpp new file mode 100644 index 0000000..a8e031b --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/linkValidate.cpp @@ -0,0 +1,1781 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Do link-time merging and validation of intermediate representations. +// +// Basic model is that during compilation, each compilation unit (shader) is +// compiled into one TIntermediate instance. Then, at link time, multiple +// units for the same stage can be merged together, which can generate errors. +// Then, after all merging, a single instance of TIntermediate represents +// the whole stage. A final error check can be done on the resulting stage, +// even if no merging was done (i.e., the stage was only one compilation unit). +// + +#include "localintermediate.h" +#include "../Include/InfoSink.h" + +namespace glslang { + +// +// Link-time error emitter. +// +void TIntermediate::error(TInfoSink& infoSink, const char* message) +{ +#ifndef GLSLANG_WEB + infoSink.info.prefix(EPrefixError); + infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n"; +#endif + + ++numErrors; +} + +// Link-time warning. 
+void TIntermediate::warn(TInfoSink& infoSink, const char* message) +{ +#ifndef GLSLANG_WEB + infoSink.info.prefix(EPrefixWarning); + infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n"; +#endif +} + +// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block +// name must have the exact same set of members qualified with offset and their integral-constant +// expression values must be the same, or a link-time error results." + +// +// Merge the information from 'unit' into 'this' +// +void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit) +{ +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + mergeCallGraphs(infoSink, unit); + mergeModes(infoSink, unit); + mergeTrees(infoSink, unit); +#endif +} + +void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit) +{ + if (unit.getNumEntryPoints() > 0) { + if (getNumEntryPoints() > 0) + error(infoSink, "can't handle multiple entry points per stage"); + else { + entryPointName = unit.getEntryPointName(); + entryPointMangledName = unit.getEntryPointMangledName(); + } + } + numEntryPoints += unit.getNumEntryPoints(); + + callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end()); +} + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +#define MERGE_MAX(member) member = std::max(member, unit.member) +#define MERGE_TRUE(member) if (unit.member) member = unit.member; + +void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit) +{ + if (language != unit.language) + error(infoSink, "stages must match when linking into a single stage"); + + if (getSource() == EShSourceNone) + setSource(unit.getSource()); + if (getSource() != unit.getSource()) + error(infoSink, "can't link compilation units from different source languages"); + + if (treeRoot == nullptr) { + profile = unit.profile; + version = unit.version; + requestedExtensions = unit.requestedExtensions; + } else { + if ((isEsProfile()) != (unit.isEsProfile())) + error(infoSink, "Cannot cross link ES and desktop profiles"); + else if (unit.profile == ECompatibilityProfile) + profile = ECompatibilityProfile; + version = std::max(version, unit.version); + requestedExtensions.insert(unit.requestedExtensions.begin(), unit.requestedExtensions.end()); + } + + MERGE_MAX(spvVersion.spv); + MERGE_MAX(spvVersion.vulkanGlsl); + MERGE_MAX(spvVersion.vulkan); + MERGE_MAX(spvVersion.openGl); + + numErrors += unit.getNumErrors(); + // Only one push_constant is allowed, mergeLinkerObjects() will ensure the push_constant + // is the same for all units. 
+ if (numPushConstants > 1 || unit.numPushConstants > 1) + error(infoSink, "Only one push_constant block is allowed per stage"); + numPushConstants = std::min(numPushConstants + unit.numPushConstants, 1); + + if (unit.invocations != TQualifier::layoutNotSet) { + if (invocations == TQualifier::layoutNotSet) + invocations = unit.invocations; + else if (invocations != unit.invocations) + error(infoSink, "number of invocations must match between compilation units"); + } + + if (vertices == TQualifier::layoutNotSet) + vertices = unit.vertices; + else if (unit.vertices != TQualifier::layoutNotSet && vertices != unit.vertices) { + if (language == EShLangGeometry || language == EShLangMeshNV) + error(infoSink, "Contradictory layout max_vertices values"); + else if (language == EShLangTessControl) + error(infoSink, "Contradictory layout vertices values"); + else + assert(0); + } + if (primitives == TQualifier::layoutNotSet) + primitives = unit.primitives; + else if (primitives != unit.primitives) { + if (language == EShLangMeshNV) + error(infoSink, "Contradictory layout max_primitives values"); + else + assert(0); + } + + if (inputPrimitive == ElgNone) + inputPrimitive = unit.inputPrimitive; + else if (unit.inputPrimitive != ElgNone && inputPrimitive != unit.inputPrimitive) + error(infoSink, "Contradictory input layout primitives"); + + if (outputPrimitive == ElgNone) + outputPrimitive = unit.outputPrimitive; + else if (unit.outputPrimitive != ElgNone && outputPrimitive != unit.outputPrimitive) + error(infoSink, "Contradictory output layout primitives"); + + if (originUpperLeft != unit.originUpperLeft || pixelCenterInteger != unit.pixelCenterInteger) + error(infoSink, "gl_FragCoord redeclarations must match across shaders"); + + if (vertexSpacing == EvsNone) + vertexSpacing = unit.vertexSpacing; + else if (vertexSpacing != unit.vertexSpacing) + error(infoSink, "Contradictory input vertex spacing"); + + if (vertexOrder == EvoNone) + vertexOrder = unit.vertexOrder; + else if (vertexOrder != unit.vertexOrder) + error(infoSink, "Contradictory triangle ordering"); + + MERGE_TRUE(pointMode); + + for (int i = 0; i < 3; ++i) { + if (!localSizeNotDefault[i] && unit.localSizeNotDefault[i]) { + localSize[i] = unit.localSize[i]; + localSizeNotDefault[i] = true; + } + else if (localSize[i] != unit.localSize[i]) + error(infoSink, "Contradictory local size"); + + if (localSizeSpecId[i] == TQualifier::layoutNotSet) + localSizeSpecId[i] = unit.localSizeSpecId[i]; + else if (localSizeSpecId[i] != unit.localSizeSpecId[i]) + error(infoSink, "Contradictory local size specialization ids"); + } + + MERGE_TRUE(earlyFragmentTests); + MERGE_TRUE(postDepthCoverage); + + if (depthLayout == EldNone) + depthLayout = unit.depthLayout; + else if (depthLayout != unit.depthLayout) + error(infoSink, "Contradictory depth layouts"); + + MERGE_TRUE(depthReplacing); + MERGE_TRUE(hlslFunctionality1); + + blendEquations |= unit.blendEquations; + + MERGE_TRUE(xfbMode); + + for (size_t b = 0; b < xfbBuffers.size(); ++b) { + if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) + xfbBuffers[b].stride = unit.xfbBuffers[b].stride; + else if (xfbBuffers[b].stride != unit.xfbBuffers[b].stride) + error(infoSink, "Contradictory xfb_stride"); + xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride); + if (unit.xfbBuffers[b].contains64BitType) + xfbBuffers[b].contains64BitType = true; + if (unit.xfbBuffers[b].contains32BitType) + xfbBuffers[b].contains32BitType = true; + if 
(unit.xfbBuffers[b].contains16BitType) + xfbBuffers[b].contains16BitType = true; + // TODO: 4.4 link: enhanced layouts: compare ranges + } + + MERGE_TRUE(multiStream); + MERGE_TRUE(layoutOverrideCoverage); + MERGE_TRUE(geoPassthroughEXT); + + for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) { + if (unit.shiftBinding[i] > 0) + setShiftBinding((TResourceType)i, unit.shiftBinding[i]); + } + + for (unsigned int i = 0; i < unit.shiftBindingForSet.size(); ++i) { + for (auto it = unit.shiftBindingForSet[i].begin(); it != unit.shiftBindingForSet[i].end(); ++it) + setShiftBindingForSet((TResourceType)i, it->second, it->first); + } + + resourceSetBinding.insert(resourceSetBinding.end(), unit.resourceSetBinding.begin(), unit.resourceSetBinding.end()); + + MERGE_TRUE(autoMapBindings); + MERGE_TRUE(autoMapLocations); + MERGE_TRUE(invertY); + MERGE_TRUE(flattenUniformArrays); + MERGE_TRUE(useUnknownFormat); + MERGE_TRUE(hlslOffsets); + MERGE_TRUE(useStorageBuffer); + MERGE_TRUE(hlslIoMapping); + + // TODO: sourceFile + // TODO: sourceText + // TODO: processes + + MERGE_TRUE(needToLegalize); + MERGE_TRUE(binaryDoubleOutput); + MERGE_TRUE(usePhysicalStorageBuffer); +} + +// +// Merge the 'unit' AST into 'this' AST. +// That includes rationalizing the unique IDs, which were set up independently, +// and might have overlaps that are not the same symbol, or might have different +// IDs for what should be the same shared symbol. +// +void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit) +{ + if (unit.treeRoot == nullptr) + return; + + if (treeRoot == nullptr) { + treeRoot = unit.treeRoot; + return; + } + + // Getting this far means we have two existing trees to merge... + numShaderRecordBlocks += unit.numShaderRecordBlocks; + numTaskNVBlocks += unit.numTaskNVBlocks; + + // Get the top-level globals of each unit + TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); + TIntermSequence& unitGlobals = unit.treeRoot->getAsAggregate()->getSequence(); + + // Get the linker-object lists + TIntermSequence& linkerObjects = findLinkerObjects()->getSequence(); + const TIntermSequence& unitLinkerObjects = unit.findLinkerObjects()->getSequence(); + + // Map by global name to unique ID to rationalize the same object having + // differing IDs in different trees. + TIdMaps idMaps; + int maxId; + seedIdMap(idMaps, maxId); + remapIds(idMaps, maxId + 1, unit); + + mergeBodies(infoSink, globals, unitGlobals); + mergeLinkerObjects(infoSink, linkerObjects, unitLinkerObjects); + ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end()); +} + +#endif + +static const TString& getNameForIdMap(TIntermSymbol* symbol) +{ + TShaderInterface si = symbol->getType().getShaderInterface(); + if (si == EsiNone) + return symbol->getName(); + else + return symbol->getType().getTypeName(); +} + + + +// Traverser that seeds an ID map with all built-ins, and tracks the +// maximum ID used. +// (It would be nice to put this in a function, but that causes warnings +// on having no bodies for the copy-constructor/operator=.) +class TBuiltInIdTraverser : public TIntermTraverser { +public: + TBuiltInIdTraverser(TIdMaps& idMaps) : idMaps(idMaps), maxId(0) { } + // If it's a built in, add it to the map. + // Track the max ID. 
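    // Editorial sketch (simplified, not from the glslang sources) of the overall ID rationalization that
    // seedIdMap()/remapIds() perform: symbols that must be shared across units (built-ins and linker
    // objects) are keyed by name so the merged unit adopts the IDs already used in 'this' AST, and every
    // other symbol is shifted past the current maximum:
    //
    //     idMaps[si]["gl_Position"] = 42;   // seeded from 'this' AST
    //     // in the merged unit, its "gl_Position" is changed to id 42, while an
    //     // unrelated symbol with id 7 becomes 7 + (maxId + 1).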
+ virtual void visitSymbol(TIntermSymbol* symbol) + { + const TQualifier& qualifier = symbol->getType().getQualifier(); + if (qualifier.builtIn != EbvNone) { + TShaderInterface si = symbol->getType().getShaderInterface(); + idMaps[si][getNameForIdMap(symbol)] = symbol->getId(); + } + maxId = std::max(maxId, symbol->getId()); + } + int getMaxId() const { return maxId; } +protected: + TBuiltInIdTraverser(TBuiltInIdTraverser&); + TBuiltInIdTraverser& operator=(TBuiltInIdTraverser&); + TIdMaps& idMaps; + int maxId; +}; + +// Traverser that seeds an ID map with non-builtins. +// (It would be nice to put this in a function, but that causes warnings +// on having no bodies for the copy-constructor/operator=.) +class TUserIdTraverser : public TIntermTraverser { +public: + TUserIdTraverser(TIdMaps& idMaps) : idMaps(idMaps) { } + // If its a non-built-in global, add it to the map. + virtual void visitSymbol(TIntermSymbol* symbol) + { + const TQualifier& qualifier = symbol->getType().getQualifier(); + if (qualifier.builtIn == EbvNone) { + TShaderInterface si = symbol->getType().getShaderInterface(); + idMaps[si][getNameForIdMap(symbol)] = symbol->getId(); + } + } + +protected: + TUserIdTraverser(TUserIdTraverser&); + TUserIdTraverser& operator=(TUserIdTraverser&); + TIdMaps& idMaps; // over biggest id +}; + +// Initialize the the ID map with what we know of 'this' AST. +void TIntermediate::seedIdMap(TIdMaps& idMaps, int& maxId) +{ + // all built-ins everywhere need to align on IDs and contribute to the max ID + TBuiltInIdTraverser builtInIdTraverser(idMaps); + treeRoot->traverse(&builtInIdTraverser); + maxId = builtInIdTraverser.getMaxId(); + + // user variables in the linker object list need to align on ids + TUserIdTraverser userIdTraverser(idMaps); + findLinkerObjects()->traverse(&userIdTraverser); +} + +// Traverser to map an AST ID to what was known from the seeding AST. +// (It would be nice to put this in a function, but that causes warnings +// on having no bodies for the copy-constructor/operator=.) +class TRemapIdTraverser : public TIntermTraverser { +public: + TRemapIdTraverser(const TIdMaps& idMaps, int idShift) : idMaps(idMaps), idShift(idShift) { } + // Do the mapping: + // - if the same symbol, adopt the 'this' ID + // - otherwise, ensure a unique ID by shifting to a new space + virtual void visitSymbol(TIntermSymbol* symbol) + { + const TQualifier& qualifier = symbol->getType().getQualifier(); + bool remapped = false; + if (qualifier.isLinkable() || qualifier.builtIn != EbvNone) { + TShaderInterface si = symbol->getType().getShaderInterface(); + auto it = idMaps[si].find(getNameForIdMap(symbol)); + if (it != idMaps[si].end()) { + symbol->changeId(it->second); + remapped = true; + } + } + if (!remapped) + symbol->changeId(symbol->getId() + idShift); + } +protected: + TRemapIdTraverser(TRemapIdTraverser&); + TRemapIdTraverser& operator=(TRemapIdTraverser&); + const TIdMaps& idMaps; + int idShift; +}; + +void TIntermediate::remapIds(const TIdMaps& idMaps, int idShift, TIntermediate& unit) +{ + // Remap all IDs to either share or be unique, as dictated by the idMap and idShift. + TRemapIdTraverser idTraverser(idMaps, idShift); + unit.getTreeRoot()->traverse(&idTraverser); +} + +// +// Merge the function bodies and global-level initializers from unitGlobals into globals. +// Will error check duplication of function bodies for the same signature. 
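// Editorial example (assumed shader code, not from the glslang sources): if two compilation units
// linked into the same stage both define the same signature, e.g. both contain
//
//     float scale(float x) { return 2.0 * x; }
//
// then mergeBodies() below reports "Multiple function bodies in multiple compilation units for the
// same signature in the same stage" for that function's mangled name.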
+// +void TIntermediate::mergeBodies(TInfoSink& infoSink, TIntermSequence& globals, const TIntermSequence& unitGlobals) +{ + // TODO: link-time performance: Processing in alphabetical order will be faster + + // Error check the global objects, not including the linker objects + for (unsigned int child = 0; child < globals.size() - 1; ++child) { + for (unsigned int unitChild = 0; unitChild < unitGlobals.size() - 1; ++unitChild) { + TIntermAggregate* body = globals[child]->getAsAggregate(); + TIntermAggregate* unitBody = unitGlobals[unitChild]->getAsAggregate(); + if (body && unitBody && body->getOp() == EOpFunction && unitBody->getOp() == EOpFunction && body->getName() == unitBody->getName()) { + error(infoSink, "Multiple function bodies in multiple compilation units for the same signature in the same stage:"); + infoSink.info << " " << globals[child]->getAsAggregate()->getName() << "\n"; + } + } + } + + // Merge the global objects, just in front of the linker objects + globals.insert(globals.end() - 1, unitGlobals.begin(), unitGlobals.end() - 1); +} + +// +// Merge the linker objects from unitLinkerObjects into linkerObjects. +// Duplication is expected and filtered out, but contradictions are an error. +// +void TIntermediate::mergeLinkerObjects(TInfoSink& infoSink, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects) +{ + // Error check and merge the linker objects (duplicates should not be created) + std::size_t initialNumLinkerObjects = linkerObjects.size(); + for (unsigned int unitLinkObj = 0; unitLinkObj < unitLinkerObjects.size(); ++unitLinkObj) { + bool merge = true; + for (std::size_t linkObj = 0; linkObj < initialNumLinkerObjects; ++linkObj) { + TIntermSymbol* symbol = linkerObjects[linkObj]->getAsSymbolNode(); + TIntermSymbol* unitSymbol = unitLinkerObjects[unitLinkObj]->getAsSymbolNode(); + assert(symbol && unitSymbol); + + bool isSameSymbol = false; + // If they are both blocks in the same shader interface, + // match by the block-name, not the identifier name. + if (symbol->getType().getBasicType() == EbtBlock && unitSymbol->getType().getBasicType() == EbtBlock) { + if (symbol->getType().getShaderInterface() == unitSymbol->getType().getShaderInterface()) { + isSameSymbol = symbol->getType().getTypeName() == unitSymbol->getType().getTypeName(); + } + } + else if (symbol->getName() == unitSymbol->getName()) + isSameSymbol = true; + + if (isSameSymbol) { + // filter out copy + merge = false; + + // but if one has an initializer and the other does not, update + // the initializer + if (symbol->getConstArray().empty() && ! unitSymbol->getConstArray().empty()) + symbol->setConstArray(unitSymbol->getConstArray()); + + // Similarly for binding + if (! symbol->getQualifier().hasBinding() && unitSymbol->getQualifier().hasBinding()) + symbol->getQualifier().layoutBinding = unitSymbol->getQualifier().layoutBinding; + + // Update implicit array sizes + mergeImplicitArraySizes(symbol->getWritableType(), unitSymbol->getType()); + + // Check for consistent types/qualification/initializers etc. 
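                // Editorial example (assumed declarations, not from the glslang sources): if one unit
                // declares
                //
                //     layout(binding = 1) uniform sampler2D tex;
                //
                // and another declares
                //
                //     layout(binding = 2) uniform sampler2D tex;
                //
                // the check below reports "Layout qualification must match:" for 'tex'.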
+ mergeErrorCheck(infoSink, *symbol, *unitSymbol, false); + } + // If different symbols, verify they arn't push_constant since there can only be one per stage + else if (symbol->getQualifier().isPushConstant() && unitSymbol->getQualifier().isPushConstant()) + error(infoSink, "Only one push_constant block is allowed per stage"); + } + if (merge) + linkerObjects.push_back(unitLinkerObjects[unitLinkObj]); + } +} + +// TODO 4.5 link functionality: cull distance array size checking + +// Recursively merge the implicit array sizes through the objects' respective type trees. +void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType) +{ + if (type.isUnsizedArray()) { + if (unitType.isUnsizedArray()) { + type.updateImplicitArraySize(unitType.getImplicitArraySize()); + if (unitType.isArrayVariablyIndexed()) + type.setArrayVariablyIndexed(); + } else if (unitType.isSizedArray()) + type.changeOuterArraySize(unitType.getOuterArraySize()); + } + + // Type mismatches are caught and reported after this, just be careful for now. + if (! type.isStruct() || ! unitType.isStruct() || type.getStruct()->size() != unitType.getStruct()->size()) + return; + + for (int i = 0; i < (int)type.getStruct()->size(); ++i) + mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type); +} + +// +// Compare two global objects from two compilation units and see if they match +// well enough. Rules can be different for intra- vs. cross-stage matching. +// +// This function only does one of intra- or cross-stage matching per call. +// +void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage) +{ +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + bool writeTypeComparison = false; + + // Types have to match + if (symbol.getType() != unitSymbol.getType()) { + // but, we make an exception if one is an implicit array and the other is sized + if (! (symbol.getType().isArray() && unitSymbol.getType().isArray() && + symbol.getType().sameElementType(unitSymbol.getType()) && + (symbol.getType().isUnsizedArray() || unitSymbol.getType().isUnsizedArray()))) { + error(infoSink, "Types must match:"); + writeTypeComparison = true; + } + } + + // Qualifiers have to (almost) match + + // Storage... + if (symbol.getQualifier().storage != unitSymbol.getQualifier().storage) { + error(infoSink, "Storage qualifiers must match:"); + writeTypeComparison = true; + } + + // Uniform and buffer blocks must either both have an instance name, or + // must both be anonymous. The names don't need to match though. + if (symbol.getQualifier().isUniformOrBuffer() && + (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()))) { + error(infoSink, "Matched Uniform or Storage blocks must all be anonymous," + " or all be named:"); + writeTypeComparison = true; + } + + if (symbol.getQualifier().storage == unitSymbol.getQualifier().storage && + (IsAnonymous(symbol.getName()) != IsAnonymous(unitSymbol.getName()) || + (!IsAnonymous(symbol.getName()) && symbol.getName() != unitSymbol.getName()))) { + warn(infoSink, "Matched shader interfaces are using different instance names."); + writeTypeComparison = true; + } + + // Precision... + if (symbol.getQualifier().precision != unitSymbol.getQualifier().precision) { + error(infoSink, "Precision qualifiers must match:"); + writeTypeComparison = true; + } + + // Invariance... + if (! 
crossStage && symbol.getQualifier().invariant != unitSymbol.getQualifier().invariant) { + error(infoSink, "Presence of invariant qualifier must match:"); + writeTypeComparison = true; + } + + // Precise... + if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) { + error(infoSink, "Presence of precise qualifier must match:"); + writeTypeComparison = true; + } + + // Auxiliary and interpolation... + if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid || + symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth || + symbol.getQualifier().flat != unitSymbol.getQualifier().flat || + symbol.getQualifier().isSample()!= unitSymbol.getQualifier().isSample() || + symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() || + symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective()) { + error(infoSink, "Interpolation and auxiliary storage qualifiers must match:"); + writeTypeComparison = true; + } + + // Memory... + if (symbol.getQualifier().coherent != unitSymbol.getQualifier().coherent || + symbol.getQualifier().devicecoherent != unitSymbol.getQualifier().devicecoherent || + symbol.getQualifier().queuefamilycoherent != unitSymbol.getQualifier().queuefamilycoherent || + symbol.getQualifier().workgroupcoherent != unitSymbol.getQualifier().workgroupcoherent || + symbol.getQualifier().subgroupcoherent != unitSymbol.getQualifier().subgroupcoherent || + symbol.getQualifier().shadercallcoherent!= unitSymbol.getQualifier().shadercallcoherent || + symbol.getQualifier().nonprivate != unitSymbol.getQualifier().nonprivate || + symbol.getQualifier().volatil != unitSymbol.getQualifier().volatil || + symbol.getQualifier().restrict != unitSymbol.getQualifier().restrict || + symbol.getQualifier().readonly != unitSymbol.getQualifier().readonly || + symbol.getQualifier().writeonly != unitSymbol.getQualifier().writeonly) { + error(infoSink, "Memory qualifiers must match:"); + writeTypeComparison = true; + } + + // Layouts... + // TODO: 4.4 enhanced layouts: Generalize to include offset/align: current spec + // requires separate user-supplied offset from actual computed offset, but + // current implementation only has one offset. + if (symbol.getQualifier().layoutMatrix != unitSymbol.getQualifier().layoutMatrix || + symbol.getQualifier().layoutPacking != unitSymbol.getQualifier().layoutPacking || + symbol.getQualifier().layoutLocation != unitSymbol.getQualifier().layoutLocation || + symbol.getQualifier().layoutComponent != unitSymbol.getQualifier().layoutComponent || + symbol.getQualifier().layoutIndex != unitSymbol.getQualifier().layoutIndex || + symbol.getQualifier().layoutBinding != unitSymbol.getQualifier().layoutBinding || + (symbol.getQualifier().hasBinding() && (symbol.getQualifier().layoutOffset != unitSymbol.getQualifier().layoutOffset))) { + error(infoSink, "Layout qualification must match:"); + writeTypeComparison = true; + } + + // Initializers have to match, if both are present, and if we don't already know the types don't match + if (! writeTypeComparison) { + if (! symbol.getConstArray().empty() && ! 
unitSymbol.getConstArray().empty()) { + if (symbol.getConstArray() != unitSymbol.getConstArray()) { + error(infoSink, "Initializers must match:"); + infoSink.info << " " << symbol.getName() << "\n"; + } + } + } + + if (writeTypeComparison) { + infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus "; + if (symbol.getName() != unitSymbol.getName()) + infoSink.info << unitSymbol.getName() << ": "; + + infoSink.info << "\"" << unitSymbol.getType().getCompleteString() << "\"\n"; + } +#endif +} + +// +// Do final link-time error checking of a complete (merged) intermediate representation. +// (Much error checking was done during merging). +// +// Also, lock in defaults of things not set, including array sizes. +// +void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled) +{ + if (getTreeRoot() == nullptr) + return; + + if (numEntryPoints < 1) { + if (getSource() == EShSourceGlsl) + error(infoSink, "Missing entry point: Each stage requires one entry point"); + else + warn(infoSink, "Entry point not found"); + } + + // recursion and missing body checking + checkCallGraphCycles(infoSink); + checkCallGraphBodies(infoSink, keepUncalled); + + // overlap/alias/missing I/O, etc. + inOutLocationCheck(infoSink); + +#ifndef GLSLANG_WEB + if (getNumPushConstants() > 1) + error(infoSink, "Only one push_constant block is allowed per stage"); + + // invocations + if (invocations == TQualifier::layoutNotSet) + invocations = 1; + + if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex")) + error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)"); + if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex")) + error(infoSink, "Can only use one of gl_CullDistance or gl_ClipVertex (gl_ClipDistance is preferred)"); + + if (userOutputUsed() && (inIoAccessed("gl_FragColor") || inIoAccessed("gl_FragData"))) + error(infoSink, "Cannot use gl_FragColor or gl_FragData when using user-defined outputs"); + if (inIoAccessed("gl_FragColor") && inIoAccessed("gl_FragData")) + error(infoSink, "Cannot use both gl_FragColor and gl_FragData"); + + for (size_t b = 0; b < xfbBuffers.size(); ++b) { + if (xfbBuffers[b].contains64BitType) + RoundToPow2(xfbBuffers[b].implicitStride, 8); + else if (xfbBuffers[b].contains32BitType) + RoundToPow2(xfbBuffers[b].implicitStride, 4); + else if (xfbBuffers[b].contains16BitType) + RoundToPow2(xfbBuffers[b].implicitStride, 2); + + // "It is a compile-time or link-time error to have + // any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or + // in different compilation units. While xfb_stride can be declared multiple times for the same buffer, it is a + // compile-time or link-time error to have different values specified for the stride for the same buffer." 
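        // Editorial worked example (assumed capture list, not from the glslang sources): suppose a buffer's
        // captured outputs (a dvec2 plus a float) give an implicit stride of 20 bytes; because the buffer
        // contains a 64-bit type it is rounded up to 24 above, so declaring "layout(xfb_stride = 20)" on it
        // is rejected below both as too small and as not a multiple of 8.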
+ if (xfbBuffers[b].stride != TQualifier::layoutXfbStrideEnd && xfbBuffers[b].implicitStride > xfbBuffers[b].stride) { + error(infoSink, "xfb_stride is too small to hold all buffer entries:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << ", minimum stride needed: " << xfbBuffers[b].implicitStride << "\n"; + } + if (xfbBuffers[b].stride == TQualifier::layoutXfbStrideEnd) + xfbBuffers[b].stride = xfbBuffers[b].implicitStride; + + // "If the buffer is capturing any + // outputs with double-precision or 64-bit integer components, the stride must be a multiple of 8, otherwise it must be a + // multiple of 4, or a compile-time or link-time error results." + if (xfbBuffers[b].contains64BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 8)) { + error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) { + error(infoSink, "xfb_stride must be multiple of 4:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } + // "If the buffer is capturing any + // outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2" + else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) { + error(infoSink, "xfb_stride must be multiple of 2 for buffer holding a half float or 16-bit integer:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n"; + } + + // "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the + // implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents." + if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) { + error(infoSink, "xfb_stride is too large:"); + infoSink.info.prefix(EPrefixError); + infoSink.info << " xfb_buffer " << (unsigned int)b << ", components (1/4 stride) needed are " << xfbBuffers[b].stride/4 << ", gl_MaxTransformFeedbackInterleavedComponents is " << resources.maxTransformFeedbackInterleavedComponents << "\n"; + } + } + + switch (language) { + case EShLangVertex: + break; + case EShLangTessControl: + if (vertices == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify an output layout(vertices=...)"); + break; + case EShLangTessEvaluation: + if (getSource() == EShSourceGlsl) { + if (inputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an input layout primitive"); + if (vertexSpacing == EvsNone) + vertexSpacing = EvsEqual; + if (vertexOrder == EvoNone) + vertexOrder = EvoCcw; + } + break; + case EShLangGeometry: + if (inputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an input layout primitive"); + if (outputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an output layout primitive"); + if (vertices == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify a layout(max_vertices = value)"); + break; + case EShLangFragment: + // for GL_ARB_post_depth_coverage, EarlyFragmentTest is set automatically in + // ParseHelper.cpp. 
So if we reach here, this must be GL_EXT_post_depth_coverage + // requiring explicit early_fragment_tests + if (getPostDepthCoverage() && !getEarlyFragmentTests()) + error(infoSink, "post_depth_coverage requires early_fragment_tests"); + break; + case EShLangCompute: + break; + case EShLangRayGen: + case EShLangIntersect: + case EShLangAnyHit: + case EShLangClosestHit: + case EShLangMiss: + case EShLangCallable: + if (numShaderRecordBlocks > 1) + error(infoSink, "Only one shaderRecordNV buffer block is allowed per stage"); + break; + case EShLangMeshNV: + // NV_mesh_shader doesn't allow use of both single-view and per-view builtins. + if (inIoAccessed("gl_Position") && inIoAccessed("gl_PositionPerViewNV")) + error(infoSink, "Can only use one of gl_Position or gl_PositionPerViewNV"); + if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipDistancePerViewNV")) + error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipDistancePerViewNV"); + if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_CullDistancePerViewNV")) + error(infoSink, "Can only use one of gl_CullDistance or gl_CullDistancePerViewNV"); + if (inIoAccessed("gl_Layer") && inIoAccessed("gl_LayerPerViewNV")) + error(infoSink, "Can only use one of gl_Layer or gl_LayerPerViewNV"); + if (inIoAccessed("gl_ViewportMask") && inIoAccessed("gl_ViewportMaskPerViewNV")) + error(infoSink, "Can only use one of gl_ViewportMask or gl_ViewportMaskPerViewNV"); + if (outputPrimitive == ElgNone) + error(infoSink, "At least one shader must specify an output layout primitive"); + if (vertices == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify a layout(max_vertices = value)"); + if (primitives == TQualifier::layoutNotSet) + error(infoSink, "At least one shader must specify a layout(max_primitives = value)"); + // fall through + case EShLangTaskNV: + if (numTaskNVBlocks > 1) + error(infoSink, "Only one taskNV interface block is allowed per shader"); + break; + default: + error(infoSink, "Unknown Stage."); + break; + } + + // Process the tree for any node-specific work. + class TFinalLinkTraverser : public TIntermTraverser { + public: + TFinalLinkTraverser() { } + virtual ~TFinalLinkTraverser() { } + + virtual void visitSymbol(TIntermSymbol* symbol) + { + // Implicitly size arrays. + // If an unsized array is left as unsized, it effectively + // becomes run-time sized. + symbol->getWritableType().adoptImplicitArraySizes(false); + } + } finalLinkTraverser; + + treeRoot->traverse(&finalLinkTraverser); +#endif +} + +// +// See if the call graph contains any static recursion, which is disallowed +// by the specification. +// +void TIntermediate::checkCallGraphCycles(TInfoSink& infoSink) +{ + // Clear fields we'll use for this. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + call->visited = false; + call->currentPath = false; + call->errorGiven = false; + } + + // + // Loop, looking for a new connected subgraph. One subgraph is handled per loop iteration. + // + + TCall* newRoot; + do { + // See if we have unvisited parts of the graph. + newRoot = 0; + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (! call->visited) { + newRoot = &(*call); + break; + } + } + + // If not, we are done. + if (! newRoot) + break; + + // Otherwise, we found a new subgraph, process it: + // See what all can be reached by this new root, and if any of + // that is recursive. 
This is done by depth-first traversals, seeing + // if a new call is found that was already in the currentPath (a back edge), + // thereby detecting recursion. + std::list stack; + newRoot->currentPath = true; // currentPath will be true iff it is on the stack + stack.push_back(newRoot); + while (! stack.empty()) { + // get a caller + TCall* call = stack.back(); + + // Add to the stack just one callee. + // This algorithm always terminates, because only !visited and !currentPath causes a push + // and all pushes change currentPath to true, and all pops change visited to true. + TGraph::iterator child = callGraph.begin(); + for (; child != callGraph.end(); ++child) { + + // If we already visited this node, its whole subgraph has already been processed, so skip it. + if (child->visited) + continue; + + if (call->callee == child->caller) { + if (child->currentPath) { + // Then, we found a back edge + if (! child->errorGiven) { + error(infoSink, "Recursion detected:"); + infoSink.info << " " << call->callee << " calling " << child->callee << "\n"; + child->errorGiven = true; + recursive = true; + } + } else { + child->currentPath = true; + stack.push_back(&(*child)); + break; + } + } + } + if (child == callGraph.end()) { + // no more callees, we bottomed out, never look at this node again + stack.back()->currentPath = false; + stack.back()->visited = true; + stack.pop_back(); + } + } // end while, meaning nothing left to process in this subtree + + } while (newRoot); // redundant loop check; should always exit via the 'break' above +} + +// +// See which functions are reachable from the entry point and which have bodies. +// Reachable ones with missing bodies are errors. +// Unreachable bodies are dead code. +// +void TIntermediate::checkCallGraphBodies(TInfoSink& infoSink, bool keepUncalled) +{ + // Clear fields we'll use for this. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + call->visited = false; + call->calleeBodyPosition = -1; + } + + // The top level of the AST includes function definitions (bodies). + // Compare these to function calls in the call graph. + // We'll end up knowing which have bodies, and if so, + // how to map the call-graph node to the location in the AST. + TIntermSequence &functionSequence = getTreeRoot()->getAsAggregate()->getSequence(); + std::vector reachable(functionSequence.size(), true); // so that non-functions are reachable + for (int f = 0; f < (int)functionSequence.size(); ++f) { + glslang::TIntermAggregate* node = functionSequence[f]->getAsAggregate(); + if (node && (node->getOp() == glslang::EOpFunction)) { + if (node->getName().compare(getEntryPointMangledName().c_str()) != 0) + reachable[f] = false; // so that function bodies are unreachable, until proven otherwise + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->callee == node->getName()) + call->calleeBodyPosition = f; + } + } + } + + // Start call-graph traversal by visiting the entry point nodes. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->caller.compare(getEntryPointMangledName().c_str()) == 0) + call->visited = true; + } + + // Propagate 'visited' through the call-graph to every part of the graph it + // can reach (seeded with the entry-point setting above). 
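+    // For illustration (not part of the algorithm): with call-graph edges (main,A), (A,B),
+    // and (C,B), and 'main' as the entry point, the seeding above marks (main,A); the first
+    // pass below then marks (A,B), since the visited edge's callee A is its caller; a second
+    // pass finds nothing new and the loop exits. (C,B) is never visited, so C's body is never
+    // marked reachable and is pruned at the end when keepUncalled is false.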
+ bool changed; + do { + changed = false; + for (auto call1 = callGraph.begin(); call1 != callGraph.end(); ++call1) { + if (call1->visited) { + for (TGraph::iterator call2 = callGraph.begin(); call2 != callGraph.end(); ++call2) { + if (! call2->visited) { + if (call1->callee == call2->caller) { + changed = true; + call2->visited = true; + } + } + } + } + } + } while (changed); + + // Any call-graph node set to visited but without a callee body is an error. + for (TGraph::iterator call = callGraph.begin(); call != callGraph.end(); ++call) { + if (call->visited) { + if (call->calleeBodyPosition == -1) { + error(infoSink, "No function definition (body) found: "); + infoSink.info << " " << call->callee << "\n"; + } else + reachable[call->calleeBodyPosition] = true; + } + } + + // Bodies in the AST not reached by the call graph are dead; + // clear them out, since they can't be reached and also can't + // be translated further due to possibility of being ill defined. + if (! keepUncalled) { + for (int f = 0; f < (int)functionSequence.size(); ++f) { + if (! reachable[f]) + functionSequence[f] = nullptr; + } + functionSequence.erase(std::remove(functionSequence.begin(), functionSequence.end(), nullptr), functionSequence.end()); + } +} + +// +// Satisfy rules for location qualifiers on inputs and outputs +// +void TIntermediate::inOutLocationCheck(TInfoSink& infoSink) +{ + // ES 3.0 requires all outputs to have location qualifiers if there is more than one output + bool fragOutWithNoLocation = false; + int numFragOut = 0; + + // TODO: linker functionality: location collision checking + + TIntermSequence& linkObjects = findLinkerObjects()->getSequence(); + for (size_t i = 0; i < linkObjects.size(); ++i) { + const TType& type = linkObjects[i]->getAsTyped()->getType(); + const TQualifier& qualifier = type.getQualifier(); + if (language == EShLangFragment) { + if (qualifier.storage == EvqVaryingOut && qualifier.builtIn == EbvNone) { + ++numFragOut; + if (!qualifier.hasAnyLocation()) + fragOutWithNoLocation = true; + } + } + } + + if (isEsProfile()) { + if (numFragOut > 1 && fragOutWithNoLocation) + error(infoSink, "when more than one fragment shader output, all must have location qualifiers"); + } +} + +TIntermAggregate* TIntermediate::findLinkerObjects() const +{ + // Get the top-level globals + TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence(); + + // Get the last member of the sequences, expected to be the linker-object lists + assert(globals.back()->getAsAggregate()->getOp() == EOpLinkerObjects); + + return globals.back()->getAsAggregate(); +} + +// See if a variable was both a user-declared output and used. +// Note: the spec discusses writing to one, but this looks at read or write, which +// is more useful, and perhaps the spec should be changed to reflect that. +bool TIntermediate::userOutputUsed() const +{ + const TIntermSequence& linkerObjects = findLinkerObjects()->getSequence(); + + bool found = false; + for (size_t i = 0; i < linkerObjects.size(); ++i) { + const TIntermSymbol& symbolNode = *linkerObjects[i]->getAsSymbolNode(); + if (symbolNode.getQualifier().storage == EvqVaryingOut && + symbolNode.getName().compare(0, 3, "gl_") != 0 && + inIoAccessed(symbolNode.getName())) { + found = true; + break; + } + } + + return found; +} + +// Accumulate locations used for inputs, outputs, and uniforms, and check for collisions +// as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
+// +// typeCollision is set to true if there is no direct collision, but the types in the same location +// are different. +// +int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& type, bool& typeCollision) +{ + typeCollision = false; + + int set; + if (qualifier.isPipeInput()) + set = 0; + else if (qualifier.isPipeOutput()) + set = 1; + else if (qualifier.storage == EvqUniform) + set = 2; + else if (qualifier.storage == EvqBuffer) + set = 3; + else + return -1; + + int size; + if (qualifier.isUniformOrBuffer() || qualifier.isTaskMemory()) { + if (type.isSizedArray()) + size = type.getCumulativeArraySize(); + else + size = 1; + } else { + // Strip off the outer array dimension for those having an extra one. + if (type.isArray() && qualifier.isArrayedIo(language)) { + TType elementType(type, 0); + size = computeTypeLocationSize(elementType, language); + } else + size = computeTypeLocationSize(type, language); + } + + // Locations, and components within locations. + // + // Almost always, dealing with components means a single location is involved. + // The exception is a dvec3. From the spec: + // + // "A dvec3 will consume all four components of the first location and components 0 and 1 of + // the second location. This leaves components 2 and 3 available for other component-qualified + // declarations." + // + // That means, without ever mentioning a component, a component range + // for a different location gets specified, if it's not a vertex shader input. (!) + // (A vertex shader input will show using only one location, even for a dvec3/4.) + // + // So, for the case of dvec3, we need two independent ioRanges. + + int collision = -1; // no collision +#ifndef GLSLANG_WEB + if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 && + (qualifier.isPipeInput() || qualifier.isPipeOutput())) { + // Dealing with dvec3 in/out split across two locations. + // Need two io-ranges. + // The case where the dvec3 doesn't start at component 0 was previously caught as overflow. + + // First range: + TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation); + TRange componentRange(0, 3); + TIoRange range(locationRange, componentRange, type.getBasicType(), 0); + + // check for collisions + collision = checkLocationRange(set, range, type, typeCollision); + if (collision < 0) { + usedIo[set].push_back(range); + + // Second range: + TRange locationRange2(qualifier.layoutLocation + 1, qualifier.layoutLocation + 1); + TRange componentRange2(0, 1); + TIoRange range2(locationRange2, componentRange2, type.getBasicType(), 0); + + // check for collisions + collision = checkLocationRange(set, range2, type, typeCollision); + if (collision < 0) + usedIo[set].push_back(range2); + } + } else +#endif + { + // Not a dvec3 in/out split across two locations, generic path. + // Need a single IO-range block. + + TRange locationRange(qualifier.layoutLocation, qualifier.layoutLocation + size - 1); + TRange componentRange(0, 3); + if (qualifier.hasComponent() || type.getVectorSize() > 0) { + int consumedComponents = type.getVectorSize() * (type.getBasicType() == EbtDouble ? 2 : 1); + if (qualifier.hasComponent()) + componentRange.start = qualifier.layoutComponent; + componentRange.last = componentRange.start + consumedComponents - 1; + } + + // combine location and component ranges + TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? 
qualifier.getIndex() : 0); + + // check for collisions, except for vertex inputs on desktop targeting OpenGL + if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0) + collision = checkLocationRange(set, range, type, typeCollision); + + if (collision < 0) + usedIo[set].push_back(range); + } + + return collision; +} + +// Compare a new (the passed in) 'range' against the existing set, and see +// if there are any collisions. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. +// +int TIntermediate::checkLocationRange(int set, const TIoRange& range, const TType& type, bool& typeCollision) +{ + for (size_t r = 0; r < usedIo[set].size(); ++r) { + if (range.overlap(usedIo[set][r])) { + // there is a collision; pick one + return std::max(range.location.start, usedIo[set][r].location.start); + } else if (range.location.overlap(usedIo[set][r].location) && type.getBasicType() != usedIo[set][r].basicType) { + // aliased-type mismatch + typeCollision = true; + return std::max(range.location.start, usedIo[set][r].location.start); + } + } + + return -1; // no collision +} + +// Accumulate bindings and offsets, and check for collisions +// as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. +// +int TIntermediate::addUsedOffsets(int binding, int offset, int numOffsets) +{ + TRange bindingRange(binding, binding); + TRange offsetRange(offset, offset + numOffsets - 1); + TOffsetRange range(bindingRange, offsetRange); + + // check for collisions, except for vertex inputs on desktop + for (size_t r = 0; r < usedAtomics.size(); ++r) { + if (range.overlap(usedAtomics[r])) { + // there is a collision; pick one + return std::max(offset, usedAtomics[r].offset.start); + } + } + + usedAtomics.push_back(range); + + return -1; // no collision +} + +// Accumulate used constant_id values. +// +// Return false is one was already used. +bool TIntermediate::addUsedConstantId(int id) +{ + if (usedConstantId.find(id) != usedConstantId.end()) + return false; + + usedConstantId.insert(id); + + return true; +} + +// Recursively figure out how many locations are used up by an input or output type. +// Return the size of type, as measured by "locations". +int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage) +{ + // "If the declared input is an array of size n and each element takes m locations, it will be assigned m * n + // consecutive locations..." + if (type.isArray()) { + // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness + // TODO: are there valid cases of having an unsized array with a location? If so, running this code too early. + TType elementType(type, 0); + if (type.isSizedArray() && !type.getQualifier().isPerView()) + return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage); + else { +#ifndef GLSLANG_WEB + // unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];" + elementType.getQualifier().perViewNV = false; +#endif + return computeTypeLocationSize(elementType, stage); + } + } + + // "The locations consumed by block and structure members are determined by applying the rules above + // recursively..." 
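+    // For example, for a non-vertex-input declaration such as
+    //     struct S { dvec4 a; float b; mat3 c; };
+    // the recursion below yields 2 (dvec4) + 1 (float) + 3 (mat3, three vec3 columns) = 6 locations.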
+ if (type.isStruct()) { + int size = 0; + for (int member = 0; member < (int)type.getStruct()->size(); ++member) { + TType memberType(type, member); + size += computeTypeLocationSize(memberType, stage); + } + return size; + } + + // ES: "If a shader input is any scalar or vector type, it will consume a single location." + + // Desktop: "If a vertex shader input is any scalar or vector type, it will consume a single location. If a non-vertex + // shader input is a scalar or vector type other than dvec3 or dvec4, it will consume a single location, while + // types dvec3 or dvec4 will consume two consecutive locations. Inputs of type double and dvec2 will + // consume only a single location, in all stages." + if (type.isScalar()) + return 1; + if (type.isVector()) { + if (stage == EShLangVertex && type.getQualifier().isPipeInput()) + return 1; + if (type.getBasicType() == EbtDouble && type.getVectorSize() > 2) + return 2; + else + return 1; + } + + // "If the declared input is an n x m single- or double-precision matrix, ... + // The number of locations assigned for each matrix will be the same as + // for an n-element array of m-component vectors..." + if (type.isMatrix()) { + TType columnType(type, 0); + return type.getMatrixCols() * computeTypeLocationSize(columnType, stage); + } + + assert(0); + return 1; +} + +// Same as computeTypeLocationSize but for uniforms +int TIntermediate::computeTypeUniformLocationSize(const TType& type) +{ + // "Individual elements of a uniform array are assigned + // consecutive locations with the first element taking location + // location." + if (type.isArray()) { + // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness + TType elementType(type, 0); + if (type.isSizedArray()) { + return type.getOuterArraySize() * computeTypeUniformLocationSize(elementType); + } else { + // TODO: are there valid cases of having an implicitly-sized array with a location? If so, running this code too early. + return computeTypeUniformLocationSize(elementType); + } + } + + // "Each subsequent inner-most member or element gets incremental + // locations for the entire structure or array." + if (type.isStruct()) { + int size = 0; + for (int member = 0; member < (int)type.getStruct()->size(); ++member) { + TType memberType(type, member); + size += computeTypeUniformLocationSize(memberType); + } + return size; + } + + return 1; +} + +#ifndef GLSLANG_WEB + +// Accumulate xfb buffer ranges and check for collisions as the accumulation is done. +// +// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value. 
+//
+int TIntermediate::addXfbBufferOffset(const TType& type)
+{
+    const TQualifier& qualifier = type.getQualifier();
+
+    assert(qualifier.hasXfbOffset() && qualifier.hasXfbBuffer());
+    TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
+
+    // compute the range
+    unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
+    buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
+    TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
+
+    // check for collisions
+    for (size_t r = 0; r < buffer.ranges.size(); ++r) {
+        if (range.overlap(buffer.ranges[r])) {
+            // there is a collision; pick an example to return
+            return std::max(range.start, buffer.ranges[r].start);
+        }
+    }
+
+    buffer.ranges.push_back(range);
+
+    return -1; // no collision
+}
+
+// Recursively figure out how many bytes of xfb buffer are used by the given type.
+// Return the size of type, in bytes.
+// Sets contains64BitType to true if the type contains a 64-bit data type.
+// Sets contains32BitType to true if the type contains a 32-bit data type.
+// Sets contains16BitType to true if the type contains a 16-bit data type.
+// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
+unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
+{
+    // "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+    // and the space taken in the buffer will be a multiple of 8.
+    // ...within the qualified entity, subsequent components are each
+    // assigned, in order, to the next available offset aligned to a multiple of
+    // that component's size. Aggregate types are flattened down to the component
+    // level to get this sequence of components."
+
+    if (type.isSizedArray()) {
+        // TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
+        // Use of an unsized array with xfb should be a compile error.
+        TType elementType(type, 0);
+        return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
+    }
+
+    if (type.isStruct()) {
+        unsigned int size = 0;
+        bool structContains64BitType = false;
+        bool structContains32BitType = false;
+        bool structContains16BitType = false;
+        for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
+            TType memberType(type, member);
+            // "... if applied to
+            // an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
+            // and the space taken in the buffer will be a multiple of 8."
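+            // e.g. for struct { float f; double d; }: after 'f' the running size is 4; 'd' is a
+            // 64-bit member, so the size is first rounded up to 8 and then grows to 16; the
+            // struct as a whole is rounded up to a multiple of 8 again below.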
+ bool memberContains64BitType = false; + bool memberContains32BitType = false; + bool memberContains16BitType = false; + int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType); + if (memberContains64BitType) { + structContains64BitType = true; + RoundToPow2(size, 8); + } else if (memberContains32BitType) { + structContains32BitType = true; + RoundToPow2(size, 4); + } else if (memberContains16BitType) { + structContains16BitType = true; + RoundToPow2(size, 2); + } + size += memberSize; + } + + if (structContains64BitType) { + contains64BitType = true; + RoundToPow2(size, 8); + } else if (structContains32BitType) { + contains32BitType = true; + RoundToPow2(size, 4); + } else if (structContains16BitType) { + contains16BitType = true; + RoundToPow2(size, 2); + } + return size; + } + + int numComponents; + if (type.isScalar()) + numComponents = 1; + else if (type.isVector()) + numComponents = type.getVectorSize(); + else if (type.isMatrix()) + numComponents = type.getMatrixCols() * type.getMatrixRows(); + else { + assert(0); + numComponents = 1; + } + + if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) { + contains64BitType = true; + return 8 * numComponents; + } else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) { + contains16BitType = true; + return 2 * numComponents; + } else if (type.getBasicType() == EbtInt8 || type.getBasicType() == EbtUint8) + return numComponents; + else { + contains32BitType = true; + return 4 * numComponents; + } +} + +#endif + +const int baseAlignmentVec4Std140 = 16; + +// Return the size and alignment of a component of the given type. +// The size is returned in the 'size' parameter +// Return value is the alignment.. +int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size) +{ +#ifdef GLSLANG_WEB + size = 4; return 4; +#endif + + switch (type.getBasicType()) { + case EbtInt64: + case EbtUint64: + case EbtDouble: size = 8; return 8; + case EbtFloat16: size = 2; return 2; + case EbtInt8: + case EbtUint8: size = 1; return 1; + case EbtInt16: + case EbtUint16: size = 2; return 2; + case EbtReference: size = 8; return 8; + default: size = 4; return 4; + } +} + +// Implement base-alignment and size rules from section 7.6.2.2 Standard Uniform Block Layout +// Operates recursively. +// +// If std140 is true, it does the rounding up to vec4 size required by std140, +// otherwise it does not, yielding std430 rules. +// +// The size is returned in the 'size' parameter +// +// The stride is only non-0 for arrays or matrices, and is the stride of the +// top-level object nested within the type. E.g., for an array of matrices, +// it is the distances needed between matrices, despite the rules saying the +// stride comes from the flattening down to vectors. +// +// Return value is the alignment of the type. +int TIntermediate::getBaseAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor) +{ + int alignment; + + bool std140 = layoutPacking == glslang::ElpStd140; + // When using the std140 storage layout, structures will be laid out in buffer + // storage with its members stored in monotonically increasing order based on their + // location in the declaration. 
A structure and each structure member have a base + // offset and a base alignment, from which an aligned offset is computed by rounding + // the base offset up to a multiple of the base alignment. The base offset of the first + // member of a structure is taken from the aligned offset of the structure itself. The + // base offset of all other structure members is derived by taking the offset of the + // last basic machine unit consumed by the previous member and adding one. Each + // structure member is stored in memory at its aligned offset. The members of a top- + // level uniform block are laid out in buffer storage by treating the uniform block as + // a structure with a base offset of zero. + // + // 1. If the member is a scalar consuming N basic machine units, the base alignment is N. + // + // 2. If the member is a two- or four-component vector with components consuming N basic + // machine units, the base alignment is 2N or 4N, respectively. + // + // 3. If the member is a three-component vector with components consuming N + // basic machine units, the base alignment is 4N. + // + // 4. If the member is an array of scalars or vectors, the base alignment and array + // stride are set to match the base alignment of a single array element, according + // to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The + // array may have padding at the end; the base offset of the member following + // the array is rounded up to the next multiple of the base alignment. + // + // 5. If the member is a column-major matrix with C columns and R rows, the + // matrix is stored identically to an array of C column vectors with R + // components each, according to rule (4). + // + // 6. If the member is an array of S column-major matrices with C columns and + // R rows, the matrix is stored identically to a row of S X C column vectors + // with R components each, according to rule (4). + // + // 7. If the member is a row-major matrix with C columns and R rows, the matrix + // is stored identically to an array of R row vectors with C components each, + // according to rule (4). + // + // 8. If the member is an array of S row-major matrices with C columns and R + // rows, the matrix is stored identically to a row of S X R row vectors with C + // components each, according to rule (4). + // + // 9. If the member is a structure, the base alignment of the structure is N , where + // N is the largest base alignment value of any of its members, and rounded + // up to the base alignment of a vec4. The individual members of this substructure + // are then assigned offsets by applying this set of rules recursively, + // where the base offset of the first member of the sub-structure is equal to the + // aligned offset of the structure. The structure may have padding at the end; + // the base offset of the member following the sub-structure is rounded up to + // the next multiple of the base alignment of the structure. + // + // 10. If the member is an array of S structures, the S elements of the array are laid + // out in order, according to rule (9). + // + // Assuming, for rule 10: The stride is the same as the size of an element. 
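+    // Rough worked example under std140, applying the rules above to
+    //     uniform U { float f; vec3 v; float g; float a[2]; };
+    // 'f' sits at offset 0 (rule 1, alignment 4), 'v' is rounded up to offset 16 (rule 3,
+    // alignment 16), 'g' follows at offset 28 (rule 1), and 'a' gets an array stride of 16
+    // (rule 4, element alignment rounded up to a vec4), so it starts at offset 32 and
+    // occupies 32 bytes, for a total block size of 64.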
+ + stride = 0; + int dummyStride; + + // rules 4, 6, 8, and 10 + if (type.isArray()) { + // TODO: perf: this might be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness + TType derefType(type, 0); + alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor); + if (std140) + alignment = std::max(baseAlignmentVec4Std140, alignment); + RoundToPow2(size, alignment); + stride = size; // uses full matrix size for stride of an array of matrices (not quite what rule 6/8, but what's expected) + // uses the assumption for rule 10 in the comment above + // use one element to represent the last member of SSBO which is unsized array + int arraySize = (type.isUnsizedArray() && (type.getOuterArraySize() == 0)) ? 1 : type.getOuterArraySize(); + size = stride * arraySize; + return alignment; + } + + // rule 9 + if (type.getBasicType() == EbtStruct) { + const TTypeList& memberList = *type.getStruct(); + + size = 0; + int maxAlignment = std140 ? baseAlignmentVec4Std140 : 0; + for (size_t m = 0; m < memberList.size(); ++m) { + int memberSize; + // modify just the children's view of matrix layout, if there is one for this member + TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix; + int memberAlignment = getBaseAlignment(*memberList[m].type, memberSize, dummyStride, layoutPacking, + (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor); + maxAlignment = std::max(maxAlignment, memberAlignment); + RoundToPow2(size, memberAlignment); + size += memberSize; + } + + // The structure may have padding at the end; the base offset of + // the member following the sub-structure is rounded up to the next + // multiple of the base alignment of the structure. + RoundToPow2(size, maxAlignment); + + return maxAlignment; + } + + // rule 1 + if (type.isScalar()) + return getBaseAlignmentScalar(type, size); + + // rules 2 and 3 + if (type.isVector()) { + int scalarAlign = getBaseAlignmentScalar(type, size); + switch (type.getVectorSize()) { + case 1: // HLSL has this, GLSL does not + return scalarAlign; + case 2: + size *= 2; + return 2 * scalarAlign; + default: + size *= type.getVectorSize(); + return 4 * scalarAlign; + } + } + + // rules 5 and 7 + if (type.isMatrix()) { + // rule 5: deref to row, not to column, meaning the size of vector is num columns instead of num rows + TType derefType(type, 0, rowMajor); + + alignment = getBaseAlignment(derefType, size, dummyStride, layoutPacking, rowMajor); + if (std140) + alignment = std::max(baseAlignmentVec4Std140, alignment); + RoundToPow2(size, alignment); + stride = size; // use intra-matrix stride for stride of a just a matrix + if (rowMajor) + size = stride * type.getMatrixRows(); + else + size = stride * type.getMatrixCols(); + + return alignment; + } + + assert(0); // all cases should be covered above + size = baseAlignmentVec4Std140; + return baseAlignmentVec4Std140; +} + +// To aid the basic HLSL rule about crossing vec4 boundaries. +bool TIntermediate::improperStraddle(const TType& type, int size, int offset) +{ + if (! type.isVector() || type.isArray()) + return false; + + return size <= 16 ? 
offset / 16 != (offset + size - 1) / 16 + : offset % 16 != 0; +} + +int TIntermediate::getScalarAlignment(const TType& type, int& size, int& stride, bool rowMajor) +{ + int alignment; + + stride = 0; + int dummyStride; + + if (type.isArray()) { + TType derefType(type, 0); + alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor); + + stride = size; + RoundToPow2(stride, alignment); + + size = stride * (type.getOuterArraySize() - 1) + size; + return alignment; + } + + if (type.getBasicType() == EbtStruct) { + const TTypeList& memberList = *type.getStruct(); + + size = 0; + int maxAlignment = 0; + for (size_t m = 0; m < memberList.size(); ++m) { + int memberSize; + // modify just the children's view of matrix layout, if there is one for this member + TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix; + int memberAlignment = getScalarAlignment(*memberList[m].type, memberSize, dummyStride, + (subMatrixLayout != ElmNone) ? (subMatrixLayout == ElmRowMajor) : rowMajor); + maxAlignment = std::max(maxAlignment, memberAlignment); + RoundToPow2(size, memberAlignment); + size += memberSize; + } + + return maxAlignment; + } + + if (type.isScalar()) + return getBaseAlignmentScalar(type, size); + + if (type.isVector()) { + int scalarAlign = getBaseAlignmentScalar(type, size); + + size *= type.getVectorSize(); + return scalarAlign; + } + + if (type.isMatrix()) { + TType derefType(type, 0, rowMajor); + + alignment = getScalarAlignment(derefType, size, dummyStride, rowMajor); + + stride = size; // use intra-matrix stride for stride of a just a matrix + if (rowMajor) + size = stride * type.getMatrixRows(); + else + size = stride * type.getMatrixCols(); + + return alignment; + } + + assert(0); // all cases should be covered above + size = 1; + return 1; +} + +int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor) +{ + if (layoutPacking == glslang::ElpScalar) { + return getScalarAlignment(type, size, stride, rowMajor); + } else { + return getBaseAlignment(type, size, stride, layoutPacking, rowMajor); + } +} + +// shared calculation by getOffset and getOffsets +void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize) +{ + int dummyStride; + + // modify just the children's view of matrix layout, if there is one for this member + TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix; + int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride, + parentType.getQualifier().layoutPacking, + subMatrixLayout != ElmNone + ? subMatrixLayout == ElmRowMajor + : parentType.getQualifier().layoutMatrix == ElmRowMajor); + RoundToPow2(offset, memberAlignment); +} + +// Lookup or calculate the offset of a block member, using the recursively +// defined block offset rules. +int TIntermediate::getOffset(const TType& type, int index) +{ + const TTypeList& memberList = *type.getStruct(); + + // Don't calculate offset if one is present, it could be user supplied + // and different than what would be calculated. That is, this is faster, + // but not just an optimization. 
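+    // (e.g. a member declared with an explicit layout(offset = N) keeps that N, even when
+    // the packing rules below would compute a different offset)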
+ if (memberList[index].type->getQualifier().hasOffset()) + return memberList[index].type->getQualifier().layoutOffset; + + int memberSize = 0; + int offset = 0; + for (int m = 0; m <= index; ++m) { + updateOffset(type, *memberList[m].type, offset, memberSize); + + if (m < index) + offset += memberSize; + } + + return offset; +} + +// Calculate the block data size. +// Block arrayness is not taken into account, each element is backed by a separate buffer. +int TIntermediate::getBlockSize(const TType& blockType) +{ + const TTypeList& memberList = *blockType.getStruct(); + int lastIndex = (int)memberList.size() - 1; + int lastOffset = getOffset(blockType, lastIndex); + + int lastMemberSize; + int dummyStride; + getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride, + blockType.getQualifier().layoutPacking, + blockType.getQualifier().layoutMatrix == ElmRowMajor); + + return lastOffset + lastMemberSize; +} + +int TIntermediate::computeBufferReferenceTypeSize(const TType& type) +{ + assert(type.isReference()); + int size = getBlockSize(*type.getReferentType()); + + int align = type.getBufferReferenceAlignment(); + + if (align) { + size = (size + align - 1) & ~(align-1); + } + + return size; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/localintermediate.h b/src/third_party/glslang/glslang/MachineIndependent/localintermediate.h new file mode 100644 index 0000000..3cdc1f1 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/localintermediate.h @@ -0,0 +1,1052 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2016 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#ifndef _LOCAL_INTERMEDIATE_INCLUDED_ +#define _LOCAL_INTERMEDIATE_INCLUDED_ + +#include "../Include/intermediate.h" +#include "../Public/ShaderLang.h" +#include "Versions.h" + +#include +#include +#include +#include +#include + +class TInfoSink; + +namespace glslang { + +struct TMatrixSelector { + int coord1; // stay agnostic about column/row; this is parse order + int coord2; +}; + +typedef int TVectorSelector; + +const int MaxSwizzleSelectors = 4; + +template +class TSwizzleSelectors { +public: + TSwizzleSelectors() : size_(0) { } + + void push_back(selectorType comp) + { + if (size_ < MaxSwizzleSelectors) + components[size_++] = comp; + } + void resize(int s) + { + assert(s <= size_); + size_ = s; + } + int size() const { return size_; } + selectorType operator[](int i) const + { + assert(i < MaxSwizzleSelectors); + return components[i]; + } + +private: + int size_; + selectorType components[MaxSwizzleSelectors]; +}; + +// +// Some helper structures for TIntermediate. Their contents are encapsulated +// by TIntermediate. +// + +// Used for call-graph algorithms for detecting recursion, missing bodies, and dead bodies. +// A "call" is a pair: . +// There can be duplicates. General assumption is the list is small. +struct TCall { + TCall(const TString& pCaller, const TString& pCallee) : caller(pCaller), callee(pCallee) { } + TString caller; + TString callee; + bool visited; + bool currentPath; + bool errorGiven; + int calleeBodyPosition; +}; + +// A generic 1-D range. +struct TRange { + TRange(int start, int last) : start(start), last(last) { } + bool overlap(const TRange& rhs) const + { + return last >= rhs.start && start <= rhs.last; + } + int start; + int last; +}; + +// An IO range is a 3-D rectangle; the set of (location, component, index) triples all lying +// within the same location range, component range, and index value. Locations don't alias unless +// all other dimensions of their range overlap. +struct TIoRange { + TIoRange(TRange location, TRange component, TBasicType basicType, int index) + : location(location), component(component), basicType(basicType), index(index) { } + bool overlap(const TIoRange& rhs) const + { + return location.overlap(rhs.location) && component.overlap(rhs.component) && index == rhs.index; + } + TRange location; + TRange component; + TBasicType basicType; + int index; +}; + +// An offset range is a 2-D rectangle; the set of (binding, offset) pairs all lying +// within the same binding and offset range. +struct TOffsetRange { + TOffsetRange(TRange binding, TRange offset) + : binding(binding), offset(offset) { } + bool overlap(const TOffsetRange& rhs) const + { + return binding.overlap(rhs.binding) && offset.overlap(rhs.offset); + } + TRange binding; + TRange offset; +}; + +#ifndef GLSLANG_WEB +// Things that need to be tracked per xfb buffer. +struct TXfbBuffer { + TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false), + contains32BitType(false), contains16BitType(false) { } + std::vector ranges; // byte offsets that have already been assigned + unsigned int stride; + unsigned int implicitStride; + bool contains64BitType; + bool contains32BitType; + bool contains16BitType; +}; +#endif + +// Track a set of strings describing how the module was processed. +// This includes command line options, transforms, etc., ideally inclusive enough +// to reproduce the steps used to transform the input source to the output. +// E.g., see SPIR-V OpModuleProcessed. 
+// Each "process" or "transform" uses is expressed in the form: +// process arg0 arg1 arg2 ... +// process arg0 arg1 arg2 ... +// where everything is textual, and there can be zero or more arguments +class TProcesses { +public: + TProcesses() {} + ~TProcesses() {} + + void addProcess(const char* process) + { + processes.push_back(process); + } + void addProcess(const std::string& process) + { + processes.push_back(process); + } + void addArgument(int arg) + { + processes.back().append(" "); + std::string argString = std::to_string(arg); + processes.back().append(argString); + } + void addArgument(const char* arg) + { + processes.back().append(" "); + processes.back().append(arg); + } + void addArgument(const std::string& arg) + { + processes.back().append(" "); + processes.back().append(arg); + } + void addIfNonZero(const char* process, int value) + { + if (value != 0) { + addProcess(process); + addArgument(value); + } + } + + const std::vector& getProcesses() const { return processes; } + +private: + std::vector processes; +}; + +class TSymbolTable; +class TSymbol; +class TVariable; + +// +// Texture and Sampler transformation mode. +// +enum ComputeDerivativeMode { + LayoutDerivativeNone, // default layout as SPV_NV_compute_shader_derivatives not enabled + LayoutDerivativeGroupQuads, // derivative_group_quadsNV + LayoutDerivativeGroupLinear, // derivative_group_linearNV +}; + +class TIdMaps { +public: + TMap& operator[](int i) { return maps[i]; } + const TMap& operator[](int i) const { return maps[i]; } +private: + TMap maps[EsiCount]; +}; + +class TNumericFeatures { +public: + TNumericFeatures() : features(0) { } + TNumericFeatures(const TNumericFeatures&) = delete; + TNumericFeatures& operator=(const TNumericFeatures&) = delete; + typedef enum : unsigned int { + shader_explicit_arithmetic_types = 1 << 0, + shader_explicit_arithmetic_types_int8 = 1 << 1, + shader_explicit_arithmetic_types_int16 = 1 << 2, + shader_explicit_arithmetic_types_int32 = 1 << 3, + shader_explicit_arithmetic_types_int64 = 1 << 4, + shader_explicit_arithmetic_types_float16 = 1 << 5, + shader_explicit_arithmetic_types_float32 = 1 << 6, + shader_explicit_arithmetic_types_float64 = 1 << 7, + shader_implicit_conversions = 1 << 8, + gpu_shader_fp64 = 1 << 9, + gpu_shader_int16 = 1 << 10, + gpu_shader_half_float = 1 << 11, + } feature; + void insert(feature f) { features |= f; } + void erase(feature f) { features &= ~f; } + bool contains(feature f) const { return (features & f) != 0; } +private: + unsigned int features; +}; + +// +// Set of helper functions to help parse and build the tree. 
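+// Broadly, a TIntermediate owns the AST for one shader stage: the front end builds and
+// promotes nodes through it, and the merge/link validation passes operate on the same object.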
+// +class TIntermediate { +public: + explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) : + language(l), +#ifndef GLSLANG_ANGLE + profile(p), version(v), +#endif + treeRoot(0), + numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false), + invertY(false), + useStorageBuffer(false), + nanMinMaxClamp(false), + depthReplacing(false) +#ifndef GLSLANG_WEB + , + implicitThisName("@this"), implicitCounterName("@count"), + source(EShSourceNone), + useVulkanMemoryModel(false), + invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet), + inputPrimitive(ElgNone), outputPrimitive(ElgNone), + pixelCenterInteger(false), originUpperLeft(false), + vertexSpacing(EvsNone), vertexOrder(EvoNone), interlockOrdering(EioNone), pointMode(false), earlyFragmentTests(false), + postDepthCoverage(false), depthLayout(EldNone), + hlslFunctionality1(false), + blendEquations(0), xfbMode(false), multiStream(false), + layoutOverrideCoverage(false), + geoPassthroughEXT(false), + numShaderRecordBlocks(0), + computeDerivativeMode(LayoutDerivativeNone), + primitives(TQualifier::layoutNotSet), + numTaskNVBlocks(0), + layoutPrimitiveCulling(false), + autoMapBindings(false), + autoMapLocations(false), + flattenUniformArrays(false), + useUnknownFormat(false), + hlslOffsets(false), + hlslIoMapping(false), + useVariablePointers(false), + textureSamplerTransformMode(EShTexSampTransKeep), + needToLegalize(false), + binaryDoubleOutput(false), + usePhysicalStorageBuffer(false), + uniformLocationBase(0) +#endif + { + localSize[0] = 1; + localSize[1] = 1; + localSize[2] = 1; + localSizeNotDefault[0] = false; + localSizeNotDefault[1] = false; + localSizeNotDefault[2] = false; + localSizeSpecId[0] = TQualifier::layoutNotSet; + localSizeSpecId[1] = TQualifier::layoutNotSet; + localSizeSpecId[2] = TQualifier::layoutNotSet; +#ifndef GLSLANG_WEB + xfbBuffers.resize(TQualifier::layoutXfbBufferEnd); + shiftBinding.fill(0); +#endif + } + + void setVersion(int v) + { +#ifndef GLSLANG_ANGLE + version = v; +#endif + } + void setProfile(EProfile p) + { +#ifndef GLSLANG_ANGLE + profile = p; +#endif + } + + int getVersion() const { return version; } + EProfile getProfile() const { return profile; } + void setSpv(const SpvVersion& s) + { + spvVersion = s; + + // client processes + if (spvVersion.vulkan > 0) + processes.addProcess("client vulkan100"); + if (spvVersion.openGl > 0) + processes.addProcess("client opengl100"); + + // target SPV + switch (spvVersion.spv) { + case 0: + break; + case EShTargetSpv_1_0: + break; + case EShTargetSpv_1_1: + processes.addProcess("target-env spirv1.1"); + break; + case EShTargetSpv_1_2: + processes.addProcess("target-env spirv1.2"); + break; + case EShTargetSpv_1_3: + processes.addProcess("target-env spirv1.3"); + break; + case EShTargetSpv_1_4: + processes.addProcess("target-env spirv1.4"); + break; + case EShTargetSpv_1_5: + processes.addProcess("target-env spirv1.5"); + break; + default: + processes.addProcess("target-env spirvUnknown"); + break; + } + + // target-environment processes + switch (spvVersion.vulkan) { + case 0: + break; + case EShTargetVulkan_1_0: + processes.addProcess("target-env vulkan1.0"); + break; + case EShTargetVulkan_1_1: + processes.addProcess("target-env vulkan1.1"); + break; + case EShTargetVulkan_1_2: + processes.addProcess("target-env vulkan1.2"); + break; + default: + processes.addProcess("target-env vulkanUnknown"); + break; + } + if (spvVersion.openGl > 0) + processes.addProcess("target-env opengl"); + } + const SpvVersion& 
getSpv() const { return spvVersion; } + EShLanguage getStage() const { return language; } + void addRequestedExtension(const char* extension) { requestedExtensions.insert(extension); } + const std::set& getRequestedExtensions() const { return requestedExtensions; } + + void setTreeRoot(TIntermNode* r) { treeRoot = r; } + TIntermNode* getTreeRoot() const { return treeRoot; } + void incrementEntryPointCount() { ++numEntryPoints; } + int getNumEntryPoints() const { return numEntryPoints; } + int getNumErrors() const { return numErrors; } + void addPushConstantCount() { ++numPushConstants; } + void setLimits(const TBuiltInResource& r) { resources = r; } + + bool postProcess(TIntermNode*, EShLanguage); + void removeTree(); + + void setEntryPointName(const char* ep) + { + entryPointName = ep; + processes.addProcess("entry-point"); + processes.addArgument(entryPointName); + } + void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; } + const std::string& getEntryPointName() const { return entryPointName; } + const std::string& getEntryPointMangledName() const { return entryPointMangledName; } + + void setInvertY(bool invert) + { + invertY = invert; + if (invertY) + processes.addProcess("invert-y"); + } + bool getInvertY() const { return invertY; } + +#ifdef ENABLE_HLSL + void setSource(EShSource s) { source = s; } + EShSource getSource() const { return source; } +#else + void setSource(EShSource s) { assert(s == EShSourceGlsl); (void)s; } + EShSource getSource() const { return EShSourceGlsl; } +#endif + + bool isRecursive() const { return recursive; } + + TIntermSymbol* addSymbol(const TVariable&); + TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&); + TIntermSymbol* addSymbol(const TType&, const TSourceLoc&); + TIntermSymbol* addSymbol(const TIntermSymbol&); + TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*); + std::tuple addPairConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1); + TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*); + TIntermTyped* addConversion(TBasicType convertTo, TIntermTyped* node) const; + void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode); + TIntermTyped* addShapeConversion(const TType&, TIntermTyped*); + TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, const TSourceLoc&); + TIntermTyped* addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc&); + TIntermTyped* addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, const TSourceLoc&); + TIntermTyped* addUnaryMath(TOperator, TIntermTyped* child, const TSourceLoc&); + TIntermTyped* addBuiltInFunctionCall(const TSourceLoc& line, TOperator, bool unary, TIntermNode*, const TType& returnType); + bool canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op = EOpNull) const; + bool isIntegralPromotion(TBasicType from, TBasicType to) const; + bool isFPPromotion(TBasicType from, TBasicType to) const; + bool isIntegralConversion(TBasicType from, TBasicType to) const; + bool isFPConversion(TBasicType from, TBasicType to) const; + bool isFPIntegralConversion(TBasicType from, TBasicType to) const; + TOperator mapTypeToConstructorOp(const TType&) const; + TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right); + TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc&); + TIntermAggregate* makeAggregate(TIntermNode* node); + TIntermAggregate* makeAggregate(TIntermNode* node, const 
TSourceLoc&); + TIntermAggregate* makeAggregate(const TSourceLoc&); + TIntermTyped* setAggregateOperator(TIntermNode*, TOperator, const TType& type, const TSourceLoc&); + bool areAllChildConst(TIntermAggregate* aggrNode); + TIntermSelection* addSelection(TIntermTyped* cond, TIntermNodePair code, const TSourceLoc&); + TIntermTyped* addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, const TSourceLoc&); + TIntermTyped* addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc&); + TIntermTyped* addMethod(TIntermTyped*, const TType&, const TString*, const TSourceLoc&); + TIntermConstantUnion* addConstantUnion(const TConstUnionArray&, const TType&, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(signed char, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(unsigned char, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(signed short, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(unsigned short, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(int, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(unsigned int, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(long long, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(unsigned long long, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(bool, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(double, TBasicType, const TSourceLoc&, bool literal = false) const; + TIntermConstantUnion* addConstantUnion(const TString*, const TSourceLoc&, bool literal = false) const; + TIntermTyped* promoteConstantUnion(TBasicType, TIntermConstantUnion*) const; + bool parseConstTree(TIntermNode*, TConstUnionArray, TOperator, const TType&, bool singleConstantParam = false); + TIntermLoop* addLoop(TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst, const TSourceLoc&); + TIntermAggregate* addForLoop(TIntermNode*, TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst, + const TSourceLoc&, TIntermLoop*&); + TIntermBranch* addBranch(TOperator, const TSourceLoc&); + TIntermBranch* addBranch(TOperator, TIntermTyped*, const TSourceLoc&); + template TIntermTyped* addSwizzle(TSwizzleSelectors&, const TSourceLoc&); + + // Low level functions to add nodes (no conversions or other higher level transformations) + // If a type is provided, the node's type will be set to it. 
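+    // (in contrast, helpers such as addBinaryMath and addUnaryMath above also apply implicit
+    // conversions, promotions, and constant folding before building a node)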
+ TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc&) const; + TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc&, + const TType&) const; + TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc&) const; + TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc&, const TType&) const; + + // Constant folding (in Constant.cpp) + TIntermTyped* fold(TIntermAggregate* aggrNode); + TIntermTyped* foldConstructor(TIntermAggregate* aggrNode); + TIntermTyped* foldDereference(TIntermTyped* node, int index, const TSourceLoc&); + TIntermTyped* foldSwizzle(TIntermTyped* node, TSwizzleSelectors& fields, const TSourceLoc&); + + // Tree ops + static const TIntermTyped* findLValueBase(const TIntermTyped*, bool swizzleOkay); + + // Linkage related + void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&); + void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&); + + void setUseStorageBuffer() { useStorageBuffer = true; } + bool usingStorageBuffer() const { return useStorageBuffer; } + void setDepthReplacing() { depthReplacing = true; } + bool isDepthReplacing() const { return depthReplacing; } + bool setLocalSize(int dim, int size) + { + if (localSizeNotDefault[dim]) + return size == localSize[dim]; + localSizeNotDefault[dim] = true; + localSize[dim] = size; + return true; + } + unsigned int getLocalSize(int dim) const { return localSize[dim]; } + bool setLocalSizeSpecId(int dim, int id) + { + if (localSizeSpecId[dim] != TQualifier::layoutNotSet) + return id == localSizeSpecId[dim]; + localSizeSpecId[dim] = id; + return true; + } + int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; } +#ifdef GLSLANG_WEB + void output(TInfoSink&, bool tree) { } + + bool isEsProfile() const { return false; } + bool getXfbMode() const { return false; } + bool isMultiStream() const { return false; } + TLayoutGeometry getOutputPrimitive() const { return ElgNone; } + bool getPostDepthCoverage() const { return false; } + bool getEarlyFragmentTests() const { return false; } + TLayoutDepth getDepth() const { return EldNone; } + bool getPixelCenterInteger() const { return false; } + void setOriginUpperLeft() { } + bool getOriginUpperLeft() const { return true; } + TInterlockOrdering getInterlockOrdering() const { return EioNone; } + + bool getAutoMapBindings() const { return false; } + bool getAutoMapLocations() const { return false; } + int getNumPushConstants() const { return 0; } + void addShaderRecordCount() { } + void addTaskNVCount() { } + void setUseVulkanMemoryModel() { } + bool usingVulkanMemoryModel() const { return false; } + bool usingPhysicalStorageBuffer() const { return false; } + bool usingVariablePointers() const { return false; } + unsigned getXfbStride(int buffer) const { return 0; } + bool hasLayoutDerivativeModeNone() const { return false; } + ComputeDerivativeMode getLayoutDerivativeModeNone() const { return LayoutDerivativeNone; } +#else + void output(TInfoSink&, bool tree); + + bool isEsProfile() const { return profile == EEsProfile; } + + void setShiftBinding(TResourceType res, unsigned int shift) + { + shiftBinding[res] = shift; + + const char* name = getResourceName(res); + if (name != nullptr) + processes.addIfNonZero(name, shift); + } + + unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; } + + void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned 
int set) + { + if (shift == 0) // ignore if there's no shift: it's a no-op. + return; + + shiftBindingForSet[res][set] = shift; + + const char* name = getResourceName(res); + if (name != nullptr) { + processes.addProcess(name); + processes.addArgument(shift); + processes.addArgument(set); + } + } + + int getShiftBindingForSet(TResourceType res, unsigned int set) const + { + const auto shift = shiftBindingForSet[res].find(set); + return shift == shiftBindingForSet[res].end() ? -1 : shift->second; + } + bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); } + + void setResourceSetBinding(const std::vector& shift) + { + resourceSetBinding = shift; + if (shift.size() > 0) { + processes.addProcess("resource-set-binding"); + for (int s = 0; s < (int)shift.size(); ++s) + processes.addArgument(shift[s]); + } + } + const std::vector& getResourceSetBinding() const { return resourceSetBinding; } + void setAutoMapBindings(bool map) + { + autoMapBindings = map; + if (autoMapBindings) + processes.addProcess("auto-map-bindings"); + } + bool getAutoMapBindings() const { return autoMapBindings; } + void setAutoMapLocations(bool map) + { + autoMapLocations = map; + if (autoMapLocations) + processes.addProcess("auto-map-locations"); + } + bool getAutoMapLocations() const { return autoMapLocations; } + +#ifdef ENABLE_HLSL + void setFlattenUniformArrays(bool flatten) + { + flattenUniformArrays = flatten; + if (flattenUniformArrays) + processes.addProcess("flatten-uniform-arrays"); + } + bool getFlattenUniformArrays() const { return flattenUniformArrays; } +#endif + void setNoStorageFormat(bool b) + { + useUnknownFormat = b; + if (useUnknownFormat) + processes.addProcess("no-storage-format"); + } + bool getNoStorageFormat() const { return useUnknownFormat; } + void setUseVulkanMemoryModel() + { + useVulkanMemoryModel = true; + processes.addProcess("use-vulkan-memory-model"); + } + bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; } + void setUsePhysicalStorageBuffer() + { + usePhysicalStorageBuffer = true; + } + bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; } + void setUseVariablePointers() + { + useVariablePointers = true; + processes.addProcess("use-variable-pointers"); + } + bool usingVariablePointers() const { return useVariablePointers; } + +#ifdef ENABLE_HLSL + template T addCounterBufferName(const T& name) const { return name + implicitCounterName; } + bool hasCounterBufferName(const TString& name) const { + size_t len = strlen(implicitCounterName); + return name.size() > len && + name.compare(name.size() - len, len, implicitCounterName) == 0; + } +#endif + + void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; } + int getNumPushConstants() const { return numPushConstants; } + void addShaderRecordCount() { ++numShaderRecordBlocks; } + void addTaskNVCount() { ++numTaskNVBlocks; } + + bool setInvocations(int i) + { + if (invocations != TQualifier::layoutNotSet) + return invocations == i; + invocations = i; + return true; + } + int getInvocations() const { return invocations; } + bool setVertices(int m) + { + if (vertices != TQualifier::layoutNotSet) + return vertices == m; + vertices = m; + return true; + } + int getVertices() const { return vertices; } + bool setInputPrimitive(TLayoutGeometry p) + { + if (inputPrimitive != ElgNone) + return inputPrimitive == p; + inputPrimitive = p; + return true; + } + TLayoutGeometry getInputPrimitive() const { return 
inputPrimitive; } + bool setVertexSpacing(TVertexSpacing s) + { + if (vertexSpacing != EvsNone) + return vertexSpacing == s; + vertexSpacing = s; + return true; + } + TVertexSpacing getVertexSpacing() const { return vertexSpacing; } + bool setVertexOrder(TVertexOrder o) + { + if (vertexOrder != EvoNone) + return vertexOrder == o; + vertexOrder = o; + return true; + } + TVertexOrder getVertexOrder() const { return vertexOrder; } + void setPointMode() { pointMode = true; } + bool getPointMode() const { return pointMode; } + + bool setInterlockOrdering(TInterlockOrdering o) + { + if (interlockOrdering != EioNone) + return interlockOrdering == o; + interlockOrdering = o; + return true; + } + TInterlockOrdering getInterlockOrdering() const { return interlockOrdering; } + + void setXfbMode() { xfbMode = true; } + bool getXfbMode() const { return xfbMode; } + void setMultiStream() { multiStream = true; } + bool isMultiStream() const { return multiStream; } + bool setOutputPrimitive(TLayoutGeometry p) + { + if (outputPrimitive != ElgNone) + return outputPrimitive == p; + outputPrimitive = p; + return true; + } + TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; } + void setPostDepthCoverage() { postDepthCoverage = true; } + bool getPostDepthCoverage() const { return postDepthCoverage; } + void setEarlyFragmentTests() { earlyFragmentTests = true; } + bool getEarlyFragmentTests() const { return earlyFragmentTests; } + bool setDepth(TLayoutDepth d) + { + if (depthLayout != EldNone) + return depthLayout == d; + depthLayout = d; + return true; + } + TLayoutDepth getDepth() const { return depthLayout; } + void setOriginUpperLeft() { originUpperLeft = true; } + bool getOriginUpperLeft() const { return originUpperLeft; } + void setPixelCenterInteger() { pixelCenterInteger = true; } + bool getPixelCenterInteger() const { return pixelCenterInteger; } + void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); } + unsigned int getBlendEquations() const { return blendEquations; } + bool setXfbBufferStride(int buffer, unsigned stride) + { + if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd) + return xfbBuffers[buffer].stride == stride; + xfbBuffers[buffer].stride = stride; + return true; + } + unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; } + int addXfbBufferOffset(const TType&); + unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const; + unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const; + void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; } + bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; } + void setGeoPassthroughEXT() { geoPassthroughEXT = true; } + bool getGeoPassthroughEXT() const { return geoPassthroughEXT; } + void setLayoutDerivativeMode(ComputeDerivativeMode mode) { computeDerivativeMode = mode; } + bool hasLayoutDerivativeModeNone() const { return computeDerivativeMode != LayoutDerivativeNone; } + ComputeDerivativeMode getLayoutDerivativeModeNone() const { return computeDerivativeMode; } + void setLayoutPrimitiveCulling() { layoutPrimitiveCulling = true; } + bool getLayoutPrimitiveCulling() const { return layoutPrimitiveCulling; } + bool setPrimitives(int m) + { + if (primitives != TQualifier::layoutNotSet) + return primitives == m; + primitives = m; + return true; + } + int getPrimitives() const { return primitives; } + const char* addSemanticName(const TString& name) + { + return 
semanticNameSet.insert(name).first->c_str(); + } + void addUniformLocationOverride(const char* nameStr, int location) + { + std::string name = nameStr; + uniformLocationOverrides[name] = location; + } + + int getUniformLocationOverride(const char* nameStr) const + { + std::string name = nameStr; + auto pos = uniformLocationOverrides.find(name); + if (pos == uniformLocationOverrides.end()) + return -1; + else + return pos->second; + } + + void setUniformLocationBase(int base) { uniformLocationBase = base; } + int getUniformLocationBase() const { return uniformLocationBase; } + + void setNeedsLegalization() { needToLegalize = true; } + bool needsLegalization() const { return needToLegalize; } + + void setBinaryDoubleOutput() { binaryDoubleOutput = true; } + bool getBinaryDoubleOutput() { return binaryDoubleOutput; } +#endif // GLSLANG_WEB + +#ifdef ENABLE_HLSL + void setHlslFunctionality1() { hlslFunctionality1 = true; } + bool getHlslFunctionality1() const { return hlslFunctionality1; } + void setHlslOffsets() + { + hlslOffsets = true; + if (hlslOffsets) + processes.addProcess("hlsl-offsets"); + } + bool usingHlslOffsets() const { return hlslOffsets; } + void setHlslIoMapping(bool b) + { + hlslIoMapping = b; + if (hlslIoMapping) + processes.addProcess("hlsl-iomap"); + } + bool usingHlslIoMapping() { return hlslIoMapping; } +#else + bool getHlslFunctionality1() const { return false; } + bool usingHlslOffsets() const { return false; } + bool usingHlslIoMapping() { return false; } +#endif + + void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee); + void merge(TInfoSink&, TIntermediate&); + void finalCheck(TInfoSink&, bool keepUncalled); + + bool buildConvertOp(TBasicType dst, TBasicType src, TOperator& convertOp) const; + TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const; + + void addIoAccessed(const TString& name) { ioAccessed.insert(name); } + bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); } + + int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision); + int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision); + int addUsedOffsets(int binding, int offset, int numOffsets); + bool addUsedConstantId(int id); + static int computeTypeLocationSize(const TType&, EShLanguage); + static int computeTypeUniformLocationSize(const TType&); + + static int getBaseAlignmentScalar(const TType&, int& size); + static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor); + static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor); + static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor); + static bool improperStraddle(const TType& type, int size, int offset); + static void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize); + static int getOffset(const TType& type, int index); + static int getBlockSize(const TType& blockType); + static int computeBufferReferenceTypeSize(const TType&); + bool promote(TIntermOperator*); + void setNanMinMaxClamp(bool setting) { nanMinMaxClamp = setting; } + bool getNanMinMaxClamp() const { return nanMinMaxClamp; } + + void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; } + const std::string& getSourceFile() const { return sourceFile; } + void addSourceText(const char* text, size_t len) { sourceText.append(text, len); } + const 
std::string& getSourceText() const { return sourceText; } + const std::map& getIncludeText() const { return includeText; } + void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text,len); } + void addProcesses(const std::vector& p) + { + for (int i = 0; i < (int)p.size(); ++i) + processes.addProcess(p[i]); + } + void addProcess(const std::string& process) { processes.addProcess(process); } + void addProcessArgument(const std::string& arg) { processes.addArgument(arg); } + const std::vector& getProcesses() const { return processes.getProcesses(); } + + // Certain explicit conversions are allowed conditionally +#ifdef GLSLANG_WEB + bool getArithemeticInt8Enabled() const { return false; } + bool getArithemeticInt16Enabled() const { return false; } + bool getArithemeticFloat16Enabled() const { return false; } + void updateNumericFeature(TNumericFeatures::feature f, bool on) { } +#else + bool getArithemeticInt8Enabled() const { + return numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8); + } + bool getArithemeticInt16Enabled() const { + return numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::gpu_shader_int16) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16); + } + + bool getArithemeticFloat16Enabled() const { + return numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) || + numericFeatures.contains(TNumericFeatures::gpu_shader_half_float) || + numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16); + } + void updateNumericFeature(TNumericFeatures::feature f, bool on) + { on ? 
numericFeatures.insert(f) : numericFeatures.erase(f); } +#endif + +protected: + TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&); + void error(TInfoSink& infoSink, const char*); + void warn(TInfoSink& infoSink, const char*); + void mergeCallGraphs(TInfoSink&, TIntermediate&); + void mergeModes(TInfoSink&, TIntermediate&); + void mergeTrees(TInfoSink&, TIntermediate&); + void seedIdMap(TIdMaps& idMaps, int& maxId); + void remapIds(const TIdMaps& idMaps, int idShift, TIntermediate&); + void mergeBodies(TInfoSink&, TIntermSequence& globals, const TIntermSequence& unitGlobals); + void mergeLinkerObjects(TInfoSink&, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects); + void mergeImplicitArraySizes(TType&, const TType&); + void mergeErrorCheck(TInfoSink&, const TIntermSymbol&, const TIntermSymbol&, bool crossStage); + void checkCallGraphCycles(TInfoSink&); + void checkCallGraphBodies(TInfoSink&, bool keepUncalled); + void inOutLocationCheck(TInfoSink&); + TIntermAggregate* findLinkerObjects() const; + bool userOutputUsed() const; + bool isSpecializationOperation(const TIntermOperator&) const; + bool isNonuniformPropagating(TOperator) const; + bool promoteUnary(TIntermUnary&); + bool promoteBinary(TIntermBinary&); + void addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable&, const TString&); + bool promoteAggregate(TIntermAggregate&); + void pushSelector(TIntermSequence&, const TVectorSelector&, const TSourceLoc&); + void pushSelector(TIntermSequence&, const TMatrixSelector&, const TSourceLoc&); + bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&); + void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root); + bool isConversionAllowed(TOperator op, TIntermTyped* node) const; + std::tuple getConversionDestinationType(TBasicType type0, TBasicType type1, TOperator op) const; + + static const char* getResourceName(TResourceType); + + const EShLanguage language; // stage, known at construction time + std::string entryPointName; + std::string entryPointMangledName; + typedef std::list TGraph; + TGraph callGraph; + +#ifdef GLSLANG_ANGLE + const EProfile profile = ECoreProfile; + const int version = 450; +#else + EProfile profile; // source profile + int version; // source version +#endif + SpvVersion spvVersion; + TIntermNode* treeRoot; + std::set requestedExtensions; // cumulation of all enabled or required extensions; not connected to what subset of the shader used them + TBuiltInResource resources; + int numEntryPoints; + int numErrors; + int numPushConstants; + bool recursive; + bool invertY; + bool useStorageBuffer; + bool nanMinMaxClamp; // true if desiring min/max/clamp to favor non-NaN over NaN + bool depthReplacing; + int localSize[3]; + bool localSizeNotDefault[3]; + int localSizeSpecId[3]; +#ifndef GLSLANG_WEB +public: + const char* const implicitThisName; + const char* const implicitCounterName; +protected: + EShSource source; // source language, known a bit later + bool useVulkanMemoryModel; + int invocations; + int vertices; + TLayoutGeometry inputPrimitive; + TLayoutGeometry outputPrimitive; + bool pixelCenterInteger; + bool originUpperLeft; + TVertexSpacing vertexSpacing; + TVertexOrder vertexOrder; + TInterlockOrdering interlockOrdering; + bool pointMode; + bool earlyFragmentTests; + bool postDepthCoverage; + TLayoutDepth depthLayout; + bool hlslFunctionality1; + int blendEquations; // an 'or'ing of masks of shifts of TBlendEquationShift 
+ bool xfbMode; + std::vector xfbBuffers; // all the data we need to track per xfb buffer + bool multiStream; + bool layoutOverrideCoverage; + bool geoPassthroughEXT; + int numShaderRecordBlocks; + ComputeDerivativeMode computeDerivativeMode; + int primitives; + int numTaskNVBlocks; + bool layoutPrimitiveCulling; + + // Base shift values + std::array shiftBinding; + + // Per-descriptor-set shift values + std::array, EResCount> shiftBindingForSet; + + std::vector resourceSetBinding; + bool autoMapBindings; + bool autoMapLocations; + bool flattenUniformArrays; + bool useUnknownFormat; + bool hlslOffsets; + bool hlslIoMapping; + bool useVariablePointers; + + std::set semanticNameSet; + + EShTextureSamplerTransformMode textureSamplerTransformMode; + + bool needToLegalize; + bool binaryDoubleOutput; + bool usePhysicalStorageBuffer; + + std::unordered_map uniformLocationOverrides; + int uniformLocationBase; + TNumericFeatures numericFeatures; +#endif + + std::unordered_set usedConstantId; // specialization constant ids used + std::vector usedAtomics; // sets of bindings used by atomic counters + std::vector usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers + // set of names of statically read/written I/O that might need extra checking + std::set ioAccessed; + // source code of shader, useful as part of debug information + std::string sourceFile; + std::string sourceText; + + // Included text. First string is a name, second is the included text + std::map includeText; + + // for OpModuleProcessed, or equivalent + TProcesses processes; + +private: + void operator=(TIntermediate&); // prevent assignments +}; + +} // end namespace glslang + +#endif // _LOCAL_INTERMEDIATE_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/parseConst.cpp b/src/third_party/glslang/glslang/MachineIndependent/parseConst.cpp new file mode 100644 index 0000000..7c04743 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/parseConst.cpp @@ -0,0 +1,214 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// Traverse a tree of constants to create a single folded constant. +// It should only be used when the whole tree is known to be constant. +// + +#include "ParseHelper.h" + +namespace glslang { + +class TConstTraverser : public TIntermTraverser { +public: + TConstTraverser(const TConstUnionArray& cUnion, bool singleConstParam, TOperator constructType, const TType& t) + : unionArray(cUnion), type(t), + constructorType(constructType), singleConstantParam(singleConstParam), error(false), isMatrix(false), + matrixCols(0), matrixRows(0) { index = 0; tOp = EOpNull; } + + virtual void visitConstantUnion(TIntermConstantUnion* node); + virtual bool visitAggregate(TVisit, TIntermAggregate* node); + + int index; + TConstUnionArray unionArray; + TOperator tOp; + const TType& type; + TOperator constructorType; + bool singleConstantParam; + bool error; + int size; // size of the constructor ( 4 for vec4) + bool isMatrix; + int matrixCols; + int matrixRows; + +protected: + TConstTraverser(TConstTraverser&); + TConstTraverser& operator=(TConstTraverser&); +}; + +bool TConstTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node) +{ + if (! node->isConstructor() && node->getOp() != EOpComma) { + error = true; + + return false; + } + + bool flag = node->getSequence().size() == 1 && node->getSequence()[0]->getAsTyped()->getAsConstantUnion(); + if (flag) { + singleConstantParam = true; + constructorType = node->getOp(); + size = node->getType().computeNumComponents(); + + if (node->getType().isMatrix()) { + isMatrix = true; + matrixCols = node->getType().getMatrixCols(); + matrixRows = node->getType().getMatrixRows(); + } + } + + for (TIntermSequence::iterator p = node->getSequence().begin(); + p != node->getSequence().end(); p++) { + + if (node->getOp() == EOpComma) + index = 0; + + (*p)->traverse(this); + } + if (flag) + { + singleConstantParam = false; + constructorType = EOpNull; + size = 0; + isMatrix = false; + matrixCols = 0; + matrixRows = 0; + } + + return false; +} + +void TConstTraverser::visitConstantUnion(TIntermConstantUnion* node) +{ + TConstUnionArray leftUnionArray(unionArray); + int instanceSize = type.computeNumComponents(); + + if (index >= instanceSize) + return; + + if (! singleConstantParam) { + int rightUnionSize = node->getType().computeNumComponents(); + + const TConstUnionArray& rightUnionArray = node->getConstArray(); + for (int i = 0; i < rightUnionSize; i++) { + if (index >= instanceSize) + return; + leftUnionArray[index] = rightUnionArray[i]; + + index++; + } + } else { + int endIndex = index + size; + const TConstUnionArray& rightUnionArray = node->getConstArray(); + if (! isMatrix) { + int count = 0; + int nodeComps = node->getType().computeNumComponents(); + for (int i = index; i < endIndex; i++) { + if (i >= instanceSize) + return; + + leftUnionArray[i] = rightUnionArray[count]; + + (index)++; + + if (nodeComps > 1) + count++; + } + } else { + // constructing a matrix, but from what? 
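+ // Per the GLSL constructor rules, the code below distinguishes three cases:
+ // a matrix argument copies its overlapping elements and takes the rest from
+ // the identity matrix (e.g. mat4(mat2(...))), a single scalar lands on the
+ // diagonal with 0.0 elsewhere (e.g. mat3(2.0)), and any other component list
+ // is consumed in column-major order (e.g. mat2(a, b, c, d)).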
+ if (node->isMatrix()) { + // Matrix from a matrix; this has the outer matrix, node is the argument matrix. + // Traverse the outer, potentially bigger matrix, fill in missing pieces with the + // identity matrix. + for (int c = 0; c < matrixCols; ++c) { + for (int r = 0; r < matrixRows; ++r) { + int targetOffset = index + c * matrixRows + r; + if (r < node->getType().getMatrixRows() && c < node->getType().getMatrixCols()) { + int srcOffset = c * node->getType().getMatrixRows() + r; + leftUnionArray[targetOffset] = rightUnionArray[srcOffset]; + } else if (r == c) + leftUnionArray[targetOffset].setDConst(1.0); + else + leftUnionArray[targetOffset].setDConst(0.0); + } + } + } else { + // matrix from vector or scalar + int count = 0; + const int startIndex = index; + int nodeComps = node->getType().computeNumComponents(); + for (int i = startIndex; i < endIndex; i++) { + if (i >= instanceSize) + return; + if (nodeComps == 1) { + // If there is a single scalar parameter to a matrix + // constructor, it is used to initialize all the + // components on the matrix's diagonal, with the + // remaining components initialized to 0.0. + if (i == startIndex || (i - startIndex) % (matrixRows + 1) == 0 ) + leftUnionArray[i] = rightUnionArray[count]; + else + leftUnionArray[i].setDConst(0.0); + } else { + // construct the matrix in column-major order, from + // the components provided, in order + leftUnionArray[i] = rightUnionArray[count]; + } + + index++; + + if (nodeComps > 1) + count++; + } + } + } + } +} + +bool TIntermediate::parseConstTree(TIntermNode* root, TConstUnionArray unionArray, TOperator constructorType, const TType& t, bool singleConstantParam) +{ + if (root == 0) + return false; + + TConstTraverser it(unionArray, singleConstantParam, constructorType, t); + + root->traverse(&it); + if (it.error) + return true; + else + return false; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/parseVersions.h b/src/third_party/glslang/glslang/MachineIndependent/parseVersions.h new file mode 100644 index 0000000..7248354 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/parseVersions.h @@ -0,0 +1,245 @@ +// +// Copyright (C) 2015-2018 Google, Inc. +// Copyright (C) 2017 ARM Limited. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// This is implemented in Versions.cpp + +#ifndef _PARSE_VERSIONS_INCLUDED_ +#define _PARSE_VERSIONS_INCLUDED_ + +#include "../Public/ShaderLang.h" +#include "../Include/InfoSink.h" +#include "Scan.h" + +#include + +namespace glslang { + +// +// Base class for parse helpers. +// This just has version-related information and checking. +// This class should be sufficient for preprocessing. +// +class TParseVersions { +public: + TParseVersions(TIntermediate& interm, int version, EProfile profile, + const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink, + bool forwardCompatible, EShMessages messages) + : +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + forwardCompatible(forwardCompatible), + profile(profile), +#endif + infoSink(infoSink), version(version), + language(language), + spvVersion(spvVersion), + intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { } + virtual ~TParseVersions() { } + void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc); + void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc); +#ifdef GLSLANG_WEB + const EProfile profile = EEsProfile; + bool isEsProfile() const { return true; } + void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc) + { + if (! (EEsProfile & profileMask)) + error(loc, "not supported with this profile:", featureDesc, ProfileName(profile)); + } + void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, + const char* const extensions[], const char* featureDesc) + { + if ((EEsProfile & profileMask) && (minVersion == 0 || version < minVersion)) + error(loc, "not supported for this version or the enabled extensions", featureDesc, ""); + } + void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, + const char* featureDesc) + { + profileRequires(loc, profileMask, minVersion, extension ? 
1 : 0, &extension, featureDesc); + } + void initializeExtensionBehavior() { } + void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc) { } + void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc) { } + void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc) { } + void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc) { } + TExtensionBehavior getExtensionBehavior(const char*) { return EBhMissing; } + bool extensionTurnedOn(const char* const extension) { return false; } + bool extensionsTurnedOn(int numExtensions, const char* const extensions[]) { return false; } + void updateExtensionBehavior(int line, const char* const extension, const char* behavior) { } + void updateExtensionBehavior(const char* const extension, TExtensionBehavior) { } + void checkExtensionStage(const TSourceLoc&, const char* const extension) { } + void extensionRequires(const TSourceLoc&, const char* const extension, const char* behavior) { } + void fullIntegerCheck(const TSourceLoc&, const char* op) { } + void doubleCheck(const TSourceLoc&, const char* op) { } + bool float16Arithmetic() { return false; } + void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { } + bool int16Arithmetic() { return false; } + void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { } + bool int8Arithmetic() { return false; } + void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { } + void int64Check(const TSourceLoc&, const char* op, bool builtIn = false) { } + void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false) { } + void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false) { } + bool relaxedErrors() const { return false; } + bool suppressWarnings() const { return true; } + bool isForwardCompatible() const { return false; } +#else +#ifdef GLSLANG_ANGLE + const bool forwardCompatible = true; + const EProfile profile = ECoreProfile; +#else + bool forwardCompatible; // true if errors are to be given for use of deprecated features + EProfile profile; // the declared profile in the shader (core by default) +#endif + bool isEsProfile() const { return profile == EEsProfile; } + void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc); + void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, + const char* const extensions[], const char* featureDesc); + void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, + const char* featureDesc); + virtual void initializeExtensionBehavior(); + virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc); + virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc); + virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc); + virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc); + virtual TExtensionBehavior getExtensionBehavior(const char*); + virtual bool extensionTurnedOn(const char* const extension); + virtual bool extensionsTurnedOn(int numExtensions, 
const char* const extensions[]); + virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior); + virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior); + virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], + const char* featureDesc); + virtual void checkExtensionStage(const TSourceLoc&, const char* const extension); + virtual void extensionRequires(const TSourceLoc&, const char* const extension, const char* behavior); + virtual void fullIntegerCheck(const TSourceLoc&, const char* op); + + virtual void unimplemented(const TSourceLoc&, const char* featureDesc); + virtual void doubleCheck(const TSourceLoc&, const char* op); + virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual bool float16Arithmetic(); + virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc); + virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual bool int16Arithmetic(); + virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc); + virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual bool int8Arithmetic(); + virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc); + virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false); + virtual void intcoopmatCheck(const TSourceLoc&, const char *op, bool builtIn = false); + bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; } + bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; } + bool isForwardCompatible() const { return forwardCompatible; } +#endif // GLSLANG_WEB + virtual void spvRemoved(const TSourceLoc&, const char* op); + virtual void vulkanRemoved(const TSourceLoc&, const char* op); + virtual void requireVulkan(const TSourceLoc&, const char* op); + virtual void requireSpv(const TSourceLoc&, const char* op); + virtual void requireSpv(const TSourceLoc&, const char *op, unsigned int version); + + +#if defined(GLSLANG_WEB) && !defined(GLSLANG_WEB_DEVEL) + void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { addError(); } + void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { } + void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) { addError(); } + void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) 
{ } +#else + virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) = 0; + virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) = 0; + virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) = 0; + virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken, + const char* szExtraInfoFormat, ...) = 0; +#endif + + void addError() { ++numErrors; } + int getNumErrors() const { return numErrors; } + + void setScanner(TInputScanner* scanner) { currentScanner = scanner; } + TInputScanner* getScanner() const { return currentScanner; } + const TSourceLoc& getCurrentLoc() const { return currentScanner->getSourceLoc(); } + void setCurrentLine(int line) { currentScanner->setLine(line); } + void setCurrentColumn(int col) { currentScanner->setColumn(col); } + void setCurrentSourceName(const char* name) { currentScanner->setFile(name); } + void setCurrentString(int string) { currentScanner->setString(string); } + + void getPreamble(std::string&); +#ifdef ENABLE_HLSL + bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; } + bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; } + bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; } +#else + bool isReadingHLSL() const { return false; } +#endif + + TInfoSink& infoSink; + + // compilation mode + int version; // version, updated by #version in the shader + EShLanguage language; // really the stage + SpvVersion spvVersion; + TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree + +protected: + TMap extensionBehavior; // for each extension string, what its current behavior is + TMap extensionMinSpv; // for each extension string, store minimum spirv required + EShMessages messages; // errors/warnings/rule-sets + int numErrors; // number of compile-time errors encountered + TInputScanner* currentScanner; + +private: + explicit TParseVersions(const TParseVersions&); + TParseVersions& operator=(const TParseVersions&); +}; + +} // end namespace glslang + +#endif // _PARSE_VERSIONS_INCLUDED_ diff --git a/src/third_party/glslang/glslang/MachineIndependent/pch.h b/src/third_party/glslang/glslang/MachineIndependent/pch.h new file mode 100644 index 0000000..6ea3761 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/pch.h @@ -0,0 +1,49 @@ +#ifndef _PCH_H +#define _PCH_H +// +// Copyright (C) 2018 The Khronos Group Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +#include +#include +#include +#include +#include +#include +#include +#include +#include "SymbolTable.h" +#include "ParseHelper.h" +#include "Scan.h" +#include "ScanContext.h" + +#endif /* _PCH_H */ diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp new file mode 100644 index 0000000..a0a626f --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp @@ -0,0 +1,1345 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. 
+ +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ + +#ifndef _CRT_SECURE_NO_WARNINGS +#define _CRT_SECURE_NO_WARNINGS +#endif + +#include +#include +#include +#include +#include + +#include "PpContext.h" +#include "PpTokens.h" + +namespace glslang { + +// Handle #define +int TPpContext::CPPdefine(TPpToken* ppToken) +{ + MacroSymbol mac; + + // get the macro name + int token = scanToken(ppToken); + if (token != PpAtomIdentifier) { + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#define", ""); + return token; + } + if (ppToken->loc.string >= 0) { + // We are in user code; check for reserved name use: + parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#define"); + } + + // save the macro name + const int defAtom = atomStrings.getAddAtom(ppToken->name); + TSourceLoc defineLoc = ppToken->loc; // because ppToken might go to the next line before we report errors + + // gather parameters to the macro, between (...) 
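+ // For example, "#define CLAMP(x, lo, hi) ..." records the parameter atoms
+ // {x, lo, hi}; an empty list "#define F() ..." is accepted, a repeated name as
+ // in "#define G(a, a) ..." is reported as a duplicate macro parameter, and a
+ // space before the parenthesis, as in "#define H (x)", makes an object-like macro.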
+ token = scanToken(ppToken); + if (token == '(' && !ppToken->space) { + mac.functionLike = 1; + do { + token = scanToken(ppToken); + if (mac.args.size() == 0 && token == ')') + break; + if (token != PpAtomIdentifier) { + parseContext.ppError(ppToken->loc, "bad argument", "#define", ""); + + return token; + } + const int argAtom = atomStrings.getAddAtom(ppToken->name); + + // check for duplication of parameter name + bool duplicate = false; + for (size_t a = 0; a < mac.args.size(); ++a) { + if (mac.args[a] == argAtom) { + parseContext.ppError(ppToken->loc, "duplicate macro parameter", "#define", ""); + duplicate = true; + break; + } + } + if (! duplicate) + mac.args.push_back(argAtom); + token = scanToken(ppToken); + } while (token == ','); + if (token != ')') { + parseContext.ppError(ppToken->loc, "missing parenthesis", "#define", ""); + + return token; + } + + token = scanToken(ppToken); + } else if (token != '\n' && token != EndOfInput && !ppToken->space) { + parseContext.ppWarn(ppToken->loc, "missing space after macro name", "#define", ""); + + return token; + } + + // record the definition of the macro + while (token != '\n' && token != EndOfInput) { + mac.body.putToken(token, ppToken); + token = scanToken(ppToken); + if (token != '\n' && ppToken->space) + mac.body.putToken(' ', ppToken); + } + + // check for duplicate definition + MacroSymbol* existing = lookupMacroDef(defAtom); + if (existing != nullptr) { + if (! existing->undef) { + // Already defined -- need to make sure they are identical: + // "Two replacement lists are identical if and only if the + // preprocessing tokens in both have the same number, + // ordering, spelling, and white-space separation, where all + // white-space separations are considered identical." + if (existing->functionLike != mac.functionLike) { + parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define", + atomStrings.getString(defAtom)); + } else if (existing->args.size() != mac.args.size()) { + parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define", + atomStrings.getString(defAtom)); + } else { + if (existing->args != mac.args) { + parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", + atomStrings.getString(defAtom)); + } + // set up to compare the two + existing->body.reset(); + mac.body.reset(); + int newToken; + bool firstToken = true; + do { + int oldToken; + TPpToken oldPpToken; + TPpToken newPpToken; + oldToken = existing->body.getToken(parseContext, &oldPpToken); + newToken = mac.body.getToken(parseContext, &newPpToken); + // for the first token, preceding spaces don't matter + if (firstToken) { + newPpToken.space = oldPpToken.space; + firstToken = false; + } + if (oldToken != newToken || oldPpToken != newPpToken) { + parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", + atomStrings.getString(defAtom)); + break; + } + } while (newToken != EndOfInput); + } + } + *existing = mac; + } else + addMacroDef(defAtom, mac); + + return '\n'; +} + +// Handle #undef +int TPpContext::CPPundef(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + if (token != PpAtomIdentifier) { + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#undef", ""); + + return token; + } + + parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#undef"); + + MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name)); + if (macro != nullptr) + macro->undef = 1; + token = 
scanToken(ppToken); + if (token != '\n') + parseContext.ppError(ppToken->loc, "can only be followed by a single macro name", "#undef", ""); + + return token; +} + +// Handle #else +/* Skip forward to appropriate spot. This is used both +** to skip to a #endif after seeing an #else, AND to skip to a #else, +** #elif, or #endif after a #if/#ifdef/#ifndef/#elif test was false. +*/ +int TPpContext::CPPelse(int matchelse, TPpToken* ppToken) +{ + int depth = 0; + int token = scanToken(ppToken); + + while (token != EndOfInput) { + if (token != '#') { + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + + if (token == EndOfInput) + return token; + + token = scanToken(ppToken); + continue; + } + + if ((token = scanToken(ppToken)) != PpAtomIdentifier) + continue; + + int nextAtom = atomStrings.getAtom(ppToken->name); + if (nextAtom == PpAtomIf || nextAtom == PpAtomIfdef || nextAtom == PpAtomIfndef) { + depth++; + if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) { + parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if/#ifdef/#ifndef", ""); + return EndOfInput; + } else { + ifdepth++; + elsetracker++; + } + } else if (nextAtom == PpAtomEndif) { + token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken)); + elseSeen[elsetracker] = false; + --elsetracker; + if (depth == 0) { + // found the #endif we are looking for + if (ifdepth > 0) + --ifdepth; + break; + } + --depth; + --ifdepth; + } else if (matchelse && depth == 0) { + if (nextAtom == PpAtomElse) { + elseSeen[elsetracker] = true; + token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken)); + // found the #else we are looking for + break; + } else if (nextAtom == PpAtomElif) { + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", ""); + /* we decrement ifdepth here, because CPPif will increment + * it and we really want to leave it alone */ + if (ifdepth > 0) { + --ifdepth; + elseSeen[elsetracker] = false; + --elsetracker; + } + + return CPPif(ppToken); + } + } else if (nextAtom == PpAtomElse) { + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#else after #else", "#else", ""); + else + elseSeen[elsetracker] = true; + token = extraTokenCheck(nextAtom, ppToken, scanToken(ppToken)); + } else if (nextAtom == PpAtomElif) { + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", ""); + } + } + + return token; +} + +// Call when there should be no more tokens left on a line. 
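+// For example, "#endif FOO" arrives here with FOO still pending; the stray token is
+// reported ("unexpected tokens following directive", as a warning under relaxed
+// errors, otherwise as an error) and the remainder of the line is skipped.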
+int TPpContext::extraTokenCheck(int contextAtom, TPpToken* ppToken, int token) +{ + if (token != '\n' && token != EndOfInput) { + static const char* message = "unexpected tokens following directive"; + + const char* label; + if (contextAtom == PpAtomElse) + label = "#else"; + else if (contextAtom == PpAtomElif) + label = "#elif"; + else if (contextAtom == PpAtomEndif) + label = "#endif"; + else if (contextAtom == PpAtomIf) + label = "#if"; + else if (contextAtom == PpAtomLine) + label = "#line"; + else + label = ""; + + if (parseContext.relaxedErrors()) + parseContext.ppWarn(ppToken->loc, message, label, ""); + else + parseContext.ppError(ppToken->loc, message, label, ""); + + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + } + + return token; +} + +enum eval_prec { + MIN_PRECEDENCE, + COND, LOGOR, LOGAND, OR, XOR, AND, EQUAL, RELATION, SHIFT, ADD, MUL, UNARY, + MAX_PRECEDENCE +}; + +namespace { + + int op_logor(int a, int b) { return a || b; } + int op_logand(int a, int b) { return a && b; } + int op_or(int a, int b) { return a | b; } + int op_xor(int a, int b) { return a ^ b; } + int op_and(int a, int b) { return a & b; } + int op_eq(int a, int b) { return a == b; } + int op_ne(int a, int b) { return a != b; } + int op_ge(int a, int b) { return a >= b; } + int op_le(int a, int b) { return a <= b; } + int op_gt(int a, int b) { return a > b; } + int op_lt(int a, int b) { return a < b; } + int op_shl(int a, int b) { return a << b; } + int op_shr(int a, int b) { return a >> b; } + int op_add(int a, int b) { return a + b; } + int op_sub(int a, int b) { return a - b; } + int op_mul(int a, int b) { return a * b; } + int op_div(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a / b; } + int op_mod(int a, int b) { return a == INT_MIN && b == -1 ? 0 : a % b; } + int op_pos(int a) { return a; } + int op_neg(int a) { return -a; } + int op_cmpl(int a) { return ~a; } + int op_not(int a) { return !a; } + +}; + +struct TBinop { + int token, precedence, (*op)(int, int); +} binop[] = { + { PpAtomOr, LOGOR, op_logor }, + { PpAtomAnd, LOGAND, op_logand }, + { '|', OR, op_or }, + { '^', XOR, op_xor }, + { '&', AND, op_and }, + { PpAtomEQ, EQUAL, op_eq }, + { PpAtomNE, EQUAL, op_ne }, + { '>', RELATION, op_gt }, + { PpAtomGE, RELATION, op_ge }, + { '<', RELATION, op_lt }, + { PpAtomLE, RELATION, op_le }, + { PpAtomLeft, SHIFT, op_shl }, + { PpAtomRight, SHIFT, op_shr }, + { '+', ADD, op_add }, + { '-', ADD, op_sub }, + { '*', MUL, op_mul }, + { '/', MUL, op_div }, + { '%', MUL, op_mod }, +}; + +struct TUnop { + int token, (*op)(int); +} unop[] = { + { '+', op_pos }, + { '-', op_neg }, + { '~', op_cmpl }, + { '!', op_not }, +}; + +#define NUM_ELEMENTS(A) (sizeof(A) / sizeof(A[0])) + +int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken* ppToken) +{ + TSourceLoc loc = ppToken->loc; // because we sometimes read the newline before reporting the error + if (token == PpAtomIdentifier) { + if (strcmp("defined", ppToken->name) == 0) { + if (! 
parseContext.isReadingHLSL() && isMacroInput()) { + if (parseContext.relaxedErrors()) + parseContext.ppWarn(ppToken->loc, "nonportable when expanded from macros for preprocessor expression", + "defined", ""); + else + parseContext.ppError(ppToken->loc, "cannot use in preprocessor expression when expanded from macros", + "defined", ""); + } + bool needclose = 0; + token = scanToken(ppToken); + if (token == '(') { + needclose = true; + token = scanToken(ppToken); + } + if (token != PpAtomIdentifier) { + parseContext.ppError(loc, "incorrect directive, expected identifier", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + + MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name)); + res = macro != nullptr ? !macro->undef : 0; + token = scanToken(ppToken); + if (needclose) { + if (token != ')') { + parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + token = scanToken(ppToken); + } + } else { + token = evalToToken(token, shortCircuit, res, err, ppToken); + return eval(token, precedence, shortCircuit, res, err, ppToken); + } + } else if (token == PpAtomConstInt) { + res = ppToken->ival; + token = scanToken(ppToken); + } else if (token == '(') { + token = scanToken(ppToken); + token = eval(token, MIN_PRECEDENCE, shortCircuit, res, err, ppToken); + if (! err) { + if (token != ')') { + parseContext.ppError(loc, "expected ')'", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + token = scanToken(ppToken); + } + } else { + int op = NUM_ELEMENTS(unop) - 1; + for (; op >= 0; op--) { + if (unop[op].token == token) + break; + } + if (op >= 0) { + token = scanToken(ppToken); + token = eval(token, UNARY, shortCircuit, res, err, ppToken); + res = unop[op].op(res); + } else { + parseContext.ppError(loc, "bad expression", "preprocessor evaluation", ""); + err = true; + res = 0; + + return token; + } + } + + token = evalToToken(token, shortCircuit, res, err, ppToken); + + // Perform evaluation of binary operation, if there is one, otherwise we are done. + while (! err) { + if (token == ')' || token == '\n') + break; + int op; + for (op = NUM_ELEMENTS(binop) - 1; op >= 0; op--) { + if (binop[op].token == token) + break; + } + if (op < 0 || binop[op].precedence <= precedence) + break; + int leftSide = res; + + // Setup short-circuiting, needed for ES, unless already in a short circuit. + // (Once in a short-circuit, can't turn off again, until that whole subexpression is done. + if (! shortCircuit) { + if ((token == PpAtomOr && leftSide == 1) || + (token == PpAtomAnd && leftSide == 0)) + shortCircuit = true; + } + + token = scanToken(ppToken); + token = eval(token, binop[op].precedence, shortCircuit, res, err, ppToken); + + if (binop[op].op == op_div || binop[op].op == op_mod) { + if (res == 0) { + parseContext.ppError(loc, "division by 0", "preprocessor evaluation", ""); + res = 1; + } + } + res = binop[op].op(leftSide, res); + } + + return token; +} + +// Expand macros, skipping empty expansions, to get to the first real token in those expansions. 
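+// For example, given "#define VALUE 2", the condition in "#if VALUE > 1" reaches
+// eval() as the identifier VALUE; this loop expands it so eval() sees the literal 2.
+// Use of an undefined macro in the expression is diagnosed only for the es profile,
+// and only when it is not inside a short-circuited subexpression.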
+int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken) +{ + while (token == PpAtomIdentifier && strcmp("defined", ppToken->name) != 0) { + switch (MacroExpand(ppToken, true, false)) { + case MacroExpandNotStarted: + case MacroExpandError: + parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", ""); + err = true; + res = 0; + break; + case MacroExpandStarted: + break; + case MacroExpandUndef: + if (! shortCircuit && parseContext.isEsProfile()) { + const char* message = "undefined macro in expression not allowed in es profile"; + if (parseContext.relaxedErrors()) + parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name); + else + parseContext.ppError(ppToken->loc, message, "preprocessor evaluation", ppToken->name); + } + break; + } + token = scanToken(ppToken); + if (err) + break; + } + + return token; +} + +// Handle #if +int TPpContext::CPPif(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + if (ifdepth >= maxIfNesting || elsetracker >= maxIfNesting) { + parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#if", ""); + return EndOfInput; + } else { + elsetracker++; + ifdepth++; + } + int res = 0; + bool err = false; + token = eval(token, MIN_PRECEDENCE, false, res, err, ppToken); + token = extraTokenCheck(PpAtomIf, ppToken, token); + if (!res && !err) + token = CPPelse(1, ppToken); + + return token; +} + +// Handle #ifdef +int TPpContext::CPPifdef(int defined, TPpToken* ppToken) +{ + int token = scanToken(ppToken); + if (ifdepth > maxIfNesting || elsetracker > maxIfNesting) { + parseContext.ppError(ppToken->loc, "maximum nesting depth exceeded", "#ifdef", ""); + return EndOfInput; + } else { + elsetracker++; + ifdepth++; + } + + if (token != PpAtomIdentifier) { + if (defined) + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifdef", ""); + else + parseContext.ppError(ppToken->loc, "must be followed by macro name", "#ifndef", ""); + } else { + MacroSymbol* macro = lookupMacroDef(atomStrings.getAtom(ppToken->name)); + token = scanToken(ppToken); + if (token != '\n') { + parseContext.ppError(ppToken->loc, "unexpected tokens following #ifdef directive - expected a newline", "#ifdef", ""); + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + } + if (((macro != nullptr && !macro->undef) ? 1 : 0) != defined) + token = CPPelse(1, ppToken); + } + + return token; +} + +// Handle #include ... +// TODO: Handle macro expansions for the header name +int TPpContext::CPPinclude(TPpToken* ppToken) +{ + const TSourceLoc directiveLoc = ppToken->loc; + bool startWithLocalSearch = true; // to additionally include the extra "" paths + int token; + + // Find the first non-whitespace char after #include + int ch = getChar(); + while (ch == ' ' || ch == '\t') { + ch = getChar(); + } + if (ch == '<') { + // style + startWithLocalSearch = false; + token = scanHeaderName(ppToken, '>'); + } else if (ch == '"') { + // "header-name" style + token = scanHeaderName(ppToken, '"'); + } else { + // unexpected, get the full token to generate the error + ungetChar(); + token = scanToken(ppToken); + } + + if (token != PpAtomConstString) { + parseContext.ppError(directiveLoc, "must be followed by a header name", "#include", ""); + return token; + } + + // Make a copy of the name because it will be overwritten by the next token scan. 
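+ // At this point both #include "name" and #include <name> have produced a
+ // PpAtomConstString holding the header name; the two forms differ only in
+ // whether startWithLocalSearch was left true ("" form) or cleared (<> form).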
+ const std::string filename = ppToken->name; + + // See if the directive was well formed + token = scanToken(ppToken); + if (token != '\n') { + if (token == EndOfInput) + parseContext.ppError(ppToken->loc, "expected newline after header name:", "#include", "%s", filename.c_str()); + else + parseContext.ppError(ppToken->loc, "extra content after header name:", "#include", "%s", filename.c_str()); + return token; + } + + // Process well-formed directive + + // Find the inclusion, first look in "Local" ("") paths, if requested, + // otherwise, only search the "System" (<>) paths. + TShader::Includer::IncludeResult* res = nullptr; + if (startWithLocalSearch) + res = includer.includeLocal(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1); + if (res == nullptr || res->headerName.empty()) { + includer.releaseInclude(res); + res = includer.includeSystem(filename.c_str(), currentSourceFile.c_str(), includeStack.size() + 1); + } + + // Process the results + if (res != nullptr && !res->headerName.empty()) { + if (res->headerData != nullptr && res->headerLength > 0) { + // path for processing one or more tokens from an included header, hand off 'res' + const bool forNextLine = parseContext.lineDirectiveShouldSetNextLine(); + std::ostringstream prologue; + std::ostringstream epilogue; + prologue << "#line " << forNextLine << " " << "\"" << res->headerName << "\"\n"; + epilogue << (res->headerData[res->headerLength - 1] == '\n'? "" : "\n") << + "#line " << directiveLoc.line + forNextLine << " " << directiveLoc.getStringNameOrNum() << "\n"; + pushInput(new TokenizableIncludeFile(directiveLoc, prologue.str(), res, epilogue.str(), this)); + parseContext.intermediate.addIncludeText(res->headerName.c_str(), res->headerData, res->headerLength); + // There's no "current" location anymore. + parseContext.setCurrentColumn(0); + } else { + // things are okay, but there is nothing to process + includer.releaseInclude(res); + } + } else { + // error path, clean up + std::string message = + res != nullptr ? std::string(res->headerData, res->headerLength) + : std::string("Could not process include directive"); + parseContext.ppError(directiveLoc, message.c_str(), "#include", "for header name: %s", filename.c_str()); + includer.releaseInclude(res); + } + + return token; +} + +// Handle #line +int TPpContext::CPPline(TPpToken* ppToken) +{ + // "#line must have, after macro substitution, one of the following forms: + // "#line line + // "#line line source-string-number" + + int token = scanToken(ppToken); + const TSourceLoc directiveLoc = ppToken->loc; + if (token == '\n') { + parseContext.ppError(ppToken->loc, "must by followed by an integral literal", "#line", ""); + return token; + } + + int lineRes = 0; // Line number after macro expansion. + int lineToken = 0; + bool hasFile = false; + int fileRes = 0; // Source file number after macro expansion. + const char* sourceName = nullptr; // Optional source file name. + bool lineErr = false; + bool fileErr = false; + disableEscapeSequences = true; + token = eval(token, MIN_PRECEDENCE, false, lineRes, lineErr, ppToken); + disableEscapeSequences = false; + if (! 
lineErr) { + lineToken = lineRes; + if (token == '\n') + ++lineRes; + + if (parseContext.lineDirectiveShouldSetNextLine()) + --lineRes; + parseContext.setCurrentLine(lineRes); + + if (token != '\n') { +#ifndef GLSLANG_WEB + if (token == PpAtomConstString) { + parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line"); + // We need to save a copy of the string instead of pointing + // to the name field of the token since the name field + // will likely be overwritten by the next token scan. + sourceName = atomStrings.getString(atomStrings.getAddAtom(ppToken->name)); + parseContext.setCurrentSourceName(sourceName); + hasFile = true; + token = scanToken(ppToken); + } else +#endif + { + token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken); + if (! fileErr) { + parseContext.setCurrentString(fileRes); + hasFile = true; + } + } + } + } + if (!fileErr && !lineErr) { + parseContext.notifyLineDirective(directiveLoc.line, lineToken, hasFile, fileRes, sourceName); + } + token = extraTokenCheck(PpAtomLine, ppToken, token); + + return token; +} + +// Handle #error +int TPpContext::CPPerror(TPpToken* ppToken) +{ + disableEscapeSequences = true; + int token = scanToken(ppToken); + disableEscapeSequences = false; + std::string message; + TSourceLoc loc = ppToken->loc; + + while (token != '\n' && token != EndOfInput) { + if (token == PpAtomConstInt16 || token == PpAtomConstUint16 || + token == PpAtomConstInt || token == PpAtomConstUint || + token == PpAtomConstInt64 || token == PpAtomConstUint64 || + token == PpAtomConstFloat16 || + token == PpAtomConstFloat || token == PpAtomConstDouble) { + message.append(ppToken->name); + } else if (token == PpAtomIdentifier || token == PpAtomConstString) { + message.append(ppToken->name); + } else { + message.append(atomStrings.getString(token)); + } + message.append(" "); + token = scanToken(ppToken); + } + parseContext.notifyErrorDirective(loc.line, message.c_str()); + // store this msg into the shader's information log..set the Compile Error flag!!!! 
+ parseContext.ppError(loc, message.c_str(), "#error", ""); + + return '\n'; +} + +// Handle #pragma +int TPpContext::CPPpragma(TPpToken* ppToken) +{ + char SrcStrName[2]; + TVector tokens; + + TSourceLoc loc = ppToken->loc; // because we go to the next line before processing + int token = scanToken(ppToken); + while (token != '\n' && token != EndOfInput) { + switch (token) { + case PpAtomIdentifier: + case PpAtomConstInt: + case PpAtomConstUint: + case PpAtomConstInt64: + case PpAtomConstUint64: + case PpAtomConstInt16: + case PpAtomConstUint16: + case PpAtomConstFloat: + case PpAtomConstDouble: + case PpAtomConstFloat16: + tokens.push_back(ppToken->name); + break; + default: + SrcStrName[0] = (char)token; + SrcStrName[1] = '\0'; + tokens.push_back(SrcStrName); + } + token = scanToken(ppToken); + } + + if (token == EndOfInput) + parseContext.ppError(loc, "directive must end with a newline", "#pragma", ""); + else + parseContext.handlePragma(loc, tokens); + + return token; +} + +// #version: This is just for error checking: the version and profile are decided before preprocessing starts +int TPpContext::CPPversion(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + + if (errorOnVersion || versionSeen) { + if (parseContext.isReadingHLSL()) + parseContext.ppError(ppToken->loc, "invalid preprocessor command", "#version", ""); + else + parseContext.ppError(ppToken->loc, "must occur first in shader", "#version", ""); + } + versionSeen = true; + + if (token == '\n') { + parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", ""); + + return token; + } + + if (token != PpAtomConstInt) + parseContext.ppError(ppToken->loc, "must be followed by version number", "#version", ""); + + ppToken->ival = atoi(ppToken->name); + int versionNumber = ppToken->ival; + int line = ppToken->loc.line; + token = scanToken(ppToken); + + if (token == '\n') { + parseContext.notifyVersion(line, versionNumber, nullptr); + return token; + } else { + int profileAtom = atomStrings.getAtom(ppToken->name); + if (profileAtom != PpAtomCore && + profileAtom != PpAtomCompatibility && + profileAtom != PpAtomEs) + parseContext.ppError(ppToken->loc, "bad profile name; use es, core, or compatibility", "#version", ""); + parseContext.notifyVersion(line, versionNumber, ppToken->name); + token = scanToken(ppToken); + + if (token == '\n') + return token; + else + parseContext.ppError(ppToken->loc, "bad tokens following profile -- expected newline", "#version", ""); + } + + return token; +} + +// Handle #extension +int TPpContext::CPPextension(TPpToken* ppToken) +{ + int line = ppToken->loc.line; + int token = scanToken(ppToken); + char extensionName[MaxTokenLength + 1]; + + if (token=='\n') { + parseContext.ppError(ppToken->loc, "extension name not specified", "#extension", ""); + return token; + } + + if (token != PpAtomIdentifier) + parseContext.ppError(ppToken->loc, "extension name expected", "#extension", ""); + + snprintf(extensionName, sizeof(extensionName), "%s", ppToken->name); + + token = scanToken(ppToken); + if (token != ':') { + parseContext.ppError(ppToken->loc, "':' missing after extension name", "#extension", ""); + return token; + } + + token = scanToken(ppToken); + if (token != PpAtomIdentifier) { + parseContext.ppError(ppToken->loc, "behavior for extension not specified", "#extension", ""); + return token; + } + + parseContext.updateExtensionBehavior(line, extensionName, ppToken->name); + parseContext.notifyExtensionDirective(line, extensionName, ppToken->name); + + token = 
scanToken(ppToken); + if (token == '\n') + return token; + else + parseContext.ppError(ppToken->loc, "extra tokens -- expected newline", "#extension",""); + + return token; +} + +int TPpContext::readCPPline(TPpToken* ppToken) +{ + int token = scanToken(ppToken); + + if (token == PpAtomIdentifier) { + switch (atomStrings.getAtom(ppToken->name)) { + case PpAtomDefine: + token = CPPdefine(ppToken); + break; + case PpAtomElse: + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#else after #else", "#else", ""); + elseSeen[elsetracker] = true; + if (ifdepth == 0) + parseContext.ppError(ppToken->loc, "mismatched statements", "#else", ""); + token = extraTokenCheck(PpAtomElse, ppToken, scanToken(ppToken)); + token = CPPelse(0, ppToken); + break; + case PpAtomElif: + if (ifdepth == 0) + parseContext.ppError(ppToken->loc, "mismatched statements", "#elif", ""); + if (elseSeen[elsetracker]) + parseContext.ppError(ppToken->loc, "#elif after #else", "#elif", ""); + // this token is really a dont care, but we still need to eat the tokens + token = scanToken(ppToken); + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + token = CPPelse(0, ppToken); + break; + case PpAtomEndif: + if (ifdepth == 0) + parseContext.ppError(ppToken->loc, "mismatched statements", "#endif", ""); + else { + elseSeen[elsetracker] = false; + --elsetracker; + --ifdepth; + } + token = extraTokenCheck(PpAtomEndif, ppToken, scanToken(ppToken)); + break; + case PpAtomIf: + token = CPPif(ppToken); + break; + case PpAtomIfdef: + token = CPPifdef(1, ppToken); + break; + case PpAtomIfndef: + token = CPPifdef(0, ppToken); + break; + case PpAtomLine: + token = CPPline(ppToken); + break; +#ifndef GLSLANG_WEB + case PpAtomInclude: + if(!parseContext.isReadingHLSL()) { + parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include"); + } + token = CPPinclude(ppToken); + break; + case PpAtomPragma: + token = CPPpragma(ppToken); + break; +#endif + case PpAtomUndef: + token = CPPundef(ppToken); + break; + case PpAtomError: + token = CPPerror(ppToken); + break; + case PpAtomVersion: + token = CPPversion(ppToken); + break; + case PpAtomExtension: + token = CPPextension(ppToken); + break; + default: + parseContext.ppError(ppToken->loc, "invalid directive:", "#", ppToken->name); + break; + } + } else if (token != '\n' && token != EndOfInput) + parseContext.ppError(ppToken->loc, "invalid directive", "#", ""); + + while (token != '\n' && token != EndOfInput) + token = scanToken(ppToken); + + return token; +} + +// Context-dependent parsing of a #include . +// Assumes no macro expansions etc. are being done; the name is just on the current input. +// Always creates a name and returns PpAtomicConstString, unless we run out of input. +int TPpContext::scanHeaderName(TPpToken* ppToken, char delimit) +{ + bool tooLong = false; + + if (inputStack.empty()) + return EndOfInput; + + int len = 0; + ppToken->name[0] = '\0'; + do { + int ch = inputStack.back()->getch(); + + // done yet? + if (ch == delimit) { + ppToken->name[len] = '\0'; + if (tooLong) + parseContext.ppError(ppToken->loc, "header name too long", "", ""); + return PpAtomConstString; + } else if (ch == EndOfInput) + return EndOfInput; + + // found a character to expand the name with + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + else + tooLong = true; + } while (true); +} + +// Macro-expand a macro argument 'arg' to create 'expandedArg'. +// Does not replace 'arg'. 
+// Returns nullptr if no expanded argument is created. +TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken* ppToken, bool newLineOkay) +{ + // expand the argument + TokenStream* expandedArg = new TokenStream; + pushInput(new tMarkerInput(this)); + pushTokenStreamInput(arg); + int token; + while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) { + token = tokenPaste(token, *ppToken); + if (token == PpAtomIdentifier) { + switch (MacroExpand(ppToken, false, newLineOkay)) { + case MacroExpandNotStarted: + break; + case MacroExpandError: + // toss the rest of the pushed-input argument by scanning until tMarkerInput + while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput) + ; + break; + case MacroExpandStarted: + case MacroExpandUndef: + continue; + } + } + if (token == tMarkerInput::marker || token == EndOfInput) + break; + expandedArg->putToken(token, ppToken); + } + + if (token != tMarkerInput::marker) { + // Error, or MacroExpand ate the marker, so had bad input, recover + delete expandedArg; + expandedArg = nullptr; + } + + return expandedArg; +} + +// +// Return the next token for a macro expansion, handling macro arguments, +// whose semantics are dependent on being adjacent to ##. +// +int TPpContext::tMacroInput::scan(TPpToken* ppToken) +{ + int token; + do { + token = mac->body.getToken(pp->parseContext, ppToken); + } while (token == ' '); // handle white space in macro + + // Hash operators basically turn off a round of macro substitution + // (the round done on the argument before the round done on the RHS of the + // macro definition): + // + // "A parameter in the replacement list, unless preceded by a # or ## + // preprocessing token or followed by a ## preprocessing token (see below), + // is replaced by the corresponding argument after all macros contained + // therein have been expanded." + // + // "If, in the replacement list, a parameter is immediately preceded or + // followed by a ## preprocessing token, the parameter is replaced by the + // corresponding argument's preprocessing token sequence." 
+ + bool pasting = false; + if (postpaste) { + // don't expand next token + pasting = true; + postpaste = false; + } + + if (prepaste) { + // already know we should be on a ##, verify + assert(token == PpAtomPaste); + prepaste = false; + postpaste = true; + } + + // see if are preceding a ## + if (mac->body.peekUntokenizedPasting()) { + prepaste = true; + pasting = true; + } + + // HLSL does expand macros before concatenation + if (pasting && pp->parseContext.isReadingHLSL()) + pasting = false; + + // TODO: preprocessor: properly handle whitespace (or lack of it) between tokens when expanding + if (token == PpAtomIdentifier) { + int i; + for (i = (int)mac->args.size() - 1; i >= 0; i--) + if (strcmp(pp->atomStrings.getString(mac->args[i]), ppToken->name) == 0) + break; + if (i >= 0) { + TokenStream* arg = expandedArgs[i]; + if (arg == nullptr || pasting) + arg = args[i]; + pp->pushTokenStreamInput(*arg, prepaste); + + return pp->scanToken(ppToken); + } + } + + if (token == EndOfInput) + mac->busy = 0; + + return token; +} + +// return a textual zero, for scanning a macro that was never defined +int TPpContext::tZeroInput::scan(TPpToken* ppToken) +{ + if (done) + return EndOfInput; + + ppToken->name[0] = '0'; + ppToken->name[1] = 0; + ppToken->ival = 0; + ppToken->space = false; + done = true; + + return PpAtomConstInt; +} + +// +// Check a token to see if it is a macro that should be expanded: +// - If it is, and defined, push a tInput that will produce the appropriate +// expansion and return MacroExpandStarted. +// - If it is, but undefined, and expandUndef is requested, push a tInput +// that will expand to 0 and return MacroExpandUndef. +// - Otherwise, there is no expansion, and there are two cases: +// * It might be okay there is no expansion, and no specific error was +// detected. Returns MacroExpandNotStarted. +// * The expansion was started, but could not be completed, due to an error +// that cannot be recovered from. Returns MacroExpandError. +// +MacroExpandResult TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay) +{ + ppToken->space = false; + int macroAtom = atomStrings.getAtom(ppToken->name); + switch (macroAtom) { + case PpAtomLineMacro: + // Arguments which are macro have been replaced in the first stage. + if (ppToken->ival == 0) + ppToken->ival = parseContext.getCurrentLoc().line; + snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival); + UngetToken(PpAtomConstInt, ppToken); + return MacroExpandStarted; + + case PpAtomFileMacro: { + if (parseContext.getCurrentLoc().name) + parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based __FILE__"); + ppToken->ival = parseContext.getCurrentLoc().string; + snprintf(ppToken->name, sizeof(ppToken->name), "%s", ppToken->loc.getStringNameOrNum().c_str()); + UngetToken(PpAtomConstInt, ppToken); + return MacroExpandStarted; + } + + case PpAtomVersionMacro: + ppToken->ival = parseContext.version; + snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival); + UngetToken(PpAtomConstInt, ppToken); + return MacroExpandStarted; + + default: + break; + } + + MacroSymbol* macro = macroAtom == 0 ? nullptr : lookupMacroDef(macroAtom); + + // no recursive expansions + if (macro != nullptr && macro->busy) + return MacroExpandNotStarted; + + // not expanding undefined macros + if ((macro == nullptr || macro->undef) && ! 
expandUndef) + return MacroExpandNotStarted; + + // 0 is the value of an undefined macro + if ((macro == nullptr || macro->undef) && expandUndef) { + pushInput(new tZeroInput(this)); + return MacroExpandUndef; + } + + tMacroInput *in = new tMacroInput(this); + + TSourceLoc loc = ppToken->loc; // in case we go to the next line before discovering the error + in->mac = macro; + if (macro->functionLike) { + // We don't know yet if this will be a successful call of a + // function-like macro; need to look for a '(', but without trashing + // the passed in ppToken, until we know we are no longer speculative. + TPpToken parenToken; + int token = scanToken(&parenToken); + if (newLineOkay) { + while (token == '\n') + token = scanToken(&parenToken); + } + if (token != '(') { + // Function-like macro called with object-like syntax: okay, don't expand. + // (We ate exactly one token that might not be white space; put it back. + UngetToken(token, &parenToken); + delete in; + return MacroExpandNotStarted; + } + in->args.resize(in->mac->args.size()); + for (size_t i = 0; i < in->mac->args.size(); i++) + in->args[i] = new TokenStream; + in->expandedArgs.resize(in->mac->args.size()); + for (size_t i = 0; i < in->mac->args.size(); i++) + in->expandedArgs[i] = nullptr; + size_t arg = 0; + bool tokenRecorded = false; + do { + TVector nestStack; + while (true) { + token = scanToken(ppToken); + if (token == EndOfInput || token == tMarkerInput::marker) { + parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom)); + delete in; + return MacroExpandError; + } + if (token == '\n') { + if (! newLineOkay) { + parseContext.ppError(loc, "End of line in macro substitution:", "macro expansion", atomStrings.getString(macroAtom)); + delete in; + return MacroExpandError; + } + continue; + } + if (token == '#') { + parseContext.ppError(ppToken->loc, "unexpected '#'", "macro expansion", atomStrings.getString(macroAtom)); + delete in; + return MacroExpandError; + } + if (in->mac->args.size() == 0 && token != ')') + break; + if (nestStack.size() == 0 && (token == ',' || token == ')')) + break; + if (token == '(') + nestStack.push_back(')'); + else if (token == '{' && parseContext.isReadingHLSL()) + nestStack.push_back('}'); + else if (nestStack.size() > 0 && token == nestStack.back()) + nestStack.pop_back(); + + //Macro replacement list is expanded in the last stage. 
+ if (atomStrings.getAtom(ppToken->name) == PpAtomLineMacro) + ppToken->ival = parseContext.getCurrentLoc().line; + + in->args[arg]->putToken(token, ppToken); + tokenRecorded = true; + } + // end of single argument scan + + if (token == ')') { + // closing paren of call + if (in->mac->args.size() == 1 && !tokenRecorded) + break; + arg++; + break; + } + arg++; + } while (arg < in->mac->args.size()); + // end of all arguments scan + + if (arg < in->mac->args.size()) + parseContext.ppError(loc, "Too few args in Macro", "macro expansion", atomStrings.getString(macroAtom)); + else if (token != ')') { + // Error recover code; find end of call, if possible + int depth = 0; + while (token != EndOfInput && (depth > 0 || token != ')')) { + if (token == ')' || token == '}') + depth--; + token = scanToken(ppToken); + if (token == '(' || token == '{') + depth++; + } + + if (token == EndOfInput) { + parseContext.ppError(loc, "End of input in macro", "macro expansion", atomStrings.getString(macroAtom)); + delete in; + return MacroExpandError; + } + parseContext.ppError(loc, "Too many args in macro", "macro expansion", atomStrings.getString(macroAtom)); + } + + // We need both expanded and non-expanded forms of the argument, for whether or + // not token pasting will be applied later when the argument is consumed next to ##. + for (size_t i = 0; i < in->mac->args.size(); i++) + in->expandedArgs[i] = PrescanMacroArg(*in->args[i], ppToken, newLineOkay); + } + + pushInput(in); + macro->busy = 1; + macro->body.reset(); + + return MacroExpandStarted; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp new file mode 100644 index 0000000..06c2333 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp @@ -0,0 +1,181 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+\****************************************************************************/ + +#ifndef _CRT_SECURE_NO_WARNINGS +#define _CRT_SECURE_NO_WARNINGS +#endif + +#include <cassert> +#include <cstdlib> +#include <cstring> + +#include "PpContext.h" +#include "PpTokens.h" + +namespace { + +using namespace glslang; + +const struct { + int val; + const char* str; +} tokens[] = { + + { PpAtomAddAssign, "+=" }, + { PpAtomSubAssign, "-=" }, + { PpAtomMulAssign, "*=" }, + { PpAtomDivAssign, "/=" }, + { PpAtomModAssign, "%=" }, + + { PpAtomRight, ">>" }, + { PpAtomLeft, "<<" }, + { PpAtomAnd, "&&" }, + { PpAtomOr, "||" }, + { PpAtomXor, "^^" }, + + { PpAtomRightAssign, ">>=" }, + { PpAtomLeftAssign, "<<=" }, + { PpAtomAndAssign, "&=" }, + { PpAtomOrAssign, "|=" }, + { PpAtomXorAssign, "^=" }, + + { PpAtomEQ, "==" }, + { PpAtomNE, "!=" }, + { PpAtomGE, ">=" }, + { PpAtomLE, "<=" }, + + { PpAtomDecrement, "--" }, + { PpAtomIncrement, "++" }, + + { PpAtomColonColon, "::" }, + + { PpAtomDefine, "define" }, + { PpAtomUndef, "undef" }, + { PpAtomIf, "if" }, + { PpAtomElif, "elif" }, + { PpAtomElse, "else" }, + { PpAtomEndif, "endif" }, + { PpAtomIfdef, "ifdef" }, + { PpAtomIfndef, "ifndef" }, + { PpAtomLine, "line" }, + { PpAtomPragma, "pragma" }, + { PpAtomError, "error" }, + + { PpAtomVersion, "version" }, + { PpAtomCore, "core" }, + { PpAtomCompatibility, "compatibility" }, + { PpAtomEs, "es" }, + { PpAtomExtension, "extension" }, + + { PpAtomLineMacro, "__LINE__" }, + { PpAtomFileMacro, "__FILE__" }, + { PpAtomVersionMacro, "__VERSION__" }, + + { PpAtomInclude, "include" }, +}; + +} // end anonymous namespace + +namespace glslang { + +// +// Initialize the atom table. +// +TStringAtomMap::TStringAtomMap() +{ + badToken.assign("<bad token>"); + + // Add single character tokens to the atom table: + const char* s = "~!%^&*()-+=|,.<>/?;:[]{}#\\"; + char t[2]; + + t[1] = '\0'; + while (*s) { + t[0] = *s; + addAtomFixed(t, s[0]); + s++; + } + + // Add multiple character scanner tokens : + for (size_t ii = 0; ii < sizeof(tokens)/sizeof(tokens[0]); ii++) + addAtomFixed(tokens[ii].str, tokens[ii].val); + + nextAtom = PpAtomLast; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp... wait
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+\****************************************************************************/ + +#include +#include + +#include "PpContext.h" + +namespace glslang { + +TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) : + preamble(0), strings(0), previous_token('\n'), parseContext(pc), includer(inclr), inComment(false), + rootFileName(rootFileName), + currentSourceFile(rootFileName), + disableEscapeSequences(false) +{ + ifdepth = 0; + for (elsetracker = 0; elsetracker < maxIfNesting; elsetracker++) + elseSeen[elsetracker] = false; + elsetracker = 0; + + strtodStream.imbue(std::locale::classic()); +} + +TPpContext::~TPpContext() +{ + delete [] preamble; + + // free up the inputStack + while (! inputStack.empty()) + popInput(); +} + +void TPpContext::setInput(TInputScanner& input, bool versionWillBeError) +{ + assert(inputStack.size() == 0); + + pushInput(new tStringInput(this, input)); + + errorOnVersion = versionWillBeError; + versionSeen = false; +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.h b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.h new file mode 100644 index 0000000..714b5ea --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpContext.h @@ -0,0 +1,703 @@ +// +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. 
+ +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ + +#ifndef PPCONTEXT_H +#define PPCONTEXT_H + +#include +#include +#include + +#include "../ParseHelper.h" +#include "PpTokens.h" + +/* windows only pragma */ +#ifdef _MSC_VER + #pragma warning(disable : 4127) +#endif + +namespace glslang { + +class TPpToken { +public: + TPpToken() { clear(); } + void clear() + { + space = false; + i64val = 0; + loc.init(); + name[0] = 0; + } + + // Used for comparing macro definitions, so checks what is relevant for that. + bool operator==(const TPpToken& right) const + { + return space == right.space && + ival == right.ival && dval == right.dval && i64val == right.i64val && + strncmp(name, right.name, MaxTokenLength) == 0; + } + bool operator!=(const TPpToken& right) const { return ! operator==(right); } + + TSourceLoc loc; + // True if a space (for white space or a removed comment) should also be + // recognized, in front of the token returned: + bool space; + // Numeric value of the token: + union { + int ival; + double dval; + long long i64val; + }; + // Text string of the token: + char name[MaxTokenLength + 1]; +}; + +class TStringAtomMap { +// +// Implementation is in PpAtom.cpp +// +// Maintain a bi-directional mapping between relevant preprocessor strings and +// "atoms" which a unique integers (small, contiguous, not hash-like) per string. +// +public: + TStringAtomMap(); + + // Map string -> atom. + // Return 0 if no existing string. 
+ int getAtom(const char* s) const + { + auto it = atomMap.find(s); + return it == atomMap.end() ? 0 : it->second; + } + + // Map a new or existing string -> atom, inventing a new atom if necessary. + int getAddAtom(const char* s) + { + int atom = getAtom(s); + if (atom == 0) { + atom = nextAtom++; + addAtomFixed(s, atom); + } + return atom; + } + + // Map atom -> string. + const char* getString(int atom) const { return stringMap[atom]->c_str(); } + +protected: + TStringAtomMap(TStringAtomMap&); + TStringAtomMap& operator=(TStringAtomMap&); + + TUnorderedMap atomMap; + TVector stringMap; // these point into the TString in atomMap + int nextAtom; + + // Bad source characters can lead to bad atoms, so gracefully handle those by + // pre-filling the table with them (to avoid if tests later). + TString badToken; + + // Add bi-directional mappings: + // - string -> atom + // - atom -> string + void addAtomFixed(const char* s, int atom) + { + auto it = atomMap.insert(std::pair(s, atom)).first; + if (stringMap.size() < (size_t)atom + 1) + stringMap.resize(atom + 100, &badToken); + stringMap[atom] = &it->first; + } +}; + +class TInputScanner; + +enum MacroExpandResult { + MacroExpandNotStarted, // macro not expanded, which might not be an error + MacroExpandError, // a clear error occurred while expanding, no expansion + MacroExpandStarted, // macro expansion process has started + MacroExpandUndef // macro is undefined and will be expanded +}; + +// This class is the result of turning a huge pile of C code communicating through globals +// into a class. This was done to allowing instancing to attain thread safety. +// Don't expect too much in terms of OO design. +class TPpContext { +public: + TPpContext(TParseContextBase&, const std::string& rootFileName, TShader::Includer&); + virtual ~TPpContext(); + + void setPreamble(const char* preamble, size_t length); + + int tokenize(TPpToken& ppToken); + int tokenPaste(int token, TPpToken&); + + class tInput { + public: + tInput(TPpContext* p) : done(false), pp(p) { } + virtual ~tInput() { } + + virtual int scan(TPpToken*) = 0; + virtual int getch() = 0; + virtual void ungetch() = 0; + virtual bool peekPasting() { return false; } // true when about to see ## + virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste + virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define) + virtual bool isMacroInput() { return false; } + + // Will be called when we start reading tokens from this instance + virtual void notifyActivated() {} + // Will be called when we do not read tokens from this instance anymore + virtual void notifyDeleted() {} + protected: + bool done; + TPpContext* pp; + }; + + void setInput(TInputScanner& input, bool versionWillBeError); + + void pushInput(tInput* in) + { + inputStack.push_back(in); + in->notifyActivated(); + } + void popInput() + { + inputStack.back()->notifyDeleted(); + delete inputStack.back(); + inputStack.pop_back(); + } + + // + // From PpTokens.cpp + // + + // Capture the needed parts of a token stream for macro recording/playback. + class TokenStream { + public: + // Manage a stream of these 'Token', which capture the relevant parts + // of a TPpToken, plus its atom. 
+ class Token { + public: + Token(int atom, const TPpToken& ppToken) : + atom(atom), + space(ppToken.space), + i64val(ppToken.i64val), + name(ppToken.name) { } + int get(TPpToken& ppToken) + { + ppToken.clear(); + ppToken.space = space; + ppToken.i64val = i64val; + snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str()); + return atom; + } + bool isAtom(int a) const { return atom == a; } + int getAtom() const { return atom; } + bool nonSpaced() const { return !space; } + protected: + Token() {} + int atom; + bool space; // did a space precede the token? + long long i64val; + TString name; + }; + + TokenStream() : currentPos(0) { } + + void putToken(int token, TPpToken* ppToken); + bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); } + bool peekContinuedPasting(int atom) + { + // This is basically necessary because, for example, the PP + // tokenizer only accepts valid numeric-literals plus suffixes, so + // separates numeric-literals plus bad suffix into two tokens, which + // should get both pasted together as one token when token pasting. + // + // The following code is a bit more generalized than the above example. + if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) { + switch(stream[currentPos].getAtom()) { + case PpAtomConstInt: + case PpAtomConstUint: + case PpAtomConstInt64: + case PpAtomConstUint64: + case PpAtomConstInt16: + case PpAtomConstUint16: + case PpAtomConstFloat: + case PpAtomConstDouble: + case PpAtomConstFloat16: + case PpAtomConstString: + case PpAtomIdentifier: + return true; + default: + break; + } + } + + return false; + } + int getToken(TParseContextBase&, TPpToken*); + bool atEnd() { return currentPos >= stream.size(); } + bool peekTokenizedPasting(bool lastTokenPastes); + bool peekUntokenizedPasting(); + void reset() { currentPos = 0; } + + protected: + TVector stream; + size_t currentPos; + }; + + // + // From Pp.cpp + // + + struct MacroSymbol { + MacroSymbol() : functionLike(0), busy(0), undef(0) { } + TVector args; + TokenStream body; + unsigned functionLike : 1; // 0 means object-like, 1 means function-like + unsigned busy : 1; + unsigned undef : 1; + }; + + typedef TMap TSymbolMap; + TSymbolMap macroDefs; // map atoms to macro definitions + MacroSymbol* lookupMacroDef(int atom) + { + auto existingMacroIt = macroDefs.find(atom); + return (existingMacroIt == macroDefs.end()) ? nullptr : &(existingMacroIt->second); + } + void addMacroDef(int atom, MacroSymbol& macroDef) { macroDefs[atom] = macroDef; } + +protected: + TPpContext(TPpContext&); + TPpContext& operator=(TPpContext&); + + TStringAtomMap atomStrings; + char* preamble; // string to parse, all before line 1 of string 0, it is 0 if no preamble + int preambleLength; + char** strings; // official strings of shader, starting a string 0 line 1 + size_t* lengths; + int numStrings; // how many official strings there are + int currentString; // which string we're currently parsing (-1 for preamble) + + // Scanner data: + int previous_token; + TParseContextBase& parseContext; + + // Get the next token from *stack* of input sources, popping input sources + // that are out of tokens, down until an input source is found that has a token. + // Return EndOfInput when there are no more tokens to be found by doing this. + int scanToken(TPpToken* ppToken) + { + int token = EndOfInput; + + while (! 
inputStack.empty()) { + token = inputStack.back()->scan(ppToken); + if (token != EndOfInput || inputStack.empty()) + break; + popInput(); + } + + return token; + } + int getChar() { return inputStack.back()->getch(); } + void ungetChar() { inputStack.back()->ungetch(); } + bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); } + bool peekContinuedPasting(int a) + { + return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a); + } + bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); } + bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); } + + static const int maxIfNesting = 65; + + int ifdepth; // current #if-#else-#endif nesting in the cpp.c file (pre-processor) + bool elseSeen[maxIfNesting]; // Keep a track of whether an else has been seen at a particular depth + int elsetracker; // #if-#else and #endif constructs...Counter. + + class tMacroInput : public tInput { + public: + tMacroInput(TPpContext* pp) : tInput(pp), prepaste(false), postpaste(false) { } + virtual ~tMacroInput() + { + for (size_t i = 0; i < args.size(); ++i) + delete args[i]; + for (size_t i = 0; i < expandedArgs.size(); ++i) + delete expandedArgs[i]; + } + + virtual int scan(TPpToken*) override; + virtual int getch() override { assert(0); return EndOfInput; } + virtual void ungetch() override { assert(0); } + bool peekPasting() override { return prepaste; } + bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); } + bool endOfReplacementList() override { return mac->body.atEnd(); } + bool isMacroInput() override { return true; } + + MacroSymbol *mac; + TVector args; + TVector expandedArgs; + + protected: + bool prepaste; // true if we are just before ## + bool postpaste; // true if we are right after ## + }; + + class tMarkerInput : public tInput { + public: + tMarkerInput(TPpContext* pp) : tInput(pp) { } + virtual int scan(TPpToken*) override + { + if (done) + return EndOfInput; + done = true; + + return marker; + } + virtual int getch() override { assert(0); return EndOfInput; } + virtual void ungetch() override { assert(0); } + static const int marker = -3; + }; + + class tZeroInput : public tInput { + public: + tZeroInput(TPpContext* pp) : tInput(pp) { } + virtual int scan(TPpToken*) override; + virtual int getch() override { assert(0); return EndOfInput; } + virtual void ungetch() override { assert(0); } + }; + + std::vector inputStack; + bool errorOnVersion; + bool versionSeen; + + // + // from Pp.cpp + // + + // Used to obtain #include content. 
+ TShader::Includer& includer; + + int CPPdefine(TPpToken * ppToken); + int CPPundef(TPpToken * ppToken); + int CPPelse(int matchelse, TPpToken * ppToken); + int extraTokenCheck(int atom, TPpToken* ppToken, int token); + int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken); + int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken); + int CPPif (TPpToken * ppToken); + int CPPifdef(int defined, TPpToken * ppToken); + int CPPinclude(TPpToken * ppToken); + int CPPline(TPpToken * ppToken); + int CPPerror(TPpToken * ppToken); + int CPPpragma(TPpToken * ppToken); + int CPPversion(TPpToken * ppToken); + int CPPextension(TPpToken * ppToken); + int readCPPline(TPpToken * ppToken); + int scanHeaderName(TPpToken* ppToken, char delimit); + TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay); + MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay); + + // + // From PpTokens.cpp + // + void pushTokenStreamInput(TokenStream&, bool pasting = false); + void UngetToken(int token, TPpToken*); + + class tTokenInput : public tInput { + public: + tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : + tInput(pp), + tokens(t), + lastTokenPastes(prepasting) { } + virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); } + virtual int getch() override { assert(0); return EndOfInput; } + virtual void ungetch() override { assert(0); } + virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); } + bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); } + protected: + TokenStream* tokens; + bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token + }; + + class tUngotTokenInput : public tInput { + public: + tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { } + virtual int scan(TPpToken *) override; + virtual int getch() override { assert(0); return EndOfInput; } + virtual void ungetch() override { assert(0); } + protected: + int token; + TPpToken lval; + }; + + // + // From PpScanner.cpp + // + class tStringInput : public tInput { + public: + tStringInput(TPpContext* pp, TInputScanner& i) : tInput(pp), input(&i) { } + virtual int scan(TPpToken*) override; + + // Scanner used to get source stream characters. + // - Escaped newlines are handled here, invisibly to the caller. + // - All forms of newline are handled, and turned into just a '\n'. + int getch() override + { + int ch = input->get(); + + if (ch == '\\') { + // Move past escaped newlines, as many as sequentially exist + do { + if (input->peek() == '\r' || input->peek() == '\n') { + bool allowed = pp->parseContext.lineContinuationCheck(input->getSourceLoc(), pp->inComment); + if (! allowed && pp->inComment) + return '\\'; + + // escape one newline now + ch = input->get(); + int nextch = input->get(); + if (ch == '\r' && nextch == '\n') + ch = input->get(); + else + ch = nextch; + } else + return '\\'; + } while (ch == '\\'); + } + + // handle any non-escaped newline + if (ch == '\r' || ch == '\n') { + if (ch == '\r' && input->peek() == '\n') + input->get(); + return '\n'; + } + + return ch; + } + + // Scanner used to backup the source stream characters. 
Newlines are + // handled here, invisibly to the caller, meaning have to undo exactly + // what getch() above does (e.g., don't leave things in the middle of a + // sequence of escaped newlines). + void ungetch() override + { + input->unget(); + + do { + int ch = input->peek(); + if (ch == '\r' || ch == '\n') { + if (ch == '\n') { + // correct for two-character newline + input->unget(); + if (input->peek() != '\r') + input->get(); + } + // now in front of a complete newline, move past an escape character + input->unget(); + if (input->peek() == '\\') + input->unget(); + else { + input->get(); + break; + } + } else + break; + } while (true); + } + + protected: + TInputScanner* input; + }; + + // Holds a reference to included file data, as well as a + // prologue and an epilogue string. This can be scanned using the tInput + // interface and acts as a single source string. + class TokenizableIncludeFile : public tInput { + public: + // Copies prologue and epilogue. The includedFile must remain valid + // until this TokenizableIncludeFile is no longer used. + TokenizableIncludeFile(const TSourceLoc& startLoc, + const std::string& prologue, + TShader::Includer::IncludeResult* includedFile, + const std::string& epilogue, + TPpContext* pp) + : tInput(pp), + prologue_(prologue), + epilogue_(epilogue), + includedFile_(includedFile), + scanner(3, strings, lengths, nullptr, 0, 0, true), + prevScanner(nullptr), + stringInput(pp, scanner) + { + strings[0] = prologue_.data(); + strings[1] = includedFile_->headerData; + strings[2] = epilogue_.data(); + + lengths[0] = prologue_.size(); + lengths[1] = includedFile_->headerLength; + lengths[2] = epilogue_.size(); + + scanner.setLine(startLoc.line); + scanner.setString(startLoc.string); + + scanner.setFile(startLoc.getFilenameStr(), 0); + scanner.setFile(startLoc.getFilenameStr(), 1); + scanner.setFile(startLoc.getFilenameStr(), 2); + } + + // tInput methods: + int scan(TPpToken* t) override { return stringInput.scan(t); } + int getch() override { return stringInput.getch(); } + void ungetch() override { stringInput.ungetch(); } + + void notifyActivated() override + { + prevScanner = pp->parseContext.getScanner(); + pp->parseContext.setScanner(&scanner); + pp->push_include(includedFile_); + } + + void notifyDeleted() override + { + pp->parseContext.setScanner(prevScanner); + pp->pop_include(); + } + + private: + TokenizableIncludeFile& operator=(const TokenizableIncludeFile&); + + // Stores the prologue for this string. + const std::string prologue_; + + // Stores the epilogue for this string. + const std::string epilogue_; + + // Points to the IncludeResult that this TokenizableIncludeFile represents. + TShader::Includer::IncludeResult* includedFile_; + + // Will point to prologue_, includedFile_->headerData and epilogue_ + // This is passed to scanner constructor. + // These do not own the storage and it must remain valid until this + // object has been destroyed. + const char* strings[3]; + // Length of str_, passed to scanner constructor. + size_t lengths[3]; + // Scans over str_. + TInputScanner scanner; + // The previous effective scanner before the scanner in this instance + // has been activated. + TInputScanner* prevScanner; + // Delegate object implementing the tInput interface. 
+ tStringInput stringInput; + }; + + int ScanFromString(char* s); + void missingEndifCheck(); + int lFloatConst(int len, int ch, TPpToken* ppToken); + int characterLiteral(TPpToken* ppToken); + + void push_include(TShader::Includer::IncludeResult* result) + { + currentSourceFile = result->headerName; + includeStack.push(result); + } + + void pop_include() + { + TShader::Includer::IncludeResult* include = includeStack.top(); + includeStack.pop(); + includer.releaseInclude(include); + if (includeStack.empty()) { + currentSourceFile = rootFileName; + } else { + currentSourceFile = includeStack.top()->headerName; + } + } + + bool inComment; + std::string rootFileName; + std::stack includeStack; + std::string currentSourceFile; + + std::istringstream strtodStream; + bool disableEscapeSequences; +}; + +} // end namespace glslang + +#endif // PPCONTEXT_H diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp new file mode 100644 index 0000000..e0f44f8 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp @@ -0,0 +1,1315 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2017 ARM Limited. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. 
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, NVIDIA grants you a personal, non-exclusive
+license, under NVIDIA's copyrights in this original NVIDIA software (the
+"NVIDIA Software"), to use, reproduce, modify and redistribute the
+NVIDIA Software, with or without modifications, in source and/or binary
+forms; provided that if you redistribute the NVIDIA Software, you must
+retain the copyright notice of NVIDIA, this notice and the following
+text and disclaimers in all such redistributions of the NVIDIA Software.
+Neither the name, trademarks, service marks nor logos of NVIDIA
+Corporation may be used to endorse or promote products derived from the
+NVIDIA Software without specific prior written permission from NVIDIA.
+Except as expressly stated in this notice, no other rights or licenses
+express or implied, are granted by NVIDIA herein, including but not
+limited to any patent rights that may be infringed by your derivative
+works or by other works in which the NVIDIA Software may be
+incorporated. No hardware is licensed hereunder.
+
+THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
+INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
+ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
+PRODUCTS.
+
+IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
+INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
+OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
+NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
+TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
+NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+\****************************************************************************/
+
+#ifndef _CRT_SECURE_NO_WARNINGS
+#define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#include <cstdlib>
+#include <cstring>
+
+#include "PpContext.h"
+#include "PpTokens.h"
+#include "../Scan.h"
+
+namespace glslang {
+
+///////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////// Floating point constants: /////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////
+
+//
+// Scan a single- or double-precision floating point constant.
+// Assumes that the scanner has seen at least one digit,
+// followed by either a decimal '.' or the letter 'e', or a
+// precision ending (e.g., F or LF).
+//
+// This is technically not correct, as the preprocessor should just
+// accept the numeric literal along with whatever suffix it has, but
+// currently, it stops on seeing a bad suffix, treating that as the
+// next token. This effects things like token pasting, where it is
+// relevant how many tokens something was broken into.
+//
+// See peekContinuedPasting().
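+// For example, entered with "123" already in ppToken->name and ch == '.', the
+// scan of "123.45e-2" accumulates the significant digits into the integer
+// 12345, records a decimal-point shift of -2, folds in the written exponent to
+// get an effective divisor of 10^4, and yields 12345 / 10^4 = 1.2345 without
+// building a full decimal string. Literals with more than 15 significant
+// digits or an effective exponent beyond 22 leave this fast path and fall back
+// to the strtod-based slow path below.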
+int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken) +{ + const auto saveName = [&](int ch) { + if (len <= MaxTokenLength) + ppToken->name[len++] = static_cast(ch); + }; + + // find the range of non-zero digits before the decimal point + int startNonZero = 0; + while (startNonZero < len && ppToken->name[startNonZero] == '0') + ++startNonZero; + int endNonZero = len; + while (endNonZero > startNonZero && ppToken->name[endNonZero-1] == '0') + --endNonZero; + int numWholeNumberDigits = endNonZero - startNonZero; + + // accumulate the range's value + bool fastPath = numWholeNumberDigits <= 15; // when the number gets too complex, set to false + unsigned long long wholeNumber = 0; + if (fastPath) { + for (int i = startNonZero; i < endNonZero; ++i) + wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0'); + } + int decimalShift = len - endNonZero; + + // Decimal point: + bool hasDecimalOrExponent = false; + if (ch == '.') { + hasDecimalOrExponent = true; + saveName(ch); + ch = getChar(); + int firstDecimal = len; + +#ifdef ENABLE_HLSL + // 1.#INF or -1.#INF + if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) { + if ((len < 2) || + (len == 2 && ppToken->name[0] != '1') || + (len == 3 && ppToken->name[1] != '1' && !(ppToken->name[0] == '-' || ppToken->name[0] == '+')) || + (len > 3)) + parseContext.ppError(ppToken->loc, "unexpected use of", "#", ""); + else { + // we have 1.# or -1.# or +1.#, check for 'INF' + if ((ch = getChar()) != 'I' || + (ch = getChar()) != 'N' || + (ch = getChar()) != 'F') + parseContext.ppError(ppToken->loc, "expected 'INF'", "#", ""); + else { + // we have [+-].#INF, and we are targeting IEEE 754, so wrap it up: + saveName('I'); + saveName('N'); + saveName('F'); + ppToken->name[len] = '\0'; + if (ppToken->name[0] == '-') + ppToken->i64val = 0xfff0000000000000; // -Infinity + else + ppToken->i64val = 0x7ff0000000000000; // +Infinity + return PpAtomConstFloat; + } + } + } +#endif + + // Consume leading-zero digits after the decimal point + while (ch == '0') { + saveName(ch); + ch = getChar(); + } + int startNonZeroDecimal = len; + int endNonZeroDecimal = len; + + // Consume remaining digits, up to the exponent + while (ch >= '0' && ch <= '9') { + saveName(ch); + if (ch != '0') + endNonZeroDecimal = len; + ch = getChar(); + } + + // Compute accumulation up to the last non-zero digit + if (endNonZeroDecimal > startNonZeroDecimal) { + numWholeNumberDigits += endNonZeroDecimal - endNonZero - 1; // don't include the "." 
+ if (numWholeNumberDigits > 15) + fastPath = false; + if (fastPath) { + for (int i = endNonZero; i < endNonZeroDecimal; ++i) { + if (ppToken->name[i] != '.') + wholeNumber = wholeNumber * 10 + (ppToken->name[i] - '0'); + } + } + decimalShift = firstDecimal - endNonZeroDecimal; + } + } + + // Exponent: + bool negativeExponent = false; + double exponentValue = 0.0; + int exponent = 0; + { + if (ch == 'e' || ch == 'E') { + hasDecimalOrExponent = true; + saveName(ch); + ch = getChar(); + if (ch == '+' || ch == '-') { + negativeExponent = ch == '-'; + saveName(ch); + ch = getChar(); + } + if (ch >= '0' && ch <= '9') { + while (ch >= '0' && ch <= '9') { + exponent = exponent * 10 + (ch - '0'); + saveName(ch); + ch = getChar(); + } + } else { + parseContext.ppError(ppToken->loc, "bad character in float exponent", "", ""); + } + } + + // Compensate for location of decimal + if (negativeExponent) + exponent -= decimalShift; + else { + exponent += decimalShift; + if (exponent < 0) { + negativeExponent = true; + exponent = -exponent; + } + } + if (exponent > 22) + fastPath = false; + + if (fastPath) { + // Compute the floating-point value of the exponent + exponentValue = 1.0; + if (exponent > 0) { + double expFactor = 10; + while (exponent > 0) { + if (exponent & 0x1) + exponentValue *= expFactor; + expFactor *= expFactor; + exponent >>= 1; + } + } + } + } + + // Suffix: + bool isDouble = false; + bool isFloat16 = false; +#ifndef GLSLANG_WEB + if (ch == 'l' || ch == 'L') { + if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl) + parseContext.doubleCheck(ppToken->loc, "double floating-point suffix"); + if (ifdepth == 0 && !hasDecimalOrExponent) + parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); + if (parseContext.intermediate.getSource() == EShSourceGlsl) { + int ch2 = getChar(); + if (ch2 != 'f' && ch2 != 'F') { + ungetChar(); + ungetChar(); + } else { + saveName(ch); + saveName(ch2); + isDouble = true; + } + } else if (parseContext.intermediate.getSource() == EShSourceHlsl) { + saveName(ch); + isDouble = true; + } + } else if (ch == 'h' || ch == 'H') { + if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl) + parseContext.float16Check(ppToken->loc, "half floating-point suffix"); + if (ifdepth == 0 && !hasDecimalOrExponent) + parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); + if (parseContext.intermediate.getSource() == EShSourceGlsl) { + int ch2 = getChar(); + if (ch2 != 'f' && ch2 != 'F') { + ungetChar(); + ungetChar(); + } else { + saveName(ch); + saveName(ch2); + isFloat16 = true; + } + } else if (parseContext.intermediate.getSource() == EShSourceHlsl) { + saveName(ch); + isFloat16 = true; + } + } else +#endif + if (ch == 'f' || ch == 'F') { +#ifndef GLSLANG_WEB + if (ifdepth == 0) + parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix"); + if (ifdepth == 0 && !parseContext.relaxedErrors()) + parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix"); +#endif + if (ifdepth == 0 && !hasDecimalOrExponent) + parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", ""); + saveName(ch); + } else + ungetChar(); + + // Patch up the name and length for overflow + + if (len > MaxTokenLength) { + len = MaxTokenLength; + parseContext.ppError(ppToken->loc, "float literal too long", "", ""); + } + ppToken->name[len] = '\0'; + + // Compute the numerical value + if 
(fastPath) { + // compute the floating-point value of the exponent + if (exponentValue == 0.0) + ppToken->dval = (double)wholeNumber; + else if (negativeExponent) + ppToken->dval = (double)wholeNumber / exponentValue; + else + ppToken->dval = (double)wholeNumber * exponentValue; + } else { + // slow path + ppToken->dval = 0.0; + + // remove suffix + TString numstr(ppToken->name); + if (numstr.back() == 'f' || numstr.back() == 'F') + numstr.pop_back(); + if (numstr.back() == 'h' || numstr.back() == 'H') + numstr.pop_back(); + if (numstr.back() == 'l' || numstr.back() == 'L') + numstr.pop_back(); + + // use platform library + strtodStream.clear(); + strtodStream.str(numstr.c_str()); + strtodStream >> ppToken->dval; + if (strtodStream.fail()) { + // Assume failure combined with a large exponent was overflow, in + // an attempt to set INF. + if (!negativeExponent && exponent + numWholeNumberDigits > 300) + ppToken->i64val = 0x7ff0000000000000; // +Infinity + // Assume failure combined with a small exponent was overflow. + if (negativeExponent && exponent + numWholeNumberDigits > 300) + ppToken->dval = 0.0; + // Unknown reason for failure. Theory is that either + // - the 0.0 is still there, or + // - something reasonable was written that is better than 0.0 + } + } + + // Return the right token type + if (isDouble) + return PpAtomConstDouble; + else if (isFloat16) + return PpAtomConstFloat16; + else + return PpAtomConstFloat; +} + +// Recognize a character literal. +// +// The first ' has already been accepted, read the rest, through the closing '. +// +// Always returns PpAtomConstInt. +// +int TPpContext::characterLiteral(TPpToken* ppToken) +{ + ppToken->name[0] = 0; + ppToken->ival = 0; + + if (parseContext.intermediate.getSource() != EShSourceHlsl) { + // illegal, except in macro definition, for which case we report the character + return '\''; + } + + int ch = getChar(); + switch (ch) { + case '\'': + // As empty sequence: '' + parseContext.ppError(ppToken->loc, "unexpected", "\'", ""); + return PpAtomConstInt; + case '\\': + // As escape sequence: '\XXX' + switch (ch = getChar()) { + case 'a': + ppToken->ival = 7; + break; + case 'b': + ppToken->ival = 8; + break; + case 't': + ppToken->ival = 9; + break; + case 'n': + ppToken->ival = 10; + break; + case 'v': + ppToken->ival = 11; + break; + case 'f': + ppToken->ival = 12; + break; + case 'r': + ppToken->ival = 13; + break; + case 'x': + case '0': + parseContext.ppError(ppToken->loc, "octal and hex sequences not supported", "\\", ""); + break; + default: + // This catches '\'', '\"', '\?', etc. + // Also, things like '\C' mean the same thing as 'C' + // (after the above cases are filtered out). + ppToken->ival = ch; + break; + } + break; + default: + ppToken->ival = ch; + break; + } + ppToken->name[0] = (char)ppToken->ival; + ppToken->name[1] = '\0'; + ch = getChar(); + if (ch != '\'') { + parseContext.ppError(ppToken->loc, "expected", "\'", ""); + // Look ahead for a closing ' + do { + ch = getChar(); + } while (ch != '\'' && ch != EndOfInput && ch != '\n'); + } + + return PpAtomConstInt; +} + +// +// Scanner used to tokenize source stream. +// +// N.B. Invalid numeric suffixes are not consumed.// +// This is technically not correct, as the preprocessor should just +// accept the numeric literal along with whatever suffix it has, but +// currently, it stops on seeing a bad suffix, treating that as the +// next token. This effects things like token pasting, where it is +// relevant how many tokens something was broken into. 
+// See peekContinuedPasting(). +// +int TPpContext::tStringInput::scan(TPpToken* ppToken) +{ + int AlreadyComplained = 0; + int len = 0; + int ch = 0; + int ii = 0; + unsigned long long ival = 0; + const auto floatingPointChar = [&](int ch) { return ch == '.' || ch == 'e' || ch == 'E' || + ch == 'f' || ch == 'F' || + ch == 'h' || ch == 'H'; }; + + static const char* const Int64_Extensions[] = { + E_GL_ARB_gpu_shader_int64, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int64 }; + static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]); + + static const char* const Int16_Extensions[] = { + E_GL_AMD_gpu_shader_int16, + E_GL_EXT_shader_explicit_arithmetic_types, + E_GL_EXT_shader_explicit_arithmetic_types_int16 }; + static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]); + + ppToken->ival = 0; + ppToken->i64val = 0; + ppToken->space = false; + ch = getch(); + for (;;) { + while (ch == ' ' || ch == '\t') { + ppToken->space = true; + ch = getch(); + } + + ppToken->loc = pp->parseContext.getCurrentLoc(); + len = 0; + switch (ch) { + default: + // Single character token, including EndOfInput, '#' and '\' (escaped newlines are handled at a lower level, so this is just a '\' token) + if (ch > PpAtomMaxSingle) + ch = PpAtomBadToken; + return ch; + + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': case 'G': case 'H': case 'I': case 'J': + case 'K': case 'L': case 'M': case 'N': case 'O': + case 'P': case 'Q': case 'R': case 'S': case 'T': + case 'U': case 'V': case 'W': case 'X': case 'Y': + case 'Z': case '_': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': case 'g': case 'h': case 'i': case 'j': + case 'k': case 'l': case 'm': case 'n': case 'o': + case 'p': case 'q': case 'r': case 's': case 't': + case 'u': case 'v': case 'w': case 'x': case 'y': + case 'z': + do { + if (len < MaxTokenLength) { + ppToken->name[len++] = (char)ch; + ch = getch(); + } else { + if (! AlreadyComplained) { + pp->parseContext.ppError(ppToken->loc, "name too long", "", ""); + AlreadyComplained = 1; + } + ch = getch(); + } + } while ((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '_'); + + // line continuation with no token before or after makes len == 0, and need to start over skipping white space, etc. + if (len == 0) + continue; + + ppToken->name[len] = '\0'; + ungetch(); + return PpAtomIdentifier; + case '0': + ppToken->name[len++] = (char)ch; + ch = getch(); + if (ch == 'x' || ch == 'X') { + // must be hexadecimal + + bool isUnsigned = false; + bool isInt64 = false; + bool isInt16 = false; + ppToken->name[len++] = (char)ch; + ch = getch(); + if ((ch >= '0' && ch <= '9') || + (ch >= 'A' && ch <= 'F') || + (ch >= 'a' && ch <= 'f')) { + + ival = 0; + do { + if (len < MaxTokenLength && ival <= 0x0fffffffffffffffull) { + ppToken->name[len++] = (char)ch; + if (ch >= '0' && ch <= '9') { + ii = ch - '0'; + } else if (ch >= 'A' && ch <= 'F') { + ii = ch - 'A' + 10; + } else if (ch >= 'a' && ch <= 'f') { + ii = ch - 'a' + 10; + } else + pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", ""); + ival = (ival << 4) | ii; + } else { + if (! 
AlreadyComplained) { + if(len < MaxTokenLength) + pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", ""); + else + pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too long", "", ""); + AlreadyComplained = 1; + } + ival = 0xffffffffffffffffull; + } + ch = getch(); + } while ((ch >= '0' && ch <= '9') || + (ch >= 'A' && ch <= 'F') || + (ch >= 'a' && ch <= 'f')); + } else { + pp->parseContext.ppError(ppToken->loc, "bad digit in hexadecimal literal", "", ""); + } + if (ch == 'u' || ch == 'U') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isUnsigned = true; + +#ifndef GLSLANG_WEB + int nextCh = getch(); + if (nextCh == 'l' || nextCh == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt64 = true; + } else + ungetch(); + + nextCh = getch(); + if ((nextCh == 's' || nextCh == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt16 = true; + } else + ungetch(); + } else if (ch == 'l' || ch == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt64 = true; + } else if ((ch == 's' || ch == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt16 = true; +#endif + } else + ungetch(); + ppToken->name[len] = '\0'; + + if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (pp->ifdepth == 0) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "64-bit hexadecimal literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int64_Extensions, Int64_Extensions, "64-bit hexadecimal literal"); + } + ppToken->i64val = ival; + return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; + } else if (isInt16) { + if (pp->ifdepth == 0) { + if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "16-bit hexadecimal literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int16_Extensions, Int16_Extensions, "16-bit hexadecimal literal"); + } + } + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16; + } else { + if (ival > 0xffffffffu && !AlreadyComplained) + pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", ""); + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint : PpAtomConstInt; + } + } else { + // could be octal integer or floating point, speculative pursue octal until it must be floating point + + bool isUnsigned = false; + bool isInt64 = false; + bool isInt16 = false; + bool octalOverflow = false; + bool nonOctal = false; + ival = 0; + + // see how much octal-like stuff we can read + while (ch >= '0' && ch <= '7') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + else if (! AlreadyComplained) { + pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); + AlreadyComplained = 1; + } + if (ival <= 0x1fffffffffffffffull) { + ii = ch - '0'; + ival = (ival << 3) | ii; + } else + octalOverflow = true; + ch = getch(); + } + + // could be part of a float... + if (ch == '8' || ch == '9') { + nonOctal = true; + do { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + else if (! 
AlreadyComplained) { + pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); + AlreadyComplained = 1; + } + ch = getch(); + } while (ch >= '0' && ch <= '9'); + } + if (floatingPointChar(ch)) + return pp->lFloatConst(len, ch, ppToken); + + // wasn't a float, so must be octal... + if (nonOctal) + pp->parseContext.ppError(ppToken->loc, "octal literal digit too large", "", ""); + + if (ch == 'u' || ch == 'U') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isUnsigned = true; + +#ifndef GLSLANG_WEB + int nextCh = getch(); + if (nextCh == 'l' || nextCh == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt64 = true; + } else + ungetch(); + + nextCh = getch(); + if ((nextCh == 's' || nextCh == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt16 = true; + } else + ungetch(); + } else if (ch == 'l' || ch == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt64 = true; + } else if ((ch == 's' || ch == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt16 = true; +#endif + } else + ungetch(); + ppToken->name[len] = '\0'; + + if (!isInt64 && ival > 0xffffffffu) + octalOverflow = true; + + if (octalOverflow) + pp->parseContext.ppError(ppToken->loc, "octal literal too big", "", ""); + + if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (pp->ifdepth == 0) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "64-bit octal literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int64_Extensions, Int64_Extensions, "64-bit octal literal"); + } + ppToken->i64val = ival; + return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; + } else if (isInt16) { + if (pp->ifdepth == 0) { + if (pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "16-bit octal literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int16_Extensions, Int16_Extensions, "16-bit octal literal"); + } + } + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16; + } else { + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint : PpAtomConstInt; + } + } + break; + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + // can't be hexadecimal or octal, is either decimal or floating point + + do { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + else if (! 
AlreadyComplained) { + pp->parseContext.ppError(ppToken->loc, "numeric literal too long", "", ""); + AlreadyComplained = 1; + } + ch = getch(); + } while (ch >= '0' && ch <= '9'); + if (floatingPointChar(ch)) + return pp->lFloatConst(len, ch, ppToken); + else { + // Finish handling signed and unsigned integers + int numericLen = len; + bool isUnsigned = false; + bool isInt64 = false; + bool isInt16 = false; + if (ch == 'u' || ch == 'U') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isUnsigned = true; + +#ifndef GLSLANG_WEB + int nextCh = getch(); + if (nextCh == 'l' || nextCh == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt64 = true; + } else + ungetch(); + + nextCh = getch(); + if ((nextCh == 's' || nextCh == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)nextCh; + isInt16 = true; + } else + ungetch(); + } else if (ch == 'l' || ch == 'L') { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt64 = true; + } else if ((ch == 's' || ch == 'S') && + pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (len < MaxTokenLength) + ppToken->name[len++] = (char)ch; + isInt16 = true; +#endif + } else + ungetch(); + + ppToken->name[len] = '\0'; + ival = 0; + const unsigned oneTenthMaxInt = 0xFFFFFFFFu / 10; + const unsigned remainderMaxInt = 0xFFFFFFFFu - 10 * oneTenthMaxInt; + const unsigned long long oneTenthMaxInt64 = 0xFFFFFFFFFFFFFFFFull / 10; + const unsigned long long remainderMaxInt64 = 0xFFFFFFFFFFFFFFFFull - 10 * oneTenthMaxInt64; + const unsigned short oneTenthMaxInt16 = 0xFFFFu / 10; + const unsigned short remainderMaxInt16 = 0xFFFFu - 10 * oneTenthMaxInt16; + for (int i = 0; i < numericLen; i++) { + ch = ppToken->name[i] - '0'; + bool overflow = false; + if (isInt64) + overflow = (ival > oneTenthMaxInt64 || (ival == oneTenthMaxInt64 && (unsigned long long)ch > remainderMaxInt64)); + else if (isInt16) + overflow = (ival > oneTenthMaxInt16 || (ival == oneTenthMaxInt16 && (unsigned short)ch > remainderMaxInt16)); + else + overflow = (ival > oneTenthMaxInt || (ival == oneTenthMaxInt && (unsigned)ch > remainderMaxInt)); + if (overflow) { + pp->parseContext.ppError(ppToken->loc, "numeric literal too big", "", ""); + ival = 0xFFFFFFFFFFFFFFFFull; + break; + } else + ival = ival * 10 + ch; + } + + if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + if (pp->ifdepth == 0) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "64-bit literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int64_Extensions, Int64_Extensions, "64-bit literal"); + } + ppToken->i64val = ival; + return isUnsigned ? PpAtomConstUint64 : PpAtomConstInt64; + } else if (isInt16) { + if (pp->ifdepth == 0 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) { + pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile, + "16-bit literal"); + pp->parseContext.profileRequires(ppToken->loc, ~EEsProfile, 0, + Num_Int16_Extensions, Int16_Extensions, "16-bit literal"); + } + ppToken->ival = (int)ival; + return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16; + } else { + ppToken->ival = (int)ival; + return isUnsigned ? 
PpAtomConstUint : PpAtomConstInt; + } + } + break; + case '-': + ch = getch(); + if (ch == '-') { + return PpAtomDecrement; + } else if (ch == '=') { + return PPAtomSubAssign; + } else { + ungetch(); + return '-'; + } + case '+': + ch = getch(); + if (ch == '+') { + return PpAtomIncrement; + } else if (ch == '=') { + return PPAtomAddAssign; + } else { + ungetch(); + return '+'; + } + case '*': + ch = getch(); + if (ch == '=') { + return PPAtomMulAssign; + } else { + ungetch(); + return '*'; + } + case '%': + ch = getch(); + if (ch == '=') { + return PPAtomModAssign; + } else { + ungetch(); + return '%'; + } + case '^': + ch = getch(); + if (ch == '^') { + return PpAtomXor; + } else { + if (ch == '=') + return PpAtomXorAssign; + else{ + ungetch(); + return '^'; + } + } + + case '=': + ch = getch(); + if (ch == '=') { + return PpAtomEQ; + } else { + ungetch(); + return '='; + } + case '!': + ch = getch(); + if (ch == '=') { + return PpAtomNE; + } else { + ungetch(); + return '!'; + } + case '|': + ch = getch(); + if (ch == '|') { + return PpAtomOr; + } else if (ch == '=') { + return PpAtomOrAssign; + } else { + ungetch(); + return '|'; + } + case '&': + ch = getch(); + if (ch == '&') { + return PpAtomAnd; + } else if (ch == '=') { + return PpAtomAndAssign; + } else { + ungetch(); + return '&'; + } + case '<': + ch = getch(); + if (ch == '<') { + ch = getch(); + if (ch == '=') + return PpAtomLeftAssign; + else { + ungetch(); + return PpAtomLeft; + } + } else if (ch == '=') { + return PpAtomLE; + } else { + ungetch(); + return '<'; + } + case '>': + ch = getch(); + if (ch == '>') { + ch = getch(); + if (ch == '=') + return PpAtomRightAssign; + else { + ungetch(); + return PpAtomRight; + } + } else if (ch == '=') { + return PpAtomGE; + } else { + ungetch(); + return '>'; + } + case '.': + ch = getch(); + if (ch >= '0' && ch <= '9') { + ungetch(); + return pp->lFloatConst(0, '.', ppToken); + } else { + ungetch(); + return '.'; + } + case '/': + ch = getch(); + if (ch == '/') { + pp->inComment = true; + do { + ch = getch(); + } while (ch != '\n' && ch != EndOfInput); + ppToken->space = true; + pp->inComment = false; + + return ch; + } else if (ch == '*') { + ch = getch(); + do { + while (ch != '*') { + if (ch == EndOfInput) { + pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", ""); + return ch; + } + ch = getch(); + } + ch = getch(); + if (ch == EndOfInput) { + pp->parseContext.ppError(ppToken->loc, "End of input in comment", "comment", ""); + return ch; + } + } while (ch != '/'); + ppToken->space = true; + // loop again to get the next token... + break; + } else if (ch == '=') { + return PPAtomDivAssign; + } else { + ungetch(); + return '/'; + } + break; + case '\'': + return pp->characterLiteral(ppToken); + case '"': + // #include uses scanHeaderName() to ignore these escape sequences. + ch = getch(); + while (ch != '"' && ch != '\n' && ch != EndOfInput) { + if (len < MaxTokenLength) { + if (ch == '\\' && !pp->disableEscapeSequences) { + int nextCh = getch(); + switch (nextCh) { + case '\'': ch = 0x27; break; + case '"': ch = 0x22; break; + case '?': ch = 0x3f; break; + case '\\': ch = 0x5c; break; + case 'a': ch = 0x07; break; + case 'b': ch = 0x08; break; + case 'f': ch = 0x0c; break; + case 'n': ch = 0x0a; break; + case 'r': ch = 0x0d; break; + case 't': ch = 0x09; break; + case 'v': ch = 0x0b; break; + case 'x': + // Hex value, arbitrary number of characters. 
Terminated by the first + // non-hex digit + { + int numDigits = 0; + ch = 0; + while (true) { + nextCh = getch(); + if (nextCh >= '0' && nextCh <= '9') + nextCh -= '0'; + else if (nextCh >= 'A' && nextCh <= 'F') + nextCh -= 'A' - 10; + else if (nextCh >= 'a' && nextCh <= 'f') + nextCh -= 'a' - 10; + else { + ungetch(); + break; + } + numDigits++; + ch = ch * 0x10 + nextCh; + } + if (numDigits == 0) { + pp->parseContext.ppError(ppToken->loc, "Expected hex value in escape sequence", "string", ""); + } + break; + } + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + // Octal value, up to three octal digits + { + int numDigits = 1; + ch = nextCh - '0'; + while (numDigits < 3) { + nextCh = getch(); + if (nextCh >= '0' && nextCh <= '7') + nextCh -= '0'; + else { + ungetch(); + break; + } + numDigits++; + ch = ch * 8 + nextCh; + } + break; + } + default: + pp->parseContext.ppError(ppToken->loc, "Invalid escape sequence", "string", ""); + break; + } + } + ppToken->name[len] = (char)ch; + len++; + ch = getch(); + } else + break; + }; + ppToken->name[len] = '\0'; + if (ch != '"') { + ungetch(); + pp->parseContext.ppError(ppToken->loc, "End of line in string", "string", ""); + } + return PpAtomConstString; + case ':': + ch = getch(); + if (ch == ':') + return PpAtomColonColon; + ungetch(); + return ':'; + } + + ch = getch(); + } +} + +// +// The main functional entry point into the preprocessor, which will +// scan the source strings to figure out and return the next processing token. +// +// Return the token, or EndOfInput when no more tokens. +// +int TPpContext::tokenize(TPpToken& ppToken) +{ + for(;;) { + int token = scanToken(&ppToken); + + // Handle token-pasting logic + token = tokenPaste(token, ppToken); + + if (token == EndOfInput) { + missingEndifCheck(); + return EndOfInput; + } + if (token == '#') { + if (previous_token == '\n') { + token = readCPPline(&ppToken); + if (token == EndOfInput) { + missingEndifCheck(); + return EndOfInput; + } + continue; + } else { + parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", ""); + return EndOfInput; + } + } + previous_token = token; + + if (token == '\n') + continue; + + // expand macros + if (token == PpAtomIdentifier) { + switch (MacroExpand(&ppToken, false, true)) { + case MacroExpandNotStarted: + break; + case MacroExpandError: + return EndOfInput; + case MacroExpandStarted: + case MacroExpandUndef: + continue; + } + } + + switch (token) { + case PpAtomIdentifier: + case PpAtomConstInt: + case PpAtomConstUint: + case PpAtomConstFloat: + case PpAtomConstInt64: + case PpAtomConstUint64: + case PpAtomConstInt16: + case PpAtomConstUint16: + case PpAtomConstDouble: + case PpAtomConstFloat16: + if (ppToken.name[0] == '\0') + continue; + break; + case PpAtomConstString: + // HLSL allows string literals. + // GLSL allows string literals with GL_EXT_debug_printf. 
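+            // In GLSL the literal is accepted only when GL_EXT_debug_printf is enabled;
+            // otherwise an error is reported and the token is skipped via the 'continue' below.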
+ if (ifdepth == 0 && parseContext.intermediate.getSource() != EShSourceHlsl) { + parseContext.requireExtensions(ppToken.loc, 1, &E_GL_EXT_debug_printf, "string literal"); + if (!parseContext.extensionTurnedOn(E_GL_EXT_debug_printf)) + continue; + } + break; + case '\'': + parseContext.ppError(ppToken.loc, "character literals not supported", "\'", ""); + continue; + default: + snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(token)); + break; + } + + return token; + } +} + +// +// Do all token-pasting related combining of two pasted tokens when getting a +// stream of tokens from a replacement list. Degenerates to no processing if a +// replacement list is not the source of the token stream. +// +int TPpContext::tokenPaste(int token, TPpToken& ppToken) +{ + // starting with ## is illegal, skip to next token + if (token == PpAtomPaste) { + parseContext.ppError(ppToken.loc, "unexpected location", "##", ""); + return scanToken(&ppToken); + } + + int resultToken = token; // "foo" pasted with "35" is an identifier, not a number + + // ## can be chained, process all in the chain at once + while (peekPasting()) { + TPpToken pastedPpToken; + + // next token has to be ## + token = scanToken(&pastedPpToken); + assert(token == PpAtomPaste); + + // This covers end of macro expansion + if (endOfReplacementList()) { + parseContext.ppError(ppToken.loc, "unexpected location; end of replacement list", "##", ""); + break; + } + + // Get the token(s) after the ##. + // Because of "space" semantics, and prior tokenization, what + // appeared a single token, e.g. "3A", might have been tokenized + // into two tokens "3" and "A", but the "A" will have 'space' set to + // false. Accumulate all of these to recreate the original lexical + // appearing token. 
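+        // For example, with '#define F(x) x##3A', the replacement text "3A" was
+        // tokenized as "3" followed by "A" with no intervening space, so both
+        // pieces must be appended after the paste for F(foo) to produce the
+        // single identifier "foo3A".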
+ do { + token = scanToken(&pastedPpToken); + + // This covers end of argument expansion + if (token == tMarkerInput::marker) { + parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", ""); + return resultToken; + } + + // get the token text + switch (resultToken) { + case PpAtomIdentifier: + // already have the correct text in token.names + break; + case '=': + case '!': + case '-': + case '~': + case '+': + case '*': + case '/': + case '%': + case '<': + case '>': + case '|': + case '^': + case '&': + case PpAtomRight: + case PpAtomLeft: + case PpAtomAnd: + case PpAtomOr: + case PpAtomXor: + snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken)); + snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token)); + break; + default: + parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", ""); + return resultToken; + } + + // combine the tokens + if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) { + parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", ""); + return resultToken; + } + snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name), + "%s", pastedPpToken.name); + + // correct the kind of token we are making, if needed (identifiers stay identifiers) + if (resultToken != PpAtomIdentifier) { + int newToken = atomStrings.getAtom(ppToken.name); + if (newToken > 0) + resultToken = newToken; + else + parseContext.ppError(ppToken.loc, "combined token is invalid", "##", ""); + } + } while (peekContinuedPasting(resultToken)); + } + + return resultToken; +} + +// Checks if we've seen balanced #if...#endif +void TPpContext::missingEndifCheck() +{ + if (ifdepth > 0) + parseContext.ppError(parseContext.getCurrentLoc(), "missing #endif", "", ""); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp new file mode 100644 index 0000000..7ed5870 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp @@ -0,0 +1,221 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. + +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ + +// +// For recording and playing back the stream of tokens in a macro definition. 
+// + +#ifndef _CRT_SECURE_NO_WARNINGS +#define _CRT_SECURE_NO_WARNINGS +#endif +#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) +#define snprintf sprintf_s +#endif + +#include +#include +#include +#include + +#include "PpContext.h" +#include "PpTokens.h" + +namespace glslang { + +// Add a token (including backing string) to the end of a macro +// token stream, for later playback. +void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken) +{ + TokenStream::Token streamToken(atom, *ppToken); + stream.push_back(streamToken); +} + +// Read the next token from a macro token stream. +int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken) +{ + if (atEnd()) + return EndOfInput; + + int atom = stream[currentPos++].get(*ppToken); + ppToken->loc = parseContext.getCurrentLoc(); + +#ifndef GLSLANG_WEB + // Check for ##, unless the current # is the last character + if (atom == '#') { + if (peekToken('#')) { + parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)"); + parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)"); + currentPos++; + atom = PpAtomPaste; + } + } +#endif + + return atom; +} + +// We are pasting if +// 1. we are preceding a pasting operator within this stream +// or +// 2. the entire macro is preceding a pasting operator (lastTokenPastes) +// and we are also on the last token +bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes) +{ + // 1. preceding ##? + + size_t savePos = currentPos; + // skip white space + while (peekToken(' ')) + ++currentPos; + if (peekToken(PpAtomPaste)) { + currentPos = savePos; + return true; + } + + // 2. last token and we've been told after this there will be a ## + + if (! lastTokenPastes) + return false; + // Getting here means the last token will be pasted, after this + + // Are we at the last non-whitespace token? + savePos = currentPos; + bool moreTokens = false; + do { + if (atEnd()) + break; + if (!peekToken(' ')) { + moreTokens = true; + break; + } + ++currentPos; + } while (true); + currentPos = savePos; + + return !moreTokens; +} + +// See if the next non-white-space tokens are two consecutive # +bool TPpContext::TokenStream::peekUntokenizedPasting() +{ + // don't return early, have to restore this + size_t savePos = currentPos; + + // skip white-space + while (peekToken(' ')) + ++currentPos; + + // check for ## + bool pasting = false; + if (peekToken('#')) { + ++currentPos; + if (peekToken('#')) + pasting = true; + } + + currentPos = savePos; + + return pasting; +} + +void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting) +{ + pushInput(new tTokenInput(this, &ts, prepasting)); + ts.reset(); +} + +int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken) +{ + if (done) + return EndOfInput; + + int ret = token; + *ppToken = lval; + done = true; + + return ret; +} + +void TPpContext::UngetToken(int token, TPpToken* ppToken) +{ + pushInput(new tUngotTokenInput(this, token, ppToken)); +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h new file mode 100644 index 0000000..7b0f815 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h @@ -0,0 +1,179 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +/****************************************************************************\ +Copyright (c) 2002, NVIDIA Corporation. + +NVIDIA Corporation("NVIDIA") supplies this software to you in +consideration of your agreement to the following terms, and your use, +installation, modification or redistribution of this NVIDIA software +constitutes acceptance of these terms. If you do not agree with these +terms, please do not use, install, modify or redistribute this NVIDIA +software. + +In consideration of your agreement to abide by the following terms, and +subject to these terms, NVIDIA grants you a personal, non-exclusive +license, under NVIDIA's copyrights in this original NVIDIA software (the +"NVIDIA Software"), to use, reproduce, modify and redistribute the +NVIDIA Software, with or without modifications, in source and/or binary +forms; provided that if you redistribute the NVIDIA Software, you must +retain the copyright notice of NVIDIA, this notice and the following +text and disclaimers in all such redistributions of the NVIDIA Software. +Neither the name, trademarks, service marks nor logos of NVIDIA +Corporation may be used to endorse or promote products derived from the +NVIDIA Software without specific prior written permission from NVIDIA. +Except as expressly stated in this notice, no other rights or licenses +express or implied, are granted by NVIDIA herein, including but not +limited to any patent rights that may be infringed by your derivative +works or by other works in which the NVIDIA Software may be +incorporated. No hardware is licensed hereunder. + +THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, +INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER +PRODUCTS. 
+ +IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, +INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF +USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY +OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE +NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, +TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF +NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +\****************************************************************************/ + +#ifndef PARSER_H +#define PARSER_H + +namespace glslang { + +// Multi-character tokens +enum EFixedAtoms { + // single character tokens get their own char value as their token; start here for multi-character tokens + PpAtomMaxSingle = 127, + + // replace bad character tokens with this, to avoid accidental aliasing with the below + PpAtomBadToken, + + // Operators + + PPAtomAddAssign, + PPAtomSubAssign, + PPAtomMulAssign, + PPAtomDivAssign, + PPAtomModAssign, + + PpAtomRight, + PpAtomLeft, + + PpAtomRightAssign, + PpAtomLeftAssign, + PpAtomAndAssign, + PpAtomOrAssign, + PpAtomXorAssign, + + PpAtomAnd, + PpAtomOr, + PpAtomXor, + + PpAtomEQ, + PpAtomNE, + PpAtomGE, + PpAtomLE, + + PpAtomDecrement, + PpAtomIncrement, + + PpAtomColonColon, + + PpAtomPaste, + + // Constants + + PpAtomConstInt, + PpAtomConstUint, + PpAtomConstInt64, + PpAtomConstUint64, + PpAtomConstInt16, + PpAtomConstUint16, + PpAtomConstFloat, + PpAtomConstDouble, + PpAtomConstFloat16, + PpAtomConstString, + + // Identifiers + PpAtomIdentifier, + + // preprocessor "keywords" + + PpAtomDefine, + PpAtomUndef, + + PpAtomIf, + PpAtomIfdef, + PpAtomIfndef, + PpAtomElse, + PpAtomElif, + PpAtomEndif, + + PpAtomLine, + PpAtomPragma, + PpAtomError, + + // #version ... + PpAtomVersion, + PpAtomCore, + PpAtomCompatibility, + PpAtomEs, + + // #extension + PpAtomExtension, + + // __LINE__, __FILE__, __VERSION__ + + PpAtomLineMacro, + PpAtomFileMacro, + PpAtomVersionMacro, + + // #include + PpAtomInclude, + + PpAtomLast, +}; + +} // end namespace glslang + +#endif /* not PARSER_H */ diff --git a/src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.cpp b/src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.cpp new file mode 100644 index 0000000..9def592 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.cpp @@ -0,0 +1,870 @@ +// +// Copyright (C) 2015-2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+
+//
+// Visit the nodes in the glslang intermediate tree representation to
+// propagate the 'noContraction' qualifier.
+//
+
+#ifndef GLSLANG_WEB
+
+#include "propagateNoContraction.h"
+
+#include <cstdlib>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "localintermediate.h"
+namespace {
+
+// Use a string to hold the access chain information, as in most cases the
+// access chain is short and may contain only one element, which is the symbol
+// ID.
+// Example: struct {float a; float b;} s;
+// Object s.a will be represented with: <symbol ID of s>/0
+// Object s.b will be represented with: <symbol ID of s>/1
+// Object s will be represented with: <symbol ID of s>
+// For members of vector, matrix and arrays, they will be represented with the
+// same symbol ID of their container symbol objects. This is because their
+// preciseness is always the same as their container symbol objects.
+typedef std::string ObjectAccessChain;
+
+// The delimiter used in the ObjectAccessChain string to separate symbol ID and
+// different level of struct indices.
+const char ObjectAccesschainDelimiter = '/';
+
+// Mapping from Symbol IDs of symbol nodes, to their defining operation
+// nodes.
+typedef std::unordered_multimap<ObjectAccessChain, TIntermNode*> NodeMapping;
+// Mapping from object nodes to their access chain info string.
+typedef std::unordered_map<glslang::TIntermTyped*, ObjectAccessChain> AccessChainMapping;
+
+// Set of object IDs.
+typedef std::unordered_set<ObjectAccessChain> ObjectAccesschainSet;
+// Set of return branch nodes.
+typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
+
+// A helper function to tell whether a node is 'noContraction'. Returns true if
+// the node has 'noContraction' qualifier, otherwise false.
+bool isPreciseObjectNode(glslang::TIntermTyped* node)
+{
+    return node->getType().getQualifier().isNoContraction();
+}
+
+// Returns true if the opcode is a dereferencing one.
+bool isDereferenceOperation(glslang::TOperator op)
+{
+    switch (op) {
+    case glslang::EOpIndexDirect:
+    case glslang::EOpIndexDirectStruct:
+    case glslang::EOpIndexIndirect:
+    case glslang::EOpVectorSwizzle:
+    case glslang::EOpMatrixSwizzle:
+        return true;
+    default:
+        return false;
+    }
+}
+
+// Returns true if the opcode leads to an assignment operation.
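+// This covers both the compound forms ('+=', '*=', ...) and the implicit
+// assignments performed by the '++' and '--' operators, as listed below.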
+bool isAssignOperation(glslang::TOperator op) +{ + switch (op) { + case glslang::EOpAssign: + case glslang::EOpAddAssign: + case glslang::EOpSubAssign: + case glslang::EOpMulAssign: + case glslang::EOpVectorTimesMatrixAssign: + case glslang::EOpVectorTimesScalarAssign: + case glslang::EOpMatrixTimesScalarAssign: + case glslang::EOpMatrixTimesMatrixAssign: + case glslang::EOpDivAssign: + case glslang::EOpModAssign: + case glslang::EOpAndAssign: + case glslang::EOpLeftShiftAssign: + case glslang::EOpRightShiftAssign: + case glslang::EOpInclusiveOrAssign: + case glslang::EOpExclusiveOrAssign: + + case glslang::EOpPostIncrement: + case glslang::EOpPostDecrement: + case glslang::EOpPreIncrement: + case glslang::EOpPreDecrement: + return true; + default: + return false; + } +} + +// A helper function to get the unsigned int from a given constant union node. +// Note the node should only hold a uint scalar. +unsigned getStructIndexFromConstantUnion(glslang::TIntermTyped* node) +{ + assert(node->getAsConstantUnion() && node->getAsConstantUnion()->isScalar()); + unsigned struct_dereference_index = node->getAsConstantUnion()->getConstArray()[0].getUConst(); + return struct_dereference_index; +} + +// A helper function to generate symbol_label. +ObjectAccessChain generateSymbolLabel(glslang::TIntermSymbol* node) +{ + ObjectAccessChain symbol_id = + std::to_string(node->getId()) + "(" + node->getName().c_str() + ")"; + return symbol_id; +} + +// Returns true if the operation is an arithmetic operation and valid for +// the 'NoContraction' decoration. +bool isArithmeticOperation(glslang::TOperator op) +{ + switch (op) { + case glslang::EOpAddAssign: + case glslang::EOpSubAssign: + case glslang::EOpMulAssign: + case glslang::EOpVectorTimesMatrixAssign: + case glslang::EOpVectorTimesScalarAssign: + case glslang::EOpMatrixTimesScalarAssign: + case glslang::EOpMatrixTimesMatrixAssign: + case glslang::EOpDivAssign: + case glslang::EOpModAssign: + + case glslang::EOpNegative: + + case glslang::EOpAdd: + case glslang::EOpSub: + case glslang::EOpMul: + case glslang::EOpDiv: + case glslang::EOpMod: + + case glslang::EOpVectorTimesScalar: + case glslang::EOpVectorTimesMatrix: + case glslang::EOpMatrixTimesVector: + case glslang::EOpMatrixTimesScalar: + case glslang::EOpMatrixTimesMatrix: + + case glslang::EOpDot: + + case glslang::EOpPostIncrement: + case glslang::EOpPostDecrement: + case glslang::EOpPreIncrement: + case glslang::EOpPreDecrement: + return true; + default: + return false; + } +} + +// A helper class to help manage the populating_initial_no_contraction_ flag. +template class StateSettingGuard { +public: + StateSettingGuard(T* state_ptr, T new_state_value) + : state_ptr_(state_ptr), previous_state_(*state_ptr) + { + *state_ptr = new_state_value; + } + StateSettingGuard(T* state_ptr) : state_ptr_(state_ptr), previous_state_(*state_ptr) {} + void setState(T new_state_value) { *state_ptr_ = new_state_value; } + ~StateSettingGuard() { *state_ptr_ = previous_state_; } + +private: + T* state_ptr_; + T previous_state_; +}; + +// A helper function to get the front element from a given ObjectAccessChain +ObjectAccessChain getFrontElement(const ObjectAccessChain& chain) +{ + size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter); + return pos_delimiter == std::string::npos ? chain : chain.substr(0, pos_delimiter); +} + +// A helper function to get the access chain starting from the second element. 
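+// For example, for the chain "12(s)/1/0", getFrontElement() above returns
+// "12(s)" and this function returns "1/0".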
+ObjectAccessChain subAccessChainFromSecondElement(const ObjectAccessChain& chain) +{ + size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter); + return pos_delimiter == std::string::npos ? "" : chain.substr(pos_delimiter + 1); +} + +// A helper function to get the access chain after removing a given prefix. +ObjectAccessChain getSubAccessChainAfterPrefix(const ObjectAccessChain& chain, + const ObjectAccessChain& prefix) +{ + size_t pos = chain.find(prefix); + if (pos != 0) + return chain; + return chain.substr(prefix.length() + sizeof(ObjectAccesschainDelimiter)); +} + +// +// A traverser which traverses the whole AST and populates: +// 1) A mapping from symbol nodes' IDs to their defining operation nodes. +// 2) A set of access chains of the initial precise object nodes. +// +class TSymbolDefinitionCollectingTraverser : public glslang::TIntermTraverser { +public: + TSymbolDefinitionCollectingTraverser(NodeMapping* symbol_definition_mapping, + AccessChainMapping* accesschain_mapping, + ObjectAccesschainSet* precise_objects, + ReturnBranchNodeSet* precise_return_nodes); + + bool visitUnary(glslang::TVisit, glslang::TIntermUnary*) override; + bool visitBinary(glslang::TVisit, glslang::TIntermBinary*) override; + void visitSymbol(glslang::TIntermSymbol*) override; + bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) override; + bool visitBranch(glslang::TVisit, glslang::TIntermBranch*) override; + +protected: + TSymbolDefinitionCollectingTraverser& operator=(const TSymbolDefinitionCollectingTraverser&); + + // The mapping from symbol node IDs to their defining nodes. This should be + // populated along traversing the AST. + NodeMapping& symbol_definition_mapping_; + // The set of symbol node IDs for precise symbol nodes, the ones marked as + // 'noContraction'. + ObjectAccesschainSet& precise_objects_; + // The set of precise return nodes. + ReturnBranchNodeSet& precise_return_nodes_; + // A temporary cache of the symbol node whose defining node is to be found + // currently along traversing the AST. + ObjectAccessChain current_object_; + // A map from object node to its access chain. This traverser stores + // the built access chains into this map for each object node it has + // visited. + AccessChainMapping& accesschain_mapping_; + // The pointer to the Function Definition node, so we can get the + // preciseness of the return expression from it when we traverse the + // return branch node. + glslang::TIntermAggregate* current_function_definition_node_; +}; + +TSymbolDefinitionCollectingTraverser::TSymbolDefinitionCollectingTraverser( + NodeMapping* symbol_definition_mapping, AccessChainMapping* accesschain_mapping, + ObjectAccesschainSet* precise_objects, + std::unordered_set* precise_return_nodes) + : TIntermTraverser(true, false, false), symbol_definition_mapping_(*symbol_definition_mapping), + precise_objects_(*precise_objects), precise_return_nodes_(*precise_return_nodes), + current_object_(), accesschain_mapping_(*accesschain_mapping), + current_function_definition_node_(nullptr) {} + +// Visits a symbol node, set the current_object_ to the +// current node symbol ID, and record a mapping from this node to the current +// current_object_, which is the just obtained symbol +// ID. +void TSymbolDefinitionCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node) +{ + current_object_ = generateSymbolLabel(node); + accesschain_mapping_[node] = current_object_; +} + +// Visits an aggregate node, traverses all of its children. 
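+// If the aggregate is a function definition (EOpFunction), the node is also
+// cached so that any return statement visited inside it can consult the
+// preciseness of the function's return value.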
+bool TSymbolDefinitionCollectingTraverser::visitAggregate(glslang::TVisit,
+                                                          glslang::TIntermAggregate* node)
+{
+    // This aggregate node might be a function definition node, in which case we need to
+    // cache this node, so we can get the preciseness information of the return value
+    // of this function later.
+    StateSettingGuard<glslang::TIntermAggregate*> current_function_definition_node_setting_guard(
+        &current_function_definition_node_);
+    if (node->getOp() == glslang::EOpFunction) {
+        // This is function definition node, we need to cache this node so that we can
+        // get the preciseness of the return value later.
+        current_function_definition_node_setting_guard.setState(node);
+    }
+    // Traverse the items in the sequence.
+    glslang::TIntermSequence& seq = node->getSequence();
+    for (int i = 0; i < (int)seq.size(); ++i) {
+        current_object_.clear();
+        seq[i]->traverse(this);
+    }
+    return false;
+}
+
+bool TSymbolDefinitionCollectingTraverser::visitBranch(glslang::TVisit,
+                                                       glslang::TIntermBranch* node)
+{
+    if (node->getFlowOp() == glslang::EOpReturn && node->getExpression() &&
+        current_function_definition_node_ &&
+        current_function_definition_node_->getType().getQualifier().noContraction) {
+        // This node is a return node with an expression, and its function has a
+        // precise return value. We need to find the involved objects in its
+        // expression and add them to the set of initial precise objects.
+        precise_return_nodes_.insert(node);
+        node->getExpression()->traverse(this);
+    }
+    return false;
+}
+
+// Visits a unary node. This might be an implicit assignment like i++, i--. etc.
+bool TSymbolDefinitionCollectingTraverser::visitUnary(glslang::TVisit /* visit */,
+                                                      glslang::TIntermUnary* node)
+{
+    current_object_.clear();
+    node->getOperand()->traverse(this);
+    if (isAssignOperation(node->getOp())) {
+        // We should always be able to get an access chain of the operand node.
+        assert(!current_object_.empty());
+
+        // If the operand node object is 'precise', we collect its access chain
+        // for the initial set of 'precise' objects.
+        if (isPreciseObjectNode(node->getOperand())) {
+            // The operand node is an 'precise' object node, add its
+            // access chain to the set of 'precise' objects. This is to collect
+            // the initial set of 'precise' objects.
+            precise_objects_.insert(current_object_);
+        }
+        // Gets the symbol ID from the object's access chain.
+        ObjectAccessChain id_symbol = getFrontElement(current_object_);
+        // Add a mapping from the symbol ID to this assignment operation node.
+        symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
+    }
+    // A unary node is not a dereference node, so we clear the access chain which
+    // is under construction.
+    current_object_.clear();
+    return false;
+}
+
+// Visits a binary node and updates the mapping from symbol IDs to the definition
+// nodes. Also collects the access chains for the initial precise objects.
+bool TSymbolDefinitionCollectingTraverser::visitBinary(glslang::TVisit /* visit */,
+                                                       glslang::TIntermBinary* node)
+{
+    // Traverses the left node to build the access chain info for the object.
+    current_object_.clear();
+    node->getLeft()->traverse(this);
+
+    if (isAssignOperation(node->getOp())) {
+        // We should always be able to get an access chain for the left node.
+        assert(!current_object_.empty());
+
+        // If the left node object is 'precise', it is an initial precise object
+        // specified in the shader source. Adds it to the initial work list to
+        // process later.
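Both visitUnary above and visitBinary (continued below) funnel every assignment into symbol_definition_mapping_ keyed by the assignee's symbol label, so one symbol can own several defining nodes; the driver later walks them with equal_range. A sketch of that record-then-query shape with standard containers and a stand-in node type:

    #include <iostream>
    #include <string>
    #include <unordered_map>

    struct FakeAssignmentNode { int line; };  // stand-in for a glslang assignment node

    using DefinitionMap = std::unordered_multimap<std::string, FakeAssignmentNode*>;

    int main() {
        FakeAssignmentNode a{10}, b{42};
        DefinitionMap defs;
        // Two separate assignments to the same symbol are both recorded.
        defs.insert(std::make_pair(std::string("14(color)"), &a));
        defs.insert(std::make_pair(std::string("14(color)"), &b));

        // Later, all defining nodes of a symbol are visited with equal_range,
        // much like the propagation driver does with its work list.
        auto range = defs.equal_range("14(color)");
        for (auto it = range.first; it != range.second; ++it)
            std::cout << "definition at line " << it->second->line << "\n";
        return 0;
    }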
+ if (isPreciseObjectNode(node->getLeft())) { + // The left node is an 'precise' object node, add its access chain to + // the set of 'precise' objects. This is to collect the initial set + // of 'precise' objects. + precise_objects_.insert(current_object_); + } + // Gets the symbol ID from the object access chain, which should be the + // first element recorded in the access chain. + ObjectAccessChain id_symbol = getFrontElement(current_object_); + // Adds a mapping from the symbol ID to this assignment operation node. + symbol_definition_mapping_.insert(std::make_pair(id_symbol, node)); + + // Traverses the right node, there may be other 'assignment' + // operations in the right. + current_object_.clear(); + node->getRight()->traverse(this); + + } else if (isDereferenceOperation(node->getOp())) { + // The left node (parent node) is a struct type object. We need to + // record the access chain information of the current node into its + // object id. + if (node->getOp() == glslang::EOpIndexDirectStruct) { + unsigned struct_dereference_index = getStructIndexFromConstantUnion(node->getRight()); + current_object_.push_back(ObjectAccesschainDelimiter); + current_object_.append(std::to_string(struct_dereference_index)); + } + accesschain_mapping_[node] = current_object_; + + // For a dereference node, there is no need to traverse the right child + // node as the right node should always be an integer type object. + + } else { + // For other binary nodes, still traverse the right node. + current_object_.clear(); + node->getRight()->traverse(this); + } + return false; +} + +// Traverses the AST and returns a tuple of four members: +// 1) a mapping from symbol IDs to the definition nodes (aka. assignment nodes) of these symbols. +// 2) a mapping from object nodes in the AST to the access chains of these objects. +// 3) a set of access chains of precise objects. +// 4) a set of return nodes with precise expressions. +std::tuple +getSymbolToDefinitionMappingAndPreciseSymbolIDs(const glslang::TIntermediate& intermediate) +{ + auto result_tuple = std::make_tuple(NodeMapping(), AccessChainMapping(), ObjectAccesschainSet(), + ReturnBranchNodeSet()); + + TIntermNode* root = intermediate.getTreeRoot(); + if (root == 0) + return result_tuple; + + NodeMapping& symbol_definition_mapping = std::get<0>(result_tuple); + AccessChainMapping& accesschain_mapping = std::get<1>(result_tuple); + ObjectAccesschainSet& precise_objects = std::get<2>(result_tuple); + ReturnBranchNodeSet& precise_return_nodes = std::get<3>(result_tuple); + + // Traverses the AST and populate the results. + TSymbolDefinitionCollectingTraverser collector(&symbol_definition_mapping, &accesschain_mapping, + &precise_objects, &precise_return_nodes); + root->traverse(&collector); + + return result_tuple; +} + +// +// A traverser that determine whether the left node (or operand node for unary +// node) of an assignment node is 'precise', containing 'precise' or not, +// according to the access chain a given precise object which share the same +// symbol as the left node. +// +// Post-orderly traverses the left node subtree of an binary assignment node and: +// +// 1) Propagates the 'precise' from the left object nodes to this object node. +// +// 2) Builds object access chain along the traversal, and also compares with +// the access chain of the given 'precise' object along with the traversal to +// tell if the node to be defined is 'precise' or not. 
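The comparison this traverser performs is possible because access chains are strings: "the precise object is a prefix of the assignee" and "the assignee is a prefix of the precise object" are both plain find() == 0 tests. A standalone sketch of that three-way classification, reusing the illustrative chain format assumed earlier:

    #include <iostream>
    #include <string>

    // Returns 0 if the chains are unrelated, 1 if the assignee itself lies inside the
    // precise object (so it is precise), 2 if the assignee merely contains the precise object.
    int classify(const std::string& assignee, const std::string& precise_object) {
        if (assignee.find(precise_object) == 0)
            return 1;  // precise object is a prefix of the assignee
        if (precise_object.find(assignee) == 0)
            return 2;  // assignee is a prefix of the precise object
        return 0;      // unrelated chains
    }

    int main() {
        std::cout << classify("14(color)/2", "14(color)") << "\n";    // 1
        std::cout << classify("14(color)", "14(color)/2/0") << "\n";  // 2
        std::cout << classify("7(normal)", "14(color)") << "\n";      // 0
        return 0;
    }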
+// +class TNoContractionAssigneeCheckingTraverser : public glslang::TIntermTraverser { + + enum DecisionStatus { + // The object node to be assigned to may contain 'precise' objects and also not 'precise' objects. + Mixed = 0, + // The object node to be assigned to is either a 'precise' object or a struct objects whose members are all 'precise'. + Precise = 1, + // The object node to be assigned to is not a 'precise' object. + NotPreicse = 2, + }; + +public: + TNoContractionAssigneeCheckingTraverser(const AccessChainMapping& accesschain_mapping) + : TIntermTraverser(true, false, false), accesschain_mapping_(accesschain_mapping), + precise_object_(nullptr) {} + + // Checks the preciseness of a given assignment node with a precise object + // represented as access chain. The precise object shares the same symbol + // with the assignee of the given assignment node. Return a tuple of two: + // + // 1) The preciseness of the assignee node of this assignment node. True + // if the assignee contains 'precise' objects or is 'precise', false if + // the assignee is not 'precise' according to the access chain of the given + // precise object. + // + // 2) The incremental access chain from the assignee node to its nested + // 'precise' object, according to the access chain of the given precise + // object. This incremental access chain can be empty, which means the + // assignee is 'precise'. Otherwise it shows the path to the nested + // precise object. + std::tuple + getPrecisenessAndRemainedAccessChain(glslang::TIntermOperator* node, + const ObjectAccessChain& precise_object) + { + assert(isAssignOperation(node->getOp())); + precise_object_ = &precise_object; + ObjectAccessChain assignee_object; + if (glslang::TIntermBinary* BN = node->getAsBinaryNode()) { + // This is a binary assignment node, we need to check the + // preciseness of the left node. + assert(accesschain_mapping_.count(BN->getLeft())); + // The left node (assignee node) is an object node, traverse the + // node to let the 'precise' of nesting objects being transfered to + // nested objects. + BN->getLeft()->traverse(this); + // After traversing the left node, if the left node is 'precise', + // we can conclude this assignment should propagate 'precise'. + if (isPreciseObjectNode(BN->getLeft())) { + return make_tuple(true, ObjectAccessChain()); + } + // If the preciseness of the left node (assignee node) can not + // be determined by now, we need to compare the access chain string + // of the assignee object with the given precise object. + assignee_object = accesschain_mapping_.at(BN->getLeft()); + + } else if (glslang::TIntermUnary* UN = node->getAsUnaryNode()) { + // This is a unary assignment node, we need to check the + // preciseness of the operand node. For unary assignment node, the + // operand node should always be an object node. + assert(accesschain_mapping_.count(UN->getOperand())); + // Traverse the operand node to let the 'precise' being propagated + // from lower nodes to upper nodes. + UN->getOperand()->traverse(this); + // After traversing the operand node, if the operand node is + // 'precise', this assignment should propagate 'precise'. + if (isPreciseObjectNode(UN->getOperand())) { + return make_tuple(true, ObjectAccessChain()); + } + // If the preciseness of the operand node (assignee node) can not + // be determined by now, we need to compare the access chain string + // of the assignee object with the given precise object. 
+ assignee_object = accesschain_mapping_.at(UN->getOperand()); + } else { + // Not a binary or unary node, should not happen. + assert(false); + } + + // Compare the access chain string of the assignee node with the given + // precise object to determine if this assignment should propagate + // 'precise'. + if (assignee_object.find(precise_object) == 0) { + // The access chain string of the given precise object is a prefix + // of assignee's access chain string. The assignee should be + // 'precise'. + return make_tuple(true, ObjectAccessChain()); + } else if (precise_object.find(assignee_object) == 0) { + // The assignee's access chain string is a prefix of the given + // precise object, the assignee object contains 'precise' object, + // and we need to pass the remained access chain to the object nodes + // in the right. + return make_tuple(true, getSubAccessChainAfterPrefix(precise_object, assignee_object)); + } else { + // The access chain strings do not match, the assignee object can + // not be labeled as 'precise' according to the given precise + // object. + return make_tuple(false, ObjectAccessChain()); + } + } + +protected: + TNoContractionAssigneeCheckingTraverser& operator=(const TNoContractionAssigneeCheckingTraverser&); + + bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override; + void visitSymbol(glslang::TIntermSymbol* node) override; + + // A map from object nodes to their access chain string (used as object ID). + const AccessChainMapping& accesschain_mapping_; + // A given precise object, represented in it access chain string. This + // precise object is used to be compared with the assignee node to tell if + // the assignee node is 'precise', contains 'precise' object or not + // 'precise'. + const ObjectAccessChain* precise_object_; +}; + +// Visits a binary node. If the node is an object node, it must be a dereference +// node. In such cases, if the left node is 'precise', this node should also be +// 'precise'. +bool TNoContractionAssigneeCheckingTraverser::visitBinary(glslang::TVisit, + glslang::TIntermBinary* node) +{ + // Traverses the left so that we transfer the 'precise' from nesting object + // to its nested object. + node->getLeft()->traverse(this); + // If this binary node is an object node, we should have it in the + // accesschain_mapping_. + if (accesschain_mapping_.count(node)) { + // A binary object node must be a dereference node. + assert(isDereferenceOperation(node->getOp())); + // If the left node is 'precise', this node should also be precise, + // otherwise, compare with the given precise_object_. If the + // access chain of this node matches with the given precise_object_, + // this node should be marked as 'precise'. + if (isPreciseObjectNode(node->getLeft())) { + node->getWritableType().getQualifier().noContraction = true; + } else if (accesschain_mapping_.at(node) == *precise_object_) { + node->getWritableType().getQualifier().noContraction = true; + } + } + return false; +} + +// Visits a symbol node, if the symbol node ID (its access chain string) matches +// with the given precise object, this node should be 'precise'. +void TNoContractionAssigneeCheckingTraverser::visitSymbol(glslang::TIntermSymbol* node) +{ + // A symbol node should always be an object node, and should have been added + // to the map from object nodes to their access chain strings. 
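The (preciseness, remaining-chain) tuple returned above is what the driver loop later in this file consumes: the bool says whether the assignment is affected at all, and a non-empty string is the path from the assignee down to its nested precise member. A small interpretation sketch under those assumptions, with a hypothetical helper name:

    #include <iostream>
    #include <string>
    #include <tuple>

    // Interpret a (preciseness, remaining-chain) pair like the one produced by the checker above.
    void report(const std::tuple<bool, std::string>& result) {
        bool affected = std::get<0>(result);
        const std::string& remainder = std::get<1>(result);
        if (!affected)
            std::cout << "assignment does not touch the precise object\n";
        else if (remainder.empty())
            std::cout << "whole assignee is precise\n";
        else
            std::cout << "precise member lives at sub-chain: " << remainder << "\n";
    }

    int main() {
        report(std::make_tuple(false, std::string()));
        report(std::make_tuple(true, std::string()));
        report(std::make_tuple(true, std::string("2/0")));
        return 0;
    }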
+ assert(accesschain_mapping_.count(node)); + if (accesschain_mapping_.at(node) == *precise_object_) { + node->getWritableType().getQualifier().noContraction = true; + } +} + +// +// A traverser that only traverses the right side of binary assignment nodes +// and the operand node of unary assignment nodes. +// +// 1) Marks arithmetic operations as 'NoContraction'. +// +// 2) Find the object which should be marked as 'precise' in the right and +// update the 'precise' object work list. +// +class TNoContractionPropagator : public glslang::TIntermTraverser { +public: + TNoContractionPropagator(ObjectAccesschainSet* precise_objects, + const AccessChainMapping& accesschain_mapping) + : TIntermTraverser(true, false, false), + precise_objects_(*precise_objects), added_precise_object_ids_(), + remained_accesschain_(), accesschain_mapping_(accesschain_mapping) {} + + // Propagates 'precise' in the right nodes of a given assignment node with + // access chain record from the assignee node to a 'precise' object it + // contains. + void + propagateNoContractionInOneExpression(glslang::TIntermTyped* defining_node, + const ObjectAccessChain& assignee_remained_accesschain) + { + remained_accesschain_ = assignee_remained_accesschain; + if (glslang::TIntermBinary* BN = defining_node->getAsBinaryNode()) { + assert(isAssignOperation(BN->getOp())); + BN->getRight()->traverse(this); + if (isArithmeticOperation(BN->getOp())) { + BN->getWritableType().getQualifier().noContraction = true; + } + } else if (glslang::TIntermUnary* UN = defining_node->getAsUnaryNode()) { + assert(isAssignOperation(UN->getOp())); + UN->getOperand()->traverse(this); + if (isArithmeticOperation(UN->getOp())) { + UN->getWritableType().getQualifier().noContraction = true; + } + } + } + + // Propagates 'precise' in a given precise return node. + void propagateNoContractionInReturnNode(glslang::TIntermBranch* return_node) + { + remained_accesschain_ = ""; + assert(return_node->getFlowOp() == glslang::EOpReturn && return_node->getExpression()); + return_node->getExpression()->traverse(this); + } + +protected: + TNoContractionPropagator& operator=(const TNoContractionPropagator&); + + // Visits an aggregate node. The node can be a initializer list, in which + // case we need to find the 'precise' or 'precise' containing object node + // with the access chain record. In other cases, just need to traverse all + // the children nodes. + bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override + { + if (!remained_accesschain_.empty() && node->getOp() == glslang::EOpConstructStruct) { + // This is a struct initializer node, and the remained + // access chain is not empty, we need to refer to the + // assignee_remained_access_chain_ to find the nested + // 'precise' object. And we don't need to visit other nodes in this + // aggregate node. + + // Gets the struct dereference index that leads to 'precise' object. + ObjectAccessChain precise_accesschain_index_str = + getFrontElement(remained_accesschain_); + unsigned precise_accesschain_index = (unsigned)strtoul(precise_accesschain_index_str.c_str(), nullptr, 10); + // Gets the node pointed by the access chain index extracted before. + glslang::TIntermTyped* potential_precise_node = + node->getSequence()[precise_accesschain_index]->getAsTyped(); + assert(potential_precise_node); + // Pop the front access chain index from the path, and visit the nested node. 
+ { + ObjectAccessChain next_level_accesschain = + subAccessChainFromSecondElement(remained_accesschain_); + StateSettingGuard setup_remained_accesschain_for_next_level( + &remained_accesschain_, next_level_accesschain); + potential_precise_node->traverse(this); + } + return false; + } + return true; + } + + // Visits a binary node. A binary node can be an object node, e.g. a dereference node. + // As only the top object nodes in the right side of an assignment needs to be visited + // and added to 'precise' work list, this traverser won't visit the children nodes of + // an object node. If the binary node does not represent an object node, it should + // go on to traverse its children nodes and if it is an arithmetic operation node, this + // operation should be marked as 'noContraction'. + bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override + { + if (isDereferenceOperation(node->getOp())) { + // This binary node is an object node. Need to update the precise + // object set with the access chain of this node + remained + // access chain . + ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node); + if (remained_accesschain_.empty()) { + node->getWritableType().getQualifier().noContraction = true; + } else { + new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_; + } + // Cache the access chain as added precise object, so we won't add the + // same object to the work list again. + if (!added_precise_object_ids_.count(new_precise_accesschain)) { + precise_objects_.insert(new_precise_accesschain); + added_precise_object_ids_.insert(new_precise_accesschain); + } + // Only the upper-most object nodes should be visited, so do not + // visit children of this object node. + return false; + } + // If this is an arithmetic operation, marks this node as 'noContraction'. + if (isArithmeticOperation(node->getOp()) && node->getBasicType() != glslang::EbtInt) { + node->getWritableType().getQualifier().noContraction = true; + } + // As this node is not an object node, need to traverse the children nodes. + return true; + } + + // Visits a unary node. A unary node can not be an object node. If the operation + // is an arithmetic operation, need to mark this node as 'noContraction'. + bool visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node) override + { + // If this is an arithmetic operation, marks this with 'noContraction' + if (isArithmeticOperation(node->getOp())) { + node->getWritableType().getQualifier().noContraction = true; + } + return true; + } + + // Visits a symbol node. A symbol node is always an object node. So we + // should always be able to find its in our collected mapping from object + // nodes to access chains. As an object node, a symbol node can be either + // 'precise' or containing 'precise' objects according to unused + // access chain information we have when we visit this node. + void visitSymbol(glslang::TIntermSymbol* node) override + { + // Symbol nodes are object nodes and should always have an + // access chain collected before matches with it. + assert(accesschain_mapping_.count(node)); + ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node); + // If the unused access chain is empty, this symbol node should be + // marked as 'precise'. Otherwise, the unused access chain should be + // appended to the symbol ID to build a new access chain which points to + // the nested 'precise' object in this symbol object. 
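The binary visitor above and the symbol visitor that follows both grow the shared work list the same way: splice the unused remainder onto the node's own chain, then insert it, with a separate "already added" set guarding against queuing the same chain twice. The bookkeeping in isolation, with the delimiter again assumed to be '/':

    #include <iostream>
    #include <string>
    #include <unordered_set>

    int main() {
        std::unordered_set<std::string> work_list;       // precise objects still to process
        std::unordered_set<std::string> already_added;   // guards against duplicate inserts

        auto addPrecise = [&](std::string chain, const std::string& remainder) {
            if (!remainder.empty())
                chain += '/' + remainder;                // point at the nested precise member
            if (already_added.insert(chain).second)      // true only the first time
                work_list.insert(chain);
        };

        addPrecise("14(color)", "2");   // queued as "14(color)/2"
        addPrecise("14(color)", "2");   // duplicate, ignored
        addPrecise("7(normal)", "");    // the whole object is precise

        std::cout << work_list.size() << " chains queued\n";  // prints 2
        return 0;
    }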
+ if (remained_accesschain_.empty()) { + node->getWritableType().getQualifier().noContraction = true; + } else { + new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_; + } + // Add the new 'precise' access chain to the work list and make sure we + // don't visit it again. + if (!added_precise_object_ids_.count(new_precise_accesschain)) { + precise_objects_.insert(new_precise_accesschain); + added_precise_object_ids_.insert(new_precise_accesschain); + } + } + + // A set of precise objects, represented as access chains. + ObjectAccesschainSet& precise_objects_; + // Visited symbol nodes, should not revisit these nodes. + ObjectAccesschainSet added_precise_object_ids_; + // The left node of an assignment operation might be an parent of 'precise' objects. + // This means the left node might not be an 'precise' object node, but it may contains + // 'precise' qualifier which should be propagated to the corresponding child node in + // the right. So we need the path from the left node to its nested 'precise' node to + // tell us how to find the corresponding 'precise' node in the right. + ObjectAccessChain remained_accesschain_; + // A map from node pointers to their access chains. + const AccessChainMapping& accesschain_mapping_; +}; +} + +namespace glslang { + +void PropagateNoContraction(const glslang::TIntermediate& intermediate) +{ + // First, traverses the AST, records symbols with their defining operations + // and collects the initial set of precise symbols (symbol nodes that marked + // as 'noContraction') and precise return nodes. + auto mappings_and_precise_objects = + getSymbolToDefinitionMappingAndPreciseSymbolIDs(intermediate); + + // The mapping of symbol node IDs to their defining nodes. This enables us + // to get the defining node directly from a given symbol ID without + // traversing the tree again. + NodeMapping& symbol_definition_mapping = std::get<0>(mappings_and_precise_objects); + + // The mapping of object nodes to their access chains recorded. + AccessChainMapping& accesschain_mapping = std::get<1>(mappings_and_precise_objects); + + // The initial set of 'precise' objects which are represented as the + // access chain toward them. + ObjectAccesschainSet& precise_object_accesschains = std::get<2>(mappings_and_precise_objects); + + // The set of 'precise' return nodes. + ReturnBranchNodeSet& precise_return_nodes = std::get<3>(mappings_and_precise_objects); + + // Second, uses the initial set of precise objects as a work list, pops an + // access chain, extract the symbol ID from it. Then: + // 1) Check the assignee object, see if it is 'precise' object node or + // contains 'precise' object. Obtain the incremental access chain from the + // assignee node to its nested 'precise' node (if any). + // 2) If the assignee object node is 'precise' or it contains 'precise' + // objects, traverses the right side of the assignment operation + // expression to mark arithmetic operations as 'noContration' and update + // 'precise' access chain work list with new found object nodes. + // Repeat above steps until the work list is empty. 
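The comment above describes a fixed-point work list, and the code that follows implements it over the real glslang types; stripped down to standard containers, the control flow is roughly this (the processing step is a placeholder):

    #include <iostream>
    #include <set>
    #include <string>

    int main() {
        std::set<std::string> work_list = {"14(color)", "7(normal)/1"};
        int processed = 0;

        while (!work_list.empty()) {
            // Pop one precise object access chain.
            std::string chain = *work_list.begin();
            work_list.erase(work_list.begin());
            ++processed;

            // Placeholder for "visit all definitions of this symbol and mark ops";
            // real processing may insert newly discovered chains back into work_list,
            // which is why the loop re-checks emptiness rather than iterating once.
            if (chain == "14(color)")
                work_list.insert("3(tmp)");  // pretend the RHS exposed another precise object
        }
        std::cout << processed << " chains processed\n";  // prints 3
        return 0;
    }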
+ TNoContractionAssigneeCheckingTraverser checker(accesschain_mapping); + TNoContractionPropagator propagator(&precise_object_accesschains, accesschain_mapping); + + // We have two initial precise work lists to handle: + // 1) precise return nodes + // 2) precise object access chains + // We should process the precise return nodes first and the involved + // objects in the return expression should be added to the precise object + // access chain set. + while (!precise_return_nodes.empty()) { + glslang::TIntermBranch* precise_return_node = *precise_return_nodes.begin(); + propagator.propagateNoContractionInReturnNode(precise_return_node); + precise_return_nodes.erase(precise_return_node); + } + + while (!precise_object_accesschains.empty()) { + // Get the access chain of a precise object from the work list. + ObjectAccessChain precise_object_accesschain = *precise_object_accesschains.begin(); + // Get the symbol id from the access chain. + ObjectAccessChain symbol_id = getFrontElement(precise_object_accesschain); + // Get all the defining nodes of that symbol ID. + std::pair range = + symbol_definition_mapping.equal_range(symbol_id); + // Visits all the assignment nodes of that symbol ID and + // 1) Check if the assignee node is 'precise' or contains 'precise' + // objects. + // 2) Propagate the 'precise' to the top layer object nodes + // in the right side of the assignment operation, update the 'precise' + // work list with new access chains representing the new 'precise' + // objects, and mark arithmetic operations as 'noContraction'. + for (NodeMapping::iterator defining_node_iter = range.first; + defining_node_iter != range.second; defining_node_iter++) { + TIntermOperator* defining_node = defining_node_iter->second; + // Check the assignee node. + auto checker_result = checker.getPrecisenessAndRemainedAccessChain( + defining_node, precise_object_accesschain); + bool& contain_precise = std::get<0>(checker_result); + ObjectAccessChain& remained_accesschain = std::get<1>(checker_result); + // If the assignee node is 'precise' or contains 'precise', propagate the + // 'precise' to the right. Otherwise just skip this assignment node. + if (contain_precise) { + propagator.propagateNoContractionInOneExpression(defining_node, + remained_accesschain); + } + } + // Remove the last processed 'precise' object from the work list. + precise_object_accesschains.erase(precise_object_accesschain); + } +} +}; + +#endif // GLSLANG_WEB diff --git a/src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.h b/src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.h new file mode 100644 index 0000000..8521ad7 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/propagateNoContraction.h @@ -0,0 +1,55 @@ +// +// Copyright (C) 2015-2016 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// +// Visit the nodes in the glslang intermediate tree representation to +// propagate 'noContraction' qualifier. +// + +#pragma once + +#include "../Include/intermediate.h" + +namespace glslang { + +// Propagates the 'precise' qualifier for objects (objects marked with +// 'noContraction' qualifier) from the shader source specified 'precise' +// variables to all the involved objects, and add 'noContraction' qualifier for +// the involved arithmetic operations. +// Note that the same qualifier: 'noContraction' is used in both object nodes +// and arithmetic operation nodes, but has different meaning. For object nodes, +// 'noContraction' means the object is 'precise'; and for arithmetic operation +// nodes, it means the operation should not be contracted. +void PropagateNoContraction(const glslang::TIntermediate& intermediate); +}; diff --git a/src/third_party/glslang/glslang/MachineIndependent/reflection.cpp b/src/third_party/glslang/glslang/MachineIndependent/reflection.cpp new file mode 100644 index 0000000..0aabf37 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/reflection.cpp @@ -0,0 +1,1269 @@ +// +// Copyright (C) 2013-2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +#include "../Include/Common.h" +#include "reflection.h" +#include "LiveTraverser.h" +#include "localintermediate.h" + +#include "gl_types.h" + +// +// Grow the reflection database through a friend traverser class of TReflection and a +// collection of functions to do a liveness traversal that note what uniforms are used +// in semantically non-dead code. +// +// Can be used multiple times, once per stage, to grow a program reflection. +// +// High-level algorithm for one stage: +// +// 1. Put the entry point on the list of live functions. +// +// 2. Traverse any live function, while skipping if-tests with a compile-time constant +// condition of false, and while adding any encountered function calls to the live +// function list. +// +// Repeat until the live function list is empty. +// +// 3. Add any encountered uniform variables and blocks to the reflection database. +// +// Can be attempted with a failed link, but will return false if recursion had been detected, or +// there wasn't exactly one entry point. +// + +namespace glslang { + +// +// The traverser: mostly pass through, except +// - processing binary nodes to see if they are dereferences of an aggregates to track +// - processing symbol nodes to see if they are non-aggregate objects to track +// +// This ignores semantically dead code by using TLiveTraverser. +// +// This is in the glslang namespace directly so it can be a friend of TReflection. +// + +class TReflectionTraverser : public TIntermTraverser { +public: + TReflectionTraverser(const TIntermediate& i, TReflection& r) : + TIntermTraverser(), intermediate(i), reflection(r), updateStageMasks(true) { } + + virtual bool visitBinary(TVisit, TIntermBinary* node); + virtual void visitSymbol(TIntermSymbol* base); + + // Add a simple reference to a uniform variable to the uniform database, no dereference involved. + // However, no dereference doesn't mean simple... it could be a complex aggregate. + void addUniform(const TIntermSymbol& base) + { + if (processedDerefs.find(&base) == processedDerefs.end()) { + processedDerefs.insert(&base); + + int blockIndex = -1; + int offset = -1; + TList derefs; + TString baseName = base.getName(); + + if (base.getType().getBasicType() == EbtBlock) { + offset = 0; + bool anonymous = IsAnonymous(baseName); + const TString& blockName = base.getType().getTypeName(); + + if (!anonymous) + baseName = blockName; + else + baseName = ""; + + blockIndex = addBlockName(blockName, base.getType(), intermediate.getBlockSize(base.getType())); + } + + // Use a degenerate (empty) set of dereferences to immediately put as at the end of + // the dereference change expected by blowUpActiveAggregate. 
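The header comment of this reflection file describes a liveness pass: seed a work list with the entry point, traverse each live function, and append any functions it calls until nothing is left, so only uniforms reachable from live code get reflected. A toy version of that loop over a made-up call graph:

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        // Hypothetical call graph: function name -> functions it calls.
        std::map<std::string, std::vector<std::string>> calls = {
            {"main", {"shade"}}, {"shade", {"fresnel"}}, {"fresnel", {}}, {"unusedHelper", {}}};

        std::set<std::string> live;
        std::vector<std::string> work_list = {"main"};   // 1. start from the entry point
        while (!work_list.empty()) {                     // 2. traverse live functions
            std::string fn = work_list.back();
            work_list.pop_back();
            if (!live.insert(fn).second)
                continue;                                // already visited
            for (const std::string& callee : calls[fn])
                work_list.push_back(callee);             // newly encountered calls become live
        }
        std::cout << live.size() << " live functions\n"; // prints 3; "unusedHelper" stays dead
        return 0;
    }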
+ blowUpActiveAggregate(base.getType(), baseName, derefs, derefs.end(), offset, blockIndex, 0, -1, 0, + base.getQualifier().storage, updateStageMasks); + } + } + + void addPipeIOVariable(const TIntermSymbol& base) + { + if (processedDerefs.find(&base) == processedDerefs.end()) { + processedDerefs.insert(&base); + + const TString &name = base.getName(); + const TType &type = base.getType(); + const bool input = base.getQualifier().isPipeInput(); + + TReflection::TMapIndexToReflection &ioItems = + input ? reflection.indexToPipeInput : reflection.indexToPipeOutput; + + + TReflection::TNameToIndex &ioMapper = + input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex; + + if (reflection.options & EShReflectionUnwrapIOBlocks) { + bool anonymous = IsAnonymous(name); + + TString baseName; + if (type.getBasicType() == EbtBlock) { + baseName = anonymous ? TString() : type.getTypeName(); + } else { + baseName = anonymous ? TString() : name; + } + + // by convention if this is an arrayed block we ignore the array in the reflection + if (type.isArray() && type.getBasicType() == EbtBlock) { + blowUpIOAggregate(input, baseName, TType(type, 0)); + } else { + blowUpIOAggregate(input, baseName, type); + } + } else { + TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str()); + if (it == ioMapper.end()) { + // seperate pipe i/o params from uniforms and blocks + // in is only for input in first stage as out is only for last stage. check traverse in call stack. + ioMapper[name.c_str()] = static_cast(ioItems.size()); + ioItems.push_back( + TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0)); + EShLanguageMask& stages = ioItems.back().stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } else { + EShLanguageMask& stages = ioItems[it->second].stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } + } + } + } + + // Lookup or calculate the offset of all block members at once, using the recursively + // defined block offset rules. + void getOffsets(const TType& type, TVector& offsets) + { + const TTypeList& memberList = *type.getStruct(); + int memberSize = 0; + int offset = 0; + + for (size_t m = 0; m < offsets.size(); ++m) { + // if the user supplied an offset, snap to it now + if (memberList[m].type->getQualifier().hasOffset()) + offset = memberList[m].type->getQualifier().layoutOffset; + + // calculate the offset of the next member and align the current offset to this member + intermediate.updateOffset(type, *memberList[m].type, offset, memberSize); + + // save the offset of this member + offsets[m] = offset; + + // update for the next member + offset += memberSize; + } + } + + // Calculate the stride of an array type + int getArrayStride(const TType& baseType, const TType& type) + { + int dummySize; + int stride; + + // consider blocks to have 0 stride, so that all offsets are relative to the start of their block + if (type.getBasicType() == EbtBlock) + return 0; + + TLayoutMatrix subMatrixLayout = type.getQualifier().layoutMatrix; + intermediate.getMemberAlignment(type, dummySize, stride, + baseType.getQualifier().layoutPacking, + subMatrixLayout != ElmNone + ? subMatrixLayout == ElmRowMajor + : baseType.getQualifier().layoutMatrix == ElmRowMajor); + + return stride; + } + + // count the total number of leaf members from iterating out of a block type + int countAggregateMembers(const TType& parentType) + { + if (! 
parentType.isStruct()) + return 1; + + const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix); + + bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer); + + const TTypeList &memberList = *parentType.getStruct(); + + int ret = 0; + + for (size_t i = 0; i < memberList.size(); i++) + { + const TType &memberType = *memberList[i].type; + int numMembers = countAggregateMembers(memberType); + // for sized arrays of structs, apply logic to expand out the same as we would below in + // blowUpActiveAggregate + if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) { + if (! strictArraySuffix || ! blockParent) + numMembers *= memberType.getArraySizes()->getCumulativeSize(); + } + ret += numMembers; + } + + return ret; + } + + // Traverse the provided deref chain, including the base, and + // - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity + // - recursively expand any variable array index in the middle of that traversal + // - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity + // + // arraySize tracks, just for the final dereference in the chain, if there was a specific known size. + // A value of 0 for arraySize will mean to use the full array's size. + void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList& derefs, + TList::const_iterator deref, int offset, int blockIndex, int arraySize, + int topLevelArraySize, int topLevelArrayStride, TStorageQualifier baseStorage, bool active) + { + // when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query. + // Broadly: + // * arrays-of-structs always have a [x] suffix. + // * with array-of-struct variables in the root of a buffer block, only ever return [0]. + // * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array. + const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix); + + // is this variable inside a buffer block. This flag is set back to false after we iterate inside the first array element. 
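countAggregateMembers above reports how many reflection-granularity leaves a block type expands into, expanding sized arrays of structs by their element count. The same recursion on a self-contained toy type description (the type model here is invented for illustration):

    #include <iostream>
    #include <vector>

    // Toy stand-in for a shader type: either a leaf (scalar/vector/...) or a struct,
    // optionally an array of 'arraySize' elements.
    struct ToyType {
        std::vector<ToyType> members;   // empty -> leaf
        int arraySize = 1;              // 1 -> not an array
    };

    int countLeaves(const ToyType& t) {
        if (t.members.empty())
            return 1;                                   // a leaf counts once
        int total = 0;
        for (const ToyType& m : t.members) {
            int n = countLeaves(m);
            if (!m.members.empty())
                n *= m.arraySize;                       // expand sized arrays of structs
            total += n;
        }
        return total;
    }

    int main() {
        // struct Light { vec3 pos; vec4 color; };  uniform Block { Light lights[2]; float bias; };
        ToyType light{{ToyType{}, ToyType{}}, 1};
        ToyType block{{ToyType{light.members, 2}, ToyType{}}, 1};
        std::cout << countLeaves(block) << " leaf members\n";  // 2*2 + 1 = 5
        return 0;
    }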
+ bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer); + + // process the part of the dereference chain that was explicit in the shader + TString name = baseName; + const TType* terminalType = &baseType; + for (; deref != derefs.end(); ++deref) { + TIntermBinary* visitNode = *deref; + terminalType = &visitNode->getType(); + int index; + switch (visitNode->getOp()) { + case EOpIndexIndirect: { + int stride = getArrayStride(baseType, visitNode->getLeft()->getType()); + + if (topLevelArrayStride == 0) + topLevelArrayStride = stride; + + // Visit all the indices of this array, and for each one add on the remaining dereferencing + for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) { + TString newBaseName = name; + if (terminalType->getBasicType() == EbtBlock) {} + else if (strictArraySuffix && blockParent) + newBaseName.append(TString("[0]")); + else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) + newBaseName.append(TString("[") + String(i) + "]"); + TList::const_iterator nextDeref = deref; + ++nextDeref; + blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize, + topLevelArraySize, topLevelArrayStride, baseStorage, active); + + if (offset >= 0) + offset += stride; + } + + // it was all completed in the recursive calls above + return; + } + case EOpIndexDirect: { + int stride = getArrayStride(baseType, visitNode->getLeft()->getType()); + + index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (terminalType->getBasicType() == EbtBlock) {} + else if (strictArraySuffix && blockParent) + name.append(TString("[0]")); + else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) { + name.append(TString("[") + String(index) + "]"); + + if (offset >= 0) + offset += stride * index; + } + + if (topLevelArrayStride == 0) + topLevelArrayStride = stride; + + // expand top-level arrays in blocks with [0] suffix + if (topLevelArrayStride != 0 && visitNode->getLeft()->getType().isArray()) { + blockParent = false; + } + break; + } + case EOpIndexDirectStruct: + index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst(); + if (offset >= 0) + offset += intermediate.getOffset(visitNode->getLeft()->getType(), index); + if (name.size() > 0) + name.append("."); + name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName()); + + // expand non top-level arrays with [x] suffix + if (visitNode->getLeft()->getType().getBasicType() != EbtBlock && terminalType->isArray()) + { + blockParent = false; + } + break; + default: + break; + } + } + + // if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it... + if (! 
isReflectionGranularity(*terminalType)) { + // the base offset of this node, that children are relative to + int baseOffset = offset; + + if (terminalType->isArray()) { + // Visit all the indices of this array, and for each one, + // fully explode the remaining aggregate to dereference + + int stride = 0; + if (offset >= 0) + stride = getArrayStride(baseType, *terminalType); + + int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1); + + // for top-level arrays in blocks, only expand [0] to avoid explosion of items + if ((strictArraySuffix && blockParent) || + ((topLevelArraySize == arrayIterateSize) && (topLevelArrayStride == 0))) { + arrayIterateSize = 1; + } + + if (topLevelArrayStride == 0) + topLevelArrayStride = stride; + + for (int i = 0; i < arrayIterateSize; ++i) { + TString newBaseName = name; + if (terminalType->getBasicType() != EbtBlock) + newBaseName.append(TString("[") + String(i) + "]"); + TType derefType(*terminalType, 0); + if (offset >= 0) + offset = baseOffset + stride * i; + + blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0, + topLevelArraySize, topLevelArrayStride, baseStorage, active); + } + } else { + // Visit all members of this aggregate, and for each one, + // fully explode the remaining aggregate to dereference + const TTypeList& typeList = *terminalType->getStruct(); + + TVector memberOffsets; + + if (baseOffset >= 0) { + memberOffsets.resize(typeList.size()); + getOffsets(*terminalType, memberOffsets); + } + + for (int i = 0; i < (int)typeList.size(); ++i) { + TString newBaseName = name; + if (newBaseName.size() > 0) + newBaseName.append("."); + newBaseName.append(typeList[i].type->getFieldName()); + TType derefType(*terminalType, i); + if (offset >= 0) + offset = baseOffset + memberOffsets[i]; + + int arrayStride = topLevelArrayStride; + if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer && + derefType.isArray()) { + arrayStride = getArrayStride(baseType, derefType); + } + + if (topLevelArraySize == -1 && arrayStride == 0 && blockParent) + topLevelArraySize = 1; + + if (strictArraySuffix && blockParent) { + // if this member is an array, store the top-level array stride but start the explosion from + // the inner struct type. + if (derefType.isArray() && derefType.isStruct()) { + newBaseName.append("[0]"); + auto dimSize = derefType.isUnsizedArray() ? 0 : derefType.getArraySizes()->getDimSize(0); + blowUpActiveAggregate(TType(derefType, 0), newBaseName, derefs, derefs.end(), memberOffsets[i], + blockIndex, 0, dimSize, arrayStride, terminalType->getQualifier().storage, false); + } + else if (derefType.isArray()) { + auto dimSize = derefType.isUnsizedArray() ? 
0 : derefType.getArraySizes()->getDimSize(0); + blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex, + 0, dimSize, 0, terminalType->getQualifier().storage, false); + } + else { + blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), memberOffsets[i], blockIndex, + 0, 1, 0, terminalType->getQualifier().storage, false); + } + } else { + blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0, + topLevelArraySize, arrayStride, baseStorage, active); + } + } + } + + // it was all completed in the recursive calls above + return; + } + + if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) { + name.append(TString("[0]")); + } + + // Finally, add a full string to the reflection database, and update the array size if necessary. + // If the dereferenced entity to record is an array, compute the size and update the maximum size. + + // there might not be a final array dereference, it could have been copied as an array object + if (arraySize == 0) + arraySize = mapToGlArraySize(*terminalType); + + TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage); + + TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str()); + if (it == reflection.nameToIndex.end()) { + int uniformIndex = (int)variables.size(); + reflection.nameToIndex[name.c_str()] = uniformIndex; + variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType), + arraySize, blockIndex)); + if (terminalType->isArray()) { + variables.back().arrayStride = getArrayStride(baseType, *terminalType); + if (topLevelArrayStride == 0) + topLevelArrayStride = variables.back().arrayStride; + } + + if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic()) + reflection.atomicCounterUniformIndices.push_back(uniformIndex); + + variables.back().topLevelArraySize = topLevelArraySize; + variables.back().topLevelArrayStride = topLevelArrayStride; + + if ((reflection.options & EShReflectionAllBlockVariables) && active) { + EShLanguageMask& stages = variables.back().stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } + } else { + if (arraySize > 1) { + int& reflectedArraySize = variables[it->second].size; + reflectedArraySize = std::max(arraySize, reflectedArraySize); + } + + if ((reflection.options & EShReflectionAllBlockVariables) && active) { + EShLanguageMask& stages = variables[it->second].stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } + } + } + + // similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow. + void blowUpIOAggregate(bool input, const TString &baseName, const TType &type) + { + TString name = baseName; + + // if the type is still too coarse a granularity, this is still an aggregate to expand, expand it... + if (! 
isReflectionGranularity(type)) { + if (type.isArray()) { + // Visit all the indices of this array, and for each one, + // fully explode the remaining aggregate to dereference + for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) { + TString newBaseName = name; + newBaseName.append(TString("[") + String(i) + "]"); + TType derefType(type, 0); + + blowUpIOAggregate(input, newBaseName, derefType); + } + } else { + // Visit all members of this aggregate, and for each one, + // fully explode the remaining aggregate to dereference + const TTypeList& typeList = *type.getStruct(); + + for (int i = 0; i < (int)typeList.size(); ++i) { + TString newBaseName = name; + if (newBaseName.size() > 0) + newBaseName.append("."); + newBaseName.append(typeList[i].type->getFieldName()); + TType derefType(type, i); + + blowUpIOAggregate(input, newBaseName, derefType); + } + } + + // it was all completed in the recursive calls above + return; + } + + if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) { + name.append(TString("[0]")); + } + + TReflection::TMapIndexToReflection &ioItems = + input ? reflection.indexToPipeInput : reflection.indexToPipeOutput; + + std::string namespacedName = input ? "in " : "out "; + namespacedName += name.c_str(); + + TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName); + if (it == reflection.nameToIndex.end()) { + reflection.nameToIndex[namespacedName] = (int)ioItems.size(); + ioItems.push_back( + TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0)); + + EShLanguageMask& stages = ioItems.back().stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } else { + EShLanguageMask& stages = ioItems[it->second].stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } + } + + // Add a uniform dereference where blocks/struct/arrays are involved in the access. + // Handles the situation where the left node is at the correct or too coarse a + // granularity for reflection. (That is, further dereferences up the tree will be + // skipped.) Earlier dereferences, down the tree, will be handled + // at the same time, and logged to prevent reprocessing as the tree is traversed. + // + // Note: Other things like the following must be caught elsewhere: + // - a simple non-array, non-struct variable (no dereference even conceivable) + // - an aggregrate consumed en masse, without a dereference + // + // So, this code is for cases like + // - a struct/block dereferencing a member (whether the member is array or not) + // - an array of struct + // - structs/arrays containing the above + // + void addDereferencedUniform(TIntermBinary* topNode) + { + // See if too fine-grained to process (wait to get further down the tree) + const TType& leftType = topNode->getLeft()->getType(); + if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray()) + return; + + // We have an array or structure or block dereference, see if it's a uniform + // based dereference (if not, skip it). + TIntermSymbol* base = findBase(topNode); + if (! base || ! 
base->getQualifier().isUniformOrBuffer()) + return; + + // See if we've already processed this (e.g., in the middle of something + // we did earlier), and if so skip it + if (processedDerefs.find(topNode) != processedDerefs.end()) + return; + + // Process this uniform dereference + + int offset = -1; + int blockIndex = -1; + bool anonymous = false; + + // See if we need to record the block itself + bool block = base->getBasicType() == EbtBlock; + if (block) { + offset = 0; + anonymous = IsAnonymous(base->getName()); + + const TString& blockName = base->getType().getTypeName(); + TString baseName; + + if (! anonymous) + baseName = blockName; + + blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType())); + + if (reflection.options & EShReflectionAllBlockVariables) { + // Use a degenerate (empty) set of dereferences to immediately put as at the end of + // the dereference change expected by blowUpActiveAggregate. + TList derefs; + + // otherwise - if we're not using strict array suffix rules, or this isn't a block so we are + // expanding root arrays anyway, just start the iteration from the base block type. + blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, -1, 0, + base->getQualifier().storage, false); + } + } + + // Process the dereference chain, backward, accumulating the pieces for later forward traversal. + // If the topNode is a reflection-granularity-array dereference, don't include that last dereference. + TList derefs; + for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) { + if (isReflectionGranularity(visitNode->getLeft()->getType())) + continue; + + derefs.push_front(visitNode); + processedDerefs.insert(visitNode); + } + processedDerefs.insert(base); + + // See if we have a specific array size to stick to while enumerating the explosion of the aggregate + int arraySize = 0; + if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) { + if (topNode->getOp() == EOpIndexDirect) + arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1; + } + + // Put the dereference chain together, forward + TString baseName; + if (! 
anonymous) { + if (block) + baseName = base->getType().getTypeName(); + else + baseName = base->getName(); + } + blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, -1, 0, + base->getQualifier().storage, true); + } + + int addBlockName(const TString& name, const TType& type, int size) + { + int blockIndex = 0; + if (type.isArray()) { + TType derefType(type, 0); + for (int e = 0; e < type.getOuterArraySize(); ++e) { + int memberBlockIndex = addBlockName(name + "[" + String(e) + "]", derefType, size); + if (e == 0) + blockIndex = memberBlockIndex; + } + } else { + TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage); + + TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str()); + if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) { + blockIndex = (int)blocks.size(); + reflection.nameToIndex[name.c_str()] = blockIndex; + blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, blockIndex)); + + blocks.back().numMembers = countAggregateMembers(type); + + EShLanguageMask& stages = blocks.back().stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } + else { + blockIndex = it->second; + + EShLanguageMask& stages = blocks[blockIndex].stages; + stages = static_cast(stages | 1 << intermediate.getStage()); + } + } + + return blockIndex; + } + + // Are we at a level in a dereference chain at which individual active uniform queries are made? + bool isReflectionGranularity(const TType& type) + { + return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays(); + } + + // For a binary operation indexing into an aggregate, chase down the base of the aggregate. + // Return 0 if the topology does not fit this situation. + TIntermSymbol* findBase(const TIntermBinary* node) + { + TIntermSymbol *base = node->getLeft()->getAsSymbolNode(); + if (base) + return base; + TIntermBinary* left = node->getLeft()->getAsBinaryNode(); + if (! left) + return nullptr; + + return findBase(left); + } + + // + // Translate a glslang sampler type into the GL API #define number. + // + int mapSamplerToGlType(TSampler sampler) + { + if (! sampler.image) { + // a sampler... + switch (sampler.type) { + case EbtFloat: + switch ((int)sampler.dim) { + case Esd1D: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D; + case true: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW; + } + case Esd2D: + switch ((int)sampler.ms) { + case false: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D; + case true: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW; + } + case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE; + } + case Esd3D: + return GL_SAMPLER_3D; + case EsdCube: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE; + case true: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW; + } + case EsdRect: + return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT; + case EsdBuffer: + return GL_SAMPLER_BUFFER; + } + case EbtFloat16: + switch ((int)sampler.dim) { + case Esd1D: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? 
GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD; + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD; + } + case Esd2D: + switch ((int)sampler.ms) { + case false: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD; + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD; + } + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD; + } + case Esd3D: + return GL_FLOAT16_SAMPLER_3D_AMD; + case EsdCube: + switch ((int)sampler.shadow) { + case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD; + case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD; + } + case EsdRect: + return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD; + case EsdBuffer: + return GL_FLOAT16_SAMPLER_BUFFER_AMD; + } + case EbtInt: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D; + case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY + : GL_INT_SAMPLER_2D_MULTISAMPLE; + } + case Esd3D: + return GL_INT_SAMPLER_3D; + case EsdCube: + return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE; + case EsdRect: + return GL_INT_SAMPLER_2D_RECT; + case EsdBuffer: + return GL_INT_SAMPLER_BUFFER; + } + case EbtUint: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D; + case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY + : GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE; + } + case Esd3D: + return GL_UNSIGNED_INT_SAMPLER_3D; + case EsdCube: + return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE; + case EsdRect: + return GL_UNSIGNED_INT_SAMPLER_2D_RECT; + case EsdBuffer: + return GL_UNSIGNED_INT_SAMPLER_BUFFER; + } + default: + return 0; + } + } else { + // an image... + switch (sampler.type) { + case EbtFloat: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D; + case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE; + } + case Esd3D: + return GL_IMAGE_3D; + case EsdCube: + return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE; + case EsdRect: + return GL_IMAGE_2D_RECT; + case EsdBuffer: + return GL_IMAGE_BUFFER; + } + case EbtFloat16: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD; + case true: return sampler.arrayed ? 
GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD; + } + case Esd3D: + return GL_FLOAT16_IMAGE_3D_AMD; + case EsdCube: + return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD; + case EsdRect: + return GL_FLOAT16_IMAGE_2D_RECT_AMD; + case EsdBuffer: + return GL_FLOAT16_IMAGE_BUFFER_AMD; + } + case EbtInt: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D; + case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE; + } + case Esd3D: + return GL_INT_IMAGE_3D; + case EsdCube: + return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE; + case EsdRect: + return GL_INT_IMAGE_2D_RECT; + case EsdBuffer: + return GL_INT_IMAGE_BUFFER; + } + case EbtUint: + switch ((int)sampler.dim) { + case Esd1D: + return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D; + case Esd2D: + switch ((int)sampler.ms) { + case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D; + case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY + : GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE; + } + case Esd3D: + return GL_UNSIGNED_INT_IMAGE_3D; + case EsdCube: + return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE; + case EsdRect: + return GL_UNSIGNED_INT_IMAGE_2D_RECT; + case EsdBuffer: + return GL_UNSIGNED_INT_IMAGE_BUFFER; + } + default: + return 0; + } + } + } + + // + // Translate a glslang type into the GL API #define number. + // Ignores arrayness. + // + int mapToGlType(const TType& type) + { + switch (type.getBasicType()) { + case EbtSampler: + return mapSamplerToGlType(type.getSampler()); + case EbtStruct: + case EbtBlock: + case EbtVoid: + return 0; + default: + break; + } + + if (type.isVector()) { + int offset = type.getVectorSize() - 2; + switch (type.getBasicType()) { + case EbtFloat: return GL_FLOAT_VEC2 + offset; + case EbtDouble: return GL_DOUBLE_VEC2 + offset; + case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset; + case EbtInt: return GL_INT_VEC2 + offset; + case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset; + case EbtInt64: return GL_INT64_ARB + offset; + case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset; + case EbtBool: return GL_BOOL_VEC2 + offset; + case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset; + default: return 0; + } + } + if (type.isMatrix()) { + switch (type.getBasicType()) { + case EbtFloat: + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT_MAT2; + case 3: return GL_FLOAT_MAT2x3; + case 4: return GL_FLOAT_MAT2x4; + default: return 0; + } + case 3: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT_MAT3x2; + case 3: return GL_FLOAT_MAT3; + case 4: return GL_FLOAT_MAT3x4; + default: return 0; + } + case 4: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT_MAT4x2; + case 3: return GL_FLOAT_MAT4x3; + case 4: return GL_FLOAT_MAT4; + default: return 0; + } + } + case EbtDouble: + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: return GL_DOUBLE_MAT2; + case 3: return GL_DOUBLE_MAT2x3; + case 4: return GL_DOUBLE_MAT2x4; + default: return 0; + } + case 3: + switch (type.getMatrixRows()) { + case 2: return GL_DOUBLE_MAT3x2; + case 3: return GL_DOUBLE_MAT3; + 
case 4: return GL_DOUBLE_MAT3x4; + default: return 0; + } + case 4: + switch (type.getMatrixRows()) { + case 2: return GL_DOUBLE_MAT4x2; + case 3: return GL_DOUBLE_MAT4x3; + case 4: return GL_DOUBLE_MAT4; + default: return 0; + } + } + case EbtFloat16: + switch (type.getMatrixCols()) { + case 2: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT16_MAT2_AMD; + case 3: return GL_FLOAT16_MAT2x3_AMD; + case 4: return GL_FLOAT16_MAT2x4_AMD; + default: return 0; + } + case 3: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT16_MAT3x2_AMD; + case 3: return GL_FLOAT16_MAT3_AMD; + case 4: return GL_FLOAT16_MAT3x4_AMD; + default: return 0; + } + case 4: + switch (type.getMatrixRows()) { + case 2: return GL_FLOAT16_MAT4x2_AMD; + case 3: return GL_FLOAT16_MAT4x3_AMD; + case 4: return GL_FLOAT16_MAT4_AMD; + default: return 0; + } + } + default: + return 0; + } + } + if (type.getVectorSize() == 1) { + switch (type.getBasicType()) { + case EbtFloat: return GL_FLOAT; + case EbtDouble: return GL_DOUBLE; + case EbtFloat16: return GL_FLOAT16_NV; + case EbtInt: return GL_INT; + case EbtUint: return GL_UNSIGNED_INT; + case EbtInt64: return GL_INT64_ARB; + case EbtUint64: return GL_UNSIGNED_INT64_ARB; + case EbtBool: return GL_BOOL; + case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER; + default: return 0; + } + } + + return 0; + } + + int mapToGlArraySize(const TType& type) + { + return type.isArray() ? type.getOuterArraySize() : 1; + } + + const TIntermediate& intermediate; + TReflection& reflection; + std::set processedDerefs; + bool updateStageMasks; + +protected: + TReflectionTraverser(TReflectionTraverser&); + TReflectionTraverser& operator=(TReflectionTraverser&); +}; + +// +// Implement the traversal functions of interest. +// + +// To catch dereferenced aggregates that must be reflected. +// This catches them at the highest level possible in the tree. +bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node) +{ + switch (node->getOp()) { + case EOpIndexDirect: + case EOpIndexIndirect: + case EOpIndexDirectStruct: + addDereferencedUniform(node); + break; + default: + break; + } + + // still need to visit everything below, which could contain sub-expressions + // containing different uniforms + return true; +} + +// To reflect non-dereferenced objects. +void TReflectionTraverser::visitSymbol(TIntermSymbol* base) +{ + if (base->getQualifier().storage == EvqUniform) { + if (base->getBasicType() == EbtBlock) { + if (reflection.options & EShReflectionSharedStd140UBO) { + addUniform(*base); + } + } else { + addUniform(*base); + } + } + + // #TODO add std140/layout active rules for ssbo, same with ubo. + // Storage buffer blocks will be collected and expanding in this part. + if((reflection.options & EShReflectionSharedStd140SSBO) && + (base->getQualifier().storage == EvqBuffer && base->getBasicType() == EbtBlock && + (base->getQualifier().layoutPacking == ElpStd140 || base->getQualifier().layoutPacking == ElpShared))) + addUniform(*base); + + if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) || + (intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput())) + addPipeIOVariable(*base); +} + +// +// Implement TObjectReflection methods. 
+// + +TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType, + int pSize, int pIndex) + : name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1), + numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone()) +{ +} + +int TObjectReflection::getBinding() const +{ + if (type == nullptr || !type->getQualifier().hasBinding()) + return -1; + return type->getQualifier().layoutBinding; +} + +void TObjectReflection::dump() const +{ + printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size, + index, getBinding(), stages); + + if (counterIndex != -1) + printf(", counter %d", counterIndex); + + if (numMembers != -1) + printf(", numMembers %d", numMembers); + + if (arrayStride != 0) + printf(", arrayStride %d", arrayStride); + + if (topLevelArrayStride != 0) + printf(", topLevelArrayStride %d", topLevelArrayStride); + + printf("\n"); +} + +// +// Implement TReflection methods. +// + +// Track any required attribute reflection, such as compute shader numthreads. +// +void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate) +{ + if (stage == EShLangCompute) { + // Remember thread dimensions + for (int dim=0; dim<3; ++dim) + localSize[dim] = intermediate.getLocalSize(dim); + } +} + +// build counter block index associations for buffers +void TReflection::buildCounterIndices(const TIntermediate& intermediate) +{ +#ifdef ENABLE_HLSL + // search for ones that have counters + for (int i = 0; i < int(indexToUniformBlock.size()); ++i) { + const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str()); + const int index = getIndex(counterName); + + if (index >= 0) + indexToUniformBlock[i].counterIndex = index; + } +#endif +} + +// build Shader Stages mask for all uniforms +void TReflection::buildUniformStageMask(const TIntermediate& intermediate) +{ + if (options & EShReflectionAllBlockVariables) + return; + + for (int i = 0; i < int(indexToUniform.size()); ++i) { + indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage()); + } + + for (int i = 0; i < int(indexToBufferVariable.size()); ++i) { + indexToBufferVariable[i].stages = + static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage()); + } +} + +// Merge live symbols from 'intermediate' into the existing reflection database. +// +// Returns false if the input is too malformed to do this.
+bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate) +{ + if (intermediate.getTreeRoot() == nullptr || + intermediate.getNumEntryPoints() != 1 || + intermediate.isRecursive()) + return false; + + buildAttributeReflection(stage, intermediate); + + TReflectionTraverser it(intermediate, *this); + + for (auto& sequnence : intermediate.getTreeRoot()->getAsAggregate()->getSequence()) { + if (sequnence->getAsAggregate() != nullptr) { + if (sequnence->getAsAggregate()->getOp() == glslang::EOpLinkerObjects) { + it.updateStageMasks = false; + TIntermAggregate* linkerObjects = sequnence->getAsAggregate(); + for (auto& sequnence : linkerObjects->getSequence()) { + auto pNode = sequnence->getAsSymbolNode(); + if (pNode != nullptr) { + if ((pNode->getQualifier().storage == EvqUniform && + (options & EShReflectionSharedStd140UBO)) || + (pNode->getQualifier().storage == EvqBuffer && + (options & EShReflectionSharedStd140SSBO))) { + // collect std140 and shared uniform blocks from the AST + if ((pNode->getBasicType() == EbtBlock) && + ((pNode->getQualifier().layoutPacking == ElpStd140) || + (pNode->getQualifier().layoutPacking == ElpShared))) { + pNode->traverse(&it); + } + } + else if ((options & EShReflectionAllIOVariables) && + (pNode->getQualifier().isPipeInput() || pNode->getQualifier().isPipeOutput())) + { + pNode->traverse(&it); + } + } + } + } else { + // This traverser visits every function in the AST. + // To reflect uncalled functions as well, set the link message EShMsgKeepUncalled. + // When EShMsgKeepUncalled is set, all functions are kept in the AST, even uncalled ones, + // so uniform variables referenced only from uncalled functions still appear in the reflection. + // + // To reflect only live nodes, use the default link messages or leave EShMsgKeepUncalled unset; + // the linker then drops uncalled functions from the AST, so traversing every remaining + // function is equivalent to traversing only the live ones.
+ it.updateStageMasks = true; + sequnence->getAsAggregate()->traverse(&it); + } + } + } + it.updateStageMasks = true; + + buildCounterIndices(intermediate); + buildUniformStageMask(intermediate); + + return true; +} + +void TReflection::dump() +{ + printf("Uniform reflection:\n"); + for (size_t i = 0; i < indexToUniform.size(); ++i) + indexToUniform[i].dump(); + printf("\n"); + + printf("Uniform block reflection:\n"); + for (size_t i = 0; i < indexToUniformBlock.size(); ++i) + indexToUniformBlock[i].dump(); + printf("\n"); + + printf("Buffer variable reflection:\n"); + for (size_t i = 0; i < indexToBufferVariable.size(); ++i) + indexToBufferVariable[i].dump(); + printf("\n"); + + printf("Buffer block reflection:\n"); + for (size_t i = 0; i < indexToBufferBlock.size(); ++i) + indexToBufferBlock[i].dump(); + printf("\n"); + + printf("Pipeline input reflection:\n"); + for (size_t i = 0; i < indexToPipeInput.size(); ++i) + indexToPipeInput[i].dump(); + printf("\n"); + + printf("Pipeline output reflection:\n"); + for (size_t i = 0; i < indexToPipeOutput.size(); ++i) + indexToPipeOutput[i].dump(); + printf("\n"); + + if (getLocalSize(0) > 1) { + static const char* axis[] = { "X", "Y", "Z" }; + + for (int dim=0; dim<3; ++dim) + if (getLocalSize(dim) > 1) + printf("Local size %s: %u\n", axis[dim], getLocalSize(dim)); + + printf("\n"); + } + + // printf("Live names\n"); + // for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it) + // printf("%s: %d\n", it->first.c_str(), it->second); + // printf("\n"); +} + +} // end namespace glslang + +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE diff --git a/src/third_party/glslang/glslang/MachineIndependent/reflection.h b/src/third_party/glslang/glslang/MachineIndependent/reflection.h new file mode 100644 index 0000000..5af4467 --- /dev/null +++ b/src/third_party/glslang/glslang/MachineIndependent/reflection.h @@ -0,0 +1,223 @@ +// +// Copyright (C) 2013-2016 LunarG, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +#ifndef _REFLECTION_INCLUDED +#define _REFLECTION_INCLUDED + +#include "../Public/ShaderLang.h" +#include "../Include/Types.h" + +#include +#include + +// +// A reflection database and its interface, consistent with the OpenGL API reflection queries. +// + +namespace glslang { + +class TIntermediate; +class TIntermAggregate; +class TReflectionTraverser; + +// The full reflection database +class TReflection { +public: + TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last) + : options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection()) + { + for (int dim=0; dim<3; ++dim) + localSize[dim] = 0; + } + + virtual ~TReflection() {} + + // grow the reflection stage by stage + bool addStage(EShLanguage, const TIntermediate&); + + // for mapping a uniform index to a uniform object's description + int getNumUniforms() { return (int)indexToUniform.size(); } + const TObjectReflection& getUniform(int i) const + { + if (i >= 0 && i < (int)indexToUniform.size()) + return indexToUniform[i]; + else + return badReflection; + } + + // for mapping a block index to the block's description + int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); } + const TObjectReflection& getUniformBlock(int i) const + { + if (i >= 0 && i < (int)indexToUniformBlock.size()) + return indexToUniformBlock[i]; + else + return badReflection; + } + + // for mapping an pipeline input index to the input's description + int getNumPipeInputs() { return (int)indexToPipeInput.size(); } + const TObjectReflection& getPipeInput(int i) const + { + if (i >= 0 && i < (int)indexToPipeInput.size()) + return indexToPipeInput[i]; + else + return badReflection; + } + + // for mapping an pipeline output index to the output's description + int getNumPipeOutputs() { return (int)indexToPipeOutput.size(); } + const TObjectReflection& getPipeOutput(int i) const + { + if (i >= 0 && i < (int)indexToPipeOutput.size()) + return indexToPipeOutput[i]; + else + return badReflection; + } + + // for mapping from an atomic counter to the uniform index + int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); } + const TObjectReflection& getAtomicCounter(int i) const + { + if (i >= 0 && i < (int)atomicCounterUniformIndices.size()) + return getUniform(atomicCounterUniformIndices[i]); + else + return badReflection; + } + + // for mapping a buffer variable index to a buffer variable object's description + int getNumBufferVariables() { return (int)indexToBufferVariable.size(); } + const TObjectReflection& getBufferVariable(int i) const + { + if (i >= 0 && i < (int)indexToBufferVariable.size()) + return indexToBufferVariable[i]; + else + return badReflection; + } + + // for mapping a storage block index to the storage block's description + int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); } + const TObjectReflection& getStorageBufferBlock(int i) const + { + if (i >= 0 && i < (int)indexToBufferBlock.size()) + return indexToBufferBlock[i]; + else + return badReflection; + } + + // for mapping any name to its index (block names, uniform names and input/output names) + int getIndex(const char* name) const + { + TNameToIndex::const_iterator it = nameToIndex.find(name); + if (it == nameToIndex.end()) + return -1; + else + return it->second; + } + + // see getIndex(const char*) + int getIndex(const TString& name) const { return getIndex(name.c_str()); } + + + // for mapping any name to 
its index (only pipe input/output names) + int getPipeIOIndex(const char* name, const bool inOrOut) const + { + TNameToIndex::const_iterator it = inOrOut ? pipeInNameToIndex.find(name) : pipeOutNameToIndex.find(name); + if (it == (inOrOut ? pipeInNameToIndex.end() : pipeOutNameToIndex.end())) + return -1; + else + return it->second; + } + + // see gePipeIOIndex(const char*, const bool) + int getPipeIOIndex(const TString& name, const bool inOrOut) const { return getPipeIOIndex(name.c_str(), inOrOut); } + + // Thread local size + unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; } + + void dump(); + +protected: + friend class glslang::TReflectionTraverser; + + void buildCounterIndices(const TIntermediate&); + void buildUniformStageMask(const TIntermediate& intermediate); + void buildAttributeReflection(EShLanguage, const TIntermediate&); + + // Need a TString hash: typedef std::unordered_map TNameToIndex; + typedef std::map TNameToIndex; + typedef std::vector TMapIndexToReflection; + typedef std::vector TIndices; + + TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage) + { + if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer) + return indexToBufferBlock; + return indexToUniformBlock; + } + TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage) + { + if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer) + return indexToBufferVariable; + return indexToUniform; + } + + EShReflectionOptions options; + + EShLanguage firstStage; + EShLanguage lastStage; + + TObjectReflection badReflection; // return for queries of -1 or generally out of range; has expected descriptions with in it for this + TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed + TNameToIndex pipeInNameToIndex; // maps pipe in names to indexes, this is a fix to seperate pipe I/O from uniforms and buffers. + TNameToIndex pipeOutNameToIndex; // maps pipe out names to indexes, this is a fix to seperate pipe I/O from uniforms and buffers. + TMapIndexToReflection indexToUniform; + TMapIndexToReflection indexToUniformBlock; + TMapIndexToReflection indexToBufferVariable; + TMapIndexToReflection indexToBufferBlock; + TMapIndexToReflection indexToPipeInput; + TMapIndexToReflection indexToPipeOutput; + TIndices atomicCounterUniformIndices; + + unsigned int localSize[3]; +}; + +} // end namespace glslang + +#endif // _REFLECTION_INCLUDED + +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE diff --git a/src/third_party/glslang/glslang/OSDependent/Unix/ossource.cpp b/src/third_party/glslang/glslang/OSDependent/Unix/ossource.cpp new file mode 100644 index 0000000..3f029f0 --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/Unix/ossource.cpp @@ -0,0 +1,207 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// +// This file contains the Linux-specific functions +// +#include "../osinclude.h" +#include "../../../OGLCompilersDLL/InitializeDll.h" + +#include +#include +#include +#include +#include +#include +#include + +#if !defined(__Fuchsia__) +#include +#endif + +namespace glslang { + +// +// Thread cleanup +// + +// +// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects +// the cleanup routine to return void. +// +static void DetachThreadLinux(void *) +{ + DetachThread(); +} + +// +// Registers cleanup handler, sets cancel type and state, and executes the thread specific +// cleanup handler. This function will be called in the Standalone.cpp for regression +// testing. When OpenGL applications are run with the driver code, Linux OS does the +// thread cleanup. +// +void OS_CleanupThreadData(void) +{ +#if defined(__ANDROID__) || defined(__Fuchsia__) + DetachThreadLinux(NULL); +#else + int old_cancel_state, old_cancel_type; + void *cleanupArg = NULL; + + // + // Set thread cancel state and push cleanup handler. + // + pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state); + pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg); + + // + // Put the thread in deferred cancellation mode. + // + pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type); + + // + // Pop cleanup handler and execute it prior to unregistering the cleanup handler. + // + pthread_cleanup_pop(1); + + // + // Restore the thread's previous cancellation mode. + // + pthread_setcanceltype(old_cancel_state, NULL); +#endif +} + +// +// Thread Local Storage Operations +// +inline OS_TLSIndex PthreadKeyToTLSIndex(pthread_key_t key) +{ + return (OS_TLSIndex)((uintptr_t)key + 1); +} + +inline pthread_key_t TLSIndexToPthreadKey(OS_TLSIndex nIndex) +{ + return (pthread_key_t)((uintptr_t)nIndex - 1); +} + +OS_TLSIndex OS_AllocTLSIndex() +{ + pthread_key_t pPoolIndex; + + // + // Create global pool key. + // + if ((pthread_key_create(&pPoolIndex, NULL)) != 0) { + assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage"); + return OS_INVALID_TLS_INDEX; + } + else + return PthreadKeyToTLSIndex(pPoolIndex); +} + +bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue) +{ + if (nIndex == OS_INVALID_TLS_INDEX) { + assert(0 && "OS_SetTLSValue(): Invalid TLS Index"); + return false; + } + + if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0) + return true; + else + return false; +} + +void* OS_GetTLSValue(OS_TLSIndex nIndex) +{ + // + // This function should return 0 if nIndex is invalid. 
+ // + assert(nIndex != OS_INVALID_TLS_INDEX); + return pthread_getspecific(TLSIndexToPthreadKey(nIndex)); +} + +bool OS_FreeTLSIndex(OS_TLSIndex nIndex) +{ + if (nIndex == OS_INVALID_TLS_INDEX) { + assert(0 && "OS_SetTLSValue(): Invalid TLS Index"); + return false; + } + + // + // Delete the global pool key. + // + if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0) + return true; + else + return false; +} + +namespace { + pthread_mutex_t gMutex; +} + +void InitGlobalLock() +{ + pthread_mutexattr_t mutexattr; + pthread_mutexattr_init(&mutexattr); + pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&gMutex, &mutexattr); +} + +void GetGlobalLock() +{ + pthread_mutex_lock(&gMutex); +} + +void ReleaseGlobalLock() +{ + pthread_mutex_unlock(&gMutex); +} + +// #define DUMP_COUNTERS + +void OS_DumpMemoryCounters() +{ +#ifdef DUMP_COUNTERS + struct rusage usage; + + if (getrusage(RUSAGE_SELF, &usage) == 0) + printf("Working set size: %ld\n", usage.ru_maxrss * 1024); +#else + printf("Recompile with DUMP_COUNTERS defined to see counters.\n"); +#endif +} + +} // end namespace glslang diff --git a/src/third_party/glslang/glslang/OSDependent/Web/glslang.after.js b/src/third_party/glslang/glslang/OSDependent/Web/glslang.after.js new file mode 100644 index 0000000..c2cfc35 --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/Web/glslang.after.js @@ -0,0 +1,26 @@ +export default (() => { + const initialize = () => { + return new Promise(resolve => { + Module({ + locateFile() { + const i = import.meta.url.lastIndexOf('/') + return import.meta.url.substring(0, i) + '/glslang.wasm'; + }, + onRuntimeInitialized() { + resolve({ + compileGLSLZeroCopy: this.compileGLSLZeroCopy, + compileGLSL: this.compileGLSL, + }); + }, + }); + }); + }; + + let instance; + return () => { + if (!instance) { + instance = initialize(); + } + return instance; + }; +})(); diff --git a/src/third_party/glslang/glslang/OSDependent/Web/glslang.js.cpp b/src/third_party/glslang/glslang/OSDependent/Web/glslang.js.cpp new file mode 100644 index 0000000..f2306a6 --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/Web/glslang.js.cpp @@ -0,0 +1,287 @@ +// +// Copyright (C) 2019 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include +#include +#include + +#ifdef __EMSCRIPTEN__ +#include +#endif + +#include "../../../SPIRV/GlslangToSpv.h" +#include "../../../glslang/Public/ShaderLang.h" + +#ifndef __EMSCRIPTEN__ +#define EMSCRIPTEN_KEEPALIVE +#endif + +const TBuiltInResource DefaultTBuiltInResource = { + /* .MaxLights = */ 32, + /* .MaxClipPlanes = */ 6, + /* .MaxTextureUnits = */ 32, + /* .MaxTextureCoords = */ 32, + /* .MaxVertexAttribs = */ 64, + /* .MaxVertexUniformComponents = */ 4096, + /* .MaxVaryingFloats = */ 64, + /* .MaxVertexTextureImageUnits = */ 32, + /* .MaxCombinedTextureImageUnits = */ 80, + /* .MaxTextureImageUnits = */ 32, + /* .MaxFragmentUniformComponents = */ 4096, + /* .MaxDrawBuffers = */ 32, + /* .MaxVertexUniformVectors = */ 128, + /* .MaxVaryingVectors = */ 8, + /* .MaxFragmentUniformVectors = */ 16, + /* .MaxVertexOutputVectors = */ 16, + /* .MaxFragmentInputVectors = */ 15, + /* .MinProgramTexelOffset = */ -8, + /* .MaxProgramTexelOffset = */ 7, + /* .MaxClipDistances = */ 8, + /* .MaxComputeWorkGroupCountX = */ 65535, + /* .MaxComputeWorkGroupCountY = */ 65535, + /* .MaxComputeWorkGroupCountZ = */ 65535, + /* .MaxComputeWorkGroupSizeX = */ 1024, + /* .MaxComputeWorkGroupSizeY = */ 1024, + /* .MaxComputeWorkGroupSizeZ = */ 64, + /* .MaxComputeUniformComponents = */ 1024, + /* .MaxComputeTextureImageUnits = */ 16, + /* .MaxComputeImageUniforms = */ 8, + /* .MaxComputeAtomicCounters = */ 8, + /* .MaxComputeAtomicCounterBuffers = */ 1, + /* .MaxVaryingComponents = */ 60, + /* .MaxVertexOutputComponents = */ 64, + /* .MaxGeometryInputComponents = */ 64, + /* .MaxGeometryOutputComponents = */ 128, + /* .MaxFragmentInputComponents = */ 128, + /* .MaxImageUnits = */ 8, + /* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8, + /* .MaxCombinedShaderOutputResources = */ 8, + /* .MaxImageSamples = */ 0, + /* .MaxVertexImageUniforms = */ 0, + /* .MaxTessControlImageUniforms = */ 0, + /* .MaxTessEvaluationImageUniforms = */ 0, + /* .MaxGeometryImageUniforms = */ 0, + /* .MaxFragmentImageUniforms = */ 8, + /* .MaxCombinedImageUniforms = */ 8, + /* .MaxGeometryTextureImageUnits = */ 16, + /* .MaxGeometryOutputVertices = */ 256, + /* .MaxGeometryTotalOutputComponents = */ 1024, + /* .MaxGeometryUniformComponents = */ 1024, + /* .MaxGeometryVaryingComponents = */ 64, + /* .MaxTessControlInputComponents = */ 128, + /* .MaxTessControlOutputComponents = */ 128, + /* .MaxTessControlTextureImageUnits = */ 16, + /* .MaxTessControlUniformComponents = */ 1024, + /* .MaxTessControlTotalOutputComponents = */ 4096, + /* .MaxTessEvaluationInputComponents = */ 128, + /* .MaxTessEvaluationOutputComponents = */ 128, + /* .MaxTessEvaluationTextureImageUnits = */ 16, + /* .MaxTessEvaluationUniformComponents = */ 1024, + /* .MaxTessPatchComponents = */ 120, + /* .MaxPatchVertices = */ 32, + /* .MaxTessGenLevel = */ 64, + /* .MaxViewports = */ 16, + /* .MaxVertexAtomicCounters = */ 0, + /* .MaxTessControlAtomicCounters = */ 0, + /* .MaxTessEvaluationAtomicCounters = */ 0, + 
/* .MaxGeometryAtomicCounters = */ 0, + /* .MaxFragmentAtomicCounters = */ 8, + /* .MaxCombinedAtomicCounters = */ 8, + /* .MaxAtomicCounterBindings = */ 1, + /* .MaxVertexAtomicCounterBuffers = */ 0, + /* .MaxTessControlAtomicCounterBuffers = */ 0, + /* .MaxTessEvaluationAtomicCounterBuffers = */ 0, + /* .MaxGeometryAtomicCounterBuffers = */ 0, + /* .MaxFragmentAtomicCounterBuffers = */ 1, + /* .MaxCombinedAtomicCounterBuffers = */ 1, + /* .MaxAtomicCounterBufferSize = */ 16384, + /* .MaxTransformFeedbackBuffers = */ 4, + /* .MaxTransformFeedbackInterleavedComponents = */ 64, + /* .MaxCullDistances = */ 8, + /* .MaxCombinedClipAndCullDistances = */ 8, + /* .MaxSamples = */ 4, + /* .maxMeshOutputVerticesNV = */ 256, + /* .maxMeshOutputPrimitivesNV = */ 512, + /* .maxMeshWorkGroupSizeX_NV = */ 32, + /* .maxMeshWorkGroupSizeY_NV = */ 1, + /* .maxMeshWorkGroupSizeZ_NV = */ 1, + /* .maxTaskWorkGroupSizeX_NV = */ 32, + /* .maxTaskWorkGroupSizeY_NV = */ 1, + /* .maxTaskWorkGroupSizeZ_NV = */ 1, + /* .maxMeshViewCountNV = */ 4, + /* .maxDualSourceDrawBuffersEXT = */ 1, + + /* .limits = */ { + /* .nonInductiveForLoops = */ 1, + /* .whileLoops = */ 1, + /* .doWhileLoops = */ 1, + /* .generalUniformIndexing = */ 1, + /* .generalAttributeMatrixVectorIndexing = */ 1, + /* .generalVaryingIndexing = */ 1, + /* .generalSamplerIndexing = */ 1, + /* .generalVariableIndexing = */ 1, + /* .generalConstantMatrixVectorIndexing = */ 1, + }}; + +static bool initialized = false; + +extern "C" { + +/* + * Takes in a GLSL shader as a string and converts it to SPIR-V in binary form. + * + * |glsl| Null-terminated string containing the shader to be converted. + * |stage_int| Magic number indicating the type of shader being processed. +* Legal values are as follows: + * Vertex = 0 + * Fragment = 4 + * Compute = 5 + * |gen_debug| Flag to indicate if debug information should be generated. + * |spirv| Output parameter for a pointer to the resulting SPIR-V data. + * |spirv_len| Output parameter for the length of the output binary buffer. + * + * Returns a void* pointer which, if not null, must be destroyed by + * destroy_output_buffer.o. (This is not the same pointer returned in |spirv|.) + * If null, the compilation failed. 
+ */ +EMSCRIPTEN_KEEPALIVE +void* convert_glsl_to_spirv(const char* glsl, + int stage_int, + bool gen_debug, + glslang::EShTargetLanguageVersion spirv_version, + uint32_t** spirv, + size_t* spirv_len) +{ + if (glsl == nullptr) { + fprintf(stderr, "Input pointer null\n"); + return nullptr; + } + if (spirv == nullptr || spirv_len == nullptr) { + fprintf(stderr, "Output pointer null\n"); + return nullptr; + } + *spirv = nullptr; + *spirv_len = 0; + + if (stage_int != 0 && stage_int != 4 && stage_int != 5) { + fprintf(stderr, "Invalid shader stage\n"); + return nullptr; + } + EShLanguage stage = static_cast(stage_int); + switch (spirv_version) { + case glslang::EShTargetSpv_1_0: + case glslang::EShTargetSpv_1_1: + case glslang::EShTargetSpv_1_2: + case glslang::EShTargetSpv_1_3: + case glslang::EShTargetSpv_1_4: + case glslang::EShTargetSpv_1_5: + break; + default: + fprintf(stderr, "Invalid SPIR-V version number\n"); + return nullptr; + } + + if (!initialized) { + glslang::InitializeProcess(); + initialized = true; + } + + glslang::TShader shader(stage); + shader.setStrings(&glsl, 1); + shader.setEnvInput(glslang::EShSourceGlsl, stage, glslang::EShClientVulkan, 100); + shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_0); + shader.setEnvTarget(glslang::EShTargetSpv, spirv_version); + if (!shader.parse(&DefaultTBuiltInResource, 100, true, EShMsgDefault)) { + fprintf(stderr, "Parse failed\n"); + fprintf(stderr, "%s\n", shader.getInfoLog()); + return nullptr; + } + + glslang::TProgram program; + program.addShader(&shader); + if (!program.link(EShMsgDefault)) { + fprintf(stderr, "Link failed\n"); + fprintf(stderr, "%s\n", program.getInfoLog()); + return nullptr; + } + + glslang::SpvOptions spvOptions; + spvOptions.generateDebugInfo = gen_debug; + spvOptions.optimizeSize = false; + spvOptions.disassemble = false; + spvOptions.validate = false; + + std::vector* output = new std::vector; + glslang::GlslangToSpv(*program.getIntermediate(stage), *output, nullptr, &spvOptions); + + *spirv_len = output->size(); + *spirv = output->data(); + return output; +} + +/* + * Destroys a buffer created by convert_glsl_to_spirv + */ +EMSCRIPTEN_KEEPALIVE +void destroy_output_buffer(void* p) +{ + delete static_cast*>(p); +} + +} // extern "C" + +/* + * For non-Emscripten builds we supply a generic main, so that the glslang.js + * build target can generate an executable with a trivial use case instead of + * generating a WASM binary. This is done so that there is a target that can be + * built and output analyzed using desktop tools, since WASM binaries are + * specific to the Emscripten toolchain. 
+ */ +#ifndef __EMSCRIPTEN__ +int main() { + const char* input = R"(#version 310 es + +void main() { })"; + + uint32_t* output; + size_t output_len; + + void* id = convert_glsl_to_spirv(input, 4, false, glslang::EShTargetSpv_1_0, &output, &output_len); + assert(output != nullptr); + assert(output_len != 0); + destroy_output_buffer(id); + return 0; +} +#endif // ifndef __EMSCRIPTEN__ diff --git a/src/third_party/glslang/glslang/OSDependent/Web/glslang.pre.js b/src/third_party/glslang/glslang/OSDependent/Web/glslang.pre.js new file mode 100644 index 0000000..46a5695 --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/Web/glslang.pre.js @@ -0,0 +1,56 @@ +Module['compileGLSLZeroCopy'] = function(glsl, shader_stage, gen_debug, spirv_version) { + gen_debug = !!gen_debug; + + var shader_stage_int; // EShLanguage + switch (shader_stage) { + case 'vertex': shader_stage_int = 0; break; + case 'fragment': shader_stage_int = 4; break; + case 'compute': shader_stage_int = 5; break; + default: + throw new Error("shader_stage must be 'vertex', 'fragment', or 'compute'."); + } + + spirv_version = spirv_version || '1.0'; + var spirv_version_int; // EShTargetLanguageVersion + switch (spirv_version) { + case '1.0': spirv_version_int = (1 << 16) | (0 << 8); break; + case '1.1': spirv_version_int = (1 << 16) | (1 << 8); break; + case '1.2': spirv_version_int = (1 << 16) | (2 << 8); break; + case '1.3': spirv_version_int = (1 << 16) | (3 << 8); break; + case '1.4': spirv_version_int = (1 << 16) | (4 << 8); break; + case '1.5': spirv_version_int = (1 << 16) | (5 << 8); break; + default: + throw new Error("spirv_version must be '1.0' ~ '1.5'."); + } + + var p_output = Module['_malloc'](4); + var p_output_len = Module['_malloc'](4); + var id = ccall('convert_glsl_to_spirv', + 'number', + ['string', 'number', 'boolean', 'number', 'number', 'number'], + [glsl, shader_stage_int, gen_debug, spirv_version_int, p_output, p_output_len]); + var output = getValue(p_output, 'i32'); + var output_len = getValue(p_output_len, 'i32'); + Module['_free'](p_output); + Module['_free'](p_output_len); + + if (id === 0) { + throw new Error('GLSL compilation failed'); + } + + var ret = {}; + var outputIndexU32 = output / 4; + ret['data'] = Module['HEAPU32'].subarray(outputIndexU32, outputIndexU32 + output_len); + ret['free'] = function() { + Module['_destroy_output_buffer'](id); + }; + + return ret; +}; + +Module['compileGLSL'] = function(glsl, shader_stage, gen_debug, spirv_version) { + var compiled = Module['compileGLSLZeroCopy'](glsl, shader_stage, gen_debug, spirv_version); + var ret = compiled['data'].slice() + compiled['free'](); + return ret; +}; diff --git a/src/third_party/glslang/glslang/OSDependent/Windows/main.cpp b/src/third_party/glslang/glslang/OSDependent/Windows/main.cpp new file mode 100644 index 0000000..0bcde7b --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/Windows/main.cpp @@ -0,0 +1,74 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. 
+// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#include "InitializeDll.h" + +#define STRICT +#define VC_EXTRALEAN 1 +#include +#include + +BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) +{ + switch (fdwReason) + { + case DLL_PROCESS_ATTACH: + + if (! glslang::InitProcess()) + return FALSE; + break; + case DLL_THREAD_ATTACH: + + if (! glslang::InitThread()) + return FALSE; + break; + + case DLL_THREAD_DETACH: + + if (! glslang::DetachThread()) + return FALSE; + break; + + case DLL_PROCESS_DETACH: + + glslang::DetachProcess(); + break; + + default: + assert(0 && "DllMain(): Reason for calling DLL Main is unknown"); + return FALSE; + } + + return TRUE; +} diff --git a/src/third_party/glslang/glslang/OSDependent/Windows/ossource.cpp b/src/third_party/glslang/glslang/OSDependent/Windows/ossource.cpp new file mode 100644 index 0000000..870840c --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/Windows/ossource.cpp @@ -0,0 +1,147 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// + +#include "../osinclude.h" + +#define STRICT +#define VC_EXTRALEAN 1 +#include +#include +#include +#include +#include +#include + +// +// This file contains the Window-OS-specific functions +// + +#if !(defined(_WIN32) || defined(_WIN64)) +#error Trying to build a windows specific file in a non windows build. +#endif + +namespace glslang { + +inline OS_TLSIndex ToGenericTLSIndex (DWORD handle) +{ + return (OS_TLSIndex)((uintptr_t)handle + 1); +} + +inline DWORD ToNativeTLSIndex (OS_TLSIndex nIndex) +{ + return (DWORD)((uintptr_t)nIndex - 1); +} + +// +// Thread Local Storage Operations +// +OS_TLSIndex OS_AllocTLSIndex() +{ + DWORD dwIndex = TlsAlloc(); + if (dwIndex == TLS_OUT_OF_INDEXES) { + assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage"); + return OS_INVALID_TLS_INDEX; + } + + return ToGenericTLSIndex(dwIndex); +} + +bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue) +{ + if (nIndex == OS_INVALID_TLS_INDEX) { + assert(0 && "OS_SetTLSValue(): Invalid TLS Index"); + return false; + } + + if (TlsSetValue(ToNativeTLSIndex(nIndex), lpvValue)) + return true; + else + return false; +} + +void* OS_GetTLSValue(OS_TLSIndex nIndex) +{ + assert(nIndex != OS_INVALID_TLS_INDEX); + return TlsGetValue(ToNativeTLSIndex(nIndex)); +} + +bool OS_FreeTLSIndex(OS_TLSIndex nIndex) +{ + if (nIndex == OS_INVALID_TLS_INDEX) { + assert(0 && "OS_SetTLSValue(): Invalid TLS Index"); + return false; + } + + if (TlsFree(ToNativeTLSIndex(nIndex))) + return true; + else + return false; +} + +HANDLE GlobalLock; + +void InitGlobalLock() +{ + GlobalLock = CreateMutex(0, false, 0); +} + +void GetGlobalLock() +{ + WaitForSingleObject(GlobalLock, INFINITE); +} + +void ReleaseGlobalLock() +{ + ReleaseMutex(GlobalLock); +} + +unsigned int __stdcall EnterGenericThread (void* entry) +{ + return ((TThreadEntrypoint)entry)(0); +} + +//#define DUMP_COUNTERS + +void OS_DumpMemoryCounters() +{ +#ifdef DUMP_COUNTERS + PROCESS_MEMORY_COUNTERS counters; + GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)); + printf("Working set size: %d\n", counters.WorkingSetSize); +#else + printf("Recompile with DUMP_COUNTERS defined to see counters.\n"); +#endif +} + +} // namespace glslang diff --git a/src/third_party/glslang/glslang/OSDependent/osinclude.h b/src/third_party/glslang/glslang/OSDependent/osinclude.h new file mode 100644 index 0000000..218abe4 --- /dev/null +++ b/src/third_party/glslang/glslang/OSDependent/osinclude.h @@ -0,0 +1,63 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +#ifndef __OSINCLUDE_H +#define __OSINCLUDE_H + +namespace glslang { + +// +// Thread Local Storage Operations +// +typedef void* OS_TLSIndex; +#define OS_INVALID_TLS_INDEX ((void*)0) + +OS_TLSIndex OS_AllocTLSIndex(); +bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue); +bool OS_FreeTLSIndex(OS_TLSIndex nIndex); +void* OS_GetTLSValue(OS_TLSIndex nIndex); + +void InitGlobalLock(); +void GetGlobalLock(); +void ReleaseGlobalLock(); + +typedef unsigned int (*TThreadEntrypoint)(void*); + +void OS_CleanupThreadData(void); + +void OS_DumpMemoryCounters(); + +} // end namespace glslang + +#endif // __OSINCLUDE_H diff --git a/src/third_party/glslang/glslang/Public/ShaderLang.h b/src/third_party/glslang/glslang/Public/ShaderLang.h new file mode 100644 index 0000000..273f156 --- /dev/null +++ b/src/third_party/glslang/glslang/Public/ShaderLang.h @@ -0,0 +1,948 @@ +// +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. +// Copyright (C) 2013-2016 LunarG, Inc. +// Copyright (C) 2015-2018 Google, Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +#ifndef _COMPILER_INTERFACE_INCLUDED_ +#define _COMPILER_INTERFACE_INCLUDED_ + +#include "../Include/ResourceLimits.h" +#include "../MachineIndependent/Versions.h" + +#include +#include + +#ifdef _WIN32 + #define C_DECL __cdecl +#else + #define C_DECL +#endif + +#ifdef GLSLANG_IS_SHARED_LIBRARY + #ifdef _WIN32 + #ifdef GLSLANG_EXPORTING + #define GLSLANG_EXPORT __declspec(dllexport) + #else + #define GLSLANG_EXPORT __declspec(dllimport) + #endif + #elif __GNUC__ >= 4 + #define GLSLANG_EXPORT __attribute__((visibility("default"))) + #endif +#endif // GLSLANG_IS_SHARED_LIBRARY + +#ifndef GLSLANG_EXPORT +#define GLSLANG_EXPORT +#endif + +// +// This is the platform independent interface between an OGL driver +// and the shading language compiler/linker. +// + +#ifdef __cplusplus + extern "C" { +#endif + +// +// Call before doing any other compiler/linker operations. +// +// (Call once per process, not once per thread.) +// +GLSLANG_EXPORT int ShInitialize(); + +// +// Call this at process shutdown to clean up memory. +// +GLSLANG_EXPORT int ShFinalize(); + +// +// Types of languages the compiler can consume. +// +typedef enum { + EShLangVertex, + EShLangTessControl, + EShLangTessEvaluation, + EShLangGeometry, + EShLangFragment, + EShLangCompute, + EShLangRayGen, + EShLangRayGenNV = EShLangRayGen, + EShLangIntersect, + EShLangIntersectNV = EShLangIntersect, + EShLangAnyHit, + EShLangAnyHitNV = EShLangAnyHit, + EShLangClosestHit, + EShLangClosestHitNV = EShLangClosestHit, + EShLangMiss, + EShLangMissNV = EShLangMiss, + EShLangCallable, + EShLangCallableNV = EShLangCallable, + EShLangTaskNV, + EShLangMeshNV, + LAST_ELEMENT_MARKER(EShLangCount), +} EShLanguage; // would be better as stage, but this is ancient now + +typedef enum : unsigned { + EShLangVertexMask = (1 << EShLangVertex), + EShLangTessControlMask = (1 << EShLangTessControl), + EShLangTessEvaluationMask = (1 << EShLangTessEvaluation), + EShLangGeometryMask = (1 << EShLangGeometry), + EShLangFragmentMask = (1 << EShLangFragment), + EShLangComputeMask = (1 << EShLangCompute), + EShLangRayGenMask = (1 << EShLangRayGen), + EShLangRayGenNVMask = EShLangRayGenMask, + EShLangIntersectMask = (1 << EShLangIntersect), + EShLangIntersectNVMask = EShLangIntersectMask, + EShLangAnyHitMask = (1 << EShLangAnyHit), + EShLangAnyHitNVMask = EShLangAnyHitMask, + EShLangClosestHitMask = (1 << EShLangClosestHit), + EShLangClosestHitNVMask = EShLangClosestHitMask, + EShLangMissMask = (1 << EShLangMiss), + EShLangMissNVMask = EShLangMissMask, + EShLangCallableMask = (1 << EShLangCallable), + EShLangCallableNVMask = EShLangCallableMask, + EShLangTaskNVMask = (1 << EShLangTaskNV), + EShLangMeshNVMask = (1 << EShLangMeshNV), + LAST_ELEMENT_MARKER(EShLanguageMaskCount), +} EShLanguageMask; + +namespace glslang { + +class TType; + +typedef enum { + EShSourceNone, + EShSourceGlsl, // GLSL, includes ESSL (OpenGL ES GLSL) + EShSourceHlsl, // HLSL + LAST_ELEMENT_MARKER(EShSourceCount), +} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead + +typedef enum { + EShClientNone, // use when there is no client, e.g. 
for validation + EShClientVulkan, + EShClientOpenGL, + LAST_ELEMENT_MARKER(EShClientCount), +} EShClient; + +typedef enum { + EShTargetNone, + EShTargetSpv, // SPIR-V (preferred spelling) + EshTargetSpv = EShTargetSpv, // legacy spelling + LAST_ELEMENT_MARKER(EShTargetCount), +} EShTargetLanguage; + +typedef enum { + EShTargetVulkan_1_0 = (1 << 22), // Vulkan 1.0 + EShTargetVulkan_1_1 = (1 << 22) | (1 << 12), // Vulkan 1.1 + EShTargetVulkan_1_2 = (1 << 22) | (2 << 12), // Vulkan 1.2 + EShTargetOpenGL_450 = 450, // OpenGL + LAST_ELEMENT_MARKER(EShTargetClientVersionCount), +} EShTargetClientVersion; + +typedef EShTargetClientVersion EshTargetClientVersion; + +typedef enum { + EShTargetSpv_1_0 = (1 << 16), // SPIR-V 1.0 + EShTargetSpv_1_1 = (1 << 16) | (1 << 8), // SPIR-V 1.1 + EShTargetSpv_1_2 = (1 << 16) | (2 << 8), // SPIR-V 1.2 + EShTargetSpv_1_3 = (1 << 16) | (3 << 8), // SPIR-V 1.3 + EShTargetSpv_1_4 = (1 << 16) | (4 << 8), // SPIR-V 1.4 + EShTargetSpv_1_5 = (1 << 16) | (5 << 8), // SPIR-V 1.5 + LAST_ELEMENT_MARKER(EShTargetLanguageVersionCount), +} EShTargetLanguageVersion; + +struct TInputLanguage { + EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone + EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone + EShClient dialect; + int dialectVersion; // version of client's language definition, not the client (when not EShClientNone) +}; + +struct TClient { + EShClient client; + EShTargetClientVersion version; // version of client itself (not the client's input dialect) +}; + +struct TTarget { + EShTargetLanguage language; + EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header + bool hlslFunctionality1; // can target hlsl_functionality1 extension(s) +}; + +// All source/client/target versions and settings. +// Can override previous methods of setting, when items are set here. +// Expected to grow, as more are added, rather than growing parameter lists. +struct TEnvironment { + TInputLanguage input; // definition of the input language + TClient client; // what client is the overall compilation being done for? + TTarget target; // what to generate +}; + +GLSLANG_EXPORT const char* StageName(EShLanguage); + +} // end namespace glslang + +// +// Types of output the linker will create. +// +typedef enum { + EShExVertexFragment, + EShExFragment +} EShExecutable; + +// +// Optimization level for the compiler. +// +typedef enum { + EShOptNoGeneration, + EShOptNone, + EShOptSimple, // Optimizations that can be done quickly + EShOptFull, // Optimizations that will take more time + LAST_ELEMENT_MARKER(EshOptLevelCount), +} EShOptimizationLevel; + +// +// Texture and Sampler transformation mode. +// +typedef enum { + EShTexSampTransKeep, // keep textures and samplers as is (default) + EShTexSampTransUpgradeTextureRemoveSampler, // change texture w/o embeded sampler into sampled texture and throw away all samplers + LAST_ELEMENT_MARKER(EShTexSampTransCount), +} EShTextureSamplerTransformMode; + +// +// Message choices for what errors and warnings are given. 
+// +enum EShMessages : unsigned { + EShMsgDefault = 0, // default is to give all required errors and extra warnings + EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input + EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification + EShMsgAST = (1 << 2), // print the AST intermediate representation + EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation + EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V + EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor + EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics + EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit + EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions + EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules + EShMsgDebugInfo = (1 << 10), // save debug information + EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL + EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages + EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (for samplers and semantics) + EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table + LAST_ELEMENT_MARKER(EShMsgCount), +}; + +// +// Options for building reflection +// +typedef enum { + EShReflectionDefault = 0, // default is original behaviour before options were added + EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes + EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection + EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader + EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately + EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive + EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks + EShReflectionAllIOVariables = (1 << 6), // reflect all input/output variables, even if they are inactive + EShReflectionSharedStd140SSBO = (1 << 7), // Apply std140/shared rules for ubo to ssbo + EShReflectionSharedStd140UBO = (1 << 8), // Apply std140/shared rules for ubo to ssbo + LAST_ELEMENT_MARKER(EShReflectionCount), +} EShReflectionOptions; + +// +// Build a table for bindings. This can be used for locating +// attributes, uniforms, globals, etc., as needed. +// +typedef struct { + const char* name; + int binding; +} ShBinding; + +typedef struct { + int numBindings; + ShBinding* bindings; // array of bindings +} ShBindingTable; + +// +// ShHandle held by but opaque to the driver. It is allocated, +// managed, and de-allocated by the compiler/linker. It's contents +// are defined by and used by the compiler and linker. For example, +// symbol table information and object code passed from the compiler +// to the linker can be stored where ShHandle points. +// +// If handle creation fails, 0 will be returned. +// +typedef void* ShHandle; + +// +// Driver calls these to create and destroy compiler/linker +// objects. 
+// +GLSLANG_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int debugOptions); // one per shader +GLSLANG_EXPORT ShHandle ShConstructLinker(const EShExecutable, int debugOptions); // one per shader pair +GLSLANG_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object) +GLSLANG_EXPORT void ShDestruct(ShHandle); + +// +// The return value of ShCompile is boolean, non-zero indicating +// success. +// +// The info-log should be written by ShCompile into +// ShHandle, so it can answer future queries. +// +GLSLANG_EXPORT int ShCompile( + const ShHandle, + const char* const shaderStrings[], + const int numStrings, + const int* lengths, + const EShOptimizationLevel, + const TBuiltInResource *resources, + int debugOptions, + int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader + bool forwardCompatible = false, // give errors for use of deprecated features + EShMessages messages = EShMsgDefault // warnings and errors + ); + +GLSLANG_EXPORT int ShLinkExt( + const ShHandle, // linker object + const ShHandle h[], // compiler objects to link together + const int numHandles); + +// +// ShSetEncrpytionMethod is a place-holder for specifying +// how source code is encrypted. +// +GLSLANG_EXPORT void ShSetEncryptionMethod(ShHandle); + +// +// All the following return 0 if the information is not +// available in the object passed down, or the object is bad. +// +GLSLANG_EXPORT const char* ShGetInfoLog(const ShHandle); +GLSLANG_EXPORT const void* ShGetExecutable(const ShHandle); +GLSLANG_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing +GLSLANG_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings +// +// Tell the linker to never assign a vertex attribute to this list of physical attributes +// +GLSLANG_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count); + +// +// Returns the location ID of the named uniform. +// Returns -1 if error. +// +GLSLANG_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name); + +#ifdef __cplusplus + } // end extern "C" +#endif + +//////////////////////////////////////////////////////////////////////////////////////////// +// +// Deferred-Lowering C++ Interface +// ----------------------------------- +// +// Below is a new alternate C++ interface, which deprecates the above +// opaque handle-based interface. +// +// The below is further designed to handle multiple compilation units per stage, where +// the intermediate results, including the parse tree, are preserved until link time, +// rather than the above interface which is designed to have each compilation unit +// lowered at compile time. In the above model, linking occurs on the lowered results, +// whereas in this model intra-stage linking can occur at the parse tree +// (treeRoot in TIntermediate) level, and then a full stage can be lowered. 
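+//
+// For illustration only: a minimal sketch of this compile-then-link flow,
+// assuming the TShader/TProgram classes declared below plus GlslangToSpv()
+// from the SPIRV/ component of this library, and assuming <vector> is
+// available. The function name, the source string and the resource-limits
+// pointer are placeholders supplied by the caller (e.g.
+// glslang::DefaultTBuiltInResource from the StandAlone component).
+//
+//     std::vector<unsigned int> CompileFragToSpv(const char* fragSource,
+//                                                const TBuiltInResource* resources) {
+//         glslang::InitializeProcess();                  // typically once per process
+//
+//         glslang::TShader shader(EShLangFragment);
+//         const char* strings[] = { fragSource };
+//         shader.setStrings(strings, 1);
+//         shader.setEnvInput(glslang::EShSourceGlsl, EShLangFragment,
+//                            glslang::EShClientVulkan, 100);
+//         shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
+//         shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);
+//
+//         const EShMessages msgs =
+//             static_cast<EShMessages>(EShMsgSpvRules | EShMsgVulkanRules);
+//         std::vector<unsigned int> spirv;
+//         if (shader.parse(resources, 450, false, msgs)) {   // errors: shader.getInfoLog()
+//             glslang::TProgram program;                     // destructs before 'shader'
+//             program.addShader(&shader);
+//             if (program.link(msgs))                        // errors: program.getInfoLog()
+//                 glslang::GlslangToSpv(*program.getIntermediate(EShLangFragment), spirv);
+//         }
+//
+//         glslang::FinalizeProcess();                    // typically once, at shutdown
+//         return spirv;
+//     }
+//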
+// + +#include +#include +#include + +class TCompiler; +class TInfoSink; + +namespace glslang { + +struct Version { + int major; + int minor; + int patch; + const char* flavor; +}; + +GLSLANG_EXPORT Version GetVersion(); +GLSLANG_EXPORT const char* GetEsslVersionString(); +GLSLANG_EXPORT const char* GetGlslVersionString(); +GLSLANG_EXPORT int GetKhronosToolId(); + +class TIntermediate; +class TProgram; +class TPoolAllocator; + +// Call this exactly once per process before using anything else +GLSLANG_EXPORT bool InitializeProcess(); + +// Call once per process to tear down everything +GLSLANG_EXPORT void FinalizeProcess(); + +// Resource type for IO resolver +enum TResourceType { + EResSampler, + EResTexture, + EResImage, + EResUbo, + EResSsbo, + EResUav, + EResCount +}; + + +// Make one TShader per shader that you will link into a program. Then +// - provide the shader through setStrings() or setStringsWithLengths() +// - optionally call setEnv*(), see below for more detail +// - optionally use setPreamble() to set a special shader string that will be +// processed before all others but won't affect the validity of #version +// - optionally call addProcesses() for each setting/transform, +// see comment for class TProcesses +// - call parse(): source language and target environment must be selected +// either by correct setting of EShMessages sent to parse(), or by +// explicitly calling setEnv*() +// - query the info logs +// +// N.B.: Does not yet support having the same TShader instance being linked into +// multiple programs. +// +// N.B.: Destruct a linked program *before* destructing the shaders linked into it. +// +class TShader { +public: + GLSLANG_EXPORT explicit TShader(EShLanguage); + GLSLANG_EXPORT virtual ~TShader(); + GLSLANG_EXPORT void setStrings(const char* const* s, int n); + GLSLANG_EXPORT void setStringsWithLengths( + const char* const* s, const int* l, int n); + GLSLANG_EXPORT void setStringsWithLengthsAndNames( + const char* const* s, const int* l, const char* const* names, int n); + void setPreamble(const char* s) { preamble = s; } + GLSLANG_EXPORT void setEntryPoint(const char* entryPoint); + GLSLANG_EXPORT void setSourceEntryPoint(const char* sourceEntryPointName); + GLSLANG_EXPORT void addProcesses(const std::vector&); + + // IO resolver binding data: see comments in ShaderLang.cpp + GLSLANG_EXPORT void setShiftBinding(TResourceType res, unsigned int base); + GLSLANG_EXPORT void setShiftSamplerBinding(unsigned int base); // DEPRECATED: use setShiftBinding + GLSLANG_EXPORT void setShiftTextureBinding(unsigned int base); // DEPRECATED: use setShiftBinding + GLSLANG_EXPORT void setShiftImageBinding(unsigned int base); // DEPRECATED: use setShiftBinding + GLSLANG_EXPORT void setShiftUboBinding(unsigned int base); // DEPRECATED: use setShiftBinding + GLSLANG_EXPORT void setShiftUavBinding(unsigned int base); // DEPRECATED: use setShiftBinding + GLSLANG_EXPORT void setShiftCbufferBinding(unsigned int base); // synonym for setShiftUboBinding + GLSLANG_EXPORT void setShiftSsboBinding(unsigned int base); // DEPRECATED: use setShiftBinding + GLSLANG_EXPORT void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set); + GLSLANG_EXPORT void setResourceSetBinding(const std::vector& base); + GLSLANG_EXPORT void setAutoMapBindings(bool map); + GLSLANG_EXPORT void setAutoMapLocations(bool map); + GLSLANG_EXPORT void addUniformLocationOverride(const char* name, int loc); + GLSLANG_EXPORT void setUniformLocationBase(int base); + GLSLANG_EXPORT void 
setInvertY(bool invert); +#ifdef ENABLE_HLSL + GLSLANG_EXPORT void setHlslIoMapping(bool hlslIoMap); + GLSLANG_EXPORT void setFlattenUniformArrays(bool flatten); +#endif + GLSLANG_EXPORT void setNoStorageFormat(bool useUnknownFormat); + GLSLANG_EXPORT void setNanMinMaxClamp(bool nanMinMaxClamp); + GLSLANG_EXPORT void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode); + + // For setting up the environment (cleared to nothingness in the constructor). + // These must be called so that parsing is done for the right source language and + // target environment, either indirectly through TranslateEnvironment() based on + // EShMessages et. al., or directly by the user. + // + // setEnvInput: The input source language and stage. If generating code for a + // specific client, the input client semantics to use and the + // version of the that client's input semantics to use, otherwise + // use EShClientNone and version of 0, e.g. for validation mode. + // Note 'version' does not describe the target environment, + // just the version of the source dialect to compile under. + // + // See the definitions of TEnvironment, EShSource, EShLanguage, + // and EShClient for choices and more detail. + // + // setEnvClient: The client that will be hosting the execution, and it's version. + // Note 'version' is not the version of the languages involved, but + // the version of the client environment. + // Use EShClientNone and version of 0 if there is no client, e.g. + // for validation mode. + // + // See EShTargetClientVersion for choices. + // + // setEnvTarget: The language to translate to when generating code, and that + // language's version. + // Use EShTargetNone and version of 0 if there is no client, e.g. + // for validation mode. + // + void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version) + { + environment.input.languageFamily = lang; + environment.input.stage = envStage; + environment.input.dialect = client; + environment.input.dialectVersion = version; + } + void setEnvClient(EShClient client, EShTargetClientVersion version) + { + environment.client.client = client; + environment.client.version = version; + } + void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version) + { + environment.target.language = lang; + environment.target.version = version; + } + + void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; } + +#ifdef ENABLE_HLSL + void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; } + bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; } +#else + bool getEnvTargetHlslFunctionality1() const { return false; } +#endif + + // Interface to #include handlers. + // + // To support #include, a client of Glslang does the following: + // 1. Call setStringsWithNames to set the source strings and associated + // names. For example, the names could be the names of the files + // containing the shader sources. + // 2. Call parse with an Includer. + // + // When the Glslang parser encounters an #include directive, it calls + // the Includer's include method with the requested include name + // together with the current string name. The returned IncludeResult + // contains the fully resolved name of the included source, together + // with the source text that should replace the #include directive + // in the source stream. After parsing that source, Glslang will + // release the IncludeResult object. 
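    //
    // Building on the Includer interface declared just below, for illustration
    // only: one possible in-memory includer, assuming the caller supplies a
    // std::map<std::string, std::string> of header name to header text
    // (MapIncluder and 'sources' are placeholder names; <map> and <string> are
    // assumed to be included):
    //
    //     class MapIncluder : public glslang::TShader::Includer {
    //     public:
    //         explicit MapIncluder(const std::map<std::string, std::string>& s)
    //             : sources(s) {}
    //
    //         // Look the header up by its literal name; returning nullptr makes
    //         // the parser try includeSystem() next and then report failure.
    //         IncludeResult* includeLocal(const char* headerName, const char*,
    //                                     size_t) override {
    //             auto it = sources.find(headerName);
    //             if (it == sources.end())
    //                 return nullptr;
    //             return new IncludeResult(it->first, it->second.data(),
    //                                      it->second.size(), nullptr);
    //         }
    //
    //         // The map owns the header text, so only the IncludeResult itself
    //         // is freed here.
    //         void releaseInclude(IncludeResult* result) override { delete result; }
    //
    //     private:
    //         const std::map<std::string, std::string>& sources;
    //     };
    //
    // An instance of such an includer is passed to the parse() overloads below
    // that take an Includer&; a file-system-based includer works the same way,
    // resolving relative header names into absolute paths as described above.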
+ class Includer { + public: + // An IncludeResult contains the resolved name and content of a source + // inclusion. + struct IncludeResult { + IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) : + headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { } + // For a successful inclusion, the fully resolved name of the requested + // include. For example, in a file system-based includer, full resolution + // should convert a relative path name into an absolute path name. + // For a failed inclusion, this is an empty string. + const std::string headerName; + // The content and byte length of the requested inclusion. The + // Includer producing this IncludeResult retains ownership of the + // storage. + // For a failed inclusion, the header + // field points to a string containing error details. + const char* const headerData; + const size_t headerLength; + // Include resolver's context. + void* userData; + protected: + IncludeResult& operator=(const IncludeResult&); + IncludeResult(); + }; + + // For both include methods below: + // + // Resolves an inclusion request by name, current source name, + // and include depth. + // On success, returns an IncludeResult containing the resolved name + // and content of the include. + // On failure, returns a nullptr, or an IncludeResult + // with an empty string for the headerName and error details in the + // header field. + // The Includer retains ownership of the contents + // of the returned IncludeResult value, and those contents must + // remain valid until the releaseInclude method is called on that + // IncludeResult object. + // + // Note "local" vs. "system" is not an "either/or": "local" is an + // extra thing to do over "system". Both might get called, as per + // the C++ specification. + + // For the "system" or <>-style includes; search the "system" paths. + virtual IncludeResult* includeSystem(const char* /*headerName*/, + const char* /*includerName*/, + size_t /*inclusionDepth*/) { return nullptr; } + + // For the "local"-only aspect of a "" include. Should not search in the + // "system" paths, because on returning a failure, the parser will + // call includeSystem() to look in the "system" locations. + virtual IncludeResult* includeLocal(const char* /*headerName*/, + const char* /*includerName*/, + size_t /*inclusionDepth*/) { return nullptr; } + + // Signals that the parser will no longer use the contents of the + // specified IncludeResult. + virtual void releaseInclude(IncludeResult*) = 0; + virtual ~Includer() {} + }; + + // Fail all Includer searches + class ForbidIncluder : public Includer { + public: + virtual void releaseInclude(IncludeResult*) override { } + }; + + GLSLANG_EXPORT bool parse( + const TBuiltInResource*, int defaultVersion, EProfile defaultProfile, + bool forceDefaultVersionAndProfile, bool forwardCompatible, + EShMessages, Includer&); + + bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile, + bool forwardCompatible, EShMessages messages) + { + TShader::ForbidIncluder includer; + return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer); + } + + // Equivalent to parse() without a default profile and without forcing defaults. 
+ bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages) + { + return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages); + } + + bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages, + Includer& includer) + { + return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer); + } + + // NOTE: Doing just preprocessing to obtain a correct preprocessed shader string + // is not an officially supported or fully working path. + GLSLANG_EXPORT bool preprocess( + const TBuiltInResource* builtInResources, int defaultVersion, + EProfile defaultProfile, bool forceDefaultVersionAndProfile, + bool forwardCompatible, EShMessages message, std::string* outputString, + Includer& includer); + + GLSLANG_EXPORT const char* getInfoLog(); + GLSLANG_EXPORT const char* getInfoDebugLog(); + EShLanguage getStage() const { return stage; } + TIntermediate* getIntermediate() const { return intermediate; } + +protected: + TPoolAllocator* pool; + EShLanguage stage; + TCompiler* compiler; + TIntermediate* intermediate; + TInfoSink* infoSink; + // strings and lengths follow the standard for glShaderSource: + // strings is an array of numStrings pointers to string data. + // lengths can be null, but if not it is an array of numStrings + // integers containing the length of the associated strings. + // if lengths is null or lengths[n] < 0 the associated strings[n] is + // assumed to be null-terminated. + // stringNames is the optional names for all the strings. If stringNames + // is null, then none of the strings has name. If a certain element in + // stringNames is null, then the corresponding string does not have name. + const char* const* strings; // explicit code to compile, see previous comment + const int* lengths; + const char* const* stringNames; + int numStrings; // size of the above arrays + const char* preamble; // string of implicit code to compile before the explicitly provided code + + // a function in the source string can be renamed FROM this TO the name given in setEntryPoint. + std::string sourceEntryPointName; + + TEnvironment environment; + + friend class TProgram; + +private: + TShader& operator=(TShader&); +}; + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + +// +// A reflection database and its interface, consistent with the OpenGL API reflection queries. 
+// + +// Data needed for just a single object at the granularity exchanged by the reflection API +class TObjectReflection { +public: + GLSLANG_EXPORT TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex); + + GLSLANG_EXPORT const TType* getType() const { return type; } + GLSLANG_EXPORT int getBinding() const; + GLSLANG_EXPORT void dump() const; + static TObjectReflection badReflection() { return TObjectReflection(); } + + std::string name; + int offset; + int glDefineType; + int size; // data size in bytes for a block, array size for a (non-block) object that's an array + int index; + int counterIndex; + int numMembers; + int arrayStride; // stride of an array variable + int topLevelArraySize; // size of the top-level variable in a storage buffer member + int topLevelArrayStride; // stride of the top-level variable in a storage buffer member + EShLanguageMask stages; + +protected: + TObjectReflection() + : offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0), + topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr) + { + } + + const TType* type; +}; + +class TReflection; +class TIoMapper; +struct TVarEntryInfo; + +// Allows to customize the binding layout after linking. +// All used uniform variables will invoke at least validateBinding. +// If validateBinding returned true then the other resolveBinding, +// resolveSet, and resolveLocation are invoked to resolve the binding +// and descriptor set index respectively. +// +// Invocations happen in a particular order: +// 1) all shader inputs +// 2) all shader outputs +// 3) all uniforms with binding and set already defined +// 4) all uniforms with binding but no set defined +// 5) all uniforms with set but no binding defined +// 6) all uniforms with no binding and no set defined +// +// mapIO will use this resolver in two phases. The first +// phase is a notification phase, calling the corresponging +// notifiy callbacks, this phase ends with a call to endNotifications. +// Phase two starts directly after the call to endNotifications +// and calls all other callbacks to validate and to get the +// bindings, sets, locations, component and color indices. +// +// NOTE: that still limit checks are applied to bindings and sets +// and may result in an error. +class TIoMapResolver +{ +public: + virtual ~TIoMapResolver() {} + + // Should return true if the resulting/current binding would be okay. + // Basic idea is to do aliasing binding checks with this. + virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return a value >= 0 if the current binding should be overridden. + // Return -1 if the current binding (including no binding) should be kept. + virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return a value >= 0 if the current set should be overridden. + // Return -1 if the current set (including no set) should be kept. + virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return a value >= 0 if the current location should be overridden. + // Return -1 if the current location (including no location) should be kept. + virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return true if the resulting/current setup would be okay. + // Basic idea is to do aliasing checks and reject invalid semantic names. 
+ virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return a value >= 0 if the current location should be overridden. + // Return -1 if the current location (including no location) should be kept. + virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return a value >= 0 if the current component index should be overridden. + // Return -1 if the current component index (including no index) should be kept. + virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Should return a value >= 0 if the current color index should be overridden. + // Return -1 if the current color index (including no index) should be kept. + virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Notification of a uniform variable + virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Notification of a in or out variable + virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0; + // Called by mapIO when it starts its notify pass for the given stage + virtual void beginNotifications(EShLanguage stage) = 0; + // Called by mapIO when it has finished the notify pass + virtual void endNotifications(EShLanguage stage) = 0; + // Called by mipIO when it starts its resolve pass for the given stage + virtual void beginResolve(EShLanguage stage) = 0; + // Called by mapIO when it has finished the resolve pass + virtual void endResolve(EShLanguage stage) = 0; + // Called by mapIO when it starts its symbol collect for teh given stage + virtual void beginCollect(EShLanguage stage) = 0; + // Called by mapIO when it has finished the symbol collect + virtual void endCollect(EShLanguage stage) = 0; + // Called by TSlotCollector to resolve storage locations or bindings + virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0; + // Called by TSlotCollector to resolve resource locations or bindings + virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0; + // Called by mapIO.addStage to set shader stage mask to mark a stage be added to this pipeline + virtual void addStage(EShLanguage stage) = 0; +}; + +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE + +// Make one TProgram per set of shaders that will get linked together. Add all +// the shaders that are to be linked together. After calling shader.parse() +// for all shaders, call link(). +// +// N.B.: Destruct a linked program *before* destructing the shaders linked into it. 
+// +class TProgram { +public: + GLSLANG_EXPORT TProgram(); + GLSLANG_EXPORT virtual ~TProgram(); + void addShader(TShader* shader) { stages[shader->stage].push_back(shader); } + std::list& getShaders(EShLanguage stage) { return stages[stage]; } + // Link Validation interface + GLSLANG_EXPORT bool link(EShMessages); + GLSLANG_EXPORT const char* getInfoLog(); + GLSLANG_EXPORT const char* getInfoDebugLog(); + + TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; } + +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + + // Reflection Interface + + // call first, to do liveness analysis, index mapping, etc.; returns false on failure + GLSLANG_EXPORT bool buildReflection(int opts = EShReflectionDefault); + GLSLANG_EXPORT unsigned getLocalSize(int dim) const; // return dim'th local size + GLSLANG_EXPORT int getReflectionIndex(const char *name) const; + GLSLANG_EXPORT int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const; + GLSLANG_EXPORT int getNumUniformVariables() const; + GLSLANG_EXPORT const TObjectReflection& getUniform(int index) const; + GLSLANG_EXPORT int getNumUniformBlocks() const; + GLSLANG_EXPORT const TObjectReflection& getUniformBlock(int index) const; + GLSLANG_EXPORT int getNumPipeInputs() const; + GLSLANG_EXPORT const TObjectReflection& getPipeInput(int index) const; + GLSLANG_EXPORT int getNumPipeOutputs() const; + GLSLANG_EXPORT const TObjectReflection& getPipeOutput(int index) const; + GLSLANG_EXPORT int getNumBufferVariables() const; + GLSLANG_EXPORT const TObjectReflection& getBufferVariable(int index) const; + GLSLANG_EXPORT int getNumBufferBlocks() const; + GLSLANG_EXPORT const TObjectReflection& getBufferBlock(int index) const; + GLSLANG_EXPORT int getNumAtomicCounters() const; + GLSLANG_EXPORT const TObjectReflection& getAtomicCounter(int index) const; + + // Legacy Reflection Interface - expressed in terms of above interface + + // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS) + int getNumLiveUniformVariables() const { return getNumUniformVariables(); } + + // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS) + int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); } + + // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES) + int getNumLiveAttributes() const { return getNumPipeInputs(); } + + // can be used for glGetUniformIndices() + int getUniformIndex(const char *name) const { return getReflectionIndex(name); } + + int getPipeIOIndex(const char *name, const bool inOrOut) const + { return getReflectionPipeIOIndex(name, inOrOut); } + + // can be used for "name" part of glGetActiveUniform() + const char *getUniformName(int index) const { return getUniform(index).name.c_str(); } + + // returns the binding number + int getUniformBinding(int index) const { return getUniform(index).getBinding(); } + + // returns Shaders Stages where a Uniform is present + EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; } + + // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX) + int getUniformBlockIndex(int index) const { return getUniform(index).index; } + + // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE) + int getUniformType(int index) const { return getUniform(index).glDefineType; } + + // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET) + int getUniformBufferOffset(int index) const { return getUniform(index).offset; } + + // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE) + int getUniformArraySize(int index) const { return 
getUniform(index).size; } + + // returns a TType* + const TType *getUniformTType(int index) const { return getUniform(index).getType(); } + + // can be used for glGetActiveUniformBlockName() + const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); } + + // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE) + int getUniformBlockSize(int index) const { return getUniformBlock(index).size; } + + // returns the block binding number + int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); } + + // returns block index of associated counter. + int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; } + + // returns a TType* + const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); } + + // can be used for glGetActiveAttrib() + const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); } + + // can be used for glGetActiveAttrib() + int getAttributeType(int index) const { return getPipeInput(index).glDefineType; } + + // returns a TType* + const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); } + + GLSLANG_EXPORT void dumpReflection(); + // I/O mapping: apply base offsets and map live unbound variables + // If resolver is not provided it uses the previous approach + // and respects auto assignment and offsets. + GLSLANG_EXPORT bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr); +#endif // !GLSLANG_WEB && !GLSLANG_ANGLE + +protected: + GLSLANG_EXPORT bool linkStage(EShLanguage, EShMessages); + + TPoolAllocator* pool; + std::list stages[EShLangCount]; + TIntermediate* intermediate[EShLangCount]; + bool newedIntermediate[EShLangCount]; // track which intermediate were "new" versus reusing a singleton unit in a stage + TInfoSink* infoSink; +#if !defined(GLSLANG_WEB) && !defined(GLSLANG_ANGLE) + TReflection* reflection; +#endif + bool linked; + +private: + TProgram(TProgram&); + TProgram& operator=(TProgram&); +}; + +} // end namespace glslang + +#endif // _COMPILER_INTERFACE_INCLUDED_ diff --git a/src/third_party/glslang/glslang/build_info.h b/src/third_party/glslang/glslang/build_info.h new file mode 100644 index 0000000..319bfa5 --- /dev/null +++ b/src/third_party/glslang/glslang/build_info.h @@ -0,0 +1,62 @@ +// Copyright (C) 2020 The Khronos Group Inc. +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// +// Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// +// Neither the name of The Khronos Group Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#ifndef GLSLANG_BUILD_INFO +#define GLSLANG_BUILD_INFO + +#define GLSLANG_VERSION_MAJOR 11 +#define GLSLANG_VERSION_MINOR 0 +#define GLSLANG_VERSION_PATCH 0 +#define GLSLANG_VERSION_FLAVOR "" + +#define GLSLANG_VERSION_GREATER_THAN(major, minor, patch) \ + (((major) > GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \ + (((minor) > GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \ + ((patch) > GLSLANG_VERSION_PATCH))))) + +#define GLSLANG_VERSION_GREATER_OR_EQUAL_TO(major, minor, patch) \ + (((major) > GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \ + (((minor) > GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \ + ((patch) >= GLSLANG_VERSION_PATCH))))) + +#define GLSLANG_VERSION_LESS_THAN(major, minor, patch) \ + (((major) < GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \ + (((minor) < GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \ + ((patch) < GLSLANG_VERSION_PATCH))))) + +#define GLSLANG_VERSION_LESS_OR_EQUAL_TO(major, minor, patch) \ + (((major) < GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \ + (((minor) < GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \ + ((patch) <= GLSLANG_VERSION_PATCH))))) + +#endif // GLSLANG_BUILD_INFO diff --git a/src/third_party/spirv-reflect/include/spirv/unified1/spirv.h b/src/third_party/spirv-reflect/include/spirv/unified1/spirv.h new file mode 100644 index 0000000..a61a2d2 --- /dev/null +++ b/src/third_party/spirv-reflect/include/spirv/unified1/spirv.h @@ -0,0 +1,1077 @@ +/* +** Copyright (c) 2014-2018 The Khronos Group Inc. +** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. 
+*/ + +/* +** This header is automatically generated by the same tool that creates +** the Binary Section of the SPIR-V specification. +*/ + +/* +** Enumeration tokens for SPIR-V, in various styles: +** C, C++, C++11, JSON, Lua, Python +** +** - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +** - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +** - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +** - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +** - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +** +** Some tokens act like mask values, which can be OR'd together, +** while others are mutually exclusive. The mask-like ones have +** "Mask" in their name, and a parallel enum that has the shift +** amount (1 << x) for each corresponding enumerant. +*/ + +#ifndef spirv_H +#define spirv_H + +typedef unsigned int SpvId; + +#define SPV_VERSION 0x10300 +#define SPV_REVISION 1 + +static const unsigned int SpvMagicNumber = 0x07230203; +static const unsigned int SpvVersion = 0x00010300; +static const unsigned int SpvRevision = 1; +static const unsigned int SpvOpCodeMask = 0xffff; +static const unsigned int SpvWordCountShift = 16; + +typedef enum SpvSourceLanguage_ { + SpvSourceLanguageUnknown = 0, + SpvSourceLanguageESSL = 1, + SpvSourceLanguageGLSL = 2, + SpvSourceLanguageOpenCL_C = 3, + SpvSourceLanguageOpenCL_CPP = 4, + SpvSourceLanguageHLSL = 5, + SpvSourceLanguageMax = 0x7fffffff, +} SpvSourceLanguage; + +typedef enum SpvExecutionModel_ { + SpvExecutionModelVertex = 0, + SpvExecutionModelTessellationControl = 1, + SpvExecutionModelTessellationEvaluation = 2, + SpvExecutionModelGeometry = 3, + SpvExecutionModelFragment = 4, + SpvExecutionModelGLCompute = 5, + SpvExecutionModelKernel = 6, + SpvExecutionModelMax = 0x7fffffff, +} SpvExecutionModel; + +typedef enum SpvAddressingModel_ { + SpvAddressingModelLogical = 0, + SpvAddressingModelPhysical32 = 1, + SpvAddressingModelPhysical64 = 2, + SpvAddressingModelMax = 0x7fffffff, +} SpvAddressingModel; + +typedef enum SpvMemoryModel_ { + SpvMemoryModelSimple = 0, + SpvMemoryModelGLSL450 = 1, + SpvMemoryModelOpenCL = 2, + SpvMemoryModelMax = 0x7fffffff, +} SpvMemoryModel; + +typedef enum SpvExecutionMode_ { + SpvExecutionModeInvocations = 0, + SpvExecutionModeSpacingEqual = 1, + SpvExecutionModeSpacingFractionalEven = 2, + SpvExecutionModeSpacingFractionalOdd = 3, + SpvExecutionModeVertexOrderCw = 4, + SpvExecutionModeVertexOrderCcw = 5, + SpvExecutionModePixelCenterInteger = 6, + SpvExecutionModeOriginUpperLeft = 7, + SpvExecutionModeOriginLowerLeft = 8, + SpvExecutionModeEarlyFragmentTests = 9, + SpvExecutionModePointMode = 10, + SpvExecutionModeXfb = 11, + SpvExecutionModeDepthReplacing = 12, + SpvExecutionModeDepthGreater = 14, + SpvExecutionModeDepthLess = 15, + SpvExecutionModeDepthUnchanged = 16, + SpvExecutionModeLocalSize = 17, + SpvExecutionModeLocalSizeHint = 18, + SpvExecutionModeInputPoints = 19, + SpvExecutionModeInputLines = 20, + SpvExecutionModeInputLinesAdjacency = 21, + SpvExecutionModeTriangles = 22, + SpvExecutionModeInputTrianglesAdjacency = 23, + SpvExecutionModeQuads = 24, + SpvExecutionModeIsolines = 25, + SpvExecutionModeOutputVertices = 26, + SpvExecutionModeOutputPoints = 27, + SpvExecutionModeOutputLineStrip = 28, + SpvExecutionModeOutputTriangleStrip = 29, + SpvExecutionModeVecTypeHint = 30, + SpvExecutionModeContractionOff = 31, + SpvExecutionModeInitializer = 33, + SpvExecutionModeFinalizer = 34, + 
SpvExecutionModeSubgroupSize = 35, + SpvExecutionModeSubgroupsPerWorkgroup = 36, + SpvExecutionModeSubgroupsPerWorkgroupId = 37, + SpvExecutionModeLocalSizeId = 38, + SpvExecutionModeLocalSizeHintId = 39, + SpvExecutionModePostDepthCoverage = 4446, + SpvExecutionModeStencilRefReplacingEXT = 5027, + SpvExecutionModeMax = 0x7fffffff, +} SpvExecutionMode; + +typedef enum SpvStorageClass_ { + SpvStorageClassUniformConstant = 0, + SpvStorageClassInput = 1, + SpvStorageClassUniform = 2, + SpvStorageClassOutput = 3, + SpvStorageClassWorkgroup = 4, + SpvStorageClassCrossWorkgroup = 5, + SpvStorageClassPrivate = 6, + SpvStorageClassFunction = 7, + SpvStorageClassGeneric = 8, + SpvStorageClassPushConstant = 9, + SpvStorageClassAtomicCounter = 10, + SpvStorageClassImage = 11, + SpvStorageClassStorageBuffer = 12, + SpvStorageClassMax = 0x7fffffff, +} SpvStorageClass; + +typedef enum SpvDim_ { + SpvDim1D = 0, + SpvDim2D = 1, + SpvDim3D = 2, + SpvDimCube = 3, + SpvDimRect = 4, + SpvDimBuffer = 5, + SpvDimSubpassData = 6, + SpvDimMax = 0x7fffffff, +} SpvDim; + +typedef enum SpvSamplerAddressingMode_ { + SpvSamplerAddressingModeNone = 0, + SpvSamplerAddressingModeClampToEdge = 1, + SpvSamplerAddressingModeClamp = 2, + SpvSamplerAddressingModeRepeat = 3, + SpvSamplerAddressingModeRepeatMirrored = 4, + SpvSamplerAddressingModeMax = 0x7fffffff, +} SpvSamplerAddressingMode; + +typedef enum SpvSamplerFilterMode_ { + SpvSamplerFilterModeNearest = 0, + SpvSamplerFilterModeLinear = 1, + SpvSamplerFilterModeMax = 0x7fffffff, +} SpvSamplerFilterMode; + +typedef enum SpvImageFormat_ { + SpvImageFormatUnknown = 0, + SpvImageFormatRgba32f = 1, + SpvImageFormatRgba16f = 2, + SpvImageFormatR32f = 3, + SpvImageFormatRgba8 = 4, + SpvImageFormatRgba8Snorm = 5, + SpvImageFormatRg32f = 6, + SpvImageFormatRg16f = 7, + SpvImageFormatR11fG11fB10f = 8, + SpvImageFormatR16f = 9, + SpvImageFormatRgba16 = 10, + SpvImageFormatRgb10A2 = 11, + SpvImageFormatRg16 = 12, + SpvImageFormatRg8 = 13, + SpvImageFormatR16 = 14, + SpvImageFormatR8 = 15, + SpvImageFormatRgba16Snorm = 16, + SpvImageFormatRg16Snorm = 17, + SpvImageFormatRg8Snorm = 18, + SpvImageFormatR16Snorm = 19, + SpvImageFormatR8Snorm = 20, + SpvImageFormatRgba32i = 21, + SpvImageFormatRgba16i = 22, + SpvImageFormatRgba8i = 23, + SpvImageFormatR32i = 24, + SpvImageFormatRg32i = 25, + SpvImageFormatRg16i = 26, + SpvImageFormatRg8i = 27, + SpvImageFormatR16i = 28, + SpvImageFormatR8i = 29, + SpvImageFormatRgba32ui = 30, + SpvImageFormatRgba16ui = 31, + SpvImageFormatRgba8ui = 32, + SpvImageFormatR32ui = 33, + SpvImageFormatRgb10a2ui = 34, + SpvImageFormatRg32ui = 35, + SpvImageFormatRg16ui = 36, + SpvImageFormatRg8ui = 37, + SpvImageFormatR16ui = 38, + SpvImageFormatR8ui = 39, + SpvImageFormatMax = 0x7fffffff, +} SpvImageFormat; + +typedef enum SpvImageChannelOrder_ { + SpvImageChannelOrderR = 0, + SpvImageChannelOrderA = 1, + SpvImageChannelOrderRG = 2, + SpvImageChannelOrderRA = 3, + SpvImageChannelOrderRGB = 4, + SpvImageChannelOrderRGBA = 5, + SpvImageChannelOrderBGRA = 6, + SpvImageChannelOrderARGB = 7, + SpvImageChannelOrderIntensity = 8, + SpvImageChannelOrderLuminance = 9, + SpvImageChannelOrderRx = 10, + SpvImageChannelOrderRGx = 11, + SpvImageChannelOrderRGBx = 12, + SpvImageChannelOrderDepth = 13, + SpvImageChannelOrderDepthStencil = 14, + SpvImageChannelOrdersRGB = 15, + SpvImageChannelOrdersRGBx = 16, + SpvImageChannelOrdersRGBA = 17, + SpvImageChannelOrdersBGRA = 18, + SpvImageChannelOrderABGR = 19, + SpvImageChannelOrderMax = 0x7fffffff, +} SpvImageChannelOrder; + 
+typedef enum SpvImageChannelDataType_ { + SpvImageChannelDataTypeSnormInt8 = 0, + SpvImageChannelDataTypeSnormInt16 = 1, + SpvImageChannelDataTypeUnormInt8 = 2, + SpvImageChannelDataTypeUnormInt16 = 3, + SpvImageChannelDataTypeUnormShort565 = 4, + SpvImageChannelDataTypeUnormShort555 = 5, + SpvImageChannelDataTypeUnormInt101010 = 6, + SpvImageChannelDataTypeSignedInt8 = 7, + SpvImageChannelDataTypeSignedInt16 = 8, + SpvImageChannelDataTypeSignedInt32 = 9, + SpvImageChannelDataTypeUnsignedInt8 = 10, + SpvImageChannelDataTypeUnsignedInt16 = 11, + SpvImageChannelDataTypeUnsignedInt32 = 12, + SpvImageChannelDataTypeHalfFloat = 13, + SpvImageChannelDataTypeFloat = 14, + SpvImageChannelDataTypeUnormInt24 = 15, + SpvImageChannelDataTypeUnormInt101010_2 = 16, + SpvImageChannelDataTypeMax = 0x7fffffff, +} SpvImageChannelDataType; + +typedef enum SpvImageOperandsShift_ { + SpvImageOperandsBiasShift = 0, + SpvImageOperandsLodShift = 1, + SpvImageOperandsGradShift = 2, + SpvImageOperandsConstOffsetShift = 3, + SpvImageOperandsOffsetShift = 4, + SpvImageOperandsConstOffsetsShift = 5, + SpvImageOperandsSampleShift = 6, + SpvImageOperandsMinLodShift = 7, + SpvImageOperandsMax = 0x7fffffff, +} SpvImageOperandsShift; + +typedef enum SpvImageOperandsMask_ { + SpvImageOperandsMaskNone = 0, + SpvImageOperandsBiasMask = 0x00000001, + SpvImageOperandsLodMask = 0x00000002, + SpvImageOperandsGradMask = 0x00000004, + SpvImageOperandsConstOffsetMask = 0x00000008, + SpvImageOperandsOffsetMask = 0x00000010, + SpvImageOperandsConstOffsetsMask = 0x00000020, + SpvImageOperandsSampleMask = 0x00000040, + SpvImageOperandsMinLodMask = 0x00000080, +} SpvImageOperandsMask; + +typedef enum SpvFPFastMathModeShift_ { + SpvFPFastMathModeNotNaNShift = 0, + SpvFPFastMathModeNotInfShift = 1, + SpvFPFastMathModeNSZShift = 2, + SpvFPFastMathModeAllowRecipShift = 3, + SpvFPFastMathModeFastShift = 4, + SpvFPFastMathModeMax = 0x7fffffff, +} SpvFPFastMathModeShift; + +typedef enum SpvFPFastMathModeMask_ { + SpvFPFastMathModeMaskNone = 0, + SpvFPFastMathModeNotNaNMask = 0x00000001, + SpvFPFastMathModeNotInfMask = 0x00000002, + SpvFPFastMathModeNSZMask = 0x00000004, + SpvFPFastMathModeAllowRecipMask = 0x00000008, + SpvFPFastMathModeFastMask = 0x00000010, +} SpvFPFastMathModeMask; + +typedef enum SpvFPRoundingMode_ { + SpvFPRoundingModeRTE = 0, + SpvFPRoundingModeRTZ = 1, + SpvFPRoundingModeRTP = 2, + SpvFPRoundingModeRTN = 3, + SpvFPRoundingModeMax = 0x7fffffff, +} SpvFPRoundingMode; + +typedef enum SpvLinkageType_ { + SpvLinkageTypeExport = 0, + SpvLinkageTypeImport = 1, + SpvLinkageTypeMax = 0x7fffffff, +} SpvLinkageType; + +typedef enum SpvAccessQualifier_ { + SpvAccessQualifierReadOnly = 0, + SpvAccessQualifierWriteOnly = 1, + SpvAccessQualifierReadWrite = 2, + SpvAccessQualifierMax = 0x7fffffff, +} SpvAccessQualifier; + +typedef enum SpvFunctionParameterAttribute_ { + SpvFunctionParameterAttributeZext = 0, + SpvFunctionParameterAttributeSext = 1, + SpvFunctionParameterAttributeByVal = 2, + SpvFunctionParameterAttributeSret = 3, + SpvFunctionParameterAttributeNoAlias = 4, + SpvFunctionParameterAttributeNoCapture = 5, + SpvFunctionParameterAttributeNoWrite = 6, + SpvFunctionParameterAttributeNoReadWrite = 7, + SpvFunctionParameterAttributeMax = 0x7fffffff, +} SpvFunctionParameterAttribute; + +typedef enum SpvDecoration_ { + SpvDecorationRelaxedPrecision = 0, + SpvDecorationSpecId = 1, + SpvDecorationBlock = 2, + SpvDecorationBufferBlock = 3, + SpvDecorationRowMajor = 4, + SpvDecorationColMajor = 5, + SpvDecorationArrayStride = 6, + 
SpvDecorationMatrixStride = 7, + SpvDecorationGLSLShared = 8, + SpvDecorationGLSLPacked = 9, + SpvDecorationCPacked = 10, + SpvDecorationBuiltIn = 11, + SpvDecorationNoPerspective = 13, + SpvDecorationFlat = 14, + SpvDecorationPatch = 15, + SpvDecorationCentroid = 16, + SpvDecorationSample = 17, + SpvDecorationInvariant = 18, + SpvDecorationRestrict = 19, + SpvDecorationAliased = 20, + SpvDecorationVolatile = 21, + SpvDecorationConstant = 22, + SpvDecorationCoherent = 23, + SpvDecorationNonWritable = 24, + SpvDecorationNonReadable = 25, + SpvDecorationUniform = 26, + SpvDecorationSaturatedConversion = 28, + SpvDecorationStream = 29, + SpvDecorationLocation = 30, + SpvDecorationComponent = 31, + SpvDecorationIndex = 32, + SpvDecorationBinding = 33, + SpvDecorationDescriptorSet = 34, + SpvDecorationOffset = 35, + SpvDecorationXfbBuffer = 36, + SpvDecorationXfbStride = 37, + SpvDecorationFuncParamAttr = 38, + SpvDecorationFPRoundingMode = 39, + SpvDecorationFPFastMathMode = 40, + SpvDecorationLinkageAttributes = 41, + SpvDecorationNoContraction = 42, + SpvDecorationInputAttachmentIndex = 43, + SpvDecorationAlignment = 44, + SpvDecorationMaxByteOffset = 45, + SpvDecorationAlignmentId = 46, + SpvDecorationMaxByteOffsetId = 47, + SpvDecorationExplicitInterpAMD = 4999, + SpvDecorationOverrideCoverageNV = 5248, + SpvDecorationPassthroughNV = 5250, + SpvDecorationViewportRelativeNV = 5252, + SpvDecorationSecondaryViewportRelativeNV = 5256, + SpvDecorationHlslCounterBufferGOOGLE = 5634, + SpvDecorationHlslSemanticGOOGLE = 5635, + SpvDecorationMax = 0x7fffffff, +} SpvDecoration; + +typedef enum SpvBuiltIn_ { + SpvBuiltInPosition = 0, + SpvBuiltInPointSize = 1, + SpvBuiltInClipDistance = 3, + SpvBuiltInCullDistance = 4, + SpvBuiltInVertexId = 5, + SpvBuiltInInstanceId = 6, + SpvBuiltInPrimitiveId = 7, + SpvBuiltInInvocationId = 8, + SpvBuiltInLayer = 9, + SpvBuiltInViewportIndex = 10, + SpvBuiltInTessLevelOuter = 11, + SpvBuiltInTessLevelInner = 12, + SpvBuiltInTessCoord = 13, + SpvBuiltInPatchVertices = 14, + SpvBuiltInFragCoord = 15, + SpvBuiltInPointCoord = 16, + SpvBuiltInFrontFacing = 17, + SpvBuiltInSampleId = 18, + SpvBuiltInSamplePosition = 19, + SpvBuiltInSampleMask = 20, + SpvBuiltInFragDepth = 22, + SpvBuiltInHelperInvocation = 23, + SpvBuiltInNumWorkgroups = 24, + SpvBuiltInWorkgroupSize = 25, + SpvBuiltInWorkgroupId = 26, + SpvBuiltInLocalInvocationId = 27, + SpvBuiltInGlobalInvocationId = 28, + SpvBuiltInLocalInvocationIndex = 29, + SpvBuiltInWorkDim = 30, + SpvBuiltInGlobalSize = 31, + SpvBuiltInEnqueuedWorkgroupSize = 32, + SpvBuiltInGlobalOffset = 33, + SpvBuiltInGlobalLinearId = 34, + SpvBuiltInSubgroupSize = 36, + SpvBuiltInSubgroupMaxSize = 37, + SpvBuiltInNumSubgroups = 38, + SpvBuiltInNumEnqueuedSubgroups = 39, + SpvBuiltInSubgroupId = 40, + SpvBuiltInSubgroupLocalInvocationId = 41, + SpvBuiltInVertexIndex = 42, + SpvBuiltInInstanceIndex = 43, + SpvBuiltInSubgroupEqMask = 4416, + SpvBuiltInSubgroupEqMaskKHR = 4416, + SpvBuiltInSubgroupGeMask = 4417, + SpvBuiltInSubgroupGeMaskKHR = 4417, + SpvBuiltInSubgroupGtMask = 4418, + SpvBuiltInSubgroupGtMaskKHR = 4418, + SpvBuiltInSubgroupLeMask = 4419, + SpvBuiltInSubgroupLeMaskKHR = 4419, + SpvBuiltInSubgroupLtMask = 4420, + SpvBuiltInSubgroupLtMaskKHR = 4420, + SpvBuiltInBaseVertex = 4424, + SpvBuiltInBaseInstance = 4425, + SpvBuiltInDrawIndex = 4426, + SpvBuiltInDeviceIndex = 4438, + SpvBuiltInViewIndex = 4440, + SpvBuiltInBaryCoordNoPerspAMD = 4992, + SpvBuiltInBaryCoordNoPerspCentroidAMD = 4993, + SpvBuiltInBaryCoordNoPerspSampleAMD = 
4994, + SpvBuiltInBaryCoordSmoothAMD = 4995, + SpvBuiltInBaryCoordSmoothCentroidAMD = 4996, + SpvBuiltInBaryCoordSmoothSampleAMD = 4997, + SpvBuiltInBaryCoordPullModelAMD = 4998, + SpvBuiltInFragStencilRefEXT = 5014, + SpvBuiltInViewportMaskNV = 5253, + SpvBuiltInSecondaryPositionNV = 5257, + SpvBuiltInSecondaryViewportMaskNV = 5258, + SpvBuiltInPositionPerViewNV = 5261, + SpvBuiltInViewportMaskPerViewNV = 5262, + SpvBuiltInFullyCoveredEXT = 5264, + SpvBuiltInMax = 0x7fffffff, +} SpvBuiltIn; + +typedef enum SpvSelectionControlShift_ { + SpvSelectionControlFlattenShift = 0, + SpvSelectionControlDontFlattenShift = 1, + SpvSelectionControlMax = 0x7fffffff, +} SpvSelectionControlShift; + +typedef enum SpvSelectionControlMask_ { + SpvSelectionControlMaskNone = 0, + SpvSelectionControlFlattenMask = 0x00000001, + SpvSelectionControlDontFlattenMask = 0x00000002, +} SpvSelectionControlMask; + +typedef enum SpvLoopControlShift_ { + SpvLoopControlUnrollShift = 0, + SpvLoopControlDontUnrollShift = 1, + SpvLoopControlDependencyInfiniteShift = 2, + SpvLoopControlDependencyLengthShift = 3, + SpvLoopControlMax = 0x7fffffff, +} SpvLoopControlShift; + +typedef enum SpvLoopControlMask_ { + SpvLoopControlMaskNone = 0, + SpvLoopControlUnrollMask = 0x00000001, + SpvLoopControlDontUnrollMask = 0x00000002, + SpvLoopControlDependencyInfiniteMask = 0x00000004, + SpvLoopControlDependencyLengthMask = 0x00000008, +} SpvLoopControlMask; + +typedef enum SpvFunctionControlShift_ { + SpvFunctionControlInlineShift = 0, + SpvFunctionControlDontInlineShift = 1, + SpvFunctionControlPureShift = 2, + SpvFunctionControlConstShift = 3, + SpvFunctionControlMax = 0x7fffffff, +} SpvFunctionControlShift; + +typedef enum SpvFunctionControlMask_ { + SpvFunctionControlMaskNone = 0, + SpvFunctionControlInlineMask = 0x00000001, + SpvFunctionControlDontInlineMask = 0x00000002, + SpvFunctionControlPureMask = 0x00000004, + SpvFunctionControlConstMask = 0x00000008, +} SpvFunctionControlMask; + +typedef enum SpvMemorySemanticsShift_ { + SpvMemorySemanticsAcquireShift = 1, + SpvMemorySemanticsReleaseShift = 2, + SpvMemorySemanticsAcquireReleaseShift = 3, + SpvMemorySemanticsSequentiallyConsistentShift = 4, + SpvMemorySemanticsUniformMemoryShift = 6, + SpvMemorySemanticsSubgroupMemoryShift = 7, + SpvMemorySemanticsWorkgroupMemoryShift = 8, + SpvMemorySemanticsCrossWorkgroupMemoryShift = 9, + SpvMemorySemanticsAtomicCounterMemoryShift = 10, + SpvMemorySemanticsImageMemoryShift = 11, + SpvMemorySemanticsMax = 0x7fffffff, +} SpvMemorySemanticsShift; + +typedef enum SpvMemorySemanticsMask_ { + SpvMemorySemanticsMaskNone = 0, + SpvMemorySemanticsAcquireMask = 0x00000002, + SpvMemorySemanticsReleaseMask = 0x00000004, + SpvMemorySemanticsAcquireReleaseMask = 0x00000008, + SpvMemorySemanticsSequentiallyConsistentMask = 0x00000010, + SpvMemorySemanticsUniformMemoryMask = 0x00000040, + SpvMemorySemanticsSubgroupMemoryMask = 0x00000080, + SpvMemorySemanticsWorkgroupMemoryMask = 0x00000100, + SpvMemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + SpvMemorySemanticsAtomicCounterMemoryMask = 0x00000400, + SpvMemorySemanticsImageMemoryMask = 0x00000800, +} SpvMemorySemanticsMask; + +typedef enum SpvMemoryAccessShift_ { + SpvMemoryAccessVolatileShift = 0, + SpvMemoryAccessAlignedShift = 1, + SpvMemoryAccessNontemporalShift = 2, + SpvMemoryAccessMax = 0x7fffffff, +} SpvMemoryAccessShift; + +typedef enum SpvMemoryAccessMask_ { + SpvMemoryAccessMaskNone = 0, + SpvMemoryAccessVolatileMask = 0x00000001, + SpvMemoryAccessAlignedMask = 0x00000002, + 
SpvMemoryAccessNontemporalMask = 0x00000004, +} SpvMemoryAccessMask; + +typedef enum SpvScope_ { + SpvScopeCrossDevice = 0, + SpvScopeDevice = 1, + SpvScopeWorkgroup = 2, + SpvScopeSubgroup = 3, + SpvScopeInvocation = 4, + SpvScopeMax = 0x7fffffff, +} SpvScope; + +typedef enum SpvGroupOperation_ { + SpvGroupOperationReduce = 0, + SpvGroupOperationInclusiveScan = 1, + SpvGroupOperationExclusiveScan = 2, + SpvGroupOperationClusteredReduce = 3, + SpvGroupOperationPartitionedReduceNV = 6, + SpvGroupOperationPartitionedInclusiveScanNV = 7, + SpvGroupOperationPartitionedExclusiveScanNV = 8, + SpvGroupOperationMax = 0x7fffffff, +} SpvGroupOperation; + +typedef enum SpvKernelEnqueueFlags_ { + SpvKernelEnqueueFlagsNoWait = 0, + SpvKernelEnqueueFlagsWaitKernel = 1, + SpvKernelEnqueueFlagsWaitWorkGroup = 2, + SpvKernelEnqueueFlagsMax = 0x7fffffff, +} SpvKernelEnqueueFlags; + +typedef enum SpvKernelProfilingInfoShift_ { + SpvKernelProfilingInfoCmdExecTimeShift = 0, + SpvKernelProfilingInfoMax = 0x7fffffff, +} SpvKernelProfilingInfoShift; + +typedef enum SpvKernelProfilingInfoMask_ { + SpvKernelProfilingInfoMaskNone = 0, + SpvKernelProfilingInfoCmdExecTimeMask = 0x00000001, +} SpvKernelProfilingInfoMask; + +typedef enum SpvCapability_ { + SpvCapabilityMatrix = 0, + SpvCapabilityShader = 1, + SpvCapabilityGeometry = 2, + SpvCapabilityTessellation = 3, + SpvCapabilityAddresses = 4, + SpvCapabilityLinkage = 5, + SpvCapabilityKernel = 6, + SpvCapabilityVector16 = 7, + SpvCapabilityFloat16Buffer = 8, + SpvCapabilityFloat16 = 9, + SpvCapabilityFloat64 = 10, + SpvCapabilityInt64 = 11, + SpvCapabilityInt64Atomics = 12, + SpvCapabilityImageBasic = 13, + SpvCapabilityImageReadWrite = 14, + SpvCapabilityImageMipmap = 15, + SpvCapabilityPipes = 17, + SpvCapabilityGroups = 18, + SpvCapabilityDeviceEnqueue = 19, + SpvCapabilityLiteralSampler = 20, + SpvCapabilityAtomicStorage = 21, + SpvCapabilityInt16 = 22, + SpvCapabilityTessellationPointSize = 23, + SpvCapabilityGeometryPointSize = 24, + SpvCapabilityImageGatherExtended = 25, + SpvCapabilityStorageImageMultisample = 27, + SpvCapabilityUniformBufferArrayDynamicIndexing = 28, + SpvCapabilitySampledImageArrayDynamicIndexing = 29, + SpvCapabilityStorageBufferArrayDynamicIndexing = 30, + SpvCapabilityStorageImageArrayDynamicIndexing = 31, + SpvCapabilityClipDistance = 32, + SpvCapabilityCullDistance = 33, + SpvCapabilityImageCubeArray = 34, + SpvCapabilitySampleRateShading = 35, + SpvCapabilityImageRect = 36, + SpvCapabilitySampledRect = 37, + SpvCapabilityGenericPointer = 38, + SpvCapabilityInt8 = 39, + SpvCapabilityInputAttachment = 40, + SpvCapabilitySparseResidency = 41, + SpvCapabilityMinLod = 42, + SpvCapabilitySampled1D = 43, + SpvCapabilityImage1D = 44, + SpvCapabilitySampledCubeArray = 45, + SpvCapabilitySampledBuffer = 46, + SpvCapabilityImageBuffer = 47, + SpvCapabilityImageMSArray = 48, + SpvCapabilityStorageImageExtendedFormats = 49, + SpvCapabilityImageQuery = 50, + SpvCapabilityDerivativeControl = 51, + SpvCapabilityInterpolationFunction = 52, + SpvCapabilityTransformFeedback = 53, + SpvCapabilityGeometryStreams = 54, + SpvCapabilityStorageImageReadWithoutFormat = 55, + SpvCapabilityStorageImageWriteWithoutFormat = 56, + SpvCapabilityMultiViewport = 57, + SpvCapabilitySubgroupDispatch = 58, + SpvCapabilityNamedBarrier = 59, + SpvCapabilityPipeStorage = 60, + SpvCapabilityGroupNonUniform = 61, + SpvCapabilityGroupNonUniformVote = 62, + SpvCapabilityGroupNonUniformArithmetic = 63, + SpvCapabilityGroupNonUniformBallot = 64, + 
SpvCapabilityGroupNonUniformShuffle = 65, + SpvCapabilityGroupNonUniformShuffleRelative = 66, + SpvCapabilityGroupNonUniformClustered = 67, + SpvCapabilityGroupNonUniformQuad = 68, + SpvCapabilitySubgroupBallotKHR = 4423, + SpvCapabilityDrawParameters = 4427, + SpvCapabilitySubgroupVoteKHR = 4431, + SpvCapabilityStorageBuffer16BitAccess = 4433, + SpvCapabilityStorageUniformBufferBlock16 = 4433, + SpvCapabilityStorageUniform16 = 4434, + SpvCapabilityUniformAndStorageBuffer16BitAccess = 4434, + SpvCapabilityStoragePushConstant16 = 4435, + SpvCapabilityStorageInputOutput16 = 4436, + SpvCapabilityDeviceGroup = 4437, + SpvCapabilityMultiView = 4439, + SpvCapabilityVariablePointersStorageBuffer = 4441, + SpvCapabilityVariablePointers = 4442, + SpvCapabilityAtomicStorageOps = 4445, + SpvCapabilitySampleMaskPostDepthCoverage = 4447, + SpvCapabilityFloat16ImageAMD = 5008, + SpvCapabilityImageGatherBiasLodAMD = 5009, + SpvCapabilityFragmentMaskAMD = 5010, + SpvCapabilityStencilExportEXT = 5013, + SpvCapabilityImageReadWriteLodAMD = 5015, + SpvCapabilitySampleMaskOverrideCoverageNV = 5249, + SpvCapabilityGeometryShaderPassthroughNV = 5251, + SpvCapabilityShaderViewportIndexLayerEXT = 5254, + SpvCapabilityShaderViewportIndexLayerNV = 5254, + SpvCapabilityShaderViewportMaskNV = 5255, + SpvCapabilityShaderStereoViewNV = 5259, + SpvCapabilityPerViewAttributesNV = 5260, + SpvCapabilityFragmentFullyCoveredEXT = 5265, + SpvCapabilityGroupNonUniformPartitionedNV = 5297, + SpvCapabilitySubgroupShuffleINTEL = 5568, + SpvCapabilitySubgroupBufferBlockIOINTEL = 5569, + SpvCapabilitySubgroupImageBlockIOINTEL = 5570, + SpvCapabilityMax = 0x7fffffff, +} SpvCapability; + +typedef enum SpvOp_ { + SpvOpNop = 0, + SpvOpUndef = 1, + SpvOpSourceContinued = 2, + SpvOpSource = 3, + SpvOpSourceExtension = 4, + SpvOpName = 5, + SpvOpMemberName = 6, + SpvOpString = 7, + SpvOpLine = 8, + SpvOpExtension = 10, + SpvOpExtInstImport = 11, + SpvOpExtInst = 12, + SpvOpMemoryModel = 14, + SpvOpEntryPoint = 15, + SpvOpExecutionMode = 16, + SpvOpCapability = 17, + SpvOpTypeVoid = 19, + SpvOpTypeBool = 20, + SpvOpTypeInt = 21, + SpvOpTypeFloat = 22, + SpvOpTypeVector = 23, + SpvOpTypeMatrix = 24, + SpvOpTypeImage = 25, + SpvOpTypeSampler = 26, + SpvOpTypeSampledImage = 27, + SpvOpTypeArray = 28, + SpvOpTypeRuntimeArray = 29, + SpvOpTypeStruct = 30, + SpvOpTypeOpaque = 31, + SpvOpTypePointer = 32, + SpvOpTypeFunction = 33, + SpvOpTypeEvent = 34, + SpvOpTypeDeviceEvent = 35, + SpvOpTypeReserveId = 36, + SpvOpTypeQueue = 37, + SpvOpTypePipe = 38, + SpvOpTypeForwardPointer = 39, + SpvOpConstantTrue = 41, + SpvOpConstantFalse = 42, + SpvOpConstant = 43, + SpvOpConstantComposite = 44, + SpvOpConstantSampler = 45, + SpvOpConstantNull = 46, + SpvOpSpecConstantTrue = 48, + SpvOpSpecConstantFalse = 49, + SpvOpSpecConstant = 50, + SpvOpSpecConstantComposite = 51, + SpvOpSpecConstantOp = 52, + SpvOpFunction = 54, + SpvOpFunctionParameter = 55, + SpvOpFunctionEnd = 56, + SpvOpFunctionCall = 57, + SpvOpVariable = 59, + SpvOpImageTexelPointer = 60, + SpvOpLoad = 61, + SpvOpStore = 62, + SpvOpCopyMemory = 63, + SpvOpCopyMemorySized = 64, + SpvOpAccessChain = 65, + SpvOpInBoundsAccessChain = 66, + SpvOpPtrAccessChain = 67, + SpvOpArrayLength = 68, + SpvOpGenericPtrMemSemantics = 69, + SpvOpInBoundsPtrAccessChain = 70, + SpvOpDecorate = 71, + SpvOpMemberDecorate = 72, + SpvOpDecorationGroup = 73, + SpvOpGroupDecorate = 74, + SpvOpGroupMemberDecorate = 75, + SpvOpVectorExtractDynamic = 77, + SpvOpVectorInsertDynamic = 78, + SpvOpVectorShuffle = 79, + 
SpvOpCompositeConstruct = 80, + SpvOpCompositeExtract = 81, + SpvOpCompositeInsert = 82, + SpvOpCopyObject = 83, + SpvOpTranspose = 84, + SpvOpSampledImage = 86, + SpvOpImageSampleImplicitLod = 87, + SpvOpImageSampleExplicitLod = 88, + SpvOpImageSampleDrefImplicitLod = 89, + SpvOpImageSampleDrefExplicitLod = 90, + SpvOpImageSampleProjImplicitLod = 91, + SpvOpImageSampleProjExplicitLod = 92, + SpvOpImageSampleProjDrefImplicitLod = 93, + SpvOpImageSampleProjDrefExplicitLod = 94, + SpvOpImageFetch = 95, + SpvOpImageGather = 96, + SpvOpImageDrefGather = 97, + SpvOpImageRead = 98, + SpvOpImageWrite = 99, + SpvOpImage = 100, + SpvOpImageQueryFormat = 101, + SpvOpImageQueryOrder = 102, + SpvOpImageQuerySizeLod = 103, + SpvOpImageQuerySize = 104, + SpvOpImageQueryLod = 105, + SpvOpImageQueryLevels = 106, + SpvOpImageQuerySamples = 107, + SpvOpConvertFToU = 109, + SpvOpConvertFToS = 110, + SpvOpConvertSToF = 111, + SpvOpConvertUToF = 112, + SpvOpUConvert = 113, + SpvOpSConvert = 114, + SpvOpFConvert = 115, + SpvOpQuantizeToF16 = 116, + SpvOpConvertPtrToU = 117, + SpvOpSatConvertSToU = 118, + SpvOpSatConvertUToS = 119, + SpvOpConvertUToPtr = 120, + SpvOpPtrCastToGeneric = 121, + SpvOpGenericCastToPtr = 122, + SpvOpGenericCastToPtrExplicit = 123, + SpvOpBitcast = 124, + SpvOpSNegate = 126, + SpvOpFNegate = 127, + SpvOpIAdd = 128, + SpvOpFAdd = 129, + SpvOpISub = 130, + SpvOpFSub = 131, + SpvOpIMul = 132, + SpvOpFMul = 133, + SpvOpUDiv = 134, + SpvOpSDiv = 135, + SpvOpFDiv = 136, + SpvOpUMod = 137, + SpvOpSRem = 138, + SpvOpSMod = 139, + SpvOpFRem = 140, + SpvOpFMod = 141, + SpvOpVectorTimesScalar = 142, + SpvOpMatrixTimesScalar = 143, + SpvOpVectorTimesMatrix = 144, + SpvOpMatrixTimesVector = 145, + SpvOpMatrixTimesMatrix = 146, + SpvOpOuterProduct = 147, + SpvOpDot = 148, + SpvOpIAddCarry = 149, + SpvOpISubBorrow = 150, + SpvOpUMulExtended = 151, + SpvOpSMulExtended = 152, + SpvOpAny = 154, + SpvOpAll = 155, + SpvOpIsNan = 156, + SpvOpIsInf = 157, + SpvOpIsFinite = 158, + SpvOpIsNormal = 159, + SpvOpSignBitSet = 160, + SpvOpLessOrGreater = 161, + SpvOpOrdered = 162, + SpvOpUnordered = 163, + SpvOpLogicalEqual = 164, + SpvOpLogicalNotEqual = 165, + SpvOpLogicalOr = 166, + SpvOpLogicalAnd = 167, + SpvOpLogicalNot = 168, + SpvOpSelect = 169, + SpvOpIEqual = 170, + SpvOpINotEqual = 171, + SpvOpUGreaterThan = 172, + SpvOpSGreaterThan = 173, + SpvOpUGreaterThanEqual = 174, + SpvOpSGreaterThanEqual = 175, + SpvOpULessThan = 176, + SpvOpSLessThan = 177, + SpvOpULessThanEqual = 178, + SpvOpSLessThanEqual = 179, + SpvOpFOrdEqual = 180, + SpvOpFUnordEqual = 181, + SpvOpFOrdNotEqual = 182, + SpvOpFUnordNotEqual = 183, + SpvOpFOrdLessThan = 184, + SpvOpFUnordLessThan = 185, + SpvOpFOrdGreaterThan = 186, + SpvOpFUnordGreaterThan = 187, + SpvOpFOrdLessThanEqual = 188, + SpvOpFUnordLessThanEqual = 189, + SpvOpFOrdGreaterThanEqual = 190, + SpvOpFUnordGreaterThanEqual = 191, + SpvOpShiftRightLogical = 194, + SpvOpShiftRightArithmetic = 195, + SpvOpShiftLeftLogical = 196, + SpvOpBitwiseOr = 197, + SpvOpBitwiseXor = 198, + SpvOpBitwiseAnd = 199, + SpvOpNot = 200, + SpvOpBitFieldInsert = 201, + SpvOpBitFieldSExtract = 202, + SpvOpBitFieldUExtract = 203, + SpvOpBitReverse = 204, + SpvOpBitCount = 205, + SpvOpDPdx = 207, + SpvOpDPdy = 208, + SpvOpFwidth = 209, + SpvOpDPdxFine = 210, + SpvOpDPdyFine = 211, + SpvOpFwidthFine = 212, + SpvOpDPdxCoarse = 213, + SpvOpDPdyCoarse = 214, + SpvOpFwidthCoarse = 215, + SpvOpEmitVertex = 218, + SpvOpEndPrimitive = 219, + SpvOpEmitStreamVertex = 220, + SpvOpEndStreamPrimitive = 221, + 
SpvOpControlBarrier = 224, + SpvOpMemoryBarrier = 225, + SpvOpAtomicLoad = 227, + SpvOpAtomicStore = 228, + SpvOpAtomicExchange = 229, + SpvOpAtomicCompareExchange = 230, + SpvOpAtomicCompareExchangeWeak = 231, + SpvOpAtomicIIncrement = 232, + SpvOpAtomicIDecrement = 233, + SpvOpAtomicIAdd = 234, + SpvOpAtomicISub = 235, + SpvOpAtomicSMin = 236, + SpvOpAtomicUMin = 237, + SpvOpAtomicSMax = 238, + SpvOpAtomicUMax = 239, + SpvOpAtomicAnd = 240, + SpvOpAtomicOr = 241, + SpvOpAtomicXor = 242, + SpvOpPhi = 245, + SpvOpLoopMerge = 246, + SpvOpSelectionMerge = 247, + SpvOpLabel = 248, + SpvOpBranch = 249, + SpvOpBranchConditional = 250, + SpvOpSwitch = 251, + SpvOpKill = 252, + SpvOpReturn = 253, + SpvOpReturnValue = 254, + SpvOpUnreachable = 255, + SpvOpLifetimeStart = 256, + SpvOpLifetimeStop = 257, + SpvOpGroupAsyncCopy = 259, + SpvOpGroupWaitEvents = 260, + SpvOpGroupAll = 261, + SpvOpGroupAny = 262, + SpvOpGroupBroadcast = 263, + SpvOpGroupIAdd = 264, + SpvOpGroupFAdd = 265, + SpvOpGroupFMin = 266, + SpvOpGroupUMin = 267, + SpvOpGroupSMin = 268, + SpvOpGroupFMax = 269, + SpvOpGroupUMax = 270, + SpvOpGroupSMax = 271, + SpvOpReadPipe = 274, + SpvOpWritePipe = 275, + SpvOpReservedReadPipe = 276, + SpvOpReservedWritePipe = 277, + SpvOpReserveReadPipePackets = 278, + SpvOpReserveWritePipePackets = 279, + SpvOpCommitReadPipe = 280, + SpvOpCommitWritePipe = 281, + SpvOpIsValidReserveId = 282, + SpvOpGetNumPipePackets = 283, + SpvOpGetMaxPipePackets = 284, + SpvOpGroupReserveReadPipePackets = 285, + SpvOpGroupReserveWritePipePackets = 286, + SpvOpGroupCommitReadPipe = 287, + SpvOpGroupCommitWritePipe = 288, + SpvOpEnqueueMarker = 291, + SpvOpEnqueueKernel = 292, + SpvOpGetKernelNDrangeSubGroupCount = 293, + SpvOpGetKernelNDrangeMaxSubGroupSize = 294, + SpvOpGetKernelWorkGroupSize = 295, + SpvOpGetKernelPreferredWorkGroupSizeMultiple = 296, + SpvOpRetainEvent = 297, + SpvOpReleaseEvent = 298, + SpvOpCreateUserEvent = 299, + SpvOpIsValidEvent = 300, + SpvOpSetUserEventStatus = 301, + SpvOpCaptureEventProfilingInfo = 302, + SpvOpGetDefaultQueue = 303, + SpvOpBuildNDRange = 304, + SpvOpImageSparseSampleImplicitLod = 305, + SpvOpImageSparseSampleExplicitLod = 306, + SpvOpImageSparseSampleDrefImplicitLod = 307, + SpvOpImageSparseSampleDrefExplicitLod = 308, + SpvOpImageSparseSampleProjImplicitLod = 309, + SpvOpImageSparseSampleProjExplicitLod = 310, + SpvOpImageSparseSampleProjDrefImplicitLod = 311, + SpvOpImageSparseSampleProjDrefExplicitLod = 312, + SpvOpImageSparseFetch = 313, + SpvOpImageSparseGather = 314, + SpvOpImageSparseDrefGather = 315, + SpvOpImageSparseTexelsResident = 316, + SpvOpNoLine = 317, + SpvOpAtomicFlagTestAndSet = 318, + SpvOpAtomicFlagClear = 319, + SpvOpImageSparseRead = 320, + SpvOpSizeOf = 321, + SpvOpTypePipeStorage = 322, + SpvOpConstantPipeStorage = 323, + SpvOpCreatePipeFromPipeStorage = 324, + SpvOpGetKernelLocalSizeForSubgroupCount = 325, + SpvOpGetKernelMaxNumSubgroups = 326, + SpvOpTypeNamedBarrier = 327, + SpvOpNamedBarrierInitialize = 328, + SpvOpMemoryNamedBarrier = 329, + SpvOpModuleProcessed = 330, + SpvOpExecutionModeId = 331, + SpvOpDecorateId = 332, + SpvOpGroupNonUniformElect = 333, + SpvOpGroupNonUniformAll = 334, + SpvOpGroupNonUniformAny = 335, + SpvOpGroupNonUniformAllEqual = 336, + SpvOpGroupNonUniformBroadcast = 337, + SpvOpGroupNonUniformBroadcastFirst = 338, + SpvOpGroupNonUniformBallot = 339, + SpvOpGroupNonUniformInverseBallot = 340, + SpvOpGroupNonUniformBallotBitExtract = 341, + SpvOpGroupNonUniformBallotBitCount = 342, + 
SpvOpGroupNonUniformBallotFindLSB = 343, + SpvOpGroupNonUniformBallotFindMSB = 344, + SpvOpGroupNonUniformShuffle = 345, + SpvOpGroupNonUniformShuffleXor = 346, + SpvOpGroupNonUniformShuffleUp = 347, + SpvOpGroupNonUniformShuffleDown = 348, + SpvOpGroupNonUniformIAdd = 349, + SpvOpGroupNonUniformFAdd = 350, + SpvOpGroupNonUniformIMul = 351, + SpvOpGroupNonUniformFMul = 352, + SpvOpGroupNonUniformSMin = 353, + SpvOpGroupNonUniformUMin = 354, + SpvOpGroupNonUniformFMin = 355, + SpvOpGroupNonUniformSMax = 356, + SpvOpGroupNonUniformUMax = 357, + SpvOpGroupNonUniformFMax = 358, + SpvOpGroupNonUniformBitwiseAnd = 359, + SpvOpGroupNonUniformBitwiseOr = 360, + SpvOpGroupNonUniformBitwiseXor = 361, + SpvOpGroupNonUniformLogicalAnd = 362, + SpvOpGroupNonUniformLogicalOr = 363, + SpvOpGroupNonUniformLogicalXor = 364, + SpvOpGroupNonUniformQuadBroadcast = 365, + SpvOpGroupNonUniformQuadSwap = 366, + SpvOpSubgroupBallotKHR = 4421, + SpvOpSubgroupFirstInvocationKHR = 4422, + SpvOpSubgroupAllKHR = 4428, + SpvOpSubgroupAnyKHR = 4429, + SpvOpSubgroupAllEqualKHR = 4430, + SpvOpSubgroupReadInvocationKHR = 4432, + SpvOpGroupIAddNonUniformAMD = 5000, + SpvOpGroupFAddNonUniformAMD = 5001, + SpvOpGroupFMinNonUniformAMD = 5002, + SpvOpGroupUMinNonUniformAMD = 5003, + SpvOpGroupSMinNonUniformAMD = 5004, + SpvOpGroupFMaxNonUniformAMD = 5005, + SpvOpGroupUMaxNonUniformAMD = 5006, + SpvOpGroupSMaxNonUniformAMD = 5007, + SpvOpFragmentMaskFetchAMD = 5011, + SpvOpFragmentFetchAMD = 5012, + SpvOpGroupNonUniformPartitionNV = 5296, + SpvOpSubgroupShuffleINTEL = 5571, + SpvOpSubgroupShuffleDownINTEL = 5572, + SpvOpSubgroupShuffleUpINTEL = 5573, + SpvOpSubgroupShuffleXorINTEL = 5574, + SpvOpSubgroupBlockReadINTEL = 5575, + SpvOpSubgroupBlockWriteINTEL = 5576, + SpvOpSubgroupImageBlockReadINTEL = 5577, + SpvOpSubgroupImageBlockWriteINTEL = 5578, + SpvOpDecorateStringGOOGLE = 5632, + SpvOpMemberDecorateStringGOOGLE = 5633, + SpvOpMax = 0x7fffffff, +} SpvOp; + +#endif // #ifndef spirv_H + diff --git a/src/third_party/spirv-reflect/spirv_reflect.c b/src/third_party/spirv-reflect/spirv_reflect.c new file mode 100644 index 0000000..3df963d --- /dev/null +++ b/src/third_party/spirv-reflect/spirv_reflect.c @@ -0,0 +1,4442 @@ +/* + Copyright 2017-2018 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +#include "spirv_reflect.h" +#include <assert.h> +#include <stdbool.h> +#include <string.h> + +#if defined(WIN32) + #define _CRTDBG_MAP_ALLOC + #include <stdlib.h> + #include <crtdbg.h> +#else + #include <stdlib.h> +#endif + +// Temporary enums until these make it into SPIR-V/Vulkan +// clang-format off +enum { + SpvReflectOpDecorateId = 332, + SpvReflectOpDecorateStringGOOGLE = 5632, + SpvReflectOpMemberDecorateStringGOOGLE = 5633, + SpvReflectDecorationHlslCounterBufferGOOGLE = 5634, + SpvReflectDecorationHlslSemanticGOOGLE = 5635 +}; +// clang-format on + +// clang-format off +enum { + SPIRV_STARTING_WORD_INDEX = 5, + SPIRV_WORD_SIZE = sizeof(uint32_t), + SPIRV_BYTE_WIDTH = 8, + SPIRV_MINIMUM_FILE_SIZE = SPIRV_STARTING_WORD_INDEX * SPIRV_WORD_SIZE, + SPIRV_DATA_ALIGNMENT = 4 * SPIRV_WORD_SIZE, // 16 + SPIRV_ACCESS_CHAIN_INDEX_OFFSET = 4, +}; +// clang-format on + +// clang-format off +enum { + INVALID_VALUE = 0xFFFFFFFF, +}; +// clang-format on + +// clang-format off +enum { + MAX_NODE_NAME_LENGTH = 1024, +}; +// clang-format on + +// clang-format off +enum { + IMAGE_SAMPLED = 1, + IMAGE_STORAGE = 2 +}; +// clang-format on + +// clang-format off +typedef struct ArrayTraits { + uint32_t element_type_id; + uint32_t length_id; +} ArrayTraits; +// clang-format on + +// clang-format off +typedef struct ImageTraits { + uint32_t sampled_type_id; + SpvDim dim; + uint32_t depth; + uint32_t arrayed; + uint32_t ms; + uint32_t sampled; + SpvImageFormat image_format; +} ImageTraits; +// clang-format on + +// clang-format off +typedef struct NumberDecoration { + uint32_t word_offset; + uint32_t value; +} NumberDecoration; +// clang-format on + +// clang-format off +typedef struct StringDecoration { + uint32_t word_offset; + const char* value; +} StringDecoration; +// clang-format on + +// clang-format off +typedef struct Decorations { + bool is_block; + bool is_buffer_block; + bool is_row_major; + bool is_column_major; + bool is_built_in; + bool is_noperspective; + bool is_flat; + bool is_non_writable; + NumberDecoration set; + NumberDecoration binding; + NumberDecoration input_attachment_index; + NumberDecoration location; + NumberDecoration offset; + NumberDecoration uav_counter_buffer; + StringDecoration semantic; + uint32_t array_stride; + uint32_t matrix_stride; + SpvBuiltIn built_in; +} Decorations; +// clang-format on + +// clang-format off +typedef struct Node { + uint32_t result_id; + SpvOp op; + uint32_t result_type_id; + uint32_t type_id; + SpvStorageClass storage_class; + uint32_t word_offset; + uint32_t word_count; + bool is_type; + + ArrayTraits array_traits; + ImageTraits image_traits; + uint32_t image_type_id; + + const char* name; + Decorations decorations; + uint32_t member_count; + const char** member_names; + Decorations* member_decorations; +} Node; +// clang-format on + +// clang-format off +typedef struct String { + uint32_t result_id; + const char* string; +} String; +// clang-format on + +// clang-format off +typedef struct Function { + uint32_t id; + uint32_t callee_count; + uint32_t* callees; + struct Function** callee_ptrs; + uint32_t accessed_ptr_count; + uint32_t* accessed_ptrs; +} Function; +// clang-format on + +// clang-format off +typedef struct AccessChain { + uint32_t result_id; + uint32_t result_type_id; + // + // Pointing to the base of a composite object. 
+ // Generally the id of descriptor block variable + uint32_t base_id; + // + // From spec: + // The first index in Indexes will select the + // top-level member/element/component/element + // of the base composite + uint32_t index_count; + uint32_t* indexes; +} AccessChain; +// clang-format on + +// clang-format off +typedef struct Parser { + size_t spirv_word_count; + uint32_t* spirv_code; + uint32_t string_count; + String* strings; + SpvSourceLanguage source_language; + uint32_t source_language_version; + uint32_t source_file_id; + String source_embedded; + size_t node_count; + Node* nodes; + uint32_t entry_point_count; + uint32_t function_count; + Function* functions; + uint32_t access_chain_count; + AccessChain* access_chains; + + uint32_t type_count; + uint32_t descriptor_count; + uint32_t push_constant_count; +} Parser; +// clang-format on + +static uint32_t Max(uint32_t a, uint32_t b) +{ + return a > b ? a : b; +} + +static uint32_t RoundUp(uint32_t value, uint32_t multiple) +{ + assert(multiple && ((multiple & (multiple - 1)) == 0)); + return (value + multiple - 1) & ~(multiple - 1); +} + +#define IsNull(ptr) \ + (ptr == NULL) + +#define IsNotNull(ptr) \ + (ptr != NULL) + +#define SafeFree(ptr) \ + { \ + if (ptr != NULL) { \ + free((void*)ptr); \ + ptr = NULL; \ + } \ + } + +static int SortCompareUint32(const void* a, const void* b) +{ + const uint32_t* p_a = (const uint32_t*)a; + const uint32_t* p_b = (const uint32_t*)b; + + return (int)*p_a - (int)*p_b; +} + +// +// De-duplicates a sorted array and returns the new size. +// +// Note: The array doesn't actually need to be sorted, just +// arranged into "runs" so that all the entries with one +// value are adjacent. +// +static size_t DedupSortedUint32(uint32_t* arr, size_t size) +{ + if (size == 0) { + return 0; + } + size_t dedup_idx = 0; + for (size_t i = 0; i < size; ++i) { + if (arr[dedup_idx] != arr[i]) { + ++dedup_idx; + arr[dedup_idx] = arr[i]; + } + } + return dedup_idx+1; +} + +static bool SearchSortedUint32(const uint32_t* arr, size_t size, uint32_t target) +{ + size_t lo = 0; + size_t hi = size; + while (lo < hi) { + size_t mid = (hi - lo) / 2 + lo; + if (arr[mid] == target) { + return true; + } else if (arr[mid] < target) { + lo = mid+1; + } else { + hi = mid; + } + } + return false; +} + +static SpvReflectResult IntersectSortedUint32( + const uint32_t* p_arr0, + size_t arr0_size, + const uint32_t* p_arr1, + size_t arr1_size, + uint32_t** pp_res, + size_t* res_size +) +{ + *res_size = 0; + const uint32_t* arr0_end = p_arr0 + arr0_size; + const uint32_t* arr1_end = p_arr1 + arr1_size; + + const uint32_t* idx0 = p_arr0; + const uint32_t* idx1 = p_arr1; + while (idx0 != arr0_end && idx1 != arr1_end) { + if (*idx0 < *idx1) { + ++idx0; + } else if (*idx0 > *idx1) { + ++idx1; + } else { + ++*res_size; + ++idx0; + ++idx1; + } + } + + *pp_res = NULL; + if (*res_size > 0) { + *pp_res = (uint32_t*)calloc(*res_size, sizeof(**pp_res)); + if (IsNull(*pp_res)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + uint32_t* idxr = *pp_res; + idx0 = p_arr0; + idx1 = p_arr1; + while (idx0 != arr0_end && idx1 != arr1_end) { + if (*idx0 < *idx1) { + ++idx0; + } else if (*idx0 > *idx1) { + ++idx1; + } else { + *(idxr++) = *idx0; + ++idx0; + ++idx1; + } + } + } + return SPV_REFLECT_RESULT_SUCCESS; +} + + +static bool InRange(const Parser* p_parser, uint32_t index) +{ + bool in_range = false; + if (IsNotNull(p_parser)) { + in_range = (index < p_parser->spirv_word_count); + } + return in_range; +} + +static SpvReflectResult 
ReadU32(Parser* p_parser, uint32_t word_offset, uint32_t* p_value) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + assert(InRange(p_parser, word_offset)); + SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF; + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && InRange(p_parser, word_offset)) { + *p_value = *(p_parser->spirv_code + word_offset); + result = SPV_REFLECT_RESULT_SUCCESS; + } + return result; +} + +#define CHECKED_READU32(parser, word_offset, value) \ + { \ + SpvReflectResult checked_readu32_result = ReadU32(parser, \ + word_offset, (uint32_t*)&(value)); \ + if (checked_readu32_result != SPV_REFLECT_RESULT_SUCCESS) { \ + return checked_readu32_result; \ + } \ + } + +#define CHECKED_READU32_CAST(parser, word_offset, cast_to_type, value) \ + { \ + uint32_t checked_readu32_cast_u32 = UINT32_MAX; \ + SpvReflectResult checked_readu32_cast_result = ReadU32(parser, \ + word_offset, \ + (uint32_t*)&(checked_readu32_cast_u32)); \ + if (checked_readu32_cast_result != SPV_REFLECT_RESULT_SUCCESS) { \ + return checked_readu32_cast_result; \ + } \ + value = (cast_to_type)checked_readu32_cast_u32; \ + } + +#define IF_READU32(result, parser, word_offset, value) \ + if ((result) == SPV_REFLECT_RESULT_SUCCESS) { \ + result = ReadU32(parser, word_offset, (uint32_t*)&(value)); \ + } + +#define IF_READU32_CAST(result, parser, word_offset, cast_to_type, value) \ + if ((result) == SPV_REFLECT_RESULT_SUCCESS) { \ + uint32_t if_readu32_cast_u32 = UINT32_MAX; \ + result = ReadU32(parser, word_offset, &if_readu32_cast_u32); \ + if ((result) == SPV_REFLECT_RESULT_SUCCESS) { \ + value = (cast_to_type)if_readu32_cast_u32; \ + } \ + } + +static SpvReflectResult ReadStr( + Parser* p_parser, + uint32_t word_offset, + uint32_t word_index, + uint32_t word_count, + uint32_t* p_buf_size, + char* p_buf +) +{ + uint32_t limit = (word_offset + word_count); + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + assert(InRange(p_parser, limit)); + SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF; + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && InRange(p_parser, limit)) { + const char* c_str = (const char*)(p_parser->spirv_code + word_offset + word_index); + uint32_t n = word_count * SPIRV_WORD_SIZE; + uint32_t length_with_terminator = 0; + for (uint32_t i = 0; i < n; ++i) { + char c = *(c_str + i); + if (c == 0) { + length_with_terminator = i + 1; + break; + } + } + + if (length_with_terminator > 0) { + result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + if (IsNotNull(p_buf_size) && IsNotNull(p_buf)) { + result = SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED; + if (length_with_terminator <= *p_buf_size) { + memset(p_buf, 0, *p_buf_size); + memcpy(p_buf, c_str, length_with_terminator); + result = SPV_REFLECT_RESULT_SUCCESS; + } + } + else { + if (IsNotNull(p_buf_size)) { + *p_buf_size = length_with_terminator; + result = SPV_REFLECT_RESULT_SUCCESS; + } + } + } + } + return result; +} + +static SpvReflectDecorationFlags ApplyDecorations(const Decorations* p_decoration_fields) +{ + SpvReflectDecorationFlags decorations = SPV_REFLECT_DECORATION_NONE; + if (p_decoration_fields->is_block) { + decorations |= SPV_REFLECT_DECORATION_BLOCK; + } + if (p_decoration_fields->is_buffer_block) { + decorations |= SPV_REFLECT_DECORATION_BUFFER_BLOCK; + } + if (p_decoration_fields->is_row_major) { + decorations |= SPV_REFLECT_DECORATION_ROW_MAJOR; + } + if (p_decoration_fields->is_column_major) { + decorations |= 
SPV_REFLECT_DECORATION_COLUMN_MAJOR; + } + if (p_decoration_fields->is_built_in) { + decorations |= SPV_REFLECT_DECORATION_BUILT_IN; + } + if (p_decoration_fields->is_noperspective) { + decorations |= SPV_REFLECT_DECORATION_NOPERSPECTIVE; + } + if (p_decoration_fields->is_flat) { + decorations |= SPV_REFLECT_DECORATION_FLAT; + } + if (p_decoration_fields->is_non_writable) { + decorations |= SPV_REFLECT_DECORATION_NON_WRITABLE; + } + return decorations; +} + +static void ApplyNumericTraits(const SpvReflectTypeDescription* p_type, SpvReflectNumericTraits* p_numeric_traits) +{ + memcpy(p_numeric_traits, &p_type->traits.numeric, sizeof(p_type->traits.numeric)); +} + +static void ApplyArrayTraits(const SpvReflectTypeDescription* p_type, SpvReflectArrayTraits* p_array_traits) +{ + memcpy(p_array_traits, &p_type->traits.array, sizeof(p_type->traits.array)); +} + +static Node* FindNode(Parser* p_parser, uint32_t result_id) +{ + Node* p_node = NULL; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_elem = &(p_parser->nodes[i]); + if (p_elem->result_id == result_id) { + p_node = p_elem; + break; + } + } + return p_node; +} + +static SpvReflectTypeDescription* FindType(SpvReflectShaderModule* p_module, uint32_t type_id) +{ + SpvReflectTypeDescription* p_type = NULL; + for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) { + SpvReflectTypeDescription* p_elem = &(p_module->_internal->type_descriptions[i]); + if (p_elem->id == type_id) { + p_type = p_elem; + break; + } + } + return p_type; +} + +static SpvReflectResult CreateParser(size_t size, void* p_code, Parser* p_parser) +{ + if (p_code == NULL) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + if (size < SPIRV_MINIMUM_FILE_SIZE) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE; + } + if ((size % 4) != 0) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE; + } + + p_parser->spirv_word_count = size / SPIRV_WORD_SIZE; + p_parser->spirv_code = (uint32_t*)p_code; + + if (p_parser->spirv_code[0] != SpvMagicNumber) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_MAGIC_NUMBER; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static void DestroyParser(Parser* p_parser) +{ + if (!IsNull(p_parser->nodes)) { + // Free nodes + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (IsNotNull(p_node->member_names)) { + SafeFree(p_node->member_names); + } + if (IsNotNull(p_node->member_decorations)) { + SafeFree(p_node->member_decorations); + } + } + + // Free functions + for (size_t i = 0; i < p_parser->function_count; ++i) { + SafeFree(p_parser->functions[i].callees); + SafeFree(p_parser->functions[i].callee_ptrs); + SafeFree(p_parser->functions[i].accessed_ptrs); + } + + // Free access chains + for (uint32_t i = 0; i < p_parser->access_chain_count; ++i) { + SafeFree(p_parser->access_chains[i].indexes); + } + + SafeFree(p_parser->nodes); + SafeFree(p_parser->strings); + SafeFree(p_parser->functions); + SafeFree(p_parser->access_chains); + p_parser->node_count = 0; + } +} + +static SpvReflectResult ParseNodes(Parser* p_parser) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + + uint32_t* p_spirv = p_parser->spirv_code; + uint32_t spirv_word_index = SPIRV_STARTING_WORD_INDEX; + + // Count nodes + uint32_t node_count = 0; + while (spirv_word_index < p_parser->spirv_word_count) { + uint32_t word = p_spirv[spirv_word_index]; + SpvOp op = (SpvOp)(word & 0xFFFF); + uint32_t node_word_count = (word >> 16) & 0xFFFF; + if 
(node_word_count == 0) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_INSTRUCTION; + } + if (op == SpvOpAccessChain) { + ++(p_parser->access_chain_count); + } + spirv_word_index += node_word_count; + ++node_count; + } + + if (node_count == 0) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF; + } + + // Allocate nodes + p_parser->node_count = node_count; + p_parser->nodes = (Node*)calloc(p_parser->node_count, sizeof(*(p_parser->nodes))); + if (IsNull(p_parser->nodes)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + // Mark all nodes with an invalid state + for (uint32_t i = 0; i < node_count; ++i) { + p_parser->nodes[i].op = (SpvOp)INVALID_VALUE; + p_parser->nodes[i].storage_class = (SpvStorageClass)INVALID_VALUE; + p_parser->nodes[i].decorations.set.value = (uint32_t)INVALID_VALUE; + p_parser->nodes[i].decorations.binding.value = (uint32_t)INVALID_VALUE; + p_parser->nodes[i].decorations.location.value = (uint32_t)INVALID_VALUE; + p_parser->nodes[i].decorations.offset.value = (uint32_t)INVALID_VALUE; + p_parser->nodes[i].decorations.uav_counter_buffer.value = (uint32_t)INVALID_VALUE; + p_parser->nodes[i].decorations.built_in = (SpvBuiltIn)INVALID_VALUE; + } + // Mark source file id node + p_parser->source_file_id = (uint32_t)INVALID_VALUE; + + // Function node + uint32_t function_node = (uint32_t)INVALID_VALUE; + + // Allocate access chain + if (p_parser->access_chain_count > 0) { + p_parser->access_chains = (AccessChain*)calloc(p_parser->access_chain_count, sizeof(*(p_parser->access_chains))); + if (IsNull(p_parser->access_chains)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + // Parse nodes + uint32_t node_index = 0; + uint32_t access_chain_index = 0; + spirv_word_index = SPIRV_STARTING_WORD_INDEX; + while (spirv_word_index < p_parser->spirv_word_count) { + uint32_t word = p_spirv[spirv_word_index]; + SpvOp op = (SpvOp)(word & 0xFFFF); + uint32_t node_word_count = (word >> 16) & 0xFFFF; + + Node* p_node = &(p_parser->nodes[node_index]); + p_node->op = op; + p_node->word_offset = spirv_word_index; + p_node->word_count = node_word_count; + + switch (p_node->op) { + default: break; + + case SpvOpString: { + ++(p_parser->string_count); + } + break; + + case SpvOpSource: { + CHECKED_READU32_CAST(p_parser, p_node->word_offset + 1, SpvSourceLanguage, p_parser->source_language); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_parser->source_language_version); + if (p_node->word_count >= 4) { + CHECKED_READU32(p_parser, p_node->word_offset + 3, p_parser->source_file_id); + } + } + break; + + case SpvOpEntryPoint: { + ++(p_parser->entry_point_count); + } + break; + + case SpvOpName: + case SpvOpMemberName: + { + uint32_t member_offset = (p_node->op == SpvOpMemberName) ? 
1 : 0; + uint32_t name_start = p_node->word_offset + member_offset + 2; + p_node->name = (const char*)(p_parser->spirv_code + name_start); + } + break; + + case SpvOpTypeStruct: + { + p_node->member_count = p_node->word_count - 2; + } // Fall through + case SpvOpTypeVoid: + case SpvOpTypeBool: + case SpvOpTypeInt: + case SpvOpTypeFloat: + case SpvOpTypeVector: + case SpvOpTypeMatrix: + case SpvOpTypeSampler: + case SpvOpTypeOpaque: + case SpvOpTypeFunction: + case SpvOpTypeEvent: + case SpvOpTypeDeviceEvent: + case SpvOpTypeReserveId: + case SpvOpTypeQueue: + case SpvOpTypePipe: + { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + p_node->is_type = true; + } + break; + + case SpvOpTypeImage: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->image_traits.sampled_type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->image_traits.dim); + CHECKED_READU32(p_parser, p_node->word_offset + 4, p_node->image_traits.depth); + CHECKED_READU32(p_parser, p_node->word_offset + 5, p_node->image_traits.arrayed); + CHECKED_READU32(p_parser, p_node->word_offset + 6, p_node->image_traits.ms); + CHECKED_READU32(p_parser, p_node->word_offset + 7, p_node->image_traits.sampled); + CHECKED_READU32(p_parser, p_node->word_offset + 8, p_node->image_traits.image_format); + p_node->is_type = true; + } + break; + + case SpvOpTypeSampledImage: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->image_type_id); + p_node->is_type = true; + } + break; + + case SpvOpTypeArray: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->array_traits.element_type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->array_traits.length_id); + p_node->is_type = true; + } + break; + + case SpvOpTypeRuntimeArray: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->array_traits.element_type_id); + p_node->is_type = true; + } + break; + + case SpvOpTypePointer: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->storage_class); + CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->type_id); + p_node->is_type = true; + } + break; + + case SpvOpTypeForwardPointer: + { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->storage_class); + p_node->is_type = true; + } + break; + + case SpvOpConstantTrue: + case SpvOpConstantFalse: + case SpvOpConstant: + case SpvOpConstantComposite: + case SpvOpConstantSampler: + case SpvOpConstantNull: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); + } + break; + + case SpvOpSpecConstantTrue: + case SpvOpSpecConstantFalse: + case SpvOpSpecConstant: + case SpvOpSpecConstantComposite: + case SpvOpSpecConstantOp: { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); + } + break; + + case SpvOpVariable: + { + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); + 
CHECKED_READU32(p_parser, p_node->word_offset + 3, p_node->storage_class); + } + break; + + case SpvOpLoad: + { + // Only load enough so OpDecorate can reference the node, skip the remaining operands. + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_node->result_type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); + } + break; + + case SpvOpAccessChain: + { + AccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]); + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_access_chain->result_type_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_access_chain->result_id); + CHECKED_READU32(p_parser, p_node->word_offset + 3, p_access_chain->base_id); + // + // SPIRV_ACCESS_CHAIN_INDEX_OFFSET (4) is the number of words up until the first index: + // [Node, Result Type Id, Result Id, Base Id, ] + // + p_access_chain->index_count = (node_word_count - SPIRV_ACCESS_CHAIN_INDEX_OFFSET); + if (p_access_chain->index_count > 0) { + p_access_chain->indexes = (uint32_t*)calloc(p_access_chain->index_count, sizeof(*(p_access_chain->indexes))); + if (IsNull( p_access_chain->indexes)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + // Parse any index values for access chain + for (uint32_t index_index = 0; index_index < p_access_chain->index_count; ++index_index) { + // Read index id + uint32_t index_id = 0; + CHECKED_READU32(p_parser, p_node->word_offset + SPIRV_ACCESS_CHAIN_INDEX_OFFSET + index_index, index_id); + // Find OpConstant node that contains index value + Node* p_index_value_node = FindNode(p_parser, index_id); + if ((p_index_value_node != NULL) && (p_index_value_node->op == SpvOpConstant)) { + // Read index value + uint32_t index_value = UINT32_MAX; + CHECKED_READU32(p_parser, p_index_value_node->word_offset + 3, index_value); + assert(index_value != UINT32_MAX); + // Write index value to array + p_access_chain->indexes[index_index] = index_value; + } + } + } + ++access_chain_index; + } + break; + + case SpvOpFunction: + { + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_node->result_id); + // Count function definitions, not function declarations. To determine + // the difference, set an in-function variable, and then if an OpLabel + // is reached before the end of the function increment the function + // count. 
+ function_node = node_index; + } + break; + + case SpvOpLabel: + { + if (function_node != (uint32_t)INVALID_VALUE) { + Node* p_func_node = &(p_parser->nodes[function_node]); + CHECKED_READU32(p_parser, p_func_node->word_offset + 2, p_func_node->result_id); + ++(p_parser->function_count); + } + } // Fall through + + case SpvOpFunctionEnd: + { + function_node = (uint32_t)INVALID_VALUE; + } + break; + } + + if (p_node->is_type) { + ++(p_parser->type_count); + } + + spirv_word_index += node_word_count; + ++node_index; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseStrings(Parser* p_parser) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + assert(IsNotNull(p_parser->nodes)); + + // Early out + if (p_parser->string_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { + // Allocate string storage + p_parser->strings = (String*)calloc(p_parser->string_count, sizeof(*(p_parser->strings))); + + uint32_t string_index = 0; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (p_node->op != SpvOpString) { + continue; + } + + // Paranoid check against string count + assert(string_index < p_parser->string_count); + if (string_index >= p_parser->string_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + // Result id + String* p_string = &(p_parser->strings[string_index]); + CHECKED_READU32(p_parser, p_node->word_offset + 1, p_string->result_id); + + // String + uint32_t string_start = p_node->word_offset + 2; + p_string->string = (const char*)(p_parser->spirv_code + string_start); + + // Increment string index + ++string_index; + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseSource(Parser* p_parser, SpvReflectShaderModule* p_module) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code)) { + // Source file + if (IsNotNull(p_parser->strings)) { + for (uint32_t i = 0; i < p_parser->string_count; ++i) { + String* p_string = &(p_parser->strings[i]); + if (p_string->result_id == p_parser->source_file_id) { + p_module->source_file = p_string->string; + break; + } + } + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseFunction(Parser* p_parser, Node* p_func_node, Function* p_func, size_t first_label_index) +{ + p_func->id = p_func_node->result_id; + + p_func->callee_count = 0; + p_func->accessed_ptr_count = 0; + + for (size_t i = first_label_index; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (p_node->op == SpvOpFunctionEnd) { + break; + } + switch (p_node->op) { + case SpvOpFunctionCall: { + ++(p_func->callee_count); + } + break; + case SpvOpLoad: + case SpvOpAccessChain: + case SpvOpInBoundsAccessChain: + case SpvOpPtrAccessChain: + case SpvOpArrayLength: + case SpvOpGenericPtrMemSemantics: + case SpvOpInBoundsPtrAccessChain: + case SpvOpStore: + { + ++(p_func->accessed_ptr_count); + } + break; + case SpvOpCopyMemory: + case SpvOpCopyMemorySized: + { + p_func->accessed_ptr_count += 2; + } + break; + default: break; + } + } + + if (p_func->callee_count > 0) { + p_func->callees = (uint32_t*)calloc(p_func->callee_count, + sizeof(*(p_func->callees))); + if (IsNull(p_func->callees)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + if (p_func->accessed_ptr_count > 0) { + p_func->accessed_ptrs = 
(uint32_t*)calloc(p_func->accessed_ptr_count, + sizeof(*(p_func->accessed_ptrs))); + if (IsNull(p_func->accessed_ptrs)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + p_func->callee_count = 0; + p_func->accessed_ptr_count = 0; + for (size_t i = first_label_index; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (p_node->op == SpvOpFunctionEnd) { + break; + } + switch (p_node->op) { + case SpvOpFunctionCall: { + CHECKED_READU32(p_parser, p_node->word_offset + 3, + p_func->callees[p_func->callee_count]); + (++p_func->callee_count); + } + break; + case SpvOpLoad: + case SpvOpAccessChain: + case SpvOpInBoundsAccessChain: + case SpvOpPtrAccessChain: + case SpvOpArrayLength: + case SpvOpGenericPtrMemSemantics: + case SpvOpInBoundsPtrAccessChain: + { + CHECKED_READU32(p_parser, p_node->word_offset + 3, + p_func->accessed_ptrs[p_func->accessed_ptr_count]); + (++p_func->accessed_ptr_count); + } + break; + case SpvOpStore: + { + CHECKED_READU32(p_parser, p_node->word_offset + 2, + p_func->accessed_ptrs[p_func->accessed_ptr_count]); + (++p_func->accessed_ptr_count); + } + break; + case SpvOpCopyMemory: + case SpvOpCopyMemorySized: + { + CHECKED_READU32(p_parser, p_node->word_offset + 2, + p_func->accessed_ptrs[p_func->accessed_ptr_count]); + (++p_func->accessed_ptr_count); + CHECKED_READU32(p_parser, p_node->word_offset + 3, + p_func->accessed_ptrs[p_func->accessed_ptr_count]); + (++p_func->accessed_ptr_count); + } + break; + default: break; + } + } + + if (p_func->callee_count > 0) { + qsort(p_func->callees, p_func->callee_count, + sizeof(*(p_func->callees)), SortCompareUint32); + } + p_func->callee_count = (uint32_t)DedupSortedUint32(p_func->callees, + p_func->callee_count); + + if (p_func->accessed_ptr_count > 0) { + qsort(p_func->accessed_ptrs, p_func->accessed_ptr_count, + sizeof(*(p_func->accessed_ptrs)), SortCompareUint32); + } + p_func->accessed_ptr_count = (uint32_t)DedupSortedUint32(p_func->accessed_ptrs, + p_func->accessed_ptr_count); + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static int SortCompareFunctions(const void* a, const void* b) +{ + const Function* af = (const Function*)a; + const Function* bf = (const Function*)b; + return (int)af->id - (int)bf->id; +} + +static SpvReflectResult ParseFunctions(Parser* p_parser) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + assert(IsNotNull(p_parser->nodes)); + + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { + if (p_parser->function_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + p_parser->functions = (Function*)calloc(p_parser->function_count, + sizeof(*(p_parser->functions))); + if (IsNull(p_parser->functions)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + size_t function_index = 0; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (p_node->op != SpvOpFunction) { + continue; + } + + // Skip over function declarations that aren't definitions + bool func_definition = false; + // Intentionally reuse i to avoid iterating over these nodes more than + // once + for (; i < p_parser->node_count; ++i) { + if (p_parser->nodes[i].op == SpvOpLabel) { + func_definition = true; + break; + } + if (p_parser->nodes[i].op == SpvOpFunctionEnd) { + break; + } + } + if (!func_definition) { + continue; + } + + Function* p_function = &(p_parser->functions[function_index]); + + SpvReflectResult result = ParseFunction(p_parser, p_node, p_function, i); + if (result != 
SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + ++function_index; + } + + qsort(p_parser->functions, p_parser->function_count, + sizeof(*(p_parser->functions)), SortCompareFunctions); + + // Once they're sorted, link the functions with pointers to improve graph + // traversal efficiency + for (size_t i = 0; i < p_parser->function_count; ++i) { + Function* p_func = &(p_parser->functions[i]); + if (p_func->callee_count == 0) { + continue; + } + p_func->callee_ptrs = (Function**)calloc(p_func->callee_count, + sizeof(*(p_func->callee_ptrs))); + for (size_t j = 0, k = 0; j < p_func->callee_count; ++j) { + while (p_parser->functions[k].id != p_func->callees[j]) { + ++k; + if (k >= p_parser->function_count) { + // Invalid called function ID somewhere + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + p_func->callee_ptrs[j] = &(p_parser->functions[k]); + } + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseMemberCounts(Parser* p_parser) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + assert(IsNotNull(p_parser->nodes)); + + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if ((p_node->op != SpvOpMemberName) && (p_node->op != SpvOpMemberDecorate)) { + continue; + } + + uint32_t target_id = 0; + uint32_t member_index = (uint32_t)INVALID_VALUE; + CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id); + CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index); + Node* p_target_node = FindNode(p_parser, target_id); + // Not all nodes get parsed, so FindNode returning NULL is expected. + if (IsNull(p_target_node)) { + continue; + } + + if (member_index == INVALID_VALUE) { + return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED; + } + + p_target_node->member_count = Max(p_target_node->member_count, member_index + 1); + } + + for (uint32_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (p_node->member_count == 0) { + continue; + } + + p_node->member_names = (const char **)calloc(p_node->member_count, sizeof(*(p_node->member_names))); + if (IsNull(p_node->member_names)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + p_node->member_decorations = (Decorations*)calloc(p_node->member_count, sizeof(*(p_node->member_decorations))); + if (IsNull(p_node->member_decorations)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseNames(Parser* p_parser) +{ + assert(IsNotNull(p_parser)); + assert(IsNotNull(p_parser->spirv_code)); + assert(IsNotNull(p_parser->nodes)); + + if (IsNotNull(p_parser) && IsNotNull(p_parser->spirv_code) && IsNotNull(p_parser->nodes)) { + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if ((p_node->op != SpvOpName) && (p_node->op != SpvOpMemberName)) { + continue; + } + + uint32_t target_id = 0; + CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id); + Node* p_target_node = FindNode(p_parser, target_id); + // Not all nodes get parsed, so FindNode returning NULL is expected. 
+ if (IsNull(p_target_node)) { + continue; + } + + const char** pp_target_name = &(p_target_node->name); + if (p_node->op == SpvOpMemberName) { + uint32_t member_index = UINT32_MAX; + CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index); + pp_target_name = &(p_target_node->member_names[member_index]); + } + + *pp_target_name = p_node->name; + } + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseDecorations(Parser* p_parser) +{ + for (uint32_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + + if (((uint32_t)p_node->op != (uint32_t)SpvOpDecorate) && + ((uint32_t)p_node->op != (uint32_t)SpvOpMemberDecorate) && + ((uint32_t)p_node->op != (uint32_t)SpvReflectOpDecorateId) && + ((uint32_t)p_node->op != (uint32_t)SpvReflectOpDecorateStringGOOGLE) && + ((uint32_t)p_node->op != (uint32_t)SpvReflectOpMemberDecorateStringGOOGLE)) + { + continue; + } + + // Need to adjust the read offset if this is a member decoration + uint32_t member_offset = 0; + if (p_node->op == SpvOpMemberDecorate) { + member_offset = 1; + } + + // Get decoration + uint32_t decoration = (uint32_t)INVALID_VALUE; + CHECKED_READU32(p_parser, p_node->word_offset + member_offset + 2, decoration); + + // Filter out the decoration that do not affect reflection, otherwise + // there will be random crashes because the nodes aren't found. + bool skip = false; + switch (decoration) { + default: { + skip = true; + } + break; + case SpvDecorationBlock: + case SpvDecorationBufferBlock: + case SpvDecorationColMajor: + case SpvDecorationRowMajor: + case SpvDecorationArrayStride: + case SpvDecorationMatrixStride: + case SpvDecorationBuiltIn: + case SpvDecorationNoPerspective: + case SpvDecorationFlat: + case SpvDecorationNonWritable: + case SpvDecorationLocation: + case SpvDecorationBinding: + case SpvDecorationDescriptorSet: + case SpvDecorationOffset: + case SpvDecorationInputAttachmentIndex: + case SpvReflectDecorationHlslCounterBufferGOOGLE: + case SpvReflectDecorationHlslSemanticGOOGLE: { + skip = false; + } + break; + } + if (skip) { + continue; + } + + // Find target target node + uint32_t target_id = 0; + CHECKED_READU32(p_parser, p_node->word_offset + 1, target_id); + Node* p_target_node = FindNode(p_parser, target_id); + if (IsNull(p_target_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Get decorations + Decorations* p_target_decorations = &(p_target_node->decorations); + // Update pointer if this is a member member decoration + if (p_node->op == SpvOpMemberDecorate) { + uint32_t member_index = (uint32_t)INVALID_VALUE; + CHECKED_READU32(p_parser, p_node->word_offset + 2, member_index); + p_target_decorations = &(p_target_node->member_decorations[member_index]); + } + + switch (decoration) { + default: break; + + case SpvDecorationBlock: { + p_target_decorations->is_block = true; + } + break; + + case SpvDecorationBufferBlock: { + p_target_decorations->is_buffer_block = true; + } + break; + + case SpvDecorationColMajor: { + p_target_decorations->is_column_major = true; + } + break; + + case SpvDecorationRowMajor: { + p_target_decorations->is_row_major = true; + } + break; + + case SpvDecorationArrayStride: { + uint32_t word_offset = p_node->word_offset + member_offset + 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->array_stride); + } + break; + + case SpvDecorationMatrixStride: { + uint32_t word_offset = p_node->word_offset + member_offset + 3; + CHECKED_READU32(p_parser, word_offset, 
p_target_decorations->matrix_stride); + } + break; + + case SpvDecorationBuiltIn: { + p_target_decorations->is_built_in = true; + uint32_t word_offset = p_node->word_offset + member_offset + 3; + CHECKED_READU32_CAST(p_parser, word_offset, SpvBuiltIn, p_target_decorations->built_in); + } + break; + + case SpvDecorationNoPerspective: { + p_target_decorations->is_noperspective = true; + } + break; + + case SpvDecorationFlat: { + p_target_decorations->is_flat = true; + } + break; + + case SpvDecorationNonWritable: { + p_target_decorations->is_non_writable = true; + } + break; + + case SpvDecorationLocation: { + uint32_t word_offset = p_node->word_offset + member_offset + 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->location.value); + p_target_decorations->location.word_offset = word_offset; + } + break; + + case SpvDecorationBinding: { + uint32_t word_offset = p_node->word_offset + member_offset+ 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->binding.value); + p_target_decorations->binding.word_offset = word_offset; + } + break; + + case SpvDecorationDescriptorSet: { + uint32_t word_offset = p_node->word_offset + member_offset+ 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->set.value); + p_target_decorations->set.word_offset = word_offset; + } + break; + + case SpvDecorationOffset: { + uint32_t word_offset = p_node->word_offset + member_offset+ 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->offset.value); + p_target_decorations->offset.word_offset = word_offset; + } + break; + + case SpvDecorationInputAttachmentIndex: { + uint32_t word_offset = p_node->word_offset + member_offset+ 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->input_attachment_index.value); + p_target_decorations->input_attachment_index.word_offset = word_offset; + } + break; + + case SpvReflectDecorationHlslCounterBufferGOOGLE: { + uint32_t word_offset = p_node->word_offset + member_offset+ 3; + CHECKED_READU32(p_parser, word_offset, p_target_decorations->uav_counter_buffer.value); + p_target_decorations->uav_counter_buffer.word_offset = word_offset; + } + break; + + case SpvReflectDecorationHlslSemanticGOOGLE: { + uint32_t word_offset = p_node->word_offset + member_offset + 3; + p_target_decorations->semantic.value = (const char*)(p_parser->spirv_code + word_offset); + p_target_decorations->semantic.word_offset = word_offset; + } + break; + } + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult EnumerateAllUniforms( + SpvReflectShaderModule* p_module, + size_t* p_uniform_count, + uint32_t** pp_uniforms +) +{ + *p_uniform_count = p_module->descriptor_binding_count; + if (*p_uniform_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + *pp_uniforms = (uint32_t*)calloc(*p_uniform_count, sizeof(**pp_uniforms)); + + if (IsNull(*pp_uniforms)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + for (size_t i = 0; i < *p_uniform_count; ++i) { + (*pp_uniforms)[i] = p_module->descriptor_bindings[i].spirv_id; + } + qsort(*pp_uniforms, *p_uniform_count, sizeof(**pp_uniforms), + SortCompareUint32); + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseType( + Parser* p_parser, + Node* p_node, + Decorations* p_struct_member_decorations, + SpvReflectShaderModule* p_module, + SpvReflectTypeDescription* p_type +) +{ + SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS; + + if (p_node->member_count > 0) { + p_type->member_count = p_node->member_count; + p_type->members = 
(SpvReflectTypeDescription*)calloc(p_type->member_count, sizeof(*(p_type->members))); + if (IsNotNull(p_type->members)) { + // Mark all members types with an invalid state + for (size_t i = 0; i < p_type->members->member_count; ++i) { + SpvReflectTypeDescription* p_member_type = &(p_type->members[i]); + p_member_type->id = (uint32_t)INVALID_VALUE; + p_member_type->op = (SpvOp)INVALID_VALUE; + p_member_type->storage_class = (SpvStorageClass)INVALID_VALUE; + } + } + else { + result = SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + if (result == SPV_REFLECT_RESULT_SUCCESS) { + // Since the parse descends on type information, these will get overwritten + // if not guarded against assignment. Only assign if the id is invalid. + if (p_type->id == INVALID_VALUE) { + p_type->id = p_node->result_id; + p_type->op = p_node->op; + p_type->decoration_flags = 0; + } + // Top level types need to pick up decorations from all types below it. + // Issue and fix here: https://github.com/chaoticbob/SPIRV-Reflect/issues/64 + p_type->decoration_flags = ApplyDecorations(&p_node->decorations); + + switch (p_node->op) { + default: break; + case SpvOpTypeVoid: + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_VOID; + break; + + case SpvOpTypeBool: + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_BOOL; + break; + + case SpvOpTypeInt: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_INT; + IF_READU32(result, p_parser, p_node->word_offset + 2, p_type->traits.numeric.scalar.width); + IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.scalar.signedness); + } + break; + + case SpvOpTypeFloat: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_FLOAT; + IF_READU32(result, p_parser, p_node->word_offset + 2, p_type->traits.numeric.scalar.width); + } + break; + + case SpvOpTypeVector: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_VECTOR; + uint32_t component_type_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + 2, component_type_id); + IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.vector.component_count); + // Parse component type + Node* p_next_node = FindNode(p_parser, component_type_id); + if (IsNotNull(p_next_node)) { + result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); + } + else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + break; + + case SpvOpTypeMatrix: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_MATRIX; + uint32_t column_type_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + 2, column_type_id); + IF_READU32(result, p_parser, p_node->word_offset + 3, p_type->traits.numeric.matrix.column_count); + Node* p_next_node = FindNode(p_parser, column_type_id); + if (IsNotNull(p_next_node)) { + result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); + } + else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + p_type->traits.numeric.matrix.row_count = p_type->traits.numeric.vector.component_count; + p_type->traits.numeric.matrix.stride = p_node->decorations.matrix_stride; + // NOTE: Matrix stride is decorated using OpMemberDecoreate - not OpDecoreate. 
+ if (IsNotNull(p_struct_member_decorations)) { + p_type->traits.numeric.matrix.stride = p_struct_member_decorations->matrix_stride; + } + } + break; + + case SpvOpTypeImage: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE; + IF_READU32_CAST(result, p_parser, p_node->word_offset + 3, SpvDim, p_type->traits.image.dim); + IF_READU32(result, p_parser, p_node->word_offset + 4, p_type->traits.image.depth); + IF_READU32(result, p_parser, p_node->word_offset + 5, p_type->traits.image.arrayed); + IF_READU32(result, p_parser, p_node->word_offset + 6, p_type->traits.image.ms); + IF_READU32(result, p_parser, p_node->word_offset + 7, p_type->traits.image.sampled); + IF_READU32_CAST(result, p_parser, p_node->word_offset + 8, SpvImageFormat, p_type->traits.image.image_format); + } + break; + + case SpvOpTypeSampler: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER; + } + break; + + case SpvOpTypeSampledImage: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE; + uint32_t image_type_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + 2, image_type_id); + Node* p_next_node = FindNode(p_parser, image_type_id); + if (IsNotNull(p_next_node)) { + result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); + } + else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + break; + + case SpvOpTypeArray: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_ARRAY; + if (result == SPV_REFLECT_RESULT_SUCCESS) { + uint32_t element_type_id = (uint32_t)INVALID_VALUE; + uint32_t length_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id); + IF_READU32(result, p_parser, p_node->word_offset + 3, length_id); + // NOTE: Array stride is decorated using OpDecorate instead of + // OpMemberDecorate, even if the array is apart of a struct. 
+ p_type->traits.array.stride = p_node->decorations.array_stride; + // Get length for current dimension + Node* p_length_node = FindNode(p_parser, length_id); + if (IsNotNull(p_length_node)) { + if (p_length_node->op == SpvOpSpecConstant || + p_length_node->op == SpvOpSpecConstantOp) { + p_type->traits.array.dims[p_type->traits.array.dims_count] = 0xFFFFFFFF; + p_type->traits.array.dims_count += 1; + } else { + uint32_t length = 0; + IF_READU32(result, p_parser, p_length_node->word_offset + 3, length); + if (result == SPV_REFLECT_RESULT_SUCCESS) { + // Write the array dim and increment the count and offset + p_type->traits.array.dims[p_type->traits.array.dims_count] = length; + p_type->traits.array.dims_count += 1; + } else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + // Parse next dimension or element type + Node* p_next_node = FindNode(p_parser, element_type_id); + if (IsNotNull(p_next_node)) { + result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); + } + } + else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + } + break; + + case SpvOpTypeRuntimeArray: { + uint32_t element_type_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + 2, element_type_id); + // Parse next dimension or element type + Node* p_next_node = FindNode(p_parser, element_type_id); + if (IsNotNull(p_next_node)) { + result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); + } + else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + break; + + case SpvOpTypeStruct: { + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_STRUCT; + p_type->type_flags |= SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK; + uint32_t word_index = 2; + uint32_t member_index = 0; + for (; word_index < p_node->word_count; ++word_index, ++member_index) { + uint32_t member_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + word_index, member_id); + // Find member node + Node* p_member_node = FindNode(p_parser, member_id); + if (IsNull(p_member_node)) { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + break; + } + + // Member decorations + Decorations* p_member_decorations = &p_node->member_decorations[member_index]; + + assert(member_index < p_type->member_count); + // Parse member type + SpvReflectTypeDescription* p_member_type = &(p_type->members[member_index]); + p_member_type->id = member_id; + p_member_type->op = p_member_node->op; + result = ParseType(p_parser, p_member_node, p_member_decorations, p_module, p_member_type); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + break; + } + // This looks wrong + //p_member_type->type_name = p_member_node->name; + p_member_type->struct_member_name = p_node->member_names[member_index]; + } + } + break; + + case SpvOpTypeOpaque: break; + + case SpvOpTypePointer: { + IF_READU32_CAST(result, p_parser, p_node->word_offset + 2, SpvStorageClass, p_type->storage_class); + uint32_t type_id = (uint32_t)INVALID_VALUE; + IF_READU32(result, p_parser, p_node->word_offset + 3, type_id); + // Parse type + Node* p_next_node = FindNode(p_parser, type_id); + if (IsNotNull(p_next_node)) { + result = ParseType(p_parser, p_next_node, NULL, p_module, p_type); + } + else { + result = SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + break; + } + + if (result == SPV_REFLECT_RESULT_SUCCESS) { + // Names get assigned on the way down. Guard against names + // get overwritten on the way up. 
+ if (IsNull(p_type->type_name)) { + p_type->type_name = p_node->name; + } + } + } + + return result; +} + +static SpvReflectResult ParseTypes(Parser* p_parser, SpvReflectShaderModule* p_module) +{ + if (p_parser->type_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + p_module->_internal->type_description_count = p_parser->type_count; + p_module->_internal->type_descriptions = (SpvReflectTypeDescription*)calloc(p_module->_internal->type_description_count, + sizeof(*(p_module->_internal->type_descriptions))); + if (IsNull(p_module->_internal->type_descriptions)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + // Mark all types with an invalid state + for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) { + SpvReflectTypeDescription* p_type = &(p_module->_internal->type_descriptions[i]); + p_type->id = (uint32_t)INVALID_VALUE; + p_type->op = (SpvOp)INVALID_VALUE; + p_type->storage_class = (SpvStorageClass)INVALID_VALUE; + } + + size_t type_index = 0; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (! p_node->is_type) { + continue; + } + + SpvReflectTypeDescription* p_type = &(p_module->_internal->type_descriptions[type_index]); + SpvReflectResult result = ParseType(p_parser, p_node, NULL, p_module, p_type); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + ++type_index; + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +static int SortCompareDescriptorBinding(const void* a, const void* b) +{ + const SpvReflectDescriptorBinding* p_elem_a = (const SpvReflectDescriptorBinding*)a; + const SpvReflectDescriptorBinding* p_elem_b = (const SpvReflectDescriptorBinding*)b; + int value = (int)(p_elem_a->binding) - (int)(p_elem_b->binding); + if (value == 0) { + // use spirv-id as a tiebreaker to ensure a stable ordering, as they're guaranteed + // unique. 
+ assert(p_elem_a->spirv_id != p_elem_b->spirv_id); + value = (int)(p_elem_a->spirv_id) - (int)(p_elem_b->spirv_id); + } + return value; +} + +static SpvReflectResult ParseDescriptorBindings(Parser* p_parser, SpvReflectShaderModule* p_module) +{ + p_module->descriptor_binding_count = 0; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if ((p_node->op != SpvOpVariable) || + ((p_node->storage_class != SpvStorageClassUniform) && (p_node->storage_class != SpvStorageClassUniformConstant))) + { + continue; + } + if ((p_node->decorations.set.value == INVALID_VALUE) || (p_node->decorations.binding.value == INVALID_VALUE)) { + continue; + } + + p_module->descriptor_binding_count += 1; + } + + if (p_module->descriptor_binding_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + p_module->descriptor_bindings = (SpvReflectDescriptorBinding*)calloc(p_module->descriptor_binding_count, sizeof(*(p_module->descriptor_bindings))); + if (IsNull(p_module->descriptor_bindings)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + // Mark all types with an invalid state + for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) { + SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]); + p_descriptor->binding = (uint32_t)INVALID_VALUE; + p_descriptor->input_attachment_index = (uint32_t)INVALID_VALUE; + p_descriptor->set = (uint32_t)INVALID_VALUE; + p_descriptor->descriptor_type = (SpvReflectDescriptorType)INVALID_VALUE; + p_descriptor->uav_counter_id = (uint32_t)INVALID_VALUE; + } + + size_t descriptor_index = 0; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if ((p_node->op != SpvOpVariable) || + ((p_node->storage_class != SpvStorageClassUniform) && (p_node->storage_class != SpvStorageClassUniformConstant)))\ + { + continue; + } + if ((p_node->decorations.set.value == INVALID_VALUE) || (p_node->decorations.binding.value == INVALID_VALUE)) { + continue; + } + + SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id); + if (IsNull(p_type)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // If the type is a pointer, resolve it + if (p_type->op == SpvOpTypePointer) { + // Find the type's node + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Should be the resolved type + p_type = FindType(p_module, p_type_node->type_id); + if (IsNull(p_type)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + + SpvReflectDescriptorBinding* p_descriptor = &p_module->descriptor_bindings[descriptor_index]; + p_descriptor->spirv_id = p_node->result_id; + p_descriptor->name = p_node->name; + p_descriptor->binding = p_node->decorations.binding.value; + p_descriptor->input_attachment_index = p_node->decorations.input_attachment_index.value; + p_descriptor->set = p_node->decorations.set.value; + p_descriptor->count = 1; + p_descriptor->uav_counter_id = p_node->decorations.uav_counter_buffer.value; + p_descriptor->type_description = p_type; + + // Copy image traits + if ((p_type->type_flags & SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK) == SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE) { + memcpy(&p_descriptor->image, &p_type->traits.image, sizeof(p_descriptor->image)); + } + + // This is a workaround for: https://github.com/KhronosGroup/glslang/issues/1096 + { + const uint32_t resource_mask = 
SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE | SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE; + if ((p_type->type_flags & resource_mask) == resource_mask) { + memcpy(&p_descriptor->image, &p_type->traits.image, sizeof(p_descriptor->image)); + } + } + + // Copy array traits + if (p_type->traits.array.dims_count > 0) { + p_descriptor->array.dims_count = p_type->traits.array.dims_count; + for (uint32_t dim_index = 0; dim_index < p_type->traits.array.dims_count; ++dim_index) { + uint32_t dim_value = p_type->traits.array.dims[dim_index]; + p_descriptor->array.dims[dim_index] = dim_value; + p_descriptor->count *= dim_value; + } + } + + // Count + + + p_descriptor->word_offset.binding = p_node->decorations.binding.word_offset; + p_descriptor->word_offset.set = p_node->decorations.set.word_offset; + + ++descriptor_index; + } + + if (p_module->descriptor_binding_count > 0) { + qsort(p_module->descriptor_bindings, + p_module->descriptor_binding_count, + sizeof(*(p_module->descriptor_bindings)), + SortCompareDescriptorBinding); + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseDescriptorType(SpvReflectShaderModule* p_module) +{ + if (p_module->descriptor_binding_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) { + SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]); + SpvReflectTypeDescription* p_type = p_descriptor->type_description; + + switch (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK) { + default: assert(false && "unknown type flag"); break; + + case SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE: { + if (p_descriptor->image.dim == SpvDimBuffer) { + switch (p_descriptor->image.sampled) { + default: assert(false && "unknown texel buffer sampled value"); break; + case IMAGE_SAMPLED: p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; break; + case IMAGE_STORAGE: p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; break; + } + } + else if(p_descriptor->image.dim == SpvDimSubpassData) { + p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; + } + else { + switch (p_descriptor->image.sampled) { + default: assert(false && "unknown image sampled value"); break; + case IMAGE_SAMPLED: p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE; break; + case IMAGE_STORAGE: p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE; break; + } + } + } + break; + + case SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER: { + p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER; + } + break; + + case (SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE | SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE): { + // This is a workaround for: https://github.com/KhronosGroup/glslang/issues/1096 + if (p_descriptor->image.dim == SpvDimBuffer) { + switch (p_descriptor->image.sampled) { + default: assert(false && "unknown texel buffer sampled value"); break; + case IMAGE_SAMPLED: p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; break; + case IMAGE_STORAGE: p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; break; + } + } + else { + p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; + } + } + break; + + case SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK: { + if (p_type->decoration_flags & SPV_REFLECT_DECORATION_BLOCK) { + p_descriptor->descriptor_type = 
SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER; + } + else if (p_type->decoration_flags & SPV_REFLECT_DECORATION_BUFFER_BLOCK) { + p_descriptor->descriptor_type = SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER; + } + else { + assert(false && "unknown struct"); + } + } + break; + } + + switch (p_descriptor->descriptor_type) { + case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SAMPLER; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER : p_descriptor->resource_type = (SpvReflectResourceType)(SPV_REFLECT_RESOURCE_FLAG_SAMPLER | SPV_REFLECT_RESOURCE_FLAG_SRV); break; + case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_CBV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_CBV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV; break; + case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC : p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_UAV; break; + + case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: + break; + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseUAVCounterBindings(SpvReflectShaderModule* p_module) +{ + char name[MAX_NODE_NAME_LENGTH]; + const char* k_count_tag = "@count"; + + for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) { + SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]); + + if (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) { + continue; + } + + SpvReflectDescriptorBinding* p_counter_descriptor = NULL; + // Use UAV counter buffer id if present... + if (p_descriptor->uav_counter_id != UINT32_MAX) { + for (uint32_t counter_descriptor_index = 0; counter_descriptor_index < p_module->descriptor_binding_count; ++counter_descriptor_index) { + SpvReflectDescriptorBinding* p_test_counter_descriptor = &(p_module->descriptor_bindings[counter_descriptor_index]); + if (p_test_counter_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) { + continue; + } + if (p_descriptor->uav_counter_id == p_test_counter_descriptor->spirv_id) { + p_counter_descriptor = p_test_counter_descriptor; + break; + } + } + } + // ...otherwise use old @count convention. + else { + const size_t descriptor_name_length = p_descriptor->name? 
strlen(p_descriptor->name): 0; + + memset(name, 0, MAX_NODE_NAME_LENGTH); + memcpy(name, p_descriptor->name, descriptor_name_length); +#if defined(WIN32) + strcat_s(name, MAX_NODE_NAME_LENGTH, k_count_tag); +#else + strcat(name, k_count_tag); +#endif + + for (uint32_t counter_descriptor_index = 0; counter_descriptor_index < p_module->descriptor_binding_count; ++counter_descriptor_index) { + SpvReflectDescriptorBinding* p_test_counter_descriptor = &(p_module->descriptor_bindings[counter_descriptor_index]); + if (p_test_counter_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) { + continue; + } + if (p_test_counter_descriptor->name && strcmp(name, p_test_counter_descriptor->name) == 0) { + p_counter_descriptor = p_test_counter_descriptor; + break; + } + } + } + + if (p_counter_descriptor != NULL) { + p_descriptor->uav_counter_binding = p_counter_descriptor; + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseDescriptorBlockVariable( + Parser* p_parser, + SpvReflectShaderModule* p_module, + SpvReflectTypeDescription* p_type, + SpvReflectBlockVariable* p_var +) +{ + bool has_non_writable = false; + + if (IsNotNull(p_type->members) && (p_type->member_count > 0)) { + p_var->member_count = p_type->member_count; + p_var->members = (SpvReflectBlockVariable*)calloc(p_var->member_count, sizeof(*p_var->members)); + if (IsNull(p_var->members)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Resolve to element type if current type is array or run time array + if (p_type_node->op == SpvOpTypeArray) { + while (p_type_node->op == SpvOpTypeArray) { + p_type_node = FindNode(p_parser, p_type_node->array_traits.element_type_id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + } + else if(p_type_node->op == SpvOpTypeRuntimeArray) { + // Element type description + p_type = FindType(p_module, p_type_node->array_traits.element_type_id); + if (IsNull(p_type)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Element type node + p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + + // Parse members + for (uint32_t member_index = 0; member_index < p_type->member_count; ++member_index) { + SpvReflectTypeDescription* p_member_type = &p_type->members[member_index]; + SpvReflectBlockVariable* p_member_var = &p_var->members[member_index]; + bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT; + if (is_struct) { + SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_member_type, p_member_var); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + + p_member_var->name = p_type_node->member_names[member_index]; + p_member_var->offset = p_type_node->member_decorations[member_index].offset.value; + p_member_var->decoration_flags = ApplyDecorations(&p_type_node->member_decorations[member_index]); + p_member_var->flags |= SPV_REFLECT_VARIABLE_FLAGS_UNUSED; + if (!has_non_writable && (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE)) { + has_non_writable = true; + } + ApplyNumericTraits(p_member_type, &p_member_var->numeric); + if (p_member_type->op == SpvOpTypeArray) { + ApplyArrayTraits(p_member_type, &p_member_var->array); + } + 
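+ // Link the member back to its type description; the size and usage passes
+ // below walk this pointer when they revisit the block.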
+ p_member_var->type_description = p_member_type; + } + } + + p_var->name = p_type->type_name; + p_var->type_description = p_type; + if (has_non_writable) { + p_var->decoration_flags |= SPV_REFLECT_DECORATION_NON_WRITABLE; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseDescriptorBlockVariableSizes( + Parser* p_parser, + SpvReflectShaderModule* p_module, + bool is_parent_root, + bool is_parent_aos, + bool is_parent_rta, + SpvReflectBlockVariable* p_var +) +{ + if (p_var->member_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + // Absolute offsets + for (uint32_t member_index = 0; member_index < p_var->member_count; ++member_index) { + SpvReflectBlockVariable* p_member_var = &p_var->members[member_index]; + if (is_parent_root) { + p_member_var->absolute_offset = p_member_var->offset; + } + else { + p_member_var->absolute_offset = is_parent_aos ? 0 : p_member_var->offset + p_var->absolute_offset; + } + } + + // Size + for (uint32_t member_index = 0; member_index < p_var->member_count; ++member_index) { + SpvReflectBlockVariable* p_member_var = &p_var->members[member_index]; + SpvReflectTypeDescription* p_member_type = p_member_var->type_description; + + switch (p_member_type->op) { + case SpvOpTypeBool: { + p_member_var->size = SPIRV_WORD_SIZE; + } + break; + + case SpvOpTypeInt: + case SpvOpTypeFloat: { + p_member_var->size = p_member_type->traits.numeric.scalar.width / SPIRV_BYTE_WIDTH; + } + break; + + case SpvOpTypeVector: { + uint32_t size = p_member_type->traits.numeric.vector.component_count * + (p_member_type->traits.numeric.scalar.width / SPIRV_BYTE_WIDTH); + p_member_var->size = size; + } + break; + + case SpvOpTypeMatrix: { + if (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_COLUMN_MAJOR) { + p_member_var->size = p_member_var->numeric.matrix.column_count * p_member_var->numeric.matrix.stride; + } + else if (p_member_var->decoration_flags & SPV_REFLECT_DECORATION_ROW_MAJOR) { + p_member_var->size = p_member_var->numeric.matrix.row_count * p_member_var->numeric.matrix.stride; + } + } + break; + + case SpvOpTypeArray: { + // If array of structs, parse members first... + bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT; + if (is_struct) { + SpvReflectResult result = ParseDescriptorBlockVariableSizes(p_parser, p_module, false, true, is_parent_rta, p_member_var); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + // ...then array + uint32_t element_count = (p_member_var->array.dims_count > 0 ? 1 : 0); + for (uint32_t i = 0; i < p_member_var->array.dims_count; ++i) { + element_count *= p_member_var->array.dims[i]; + } + p_member_var->size = element_count * p_member_var->array.stride; + } + break; + + case SpvOpTypeRuntimeArray: { + bool is_struct = (p_member_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) == SPV_REFLECT_TYPE_FLAG_STRUCT; + if (is_struct) { + SpvReflectResult result = ParseDescriptorBlockVariableSizes(p_parser, p_module, false, true, true, p_member_var); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + } + break; + + case SpvOpTypeStruct: { + SpvReflectResult result = ParseDescriptorBlockVariableSizes(p_parser, p_module, false, is_parent_aos, is_parent_rta, p_member_var); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + break; + + default: + break; + } + } + + // Parse padded size using offset difference for all member except for the last entry... 
+ for (uint32_t member_index = 0; member_index < (p_var->member_count - 1); ++member_index) {
+ SpvReflectBlockVariable* p_member_var = &p_var->members[member_index];
+ SpvReflectBlockVariable* p_next_member_var = &p_var->members[member_index + 1];
+ p_member_var->padded_size = p_next_member_var->offset - p_member_var->offset;
+ if (p_member_var->size > p_member_var->padded_size) {
+ p_member_var->size = p_member_var->padded_size;
+ }
+ if (is_parent_rta) {
+ p_member_var->padded_size = p_member_var->size;
+ }
+ }
+ // ...the last entry is rounded up to the nearest multiple of SPIRV_DATA_ALIGNMENT (16),
+ // and then the offset is subtracted.
+ if (p_var->member_count > 0) {
+ SpvReflectBlockVariable* p_member_var = &p_var->members[p_var->member_count - 1];
+ p_member_var->padded_size = RoundUp(p_member_var->offset + p_member_var->size, SPIRV_DATA_ALIGNMENT) - p_member_var->offset;
+ if (p_member_var->size > p_member_var->padded_size) {
+ p_member_var->size = p_member_var->padded_size;
+ }
+ if (is_parent_rta) {
+ p_member_var->padded_size = p_member_var->size;
+ }
+ }
+
+ // @TODO validate this with assertion
+ p_var->size = p_var->members[p_var->member_count - 1].offset +
+ p_var->members[p_var->member_count - 1].padded_size;
+ p_var->padded_size = p_var->size;
+
+ return SPV_REFLECT_RESULT_SUCCESS;
+}
+
+static SpvReflectResult ParseDescriptorBlockVariableUsage(
+ Parser* p_parser,
+ SpvReflectShaderModule* p_module,
+ AccessChain* p_access_chain,
+ uint32_t index_index,
+ SpvOp override_op_type,
+ SpvReflectBlockVariable* p_var
+)
+{
+ (void)p_parser;
+ (void)p_access_chain;
+ (void)p_var;
+
+ // Clear the current variable's USED flag
+ p_var->flags &= ~SPV_REFLECT_VARIABLE_FLAGS_UNUSED;
+
+ // Parsing arrays requires overriding the op type
+ // for the lowest dim's element type.
+ SpvOp op_type = p_var->type_description->op; + if (override_op_type != (SpvOp)INVALID_VALUE) { + op_type = override_op_type; + } + + switch (op_type) { + default: break; + + case SpvOpTypeArray: { + // Parse through array's type hierarchy to find the actual/non-array element type + SpvReflectTypeDescription* p_type = p_var->type_description; + while ((p_type->op == SpvOpTypeArray) && (index_index < p_access_chain->index_count)) { + // Find the array element type id + Node* p_node = FindNode(p_parser, p_type->id); + if (p_node == NULL) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + uint32_t element_type_id = p_node->array_traits.element_type_id; + // Get the array element type + p_type = FindType(p_module, element_type_id); + if (p_type == NULL) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Next access index + index_index += 1; + } + // Parse current var again with a type override and advanced index index + SpvReflectResult result = ParseDescriptorBlockVariableUsage( + p_parser, + p_module, + p_access_chain, + index_index, + p_type->op, + p_var); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + break; + + case SpvOpTypeStruct: { + assert(p_var->member_count > 0); + if (p_var->member_count == 0) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_BLOCK_DATA; + } + + uint32_t index = p_access_chain->indexes[index_index]; + + if (index >= p_var->member_count) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_BLOCK_MEMBER_REFERENCE; + } + + SpvReflectBlockVariable* p_member_var = &p_var->members[index]; + if (index_index < p_access_chain->index_count) { + SpvReflectResult result = ParseDescriptorBlockVariableUsage( + p_parser, + p_module, + p_access_chain, + index_index + 1, + (SpvOp)INVALID_VALUE, + p_member_var); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + } + break; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseDescriptorBlocks(Parser* p_parser, SpvReflectShaderModule* p_module) +{ + if (p_module->descriptor_binding_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) { + SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]); + SpvReflectTypeDescription* p_type = p_descriptor->type_description; + if ((p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER) && + (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) ) + { + continue; + } + + // Mark UNUSED + p_descriptor->block.flags |= SPV_REFLECT_VARIABLE_FLAGS_UNUSED; + // Parse descriptor block + SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_type, &p_descriptor->block); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + for (uint32_t access_chain_index = 0; access_chain_index < p_parser->access_chain_count; ++access_chain_index) { + AccessChain* p_access_chain = &(p_parser->access_chains[access_chain_index]); + // Skip any access chains that aren't touching this descriptor block + if (p_descriptor->spirv_id != p_access_chain->base_id) { + continue; + } + result = ParseDescriptorBlockVariableUsage( + p_parser, + p_module, + p_access_chain, + 0, + (SpvOp)INVALID_VALUE, + &p_descriptor->block); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + + p_descriptor->block.name = p_descriptor->name; + + bool is_parent_rta = 
(p_descriptor->descriptor_type == SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER); + result = ParseDescriptorBlockVariableSizes(p_parser, p_module, true, false, is_parent_rta, &p_descriptor->block); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + if (is_parent_rta) { + p_descriptor->block.size = 0; + p_descriptor->block.padded_size = 0; + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseFormat( + const SpvReflectTypeDescription* p_type, + SpvReflectFormat* p_format +) +{ + SpvReflectResult result = SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR; + bool signedness = p_type->traits.numeric.scalar.signedness; + if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_VECTOR) { + uint32_t component_count = p_type->traits.numeric.vector.component_count; + if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_FLOAT) { + switch (component_count) { + case 2: *p_format = SPV_REFLECT_FORMAT_R32G32_SFLOAT; break; + case 3: *p_format = SPV_REFLECT_FORMAT_R32G32B32_SFLOAT; break; + case 4: *p_format = SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT; break; + } + result = SPV_REFLECT_RESULT_SUCCESS; + } + else if (p_type->type_flags & (SPV_REFLECT_TYPE_FLAG_INT | SPV_REFLECT_TYPE_FLAG_BOOL)) { + switch (component_count) { + case 2: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32_SINT : SPV_REFLECT_FORMAT_R32G32_UINT; break; + case 3: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32B32_SINT : SPV_REFLECT_FORMAT_R32G32B32_UINT; break; + case 4: *p_format = signedness ? SPV_REFLECT_FORMAT_R32G32B32A32_SINT : SPV_REFLECT_FORMAT_R32G32B32A32_UINT; break; + } + result = SPV_REFLECT_RESULT_SUCCESS; + } + } + else if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_FLOAT) { + *p_format = SPV_REFLECT_FORMAT_R32_SFLOAT; + result = SPV_REFLECT_RESULT_SUCCESS; + } + else if (p_type->type_flags & (SPV_REFLECT_TYPE_FLAG_INT | SPV_REFLECT_TYPE_FLAG_BOOL)) { + if (signedness) { + *p_format = SPV_REFLECT_FORMAT_R32_SINT; + result = SPV_REFLECT_RESULT_SUCCESS; + } + else { + *p_format = SPV_REFLECT_FORMAT_R32_UINT; + result = SPV_REFLECT_RESULT_SUCCESS; + } + } + else if (p_type->type_flags & SPV_REFLECT_TYPE_FLAG_STRUCT) { + *p_format = SPV_REFLECT_FORMAT_UNDEFINED; + result = SPV_REFLECT_RESULT_SUCCESS; + } + return result; +} + +static SpvReflectResult ParseInterfaceVariable( + Parser* p_parser, + const Decorations* p_type_node_decorations, + SpvReflectShaderModule* p_module, + SpvReflectTypeDescription* p_type, + SpvReflectInterfaceVariable* p_var, + bool* p_has_built_in +) +{ + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + + if (p_type->member_count > 0) { + p_var->member_count = p_type->member_count; + p_var->members = (SpvReflectInterfaceVariable*)calloc(p_var->member_count, sizeof(*p_var->members)); + if (IsNull(p_var->members)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + for (uint32_t member_index = 0; member_index < p_type_node->member_count; ++member_index) { + Decorations* p_member_decorations = &p_type_node->member_decorations[member_index]; + SpvReflectTypeDescription* p_member_type = &p_type->members[member_index]; + SpvReflectInterfaceVariable* p_member_var = &p_var->members[member_index]; + SpvReflectResult result = ParseInterfaceVariable(p_parser, p_member_decorations, p_module, p_member_type, p_member_var, p_has_built_in); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + } + + p_var->name = p_type_node->name; + p_var->decoration_flags = 
ApplyDecorations(p_type_node_decorations); + p_var->built_in = p_type_node_decorations->built_in; + ApplyNumericTraits(p_type, &p_var->numeric); + if (p_type->op == SpvOpTypeArray) { + ApplyArrayTraits(p_type, &p_var->array); + } + + p_var->type_description = p_type; + + *p_has_built_in |= p_type_node_decorations->is_built_in; + + SpvReflectResult result = ParseFormat(p_var->type_description, &p_var->format); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseInterfaceVariables( + Parser* p_parser, + SpvReflectShaderModule* p_module, + SpvReflectEntryPoint* p_entry, + size_t io_var_count, + uint32_t* io_vars +) +{ + if (io_var_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + p_entry->input_variable_count = 0; + p_entry->output_variable_count = 0; + for (size_t i = 0; i < io_var_count; ++i) { + uint32_t var_result_id = *(io_vars + i); + Node* p_node = FindNode(p_parser, var_result_id); + if (IsNull(p_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + + if (p_node->storage_class == SpvStorageClassInput) { + p_entry->input_variable_count += 1; + } + else if (p_node->storage_class == SpvStorageClassOutput) { + p_entry->output_variable_count += 1; + } + } + + if (p_entry->input_variable_count > 0) { + p_entry->input_variables = (SpvReflectInterfaceVariable*)calloc(p_entry->input_variable_count, sizeof(*(p_entry->input_variables))); + if (IsNull(p_entry->input_variables)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + + if (p_entry->output_variable_count > 0) { + p_entry->output_variables = (SpvReflectInterfaceVariable*)calloc(p_entry->output_variable_count, sizeof(*(p_entry->output_variables))); + if (IsNull(p_entry->output_variables)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + size_t input_index = 0; + size_t output_index = 0; + for (size_t i = 0; i < io_var_count; ++i) { + uint32_t var_result_id = *(io_vars + i); + Node* p_node = FindNode(p_parser, var_result_id); + if (IsNull(p_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + + SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id); + if (IsNull(p_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // If the type is a pointer, resolve it + if (p_type->op == SpvOpTypePointer) { + // Find the type's node + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Should be the resolved type + p_type = FindType(p_module, p_type_node->type_id); + if (IsNull(p_type)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + + SpvReflectInterfaceVariable* p_var = NULL; + if (p_node->storage_class == SpvStorageClassInput) { + p_var = &(p_entry->input_variables[input_index]); + p_var->storage_class = SpvStorageClassInput; + ++input_index; + } + else if (p_node->storage_class == SpvStorageClassOutput) { + p_var = &(p_entry->output_variables[output_index]); + p_var->storage_class = SpvStorageClassOutput; + ++output_index; + } else { + // interface variables can only have input or output storage classes; + // anything else is either a new addition or an error. 
+ assert(false && "Unsupported storage class for interface variable"); + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_STORAGE_CLASS; + } + + bool has_built_in = p_node->decorations.is_built_in; + SpvReflectResult result = ParseInterfaceVariable( + p_parser, + &p_type_node->decorations, + p_module, + p_type, + p_var, + &has_built_in); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + // SPIR-V result id + p_var->spirv_id = p_node->result_id; + // Name + p_var->name = p_node->name; + // Semantic + p_var->semantic = p_node->decorations.semantic.value; + + // Decorate with built-in if any member is built-in + if (has_built_in) { + p_var->decoration_flags |= SPV_REFLECT_DECORATION_BUILT_IN; + } + + // Location is decorated on OpVariable node, not the type node. + p_var->location = p_node->decorations.location.value; + p_var->word_offset.location = p_node->decorations.location.word_offset; + + // Built in + if (p_node->decorations.is_built_in) { + p_var->built_in = p_node->decorations.built_in; + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult EnumerateAllPushConstants( + SpvReflectShaderModule* p_module, + size_t* p_push_constant_count, + uint32_t** p_push_constants +) +{ + *p_push_constant_count = p_module->push_constant_block_count; + if (*p_push_constant_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + *p_push_constants = (uint32_t*)calloc(*p_push_constant_count, sizeof(**p_push_constants)); + + if (IsNull(*p_push_constants)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + for (size_t i = 0; i < *p_push_constant_count; ++i) { + (*p_push_constants)[i] = p_module->push_constant_blocks[i].spirv_id; + } + qsort(*p_push_constants, *p_push_constant_count, sizeof(**p_push_constants), + SortCompareUint32); + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult TraverseCallGraph( + Parser* p_parser, + Function* p_func, + size_t* p_func_count, + uint32_t* p_func_ids, + uint32_t depth +) +{ + if (depth > p_parser->function_count) { + // Vulkan does not permit recursion (Vulkan spec Appendix A): + // "Recursion: The static function-call graph for an entry point must not + // contain cycles." 
+ return SPV_REFLECT_RESULT_ERROR_SPIRV_RECURSION; + } + if (IsNotNull(p_func_ids)) { + p_func_ids[(*p_func_count)++] = p_func->id; + } else { + ++*p_func_count; + } + for (size_t i = 0; i < p_func->callee_count; ++i) { + SpvReflectResult result = TraverseCallGraph( + p_parser, p_func->callee_ptrs[i], p_func_count, p_func_ids, depth + 1); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseStaticallyUsedResources( + Parser* p_parser, + SpvReflectShaderModule* p_module, + SpvReflectEntryPoint* p_entry, + size_t uniform_count, + uint32_t* uniforms, + size_t push_constant_count, + uint32_t* push_constants +) +{ + // Find function with the right id + Function* p_func = NULL; + for (size_t i = 0; i < p_parser->function_count; ++i) { + if (p_parser->functions[i].id == p_entry->id) { + p_func = &(p_parser->functions[i]); + break; + } + } + if (p_func == NULL) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + + size_t called_function_count = 0; + SpvReflectResult result = TraverseCallGraph( + p_parser, + p_func, + &called_function_count, + NULL, + 0); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + uint32_t* p_called_functions = NULL; + if (called_function_count > 0) { + p_called_functions = (uint32_t*)calloc(called_function_count, sizeof(*p_called_functions)); + if (IsNull(p_called_functions)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + called_function_count = 0; + result = TraverseCallGraph( + p_parser, + p_func, + &called_function_count, + p_called_functions, + 0); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + if (called_function_count > 0) { + qsort( + p_called_functions, + called_function_count, + sizeof(*p_called_functions), + SortCompareUint32); + } + called_function_count = DedupSortedUint32(p_called_functions, called_function_count); + + uint32_t used_variable_count = 0; + for (size_t i = 0, j = 0; i < called_function_count; ++i) { + // No need to bounds check j because a missing ID issue would have been + // found during TraverseCallGraph + while (p_parser->functions[j].id != p_called_functions[i]) { + ++j; + } + used_variable_count += p_parser->functions[j].accessed_ptr_count; + } + uint32_t* used_variables = NULL; + if (used_variable_count > 0) { + used_variables = (uint32_t*)calloc(used_variable_count, + sizeof(*used_variables)); + if (IsNull(used_variables)) { + SafeFree(p_called_functions); + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + used_variable_count = 0; + for (size_t i = 0, j = 0; i < called_function_count; ++i) { + while (p_parser->functions[j].id != p_called_functions[i]) { + ++j; + } + + memcpy(&used_variables[used_variable_count], + p_parser->functions[j].accessed_ptrs, + p_parser->functions[j].accessed_ptr_count * sizeof(*used_variables)); + used_variable_count += p_parser->functions[j].accessed_ptr_count; + } + SafeFree(p_called_functions); + + if (used_variable_count > 0) { + qsort(used_variables, used_variable_count, sizeof(*used_variables), + SortCompareUint32); + } + used_variable_count = (uint32_t)DedupSortedUint32(used_variables, + used_variable_count); + + // Do set intersection to find the used uniform and push constants + size_t used_uniform_count = 0; + // + SpvReflectResult result0 = IntersectSortedUint32( + used_variables, + used_variable_count, + uniforms, + uniform_count, + &p_entry->used_uniforms, + &used_uniform_count); + + size_t used_push_constant_count = 0; + // + 
SpvReflectResult result1 = IntersectSortedUint32( + used_variables, + used_variable_count, + push_constants, + push_constant_count, + &p_entry->used_push_constants, + &used_push_constant_count); + + for (uint32_t j = 0; j < p_module->descriptor_binding_count; ++j) { + SpvReflectDescriptorBinding* p_binding = &p_module->descriptor_bindings[j]; + bool found = SearchSortedUint32( + used_variables, + used_variable_count, + p_binding->spirv_id); + if (found) { + p_binding->accessed = 1; + } + } + + SafeFree(used_variables); + if (result0 != SPV_REFLECT_RESULT_SUCCESS) { + return result0; + } + if (result1 != SPV_REFLECT_RESULT_SUCCESS) { + return result1; + } + + p_entry->used_uniform_count = (uint32_t)used_uniform_count; + p_entry->used_push_constant_count = (uint32_t)used_push_constant_count; + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseEntryPoints(Parser* p_parser, SpvReflectShaderModule* p_module) +{ + if (p_parser->entry_point_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + p_module->entry_point_count = p_parser->entry_point_count; + p_module->entry_points = (SpvReflectEntryPoint*)calloc(p_module->entry_point_count, + sizeof(*(p_module->entry_points))); + if (IsNull(p_module->entry_points)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + SpvReflectResult result; + size_t uniform_count = 0; + uint32_t* uniforms = NULL; + if ((result = EnumerateAllUniforms(p_module, &uniform_count, &uniforms)) != + SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + size_t push_constant_count = 0; + uint32_t* push_constants = NULL; + if ((result = EnumerateAllPushConstants(p_module, &push_constant_count, &push_constants)) != + SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + size_t entry_point_index = 0; + for (size_t i = 0; entry_point_index < p_parser->entry_point_count && i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if (p_node->op != SpvOpEntryPoint) { + continue; + } + + SpvReflectEntryPoint* p_entry_point = &(p_module->entry_points[entry_point_index]); + CHECKED_READU32_CAST(p_parser, p_node->word_offset + 1, SpvExecutionModel, p_entry_point->spirv_execution_model); + CHECKED_READU32(p_parser, p_node->word_offset + 2, p_entry_point->id); + + switch (p_entry_point->spirv_execution_model) { + default: break; + case SpvExecutionModelVertex : p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_VERTEX_BIT; break; + case SpvExecutionModelTessellationControl : p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TESSELLATION_CONTROL_BIT; break; + case SpvExecutionModelTessellationEvaluation : p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; break; + case SpvExecutionModelGeometry : p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_GEOMETRY_BIT; break; + case SpvExecutionModelFragment : p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT; break; + case SpvExecutionModelGLCompute : p_entry_point->shader_stage = SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT; break; + } + + ++entry_point_index; + + // Name length is required to calculate next operand + uint32_t name_start_word_offset = 3; + uint32_t name_length_with_terminator = 0; + result = ReadStr(p_parser, p_node->word_offset + name_start_word_offset, 0, p_node->word_count, &name_length_with_terminator, NULL); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + p_entry_point->name = (const char*)(p_parser->spirv_code + p_node->word_offset + name_start_word_offset); + + uint32_t name_word_count = 
RoundUp(name_length_with_terminator, SPIRV_WORD_SIZE) / SPIRV_WORD_SIZE; + size_t interface_variable_count = (p_node->word_count - (name_start_word_offset + name_word_count)); + uint32_t* interface_variables = NULL; + if (interface_variable_count > 0) { + interface_variables = (uint32_t*)calloc(interface_variable_count, sizeof(*(interface_variables))); + if (IsNull(interface_variables)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + + for (uint32_t var_index = 0; var_index < interface_variable_count; ++var_index) { + uint32_t var_result_id = (uint32_t)INVALID_VALUE; + uint32_t offset = name_start_word_offset + name_word_count + var_index; + CHECKED_READU32(p_parser, p_node->word_offset + offset, var_result_id); + interface_variables[var_index] = var_result_id; + } + + result = ParseInterfaceVariables( + p_parser, + p_module, + p_entry_point, + interface_variable_count, + interface_variables); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + SafeFree(interface_variables); + + result = ParseStaticallyUsedResources( + p_parser, + p_module, + p_entry_point, + uniform_count, + uniforms, + push_constant_count, + push_constants); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + } + + SafeFree(uniforms); + SafeFree(push_constants); + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParsePushConstantBlocks(Parser* p_parser, SpvReflectShaderModule* p_module) +{ + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) { + continue; + } + + p_module->push_constant_block_count += 1; + } + + if (p_module->push_constant_block_count == 0) { + return SPV_REFLECT_RESULT_SUCCESS; + } + + p_module->push_constant_blocks = (SpvReflectBlockVariable*)calloc(p_module->push_constant_block_count, sizeof(*p_module->push_constant_blocks)); + if (IsNull(p_module->push_constant_blocks)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + + uint32_t push_constant_index = 0; + for (size_t i = 0; i < p_parser->node_count; ++i) { + Node* p_node = &(p_parser->nodes[i]); + if ((p_node->op != SpvOpVariable) || (p_node->storage_class != SpvStorageClassPushConstant)) { + continue; + } + + SpvReflectTypeDescription* p_type = FindType(p_module, p_node->type_id); + if (IsNull(p_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // If the type is a pointer, resolve it + if (p_type->op == SpvOpTypePointer) { + // Find the type's node + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + // Should be the resolved type + p_type = FindType(p_module, p_type_node->type_id); + if (IsNull(p_type)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + } + + Node* p_type_node = FindNode(p_parser, p_type->id); + if (IsNull(p_type_node)) { + return SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE; + } + + SpvReflectBlockVariable* p_push_constant = &p_module->push_constant_blocks[push_constant_index]; + p_push_constant->spirv_id = p_node->result_id; + SpvReflectResult result = ParseDescriptorBlockVariable(p_parser, p_module, p_type, p_push_constant); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + result = ParseDescriptorBlockVariableSizes(p_parser, p_module, true, false, false, p_push_constant); + if (result != SPV_REFLECT_RESULT_SUCCESS) { + return result; + } + + 
++push_constant_index; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static int SortCompareDescriptorSet(const void* a, const void* b) +{ + const SpvReflectDescriptorSet* p_elem_a = (const SpvReflectDescriptorSet*)a; + const SpvReflectDescriptorSet* p_elem_b = (const SpvReflectDescriptorSet*)b; + int value = (int)(p_elem_a->set) - (int)(p_elem_b->set); + // We should never see duplicate descriptor set numbers in a shader; if so, a tiebreaker + // would be needed here. + assert(value != 0); + return value; +} + +static SpvReflectResult ParseEntrypointDescriptorSets(SpvReflectShaderModule* p_module) { + // Update the entry point's sets + for (uint32_t i = 0; i < p_module->entry_point_count; ++i) { + SpvReflectEntryPoint* p_entry = &p_module->entry_points[i]; + for (uint32_t j = 0; j < p_entry->descriptor_set_count; ++j) { + SafeFree(p_entry->descriptor_sets[j].bindings); + } + SafeFree(p_entry->descriptor_sets); + p_entry->descriptor_set_count = 0; + for (uint32_t j = 0; j < p_module->descriptor_set_count; ++j) { + const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; + for (uint32_t k = 0; k < p_set->binding_count; ++k) { + bool found = SearchSortedUint32( + p_entry->used_uniforms, + p_entry->used_uniform_count, + p_set->bindings[k]->spirv_id); + if (found) { + ++p_entry->descriptor_set_count; + break; + } + } + } + + p_entry->descriptor_sets = NULL; + if (p_entry->descriptor_set_count > 0) { + p_entry->descriptor_sets = (SpvReflectDescriptorSet*)calloc(p_entry->descriptor_set_count, + sizeof(*p_entry->descriptor_sets)); + if (IsNull(p_entry->descriptor_sets)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + } + p_entry->descriptor_set_count = 0; + for (uint32_t j = 0; j < p_module->descriptor_set_count; ++j) { + const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; + uint32_t count = 0; + for (uint32_t k = 0; k < p_set->binding_count; ++k) { + bool found = SearchSortedUint32( + p_entry->used_uniforms, + p_entry->used_uniform_count, + p_set->bindings[k]->spirv_id); + if (found) { + ++count; + } + } + if (count == 0) { + continue; + } + SpvReflectDescriptorSet* p_entry_set = &p_entry->descriptor_sets[ + p_entry->descriptor_set_count++]; + p_entry_set->set = p_set->set; + p_entry_set->bindings = (SpvReflectDescriptorBinding**)calloc(count, + sizeof(*p_entry_set->bindings)); + if (IsNull(p_entry_set->bindings)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + for (uint32_t k = 0; k < p_set->binding_count; ++k) { + bool found = SearchSortedUint32( + p_entry->used_uniforms, + p_entry->used_uniform_count, + p_set->bindings[k]->spirv_id); + if (found) { + p_entry_set->bindings[p_entry_set->binding_count++] = p_set->bindings[k]; + } + } + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult ParseDescriptorSets(SpvReflectShaderModule* p_module) +{ + // Count the descriptors in each set + for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) { + SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[i]); + + // Look for a target set using the descriptor's set number + SpvReflectDescriptorSet* p_target_set = NULL; + for (uint32_t j = 0; j < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++j) { + SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j]; + if (p_set->set == p_descriptor->set) { + p_target_set = p_set; + break; + } + } + + // If a target set isn't found, find the first available one. 
+ if (IsNull(p_target_set)) {
+ for (uint32_t j = 0; j < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++j) {
+ SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[j];
+ if (p_set->set == (uint32_t)INVALID_VALUE) {
+ p_target_set = p_set;
+ p_target_set->set = p_descriptor->set;
+ break;
+ }
+ }
+ }
+
+ if (IsNull(p_target_set)) {
+ return SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR;
+ }
+
+ p_target_set->binding_count += 1;
+ }
+
+ // Count the descriptor sets
+ for (uint32_t i = 0; i < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++i) {
+ const SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i];
+ if (p_set->set != (uint32_t)INVALID_VALUE) {
+ p_module->descriptor_set_count += 1;
+ }
+ }
+
+ // Sort the descriptor sets based on numbers
+ if (p_module->descriptor_set_count > 0) {
+ qsort(p_module->descriptor_sets,
+ p_module->descriptor_set_count,
+ sizeof(*(p_module->descriptor_sets)),
+ SortCompareDescriptorSet);
+ }
+
+ // Build descriptor pointer array
+ for (uint32_t i = 0; i < p_module->descriptor_set_count; ++i) {
+ SpvReflectDescriptorSet* p_set = &(p_module->descriptor_sets[i]);
+ p_set->bindings = (SpvReflectDescriptorBinding **)calloc(p_set->binding_count, sizeof(*(p_set->bindings)));
+
+ uint32_t descriptor_index = 0;
+ for (uint32_t j = 0; j < p_module->descriptor_binding_count; ++j) {
+ SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[j]);
+ if (p_descriptor->set == p_set->set) {
+ assert(descriptor_index < p_set->binding_count);
+ p_set->bindings[descriptor_index] = p_descriptor;
+ ++descriptor_index;
+ }
+ }
+ }
+
+ return ParseEntrypointDescriptorSets(p_module);
+}
+
+static SpvReflectResult DisambiguateStorageBufferSrvUav(SpvReflectShaderModule* p_module)
+{
+ if (p_module->descriptor_binding_count == 0) {
+ return SPV_REFLECT_RESULT_SUCCESS;
+ }
+
+ for (uint32_t descriptor_index = 0; descriptor_index < p_module->descriptor_binding_count; ++descriptor_index) {
+ SpvReflectDescriptorBinding* p_descriptor = &(p_module->descriptor_bindings[descriptor_index]);
+ // Skip everything that isn't a STORAGE_BUFFER descriptor
+ if (p_descriptor->descriptor_type != SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
+ continue;
+ }
+
+ //
+ // Vulkan doesn't disambiguate between SRVs and UAVs so they
+ // come back as STORAGE_BUFFER. The block parsing process will
+ // mark a block as non-writable if any member of the block
+ // or its descendants is non-writable.
+ // + if (p_descriptor->block.decoration_flags & SPV_REFLECT_DECORATION_NON_WRITABLE) { + p_descriptor->resource_type = SPV_REFLECT_RESOURCE_FLAG_SRV; + } + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +static SpvReflectResult SynchronizeDescriptorSets(SpvReflectShaderModule* p_module) +{ + // Free and reset all descriptor set numbers + for (uint32_t i = 0; i < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++i) { + SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i]; + SafeFree(p_set->bindings); + p_set->binding_count = 0; + p_set->set = (uint32_t)INVALID_VALUE; + } + // Set descriptor set count to zero + p_module->descriptor_set_count = 0; + + SpvReflectResult result = ParseDescriptorSets(p_module); + return result; +} + +SpvReflectResult spvReflectGetShaderModule( + size_t size, + const void* p_code, + SpvReflectShaderModule* p_module +) +{ + return spvReflectCreateShaderModule(size, p_code, p_module); +} + +SpvReflectResult spvReflectCreateShaderModule( + size_t size, + const void* p_code, + SpvReflectShaderModule* p_module +) +{ + // Initialize all module fields to zero + memset(p_module, 0, sizeof(*p_module)); + + // Allocate module internals +#ifdef __cplusplus + p_module->_internal = (SpvReflectShaderModule::Internal*)calloc(1, sizeof(*(p_module->_internal))); +#else + p_module->_internal = calloc(1, sizeof(*(p_module->_internal))); +#endif + if (IsNull(p_module->_internal)) { + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + // Allocate SPIR-V code storage + p_module->_internal->spirv_size = size; + p_module->_internal->spirv_code = (uint32_t*)calloc(1, p_module->_internal->spirv_size); + p_module->_internal->spirv_word_count = (uint32_t)(size / SPIRV_WORD_SIZE); + if (IsNull(p_module->_internal->spirv_code)) { + SafeFree(p_module->_internal); + return SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED; + } + memcpy(p_module->_internal->spirv_code, p_code, size); + + Parser parser = { 0 }; + SpvReflectResult result = CreateParser(p_module->_internal->spirv_size, + p_module->_internal->spirv_code, + &parser); + + // Generator + { + const uint32_t* p_ptr = (const uint32_t*)p_module->_internal->spirv_code; + p_module->generator = (SpvReflectGenerator)((*(p_ptr + 2) & 0xFFFF0000) >> 16); + } + + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseNodes(&parser); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseStrings(&parser); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseSource(&parser, p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseFunctions(&parser); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseMemberCounts(&parser); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseNames(&parser); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseDecorations(&parser); + } + + // Start of reflection data parsing + if (result == SPV_REFLECT_RESULT_SUCCESS) { + p_module->source_language = parser.source_language; + p_module->source_language_version = parser.source_language_version; + + // Zero out descriptor set data + p_module->descriptor_set_count = 0; + memset(p_module->descriptor_sets, 0, SPV_REFLECT_MAX_DESCRIPTOR_SETS * sizeof(*p_module->descriptor_sets)); + // Initialize descriptor set numbers + for (uint32_t set_number = 0; set_number < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++set_number) { + p_module->descriptor_sets[set_number].set = (uint32_t)INVALID_VALUE; + } + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseTypes(&parser, p_module); + } + if (result == 
SPV_REFLECT_RESULT_SUCCESS) { + result = ParseDescriptorBindings(&parser, p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseDescriptorType(p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseUAVCounterBindings(p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseDescriptorBlocks(&parser, p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParsePushConstantBlocks(&parser, p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = ParseEntryPoints(&parser, p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS && p_module->entry_point_count > 0) { + SpvReflectEntryPoint* p_entry = &(p_module->entry_points[0]); + p_module->entry_point_name = p_entry->name; + p_module->entry_point_id = p_entry->id; + p_module->spirv_execution_model = p_entry->spirv_execution_model; + p_module->shader_stage = p_entry->shader_stage; + p_module->input_variable_count = p_entry->input_variable_count; + p_module->input_variables = p_entry->input_variables; + p_module->output_variable_count = p_entry->output_variable_count; + p_module->output_variables = p_entry->output_variables; + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = DisambiguateStorageBufferSrvUav(p_module); + } + if (result == SPV_REFLECT_RESULT_SUCCESS) { + result = SynchronizeDescriptorSets(p_module); + } + + // Destroy module if parse was not successful + if (result != SPV_REFLECT_RESULT_SUCCESS) { + spvReflectDestroyShaderModule(p_module); + } + + DestroyParser(&parser); + + return result; +} + +static void SafeFreeTypes(SpvReflectTypeDescription* p_type) +{ + if (IsNull(p_type)) { + return; + } + + if (IsNotNull(p_type->members)) { + for (size_t i = 0; i < p_type->member_count; ++i) { + SpvReflectTypeDescription* p_member = &p_type->members[i]; + SafeFreeTypes(p_member); + } + + SafeFree(p_type->members); + p_type->members = NULL; + } +} + +static void SafeFreeBlockVariables(SpvReflectBlockVariable* p_block) +{ + if (IsNull(p_block)) { + return; + } + + if (IsNotNull(p_block->members)) { + for (size_t i = 0; i < p_block->member_count; ++i) { + SpvReflectBlockVariable* p_member = &p_block->members[i]; + SafeFreeBlockVariables(p_member); + } + + SafeFree(p_block->members); + p_block->members = NULL; + } +} + +static void SafeFreeInterfaceVariable(SpvReflectInterfaceVariable* p_interface) +{ + if (IsNull(p_interface)) { + return; + } + + if (IsNotNull(p_interface->members)) { + for (size_t i = 0; i < p_interface->member_count; ++i) { + SpvReflectInterfaceVariable* p_member = &p_interface->members[i]; + SafeFreeInterfaceVariable(p_member); + } + + SafeFree(p_interface->members); + p_interface->members = NULL; + } +} + +void spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module) +{ + if (IsNull(p_module->_internal)) { + return; + } + + // Descriptor set bindings + for (size_t i = 0; i < p_module->descriptor_set_count; ++i) { + SpvReflectDescriptorSet* p_set = &p_module->descriptor_sets[i]; + free(p_set->bindings); + } + + // Descriptor binding blocks + for (size_t i = 0; i < p_module->descriptor_binding_count; ++i) { + SpvReflectDescriptorBinding* p_descriptor = &p_module->descriptor_bindings[i]; + SafeFreeBlockVariables(&p_descriptor->block); + } + SafeFree(p_module->descriptor_bindings); + + // Entry points + for (size_t i = 0; i < p_module->entry_point_count; ++i) { + SpvReflectEntryPoint* p_entry = &p_module->entry_points[i]; + for (size_t j = 0; j < p_entry->input_variable_count; j++) { + 
SafeFreeInterfaceVariable(&p_entry->input_variables[j]); + } + for (size_t j = 0; j < p_entry->output_variable_count; j++) { + SafeFreeInterfaceVariable(&p_entry->output_variables[j]); + } + for (uint32_t j = 0; j < p_entry->descriptor_set_count; ++j) { + SafeFree(p_entry->descriptor_sets[j].bindings); + } + SafeFree(p_entry->descriptor_sets); + SafeFree(p_entry->input_variables); + SafeFree(p_entry->output_variables); + SafeFree(p_entry->used_uniforms); + SafeFree(p_entry->used_push_constants); + } + SafeFree(p_module->entry_points); + + // Push constants + for (size_t i = 0; i < p_module->push_constant_block_count; ++i) { + SafeFreeBlockVariables(&p_module->push_constant_blocks[i]); + } + SafeFree(p_module->push_constant_blocks); + + // Type infos + for (size_t i = 0; i < p_module->_internal->type_description_count; ++i) { + SpvReflectTypeDescription* p_type = &p_module->_internal->type_descriptions[i]; + if (IsNotNull(p_type->members)) { + SafeFreeTypes(p_type); + } + SafeFree(p_type->members); + } + SafeFree(p_module->_internal->type_descriptions); + + // Free SPIR-V code + SafeFree(p_module->_internal->spirv_code); + // Free internal + SafeFree(p_module->_internal); +} + +uint32_t spvReflectGetCodeSize(const SpvReflectShaderModule* p_module) +{ + if (IsNull(p_module)) { + return 0; + } + + return (uint32_t)(p_module->_internal->spirv_size); +} + +const uint32_t* spvReflectGetCode(const SpvReflectShaderModule* p_module) +{ + if (IsNull(p_module)) { + return NULL; + } + + return p_module->_internal->spirv_code; +} + +const SpvReflectEntryPoint* spvReflectGetEntryPoint( + const SpvReflectShaderModule* p_module, + const char* entry_point +) { + if (IsNull(p_module) || IsNull(entry_point)) { + return NULL; + } + + for (uint32_t i = 0; i < p_module->entry_point_count; ++i) { + if (strcmp(p_module->entry_points[i].name, entry_point) == 0) { + return &p_module->entry_points[i]; + } + } + return NULL; +} + +SpvReflectResult spvReflectEnumerateDescriptorBindings( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectDescriptorBinding** pp_bindings +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + if (IsNotNull(pp_bindings)) { + if (*p_count != p_module->descriptor_binding_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectDescriptorBinding* p_bindings = (SpvReflectDescriptorBinding*)&p_module->descriptor_bindings[index]; + pp_bindings[index] = p_bindings; + } + } + else { + *p_count = p_module->descriptor_binding_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateEntryPointDescriptorBindings( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectDescriptorBinding** pp_bindings +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + + uint32_t count = 0; + for (uint32_t i = 0; i < p_module->descriptor_binding_count; ++i) { + bool found = SearchSortedUint32( + p_entry->used_uniforms, + p_entry->used_uniform_count, + p_module->descriptor_bindings[i].spirv_id); + if (found) { + if (IsNotNull(pp_bindings)) { + if (count >= 
*p_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + pp_bindings[count++] = (SpvReflectDescriptorBinding*)&p_module->descriptor_bindings[i]; + } else { + ++count; + } + } + } + if (IsNotNull(pp_bindings)) { + if (count != *p_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + } else { + *p_count = count; + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateDescriptorSets( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectDescriptorSet** pp_sets +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + if (IsNotNull(pp_sets)) { + if (*p_count != p_module->descriptor_set_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectDescriptorSet* p_set = (SpvReflectDescriptorSet*)&p_module->descriptor_sets[index]; + pp_sets[index] = p_set; + } + } + else { + *p_count = p_module->descriptor_set_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateEntryPointDescriptorSets( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectDescriptorSet** pp_sets +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + + if (IsNotNull(pp_sets)) { + if (*p_count != p_entry->descriptor_set_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectDescriptorSet* p_set = (SpvReflectDescriptorSet*)&p_entry->descriptor_sets[index]; + pp_sets[index] = p_set; + } + } + else { + *p_count = p_entry->descriptor_set_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateInputVariables( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + if (IsNotNull(pp_variables)) { + if (*p_count != p_module->input_variable_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectInterfaceVariable* p_var = (SpvReflectInterfaceVariable*)&p_module->input_variables[index]; + pp_variables[index] = p_var; + } + } + else { + *p_count = p_module->input_variable_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateEntryPointInputVariables( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + + if (IsNotNull(pp_variables)) { + if (*p_count != p_entry->input_variable_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; 
++index) { + SpvReflectInterfaceVariable* p_var = (SpvReflectInterfaceVariable*)&p_entry->input_variables[index]; + pp_variables[index] = p_var; + } + } + else { + *p_count = p_entry->input_variable_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateOutputVariables( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + if (IsNotNull(pp_variables)) { + if (*p_count != p_module->output_variable_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectInterfaceVariable* p_var = (SpvReflectInterfaceVariable*)&p_module->output_variables[index]; + pp_variables[index] = p_var; + } + } + else { + *p_count = p_module->output_variable_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumerateEntryPointOutputVariables( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + + if (IsNotNull(pp_variables)) { + if (*p_count != p_entry->output_variable_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectInterfaceVariable* p_var = (SpvReflectInterfaceVariable*)&p_entry->output_variables[index]; + pp_variables[index] = p_var; + } + } + else { + *p_count = p_entry->output_variable_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectEnumeratePushConstantBlocks( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + if (pp_blocks != NULL) { + if (*p_count != p_module->push_constant_block_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + + for (uint32_t index = 0; index < *p_count; ++index) { + SpvReflectBlockVariable* p_push_constant_blocks = (SpvReflectBlockVariable*)&p_module->push_constant_blocks[index]; + pp_blocks[index] = p_push_constant_blocks; + } + } + else { + *p_count = p_module->push_constant_block_count; + } + + return SPV_REFLECT_RESULT_SUCCESS; +} +SpvReflectResult spvReflectEnumeratePushConstants( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +) +{ + return spvReflectEnumeratePushConstantBlocks(p_module, p_count, pp_blocks); +} + +SpvReflectResult spvReflectEnumerateEntryPointPushConstantBlocks( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_count)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + return 
SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + + uint32_t count = 0; + for (uint32_t i = 0; i < p_module->push_constant_block_count; ++i) { + bool found = SearchSortedUint32(p_entry->used_push_constants, + p_entry->used_push_constant_count, + p_module->push_constant_blocks[i].spirv_id); + if (found) { + if (IsNotNull(pp_blocks)) { + if (count >= *p_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + pp_blocks[count++] = (SpvReflectBlockVariable*)&p_module->push_constant_blocks[i]; + } else { + ++count; + } + } + } + if (IsNotNull(pp_blocks)) { + if (count != *p_count) { + return SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH; + } + } else { + *p_count = count; + } + return SPV_REFLECT_RESULT_SUCCESS; +} + +const SpvReflectDescriptorBinding* spvReflectGetDescriptorBinding( + const SpvReflectShaderModule* p_module, + uint32_t binding_number, + uint32_t set_number, + SpvReflectResult* p_result +) +{ + const SpvReflectDescriptorBinding* p_descriptor = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) { + const SpvReflectDescriptorBinding* p_potential = &p_module->descriptor_bindings[index]; + if ((p_potential->binding == binding_number) && (p_potential->set == set_number)) { + p_descriptor = p_potential; + break; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_descriptor) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_descriptor; +} + +const SpvReflectDescriptorBinding* spvReflectGetEntryPointDescriptorBinding( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t binding_number, + uint32_t set_number, + SpvReflectResult* p_result +) +{ + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectDescriptorBinding* p_descriptor = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) { + const SpvReflectDescriptorBinding* p_potential = &p_module->descriptor_bindings[index]; + bool found = SearchSortedUint32( + p_entry->used_uniforms, + p_entry->used_uniform_count, + p_potential->spirv_id); + if ((p_potential->binding == binding_number) && (p_potential->set == set_number) && found) { + p_descriptor = p_potential; + break; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_descriptor) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_descriptor; +} + +const SpvReflectDescriptorSet* spvReflectGetDescriptorSet( + const SpvReflectShaderModule* p_module, + uint32_t set_number, + SpvReflectResult* p_result +) +{ + const SpvReflectDescriptorSet* p_set = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->descriptor_set_count; ++index) { + const SpvReflectDescriptorSet* p_potential = &p_module->descriptor_sets[index]; + if (p_potential->set == set_number) { + p_set = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_set) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_set; +} + +const SpvReflectDescriptorSet* spvReflectGetEntryPointDescriptorSet( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t set_number, + SpvReflectResult* p_result) +{ + const SpvReflectDescriptorSet* p_set = NULL; + if (IsNotNull(p_module)) { + const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + for (uint32_t index = 0; index < p_entry->descriptor_set_count; ++index) { + const SpvReflectDescriptorSet* p_potential = &p_entry->descriptor_sets[index]; + if (p_potential->set == set_number) { + p_set = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_set) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_set; +} + + +const SpvReflectInterfaceVariable* spvReflectGetInputVariableByLocation( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +) +{ + if (location == INVALID_VALUE) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->input_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_module->input_variables[index]; + if (p_potential->location == location) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} +const SpvReflectInterfaceVariable* spvReflectGetInputVariable( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +) +{ + return spvReflectGetInputVariableByLocation(p_module, location, p_result); +} + +const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableByLocation( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t location, + SpvReflectResult* p_result +) +{ + if (location == INVALID_VALUE) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + for (uint32_t index = 0; index < p_entry->input_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_entry->input_variables[index]; + if (p_potential->location == location) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} + +const SpvReflectInterfaceVariable* spvReflectGetInputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* semantic, + SpvReflectResult* p_result +) +{ + if (IsNull(semantic)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + return NULL; + } + if (semantic[0] == '\0') { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->input_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_module->input_variables[index]; + if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} + +const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* entry_point, + const char* semantic, + SpvReflectResult* p_result +) +{ + if (IsNull(semantic)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + return NULL; + } + if (semantic[0] == '\0') { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + for (uint32_t index = 0; index < p_entry->input_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_entry->input_variables[index]; + if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} + +const SpvReflectInterfaceVariable* spvReflectGetOutputVariableByLocation( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +) +{ + if (location == INVALID_VALUE) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->output_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_module->output_variables[index]; + if (p_potential->location == location) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} +const SpvReflectInterfaceVariable* spvReflectGetOutputVariable( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +) +{ + return spvReflectGetOutputVariableByLocation(p_module, location, p_result); +} + +const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableByLocation( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t location, + SpvReflectResult* p_result +) +{ + if (location == INVALID_VALUE) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + for (uint32_t index = 0; index < p_entry->output_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_entry->output_variables[index]; + if (p_potential->location == location) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} + +const SpvReflectInterfaceVariable* spvReflectGetOutputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* semantic, + SpvReflectResult* p_result +) +{ + if (IsNull(semantic)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + return NULL; + } + if (semantic[0] == '\0') { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + for (uint32_t index = 0; index < p_module->output_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_module->output_variables[index]; + if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} + +const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* entry_point, + const char* semantic, + SpvReflectResult* p_result) +{ + if (IsNull(semantic)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + return NULL; + } + if (semantic[0] == '\0') { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + const SpvReflectInterfaceVariable* p_var = NULL; + if (IsNotNull(p_module)) { + const SpvReflectEntryPoint* p_entry = spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + for (uint32_t index = 0; index < p_entry->output_variable_count; ++index) { + const SpvReflectInterfaceVariable* p_potential = &p_entry->output_variables[index]; + if (p_potential->semantic != NULL && strcmp(p_potential->semantic, semantic) == 0) { + p_var = p_potential; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_var) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_var; +} + +const SpvReflectBlockVariable* spvReflectGetPushConstantBlock( + const SpvReflectShaderModule* p_module, + uint32_t index, + SpvReflectResult* p_result +) +{ + const SpvReflectBlockVariable* p_push_constant = NULL; + if (IsNotNull(p_module)) { + if (index < p_module->push_constant_block_count) { + p_push_constant = &p_module->push_constant_blocks[index]; + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_push_constant) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_push_constant; +} +const SpvReflectBlockVariable* spvReflectGetPushConstant( + const SpvReflectShaderModule* p_module, + uint32_t index, + SpvReflectResult* p_result +) +{ + return spvReflectGetPushConstantBlock(p_module, index, p_result); +} + +const SpvReflectBlockVariable* spvReflectGetEntryPointPushConstantBlock( + const SpvReflectShaderModule* p_module, + const char* entry_point, + SpvReflectResult* p_result) +{ + const SpvReflectBlockVariable* p_push_constant = NULL; + if (IsNotNull(p_module)) { + const SpvReflectEntryPoint* p_entry = + spvReflectGetEntryPoint(p_module, entry_point); + if (IsNull(p_entry)) { + if (IsNotNull(p_result)) { + *p_result = SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; + } + return NULL; + } + for (uint32_t i = 0; i < p_module->push_constant_block_count; ++i) { + bool found = SearchSortedUint32( + p_entry->used_push_constants, + p_entry->used_push_constant_count, + p_module->push_constant_blocks[i].spirv_id); + if (found) { + p_push_constant = &p_module->push_constant_blocks[i]; + break; + } + } + } + if (IsNotNull(p_result)) { + *p_result = IsNotNull(p_push_constant) + ? SPV_REFLECT_RESULT_SUCCESS + : (IsNull(p_module) ? 
SPV_REFLECT_RESULT_ERROR_NULL_POINTER + : SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND); + } + return p_push_constant; +} + +SpvReflectResult spvReflectChangeDescriptorBindingNumbers( + SpvReflectShaderModule* p_module, + const SpvReflectDescriptorBinding* p_binding, + uint32_t new_binding_number, + uint32_t new_set_binding +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_binding)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + + SpvReflectDescriptorBinding* p_target_descriptor = NULL; + for (uint32_t index = 0; index < p_module->descriptor_binding_count; ++index) { + if(&p_module->descriptor_bindings[index] == p_binding) { + p_target_descriptor = &p_module->descriptor_bindings[index]; + break; + } + } + + if (IsNotNull(p_target_descriptor)) { + if (p_target_descriptor->word_offset.binding > (p_module->_internal->spirv_word_count - 1)) { + return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED; + } + // Binding number + if (new_binding_number != SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE) { + uint32_t* p_code = p_module->_internal->spirv_code + p_target_descriptor->word_offset.binding; + *p_code = new_binding_number; + p_target_descriptor->binding = new_binding_number; + } + // Set number + if (new_set_binding != SPV_REFLECT_SET_NUMBER_DONT_CHANGE) { + uint32_t* p_code = p_module->_internal->spirv_code + p_target_descriptor->word_offset.set; + *p_code = new_set_binding; + p_target_descriptor->set = new_set_binding; + } + } + + SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS; + if (new_set_binding != SPV_REFLECT_SET_NUMBER_DONT_CHANGE) { + result = SynchronizeDescriptorSets(p_module); + } + return result; +} +SpvReflectResult spvReflectChangeDescriptorBindingNumber( + SpvReflectShaderModule* p_module, + const SpvReflectDescriptorBinding* p_descriptor_binding, + uint32_t new_binding_number, + uint32_t optional_new_set_number +) +{ + return spvReflectChangeDescriptorBindingNumbers( + p_module,p_descriptor_binding, + new_binding_number, + optional_new_set_number); +} + +SpvReflectResult spvReflectChangeDescriptorSetNumber( + SpvReflectShaderModule* p_module, + const SpvReflectDescriptorSet* p_set, + uint32_t new_set_number +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_set)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + SpvReflectDescriptorSet* p_target_set = NULL; + for (uint32_t index = 0; index < SPV_REFLECT_MAX_DESCRIPTOR_SETS; ++index) { + // The descriptor sets for specific entry points might not be in this set, + // so just match on set index. 
+ if (p_module->descriptor_sets[index].set == p_set->set) { + p_target_set = (SpvReflectDescriptorSet*)p_set; + break; + } + } + + SpvReflectResult result = SPV_REFLECT_RESULT_SUCCESS; + if (IsNotNull(p_target_set) && new_set_number != SPV_REFLECT_SET_NUMBER_DONT_CHANGE) { + for (uint32_t index = 0; index < p_target_set->binding_count; ++index) { + SpvReflectDescriptorBinding* p_descriptor = p_target_set->bindings[index]; + if (p_descriptor->word_offset.set > (p_module->_internal->spirv_word_count - 1)) { + return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED; + } + + uint32_t* p_code = p_module->_internal->spirv_code + p_descriptor->word_offset.set; + *p_code = new_set_number; + p_descriptor->set = new_set_number; + } + + result = SynchronizeDescriptorSets(p_module); + } + + return result; +} + +static SpvReflectResult ChangeVariableLocation( + SpvReflectShaderModule* p_module, + SpvReflectInterfaceVariable* p_variable, + uint32_t new_location +) +{ + if (p_variable->word_offset.location > (p_module->_internal->spirv_word_count - 1)) { + return SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED; + } + uint32_t* p_code = p_module->_internal->spirv_code + p_variable->word_offset.location; + *p_code = new_location; + p_variable->location = new_location; + return SPV_REFLECT_RESULT_SUCCESS; +} + +SpvReflectResult spvReflectChangeInputVariableLocation( + SpvReflectShaderModule* p_module, + const SpvReflectInterfaceVariable* p_input_variable, + uint32_t new_location +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_input_variable)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + for (uint32_t index = 0; index < p_module->input_variable_count; ++index) { + if(&p_module->input_variables[index] == p_input_variable) { + return ChangeVariableLocation(p_module, &p_module->input_variables[index], new_location); + } + } + return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; +} + +SpvReflectResult spvReflectChangeOutputVariableLocation( + SpvReflectShaderModule* p_module, + const SpvReflectInterfaceVariable* p_output_variable, + uint32_t new_location +) +{ + if (IsNull(p_module)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + if (IsNull(p_output_variable)) { + return SPV_REFLECT_RESULT_ERROR_NULL_POINTER; + } + for (uint32_t index = 0; index < p_module->output_variable_count; ++index) { + if(&p_module->output_variables[index] == p_output_variable) { + return ChangeVariableLocation(p_module, &p_module->output_variables[index], new_location); + } + } + return SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND; +} + +const char* spvReflectSourceLanguage(SpvSourceLanguage source_lang) +{ + switch (source_lang) { + case SpvSourceLanguageUnknown : return "Unknown"; + case SpvSourceLanguageESSL : return "ESSL"; + case SpvSourceLanguageGLSL : return "GLSL"; + case SpvSourceLanguageOpenCL_C : return "OpenCL_C"; + case SpvSourceLanguageOpenCL_CPP : return "OpenCL_CPP"; + case SpvSourceLanguageHLSL : return "HLSL"; + + case SpvSourceLanguageMax: + break; + } + return ""; +} diff --git a/src/third_party/spirv-reflect/spirv_reflect.h b/src/third_party/spirv-reflect/spirv_reflect.h new file mode 100644 index 0000000..6554aaa --- /dev/null +++ b/src/third_party/spirv-reflect/spirv_reflect.h @@ -0,0 +1,2045 @@ +/* + Copyright 2017-2018 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + +VERSION HISTORY + + 1.0 (2018-03-27) Initial public release + +*/ + +/*! + + @file spirv_reflect.h + +*/ +#ifndef SPIRV_REFLECT_H +#define SPIRV_REFLECT_H + +#include "./include/spirv/unified1/spirv.h" + +#include +#include + +#ifdef _MSC_VER + #define SPV_REFLECT_DEPRECATED(msg_str) __declspec(deprecated("This symbol is deprecated. Details: " msg_str)) +#elif defined(__clang__) + #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated(msg_str))) +#elif defined(__GNUC__) + #if GCC_VERSION >= 40500 + #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated(msg_str))) + #else + #define SPV_REFLECT_DEPRECATED(msg_str) __attribute__((deprecated)) + #endif +#else + #define SPV_REFLECT_DEPRECATED(msg_str) +#endif + +/*! @enum SpvReflectResult + +*/ +typedef enum SpvReflectResult { + SPV_REFLECT_RESULT_SUCCESS, + SPV_REFLECT_RESULT_NOT_READY, + SPV_REFLECT_RESULT_ERROR_PARSE_FAILED, + SPV_REFLECT_RESULT_ERROR_ALLOC_FAILED, + SPV_REFLECT_RESULT_ERROR_RANGE_EXCEEDED, + SPV_REFLECT_RESULT_ERROR_NULL_POINTER, + SPV_REFLECT_RESULT_ERROR_INTERNAL_ERROR, + SPV_REFLECT_RESULT_ERROR_COUNT_MISMATCH, + SPV_REFLECT_RESULT_ERROR_ELEMENT_NOT_FOUND, + SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_CODE_SIZE, + SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_MAGIC_NUMBER, + SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_EOF, + SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_ID_REFERENCE, + SPV_REFLECT_RESULT_ERROR_SPIRV_SET_NUMBER_OVERFLOW, + SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_STORAGE_CLASS, + SPV_REFLECT_RESULT_ERROR_SPIRV_RECURSION, + SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_INSTRUCTION, + SPV_REFLECT_RESULT_ERROR_SPIRV_UNEXPECTED_BLOCK_DATA, + SPV_REFLECT_RESULT_ERROR_SPIRV_INVALID_BLOCK_MEMBER_REFERENCE, +} SpvReflectResult; + +/*! @enum SpvReflectTypeFlagBits + +*/ +typedef enum SpvReflectTypeFlagBits { + SPV_REFLECT_TYPE_FLAG_UNDEFINED = 0x00000000, + SPV_REFLECT_TYPE_FLAG_VOID = 0x00000001, + SPV_REFLECT_TYPE_FLAG_BOOL = 0x00000002, + SPV_REFLECT_TYPE_FLAG_INT = 0x00000004, + SPV_REFLECT_TYPE_FLAG_FLOAT = 0x00000008, + SPV_REFLECT_TYPE_FLAG_VECTOR = 0x00000100, + SPV_REFLECT_TYPE_FLAG_MATRIX = 0x00000200, + SPV_REFLECT_TYPE_FLAG_EXTERNAL_IMAGE = 0x00010000, + SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLER = 0x00020000, + SPV_REFLECT_TYPE_FLAG_EXTERNAL_SAMPLED_IMAGE = 0x00040000, + SPV_REFLECT_TYPE_FLAG_EXTERNAL_BLOCK = 0x00080000, + SPV_REFLECT_TYPE_FLAG_EXTERNAL_MASK = 0x000F0000, + SPV_REFLECT_TYPE_FLAG_STRUCT = 0x10000000, + SPV_REFLECT_TYPE_FLAG_ARRAY = 0x20000000, +} SpvReflectTypeFlagBits; + +typedef uint32_t SpvReflectTypeFlags; + +/*! 
@enum SpvReflectDecorationBits + +*/ +typedef enum SpvReflectDecorationFlagBits { + SPV_REFLECT_DECORATION_NONE = 0x00000000, + SPV_REFLECT_DECORATION_BLOCK = 0x00000001, + SPV_REFLECT_DECORATION_BUFFER_BLOCK = 0x00000002, + SPV_REFLECT_DECORATION_ROW_MAJOR = 0x00000004, + SPV_REFLECT_DECORATION_COLUMN_MAJOR = 0x00000008, + SPV_REFLECT_DECORATION_BUILT_IN = 0x00000010, + SPV_REFLECT_DECORATION_NOPERSPECTIVE = 0x00000020, + SPV_REFLECT_DECORATION_FLAT = 0x00000040, + SPV_REFLECT_DECORATION_NON_WRITABLE = 0x00000080, +} SpvReflectDecorationFlagBits; + +typedef uint32_t SpvReflectDecorationFlags; + +/*! @enum SpvReflectResourceType + +*/ +typedef enum SpvReflectResourceType { + SPV_REFLECT_RESOURCE_FLAG_UNDEFINED = 0x00000000, + SPV_REFLECT_RESOURCE_FLAG_SAMPLER = 0x00000001, + SPV_REFLECT_RESOURCE_FLAG_CBV = 0x00000002, + SPV_REFLECT_RESOURCE_FLAG_SRV = 0x00000004, + SPV_REFLECT_RESOURCE_FLAG_UAV = 0x00000008, +} SpvReflectResourceType; + +/*! @enum SpvReflectFormat + +*/ +typedef enum SpvReflectFormat { + SPV_REFLECT_FORMAT_UNDEFINED = 0, // = VK_FORMAT_UNDEFINED + SPV_REFLECT_FORMAT_R32_UINT = 98, // = VK_FORMAT_R32_UINT + SPV_REFLECT_FORMAT_R32_SINT = 99, // = VK_FORMAT_R32_SINT + SPV_REFLECT_FORMAT_R32_SFLOAT = 100, // = VK_FORMAT_R32_SFLOAT + SPV_REFLECT_FORMAT_R32G32_UINT = 101, // = VK_FORMAT_R32G32_UINT + SPV_REFLECT_FORMAT_R32G32_SINT = 102, // = VK_FORMAT_R32G32_SINT + SPV_REFLECT_FORMAT_R32G32_SFLOAT = 103, // = VK_FORMAT_R32G32_SFLOAT + SPV_REFLECT_FORMAT_R32G32B32_UINT = 104, // = VK_FORMAT_R32G32B32_UINT + SPV_REFLECT_FORMAT_R32G32B32_SINT = 105, // = VK_FORMAT_R32G32B32_SINT + SPV_REFLECT_FORMAT_R32G32B32_SFLOAT = 106, // = VK_FORMAT_R32G32B32_SFLOAT + SPV_REFLECT_FORMAT_R32G32B32A32_UINT = 107, // = VK_FORMAT_R32G32B32A32_UINT + SPV_REFLECT_FORMAT_R32G32B32A32_SINT = 108, // = VK_FORMAT_R32G32B32A32_SINT + SPV_REFLECT_FORMAT_R32G32B32A32_SFLOAT = 109, // = VK_FORMAT_R32G32B32A32_SFLOAT +} SpvReflectFormat; + +/*! @enum SpvReflectVariableFlagBits + +*/ +enum SpvReflectVariableFlagBits{ + SPV_REFLECT_VARIABLE_FLAGS_NONE = 0x00000000, + SPV_REFLECT_VARIABLE_FLAGS_UNUSED = 0x00000001, +}; + +typedef uint32_t SpvReflectVariableFlags; + +/*! @enum SpvReflectDescriptorType + +*/ +typedef enum SpvReflectDescriptorType { + SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER = 0, // = VK_DESCRIPTOR_TYPE_SAMPLER + SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, // = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER + SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, // = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE + SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, // = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE + SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, // = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER + SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, // = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER + SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, // = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER + SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, // = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER + SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, // = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC + SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, // = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC + SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, // = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT +} SpvReflectDescriptorType; + +/*! 
@enum SpvReflectShaderStageFlagBits + +*/ +typedef enum SpvReflectShaderStageFlagBits { + SPV_REFLECT_SHADER_STAGE_VERTEX_BIT = 0x00000001, // = VK_SHADER_STAGE_VERTEX_BIT + SPV_REFLECT_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, // = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT + SPV_REFLECT_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, // = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT + SPV_REFLECT_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, // = VK_SHADER_STAGE_GEOMETRY_BIT + SPV_REFLECT_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, // = VK_SHADER_STAGE_FRAGMENT_BIT + SPV_REFLECT_SHADER_STAGE_COMPUTE_BIT = 0x00000020, // = VK_SHADER_STAGE_COMPUTE_BIT +} SpvReflectShaderStageFlagBits; + +/*! @enum SpvReflectGenerator + +*/ +typedef enum SpvReflectGenerator { + SPV_REFLECT_GENERATOR_KHRONOS_LLVM_SPIRV_TRANSLATOR = 6, + SPV_REFLECT_GENERATOR_KHRONOS_SPIRV_TOOLS_ASSEMBLER = 7, + SPV_REFLECT_GENERATOR_KHRONOS_GLSLANG_REFERENCE_FRONT_END = 8, + SPV_REFLECT_GENERATOR_GOOGLE_SHADERC_OVER_GLSLANG = 13, + SPV_REFLECT_GENERATOR_GOOGLE_SPIREGG = 14, + SPV_REFLECT_GENERATOR_GOOGLE_RSPIRV = 15, + SPV_REFLECT_GENERATOR_X_LEGEND_MESA_MESAIR_SPIRV_TRANSLATOR = 16, + SPV_REFLECT_GENERATOR_KHRONOS_SPIRV_TOOLS_LINKER = 17, + SPV_REFLECT_GENERATOR_WINE_VKD3D_SHADER_COMPILER = 18, + SPV_REFLECT_GENERATOR_CLAY_CLAY_SHADER_COMPILER = 19, +} SpvReflectGenerator; + +enum { + SPV_REFLECT_MAX_ARRAY_DIMS = 32, + SPV_REFLECT_MAX_DESCRIPTOR_SETS = 64, +}; + +enum { + SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE = ~0, + SPV_REFLECT_SET_NUMBER_DONT_CHANGE = ~0 +}; + +typedef struct SpvReflectNumericTraits { + struct Scalar { + uint32_t width; + uint32_t signedness; + } scalar; + + struct Vector { + uint32_t component_count; + } vector; + + struct Matrix { + uint32_t column_count; + uint32_t row_count; + uint32_t stride; // Measured in bytes + } matrix; +} SpvReflectNumericTraits; + +typedef struct SpvReflectImageTraits { + SpvDim dim; + uint32_t depth; + uint32_t arrayed; + uint32_t ms; // 0: single-sampled; 1: multisampled + uint32_t sampled; + SpvImageFormat image_format; +} SpvReflectImageTraits; + +typedef struct SpvReflectArrayTraits { + uint32_t dims_count; + uint32_t dims[SPV_REFLECT_MAX_ARRAY_DIMS]; + uint32_t stride; // Measured in bytes +} SpvReflectArrayTraits; + +typedef struct SpvReflectBindingArrayTraits { + uint32_t dims_count; + uint32_t dims[SPV_REFLECT_MAX_ARRAY_DIMS]; +} SpvReflectBindingArrayTraits; + +/*! @struct SpvReflectTypeDescription + +*/ +typedef struct SpvReflectTypeDescription { + uint32_t id; + SpvOp op; + const char* type_name; + const char* struct_member_name; + SpvStorageClass storage_class; + SpvReflectTypeFlags type_flags; + SpvReflectDecorationFlags decoration_flags; + + struct Traits { + SpvReflectNumericTraits numeric; + SpvReflectImageTraits image; + SpvReflectArrayTraits array; + } traits; + + uint32_t member_count; + struct SpvReflectTypeDescription* members; +} SpvReflectTypeDescription; + + +/*! @struct SpvReflectInterfaceVariable + +*/ +typedef struct SpvReflectInterfaceVariable { + uint32_t spirv_id; + const char* name; + uint32_t location; + SpvStorageClass storage_class; + const char* semantic; + SpvReflectDecorationFlags decoration_flags; + SpvBuiltIn built_in; + SpvReflectNumericTraits numeric; + SpvReflectArrayTraits array; + + uint32_t member_count; + struct SpvReflectInterfaceVariable* members; + + SpvReflectFormat format; + + // NOTE: SPIR-V shares type references for variables + // that have the same underlying type. 
This means + // that the same type name will appear for multiple + // variables. + SpvReflectTypeDescription* type_description; + + struct { + uint32_t location; + } word_offset; +} SpvReflectInterfaceVariable; + +/*! @struct SpvReflectBlockVariable + +*/ +typedef struct SpvReflectBlockVariable { + uint32_t spirv_id; + const char* name; + uint32_t offset; // Measured in bytes + uint32_t absolute_offset; // Measured in bytes + uint32_t size; // Measured in bytes + uint32_t padded_size; // Measured in bytes + SpvReflectDecorationFlags decoration_flags; + SpvReflectNumericTraits numeric; + SpvReflectArrayTraits array; + SpvReflectVariableFlags flags; + + uint32_t member_count; + struct SpvReflectBlockVariable* members; + + SpvReflectTypeDescription* type_description; +} SpvReflectBlockVariable; + +/*! @struct SpvReflectDescriptorBinding + +*/ +typedef struct SpvReflectDescriptorBinding { + uint32_t spirv_id; + const char* name; + uint32_t binding; + uint32_t input_attachment_index; + uint32_t set; + SpvReflectDescriptorType descriptor_type; + SpvReflectResourceType resource_type; + SpvReflectImageTraits image; + SpvReflectBlockVariable block; + SpvReflectBindingArrayTraits array; + uint32_t count; + uint32_t accessed; + uint32_t uav_counter_id; + struct SpvReflectDescriptorBinding* uav_counter_binding; + + SpvReflectTypeDescription* type_description; + + struct { + uint32_t binding; + uint32_t set; + } word_offset; +} SpvReflectDescriptorBinding; + +/*! @struct SpvReflectDescriptorSet + +*/ +typedef struct SpvReflectDescriptorSet { + uint32_t set; + uint32_t binding_count; + SpvReflectDescriptorBinding** bindings; +} SpvReflectDescriptorSet; + +/*! @struct SpvReflectEntryPoint + + */ +typedef struct SpvReflectEntryPoint { + const char* name; + uint32_t id; + + SpvExecutionModel spirv_execution_model; + SpvReflectShaderStageFlagBits shader_stage; + + uint32_t input_variable_count; + SpvReflectInterfaceVariable* input_variables; + uint32_t output_variable_count; + SpvReflectInterfaceVariable* output_variables; + + uint32_t descriptor_set_count; + SpvReflectDescriptorSet* descriptor_sets; + + uint32_t used_uniform_count; + uint32_t* used_uniforms; + uint32_t used_push_constant_count; + uint32_t* used_push_constants; +} SpvReflectEntryPoint; + +/*! @struct SpvReflectShaderModule + +*/ +typedef struct SpvReflectShaderModule { + SpvReflectGenerator generator; + const char* entry_point_name; + uint32_t entry_point_id; + uint32_t entry_point_count; + SpvReflectEntryPoint* entry_points; + SpvSourceLanguage source_language; + uint32_t source_language_version; + const char* source_file; + const char* source_source; + SpvExecutionModel spirv_execution_model; + SpvReflectShaderStageFlagBits shader_stage; + uint32_t descriptor_binding_count; + SpvReflectDescriptorBinding* descriptor_bindings; + uint32_t descriptor_set_count; + SpvReflectDescriptorSet descriptor_sets[SPV_REFLECT_MAX_DESCRIPTOR_SETS]; + uint32_t input_variable_count; + SpvReflectInterfaceVariable* input_variables; + uint32_t output_variable_count; + SpvReflectInterfaceVariable* output_variables; + uint32_t push_constant_block_count; + SpvReflectBlockVariable* push_constant_blocks; + + struct Internal { + size_t spirv_size; + uint32_t* spirv_code; + uint32_t spirv_word_count; + + size_t type_description_count; + SpvReflectTypeDescription* type_descriptions; + } * _internal; + +} SpvReflectShaderModule; + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! @fn spvReflectCreateShaderModule + + @param size Size in bytes of SPIR-V code. 
+ @param p_code Pointer to SPIR-V code. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @return SPV_REFLECT_RESULT_SUCCESS on success. + +*/ +SpvReflectResult spvReflectCreateShaderModule( + size_t size, + const void* p_code, + SpvReflectShaderModule* p_module +); + +SPV_REFLECT_DEPRECATED("renamed to spvReflectCreateShaderModule") +SpvReflectResult spvReflectGetShaderModule( + size_t size, + const void* p_code, + SpvReflectShaderModule* p_module +); + + +/*! @fn spvReflectDestroyShaderModule + + @param p_module Pointer to an instance of SpvReflectShaderModule. + +*/ +void spvReflectDestroyShaderModule(SpvReflectShaderModule* p_module); + + +/*! @fn spvReflectGetCodeSize + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @return Returns the size of the SPIR-V in bytes. + +*/ +uint32_t spvReflectGetCodeSize(const SpvReflectShaderModule* p_module); + + +/*! @fn spvReflectGetCode + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @return Returns a const pointer to the compiled SPIR-V bytecode. + +*/ +const uint32_t* spvReflectGetCode(const SpvReflectShaderModule* p_module); + +/*! @fn spvReflectGetEntryPoint + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point Name of the requested entry point. + @return Returns a const pointer to the requested entry point, + or NULL if it's not found. +*/ +const SpvReflectEntryPoint* spvReflectGetEntryPoint( + const SpvReflectShaderModule* p_module, + const char* entry_point +); + +/*! @fn spvReflectEnumerateDescriptorBindings + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_bindings is NULL, the module's descriptor binding + count (across all descriptor sets) will be stored here. + If pp_bindings is not NULL, *p_count must contain the + module's descriptor binding count. + @param pp_bindings If NULL, the module's total descriptor binding count + will be written to *p_count. + If non-NULL, pp_bindings must point to an array with + *p_count entries, where pointers to the module's + descriptor bindings will be written. The caller must not + free the binding pointers written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateDescriptorBindings( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectDescriptorBinding** pp_bindings +); + +/*! @fn spvReflectEnumerateEntryPointDescriptorBindings + @brief Creates a listing of all descriptor bindings that are used in the + static call tree of the given entry point. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The name of the entry point to get the descriptor bindings for. + @param p_count If pp_bindings is NULL, the entry point's descriptor binding + count (across all descriptor sets) will be stored here. + If pp_bindings is not NULL, *p_count must contain the + entry point's descriptor binding count. + @param pp_bindings If NULL, the entry point's total descriptor binding count + will be written to *p_count. + If non-NULL, pp_bindings must point to an array with + *p_count entries, where pointers to the entry point's + descriptor bindings will be written. The caller must not + free the binding pointers written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure.
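+
+  Usage sketch (illustrative only; it assumes a module already created with
+  spvReflectCreateShaderModule and an entry point named "main", and omits
+  includes and full error handling). The same two-call idiom applies to the
+  other spvReflectEnumerate* functions: query the count first, then fill a
+  caller-allocated array of pointers.
+
+  @code
+  uint32_t count = 0;
+  SpvReflectResult result = spvReflectEnumerateEntryPointDescriptorBindings(
+      &module, "main", &count, NULL);
+  if (result == SPV_REFLECT_RESULT_SUCCESS) {
+    SpvReflectDescriptorBinding** bindings =
+        (SpvReflectDescriptorBinding**)malloc(count * sizeof(*bindings));
+    result = spvReflectEnumerateEntryPointDescriptorBindings(
+        &module, "main", &count, bindings);
+    // bindings[i]->set and bindings[i]->binding are now usable; the pointed-to
+    // descriptors are owned by the module, so only the array itself is freed.
+    free(bindings);
+  }
+  @endcode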
+ +*/ +SpvReflectResult spvReflectEnumerateEntryPointDescriptorBindings( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectDescriptorBinding** pp_bindings +); + +/*! @fn spvReflectEnumerateDescriptorSets + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_sets is NULL, the module's descriptor set + count will be stored here. + If pp_sets is not NULL, *p_count must contain the + module's descriptor set count. + @param pp_sets If NULL, the module's total descriptor set count + will be written to *p_count. + If non-NULL, pp_sets must point to an array with + *p_count entries, where pointers to the module's + descriptor sets will be written. The caller must not + free the descriptor set pointers written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateDescriptorSets( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectDescriptorSet** pp_sets +); + +/*! @fn spvReflectEnumerateEntryPointDescriptorSets + @brief Creates a listing of all descriptor sets and their bindings that are + used in the static call tree of a given entry point. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The name of the entry point to get the descriptor bindings for. + @param p_count If pp_sets is NULL, the module's descriptor set + count will be stored here. + If pp_sets is not NULL, *p_count must contain the + module's descriptor set count. + @param pp_sets If NULL, the module's total descriptor set count + will be written to *p_count. + If non-NULL, pp_sets must point to an array with + *p_count entries, where pointers to the module's + descriptor sets will be written. The caller must not + free the descriptor set pointers written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateEntryPointDescriptorSets( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectDescriptorSet** pp_sets +); + + +/*! @fn spvReflectEnumerateInputVariables + @brief If the module contains multiple entry points, this will only get + the input variables for the first one. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_variables is NULL, the module's input variable + count will be stored here. + If pp_variables is not NULL, *p_count must contain + the module's input variable count. + @param pp_variables If NULL, the module's input variable count will be + written to *p_count. + If non-NULL, pp_variables must point to an array with + *p_count entries, where pointers to the module's + input variables will be written. The caller must not + free the interface variables written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateInputVariables( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +); + +/*! @fn spvReflectEnumerateEntryPointInputVariables + @brief Enumerate the input variables for a given entry point. + @param entry_point The name of the entry point to get the input variables for. 
+ @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_variables is NULL, the entry point's input variable + count will be stored here. + If pp_variables is not NULL, *p_count must contain + the entry point's input variable count. + @param pp_variables If NULL, the entry point's input variable count will be + written to *p_count. + If non-NULL, pp_variables must point to an array with + *p_count entries, where pointers to the entry point's + input variables will be written. The caller must not + free the interface variables written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateEntryPointInputVariables( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +); + + +/*! @fn spvReflectEnumerateOutputVariables + @brief Note: If the module contains multiple entry points, this will only get + the output variables for the first one. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_variables is NULL, the module's output variable + count will be stored here. + If pp_variables is not NULL, *p_count must contain + the module's output variable count. + @param pp_variables If NULL, the module's output variable count will be + written to *p_count. + If non-NULL, pp_variables must point to an array with + *p_count entries, where pointers to the module's + output variables will be written. The caller must not + free the interface variables written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateOutputVariables( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +); + +/*! @fn spvReflectEnumerateEntryPointOutputVariables + @brief Enumerate the output variables for a given entry point. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The name of the entry point to get the output variables for. + @param p_count If pp_variables is NULL, the entry point's output variable + count will be stored here. + If pp_variables is not NULL, *p_count must contain + the entry point's output variable count. + @param pp_variables If NULL, the entry point's output variable count will be + written to *p_count. + If non-NULL, pp_variables must point to an array with + *p_count entries, where pointers to the entry point's + output variables will be written. The caller must not + free the interface variables written to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateEntryPointOutputVariables( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +); + + +/*! @fn spvReflectEnumeratePushConstantBlocks + @brief Note: If the module contains multiple entry points, this will only get + the push constant blocks for the first one. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_blocks is NULL, the module's push constant + block count will be stored here. + If pp_blocks is not NULL, *p_count must + contain the module's push constant block count. 
+ @param pp_blocks If NULL, the module's push constant block count + will be written to *p_count. + If non-NULL, pp_blocks must point to an + array with *p_count entries, where pointers to + the module's push constant blocks will be written. + The caller must not free the block variables written + to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumeratePushConstantBlocks( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +); +SPV_REFLECT_DEPRECATED("renamed to spvReflectEnumeratePushConstantBlocks") +SpvReflectResult spvReflectEnumeratePushConstants( + const SpvReflectShaderModule* p_module, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +); + +/*! @fn spvReflectEnumerateEntryPointPushConstantBlocks + @brief Enumerate the push constant blocks used in the static call tree of a + given entry point. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_count If pp_blocks is NULL, the entry point's push constant + block count will be stored here. + If pp_blocks is not NULL, *p_count must + contain the entry point's push constant block count. + @param pp_blocks If NULL, the entry point's push constant block count + will be written to *p_count. + If non-NULL, pp_blocks must point to an + array with *p_count entries, where pointers to + the entry point's push constant blocks will be written. + The caller must not free the block variables written + to this array. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of the + failure. + +*/ +SpvReflectResult spvReflectEnumerateEntryPointPushConstantBlocks( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +); + + +/*! @fn spvReflectGetDescriptorBinding + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param binding_number The "binding" value of the requested descriptor + binding. + @param set_number The "set" value of the requested descriptor binding. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the module contains a descriptor binding that + matches the provided [binding_number, set_number] + values, a pointer to that binding is returned. The + caller must not free this pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note If the module contains multiple desriptor bindings + with the same set and binding numbers, there are + no guarantees about which binding will be returned. + +*/ +const SpvReflectDescriptorBinding* spvReflectGetDescriptorBinding( + const SpvReflectShaderModule* p_module, + uint32_t binding_number, + uint32_t set_number, + SpvReflectResult* p_result +); + +/*! @fn spvReflectGetEntryPointDescriptorBinding + @brief Get the descriptor binding with the given binding number and set + number that is used in the static call tree of a certain entry + point. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the binding from. + @param binding_number The "binding" value of the requested descriptor + binding. 
+ @param set_number The "set" value of the requested descriptor binding. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the entry point contains a descriptor binding that + matches the provided [binding_number, set_number] + values, a pointer to that binding is returned. The + caller must not free this pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note If the entry point contains multiple desriptor bindings + with the same set and binding numbers, there are + no guarantees about which binding will be returned. + +*/ +const SpvReflectDescriptorBinding* spvReflectGetEntryPointDescriptorBinding( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t binding_number, + uint32_t set_number, + SpvReflectResult* p_result +); + + +/*! @fn spvReflectGetDescriptorSet + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param set_number The "set" value of the requested descriptor set. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the module contains a descriptor set with the + provided set_number, a pointer to that set is + returned. The caller must not free this pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. + +*/ +const SpvReflectDescriptorSet* spvReflectGetDescriptorSet( + const SpvReflectShaderModule* p_module, + uint32_t set_number, + SpvReflectResult* p_result +); + +/*! @fn spvReflectGetEntryPointDescriptorSet + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the descriptor set from. + @param set_number The "set" value of the requested descriptor set. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the entry point contains a descriptor set with the + provided set_number, a pointer to that set is + returned. The caller must not free this pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. + +*/ +const SpvReflectDescriptorSet* spvReflectGetEntryPointDescriptorSet( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t set_number, + SpvReflectResult* p_result +); + + +/* @fn spvReflectGetInputVariableByLocation + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param location The "location" value of the requested input variable. + A location of 0xFFFFFFFF will always return NULL + with *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the module contains an input interface variable + with the provided location value, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. 
+@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetInputVariableByLocation( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +); +SPV_REFLECT_DEPRECATED("renamed to spvReflectGetInputVariableByLocation") +const SpvReflectInterfaceVariable* spvReflectGetInputVariable( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetEntryPointInputVariableByLocation + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the input variable from. + @param location The "location" value of the requested input variable. + A location of 0xFFFFFFFF will always return NULL + with *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the entry point contains an input interface variable + with the provided location value, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableByLocation( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t location, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetInputVariableBySemantic + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param semantic The "semantic" value of the requested input variable. + A semantic of NULL will return NULL. + A semantic of "" will always return NULL with + *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the module contains an input interface variable + with the provided semantic, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetInputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* semantic, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetEntryPointInputVariableBySemantic + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the input variable from. + @param semantic The "semantic" value of the requested input variable. + A semantic of NULL will return NULL. + A semantic of "" will always return NULL with + *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the entry point contains an input interface variable + with the provided semantic, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. 
+@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetEntryPointInputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* entry_point, + const char* semantic, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetOutputVariableByLocation + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param location The "location" value of the requested output variable. + A location of 0xFFFFFFFF will always return NULL + with *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the module contains an output interface variable + with the provided location value, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetOutputVariableByLocation( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +); +SPV_REFLECT_DEPRECATED("renamed to spvReflectGetOutputVariableByLocation") +const SpvReflectInterfaceVariable* spvReflectGetOutputVariable( + const SpvReflectShaderModule* p_module, + uint32_t location, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetEntryPointOutputVariableByLocation + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the output variable from. + @param location The "location" value of the requested output variable. + A location of 0xFFFFFFFF will always return NULL + with *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the entry point contains an output interface variable + with the provided location value, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableByLocation( + const SpvReflectShaderModule* p_module, + const char* entry_point, + uint32_t location, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetOutputVariableBySemantic + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param semantic The "semantic" value of the requested output variable. + A semantic of NULL will return NULL. + A semantic of "" will always return NULL with + *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the module contains an output interface variable + with the provided semantic, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. 
+@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetOutputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* semantic, + SpvReflectResult* p_result +); + +/* @fn spvReflectGetEntryPointOutputVariableBySemantic + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the output variable from. + @param semantic The "semantic" value of the requested output variable. + A semantic of NULL will return NULL. + A semantic of "" will always return NULL with + *p_result == ELEMENT_NOT_FOUND. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the entry point contains an output interface variable + with the provided semantic, a pointer to that + variable is returned. The caller must not free this + pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. +@note + +*/ +const SpvReflectInterfaceVariable* spvReflectGetEntryPointOutputVariableBySemantic( + const SpvReflectShaderModule* p_module, + const char* entry_point, + const char* semantic, + SpvReflectResult* p_result +); + +/*! @fn spvReflectGetPushConstantBlock + + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param index The index of the desired block within the module's + array of push constant blocks. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the provided index is within range, a pointer to + the corresponding push constant block is returned. + The caller must not free this pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. + +*/ +const SpvReflectBlockVariable* spvReflectGetPushConstantBlock( + const SpvReflectShaderModule* p_module, + uint32_t index, + SpvReflectResult* p_result +); +SPV_REFLECT_DEPRECATED("renamed to spvReflectGetPushConstantBlock") +const SpvReflectBlockVariable* spvReflectGetPushConstant( + const SpvReflectShaderModule* p_module, + uint32_t index, + SpvReflectResult* p_result +); + +/*! @fn spvReflectGetEntryPointPushConstantBlock + @brief Get the push constant block corresponding to the given entry point. + As by the Vulkan specification there can be no more than one push + constant block used by a given entry point, so if there is one it will + be returned, otherwise NULL will be returned. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param entry_point The entry point to get the push constant block from. + @param p_result If successful, SPV_REFLECT_RESULT_SUCCESS will be + written to *p_result. Otherwise, a error code + indicating the cause of the failure will be stored + here. + @return If the provided index is within range, a pointer to + the corresponding push constant block is returned. + The caller must not free this pointer. + If no match can be found, or if an unrelated error + occurs, the return value will be NULL. Detailed + error results are written to *pResult. + +*/ +const SpvReflectBlockVariable* spvReflectGetEntryPointPushConstantBlock( + const SpvReflectShaderModule* p_module, + const char* entry_point, + SpvReflectResult* p_result +); + + +/*! 
@fn spvReflectChangeDescriptorBindingNumbers + @brief Assign new set and/or binding numbers to a descriptor binding. + In addition to updating the reflection data, this function modifies + the underlying SPIR-V bytecode. The updated code can be retrieved + with spvReflectGetCode(). If the binding is used in multiple + entry points within the module, it will be changed in all of them. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_binding Pointer to the descriptor binding to modify. + @param new_binding_number The new binding number to assign to the + provided descriptor binding. + To leave the binding number unchanged, pass + SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE. + @param new_set_number The new set number to assign to the + provided descriptor binding. Successfully changing + a descriptor binding's set number invalidates all + existing SpvReflectDescriptorBinding and + SpvReflectDescriptorSet pointers from this module. + To leave the set number unchanged, pass + SPV_REFLECT_SET_NUMBER_DONT_CHANGE. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of + the failure. +*/ +SpvReflectResult spvReflectChangeDescriptorBindingNumbers( + SpvReflectShaderModule* p_module, + const SpvReflectDescriptorBinding* p_binding, + uint32_t new_binding_number, + uint32_t new_set_number +); +SPV_REFLECT_DEPRECATED("Renamed to spvReflectChangeDescriptorBindingNumbers") +SpvReflectResult spvReflectChangeDescriptorBindingNumber( + SpvReflectShaderModule* p_module, + const SpvReflectDescriptorBinding* p_descriptor_binding, + uint32_t new_binding_number, + uint32_t optional_new_set_number +); + +/*! @fn spvReflectChangeDescriptorSetNumber + @brief Assign a new set number to an entire descriptor set (including + all descriptor bindings in that set). + In addition to updating the reflection data, this function modifies + the underlying SPIR-V bytecode. The updated code can be retrieved + with spvReflectGetCode(). If the descriptor set is used in + multiple entry points within the module, it will be modified in all + of them. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_set Pointer to the descriptor binding to modify. + @param new_set_number The new set number to assign to the + provided descriptor set, and all its descriptor + bindings. Successfully changing a descriptor + binding's set number invalidates all existing + SpvReflectDescriptorBinding and + SpvReflectDescriptorSet pointers from this module. + To leave the set number unchanged, pass + SPV_REFLECT_SET_NUMBER_DONT_CHANGE. + @return If successful, returns SPV_REFLECT_RESULT_SUCCESS. + Otherwise, the error code indicates the cause of + the failure. +*/ +SpvReflectResult spvReflectChangeDescriptorSetNumber( + SpvReflectShaderModule* p_module, + const SpvReflectDescriptorSet* p_set, + uint32_t new_set_number +); + +/*! @fn spvReflectChangeInputVariableLocation + @brief Assign a new location to an input interface variable. + In addition to updating the reflection data, this function modifies + the underlying SPIR-V bytecode. The updated code can be retrieved + with spvReflectGetCode(). + It is the caller's responsibility to avoid assigning the same + location to multiple input variables. If the input variable is used + by multiple entry points in the module, it will be changed in all of + them. + @param p_module Pointer to an instance of SpvReflectShaderModule. + @param p_input_variable Pointer to the input variable to update. 
+ @param new_location The new location to assign to p_input_variable.
+ @return If successful, returns SPV_REFLECT_RESULT_SUCCESS.
+ Otherwise, the error code indicates the cause of
+ the failure.
+
+*/
+SpvReflectResult spvReflectChangeInputVariableLocation(
+ SpvReflectShaderModule* p_module,
+ const SpvReflectInterfaceVariable* p_input_variable,
+ uint32_t new_location
+);
+
+
+/*! @fn spvReflectChangeOutputVariableLocation
+ @brief Assign a new location to an output interface variable.
+ In addition to updating the reflection data, this function modifies
+ the underlying SPIR-V bytecode. The updated code can be retrieved
+ with spvReflectGetCode().
+ It is the caller's responsibility to avoid assigning the same
+ location to multiple output variables. If the output variable is used
+ by multiple entry points in the module, it will be changed in all of
+ them.
+ @param p_module Pointer to an instance of SpvReflectShaderModule.
+ @param p_output_variable Pointer to the output variable to update.
+ @param new_location The new location to assign to p_output_variable.
+ @return If successful, returns SPV_REFLECT_RESULT_SUCCESS.
+ Otherwise, the error code indicates the cause of
+ the failure.
+
+*/
+SpvReflectResult spvReflectChangeOutputVariableLocation(
+ SpvReflectShaderModule* p_module,
+ const SpvReflectInterfaceVariable* p_output_variable,
+ uint32_t new_location
+);
+
+
+/*! @fn spvReflectSourceLanguage
+
+ @param source_lang The source language code.
+ @return Returns the name of the source language specified in \a source_lang.
+ The caller must not free the memory associated with this string.
+*/
+const char* spvReflectSourceLanguage(SpvSourceLanguage source_lang);
+
+#if defined(__cplusplus)
+};
+#endif
+
+#if defined(__cplusplus)
+#include <cstdlib>
+#include <string>
+#include <vector>
+
+namespace spv_reflect {
+
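+// Illustrative sketch (not part of the original SPIRV-Reflect header): typical use
+// of the C++ wrapper declared below, including the two-call enumeration idiom
+// described in the function comments above. "spirv" is a hypothetical
+// std::vector<uint8_t> holding a valid SPIR-V binary; error handling is reduced
+// to asserts.
+//
+//   spv_reflect::ShaderModule module(spirv.size(), spirv.data());
+//   assert(module.GetResult() == SPV_REFLECT_RESULT_SUCCESS);
+//
+//   uint32_t count = 0;
+//   module.EnumerateDescriptorBindings(&count, nullptr);          // first call: query count
+//   std::vector<SpvReflectDescriptorBinding*> bindings(count);
+//   module.EnumerateDescriptorBindings(&count, bindings.data());  // second call: fill pointers
+//
+//   // The returned pointers are owned by the module and must not be freed.
+
+/*!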
\class ShaderModule + +*/ +class ShaderModule { +public: + ShaderModule(); + ShaderModule(size_t size, const void* p_code); + ShaderModule(const std::vector& code); + ShaderModule(const std::vector& code); + ~ShaderModule(); + + SpvReflectResult GetResult() const; + + const SpvReflectShaderModule& GetShaderModule() const; + + uint32_t GetCodeSize() const; + const uint32_t* GetCode() const; + + const char* GetEntryPointName() const; + + const char* GetSourceFile() const; + + uint32_t GetEntryPointCount() const; + const char* GetEntryPointName(uint32_t index) const; + + SpvReflectShaderStageFlagBits GetShaderStage() const; + SPV_REFLECT_DEPRECATED("Renamed to GetShaderStage") + SpvReflectShaderStageFlagBits GetVulkanShaderStage() const { + return GetShaderStage(); + } + + SpvReflectResult EnumerateDescriptorBindings(uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) const; + SpvReflectResult EnumerateEntryPointDescriptorBindings(const char* entry_point, uint32_t* p_count, SpvReflectDescriptorBinding** pp_bindings) const; + SpvReflectResult EnumerateDescriptorSets( uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) const ; + SpvReflectResult EnumerateEntryPointDescriptorSets(const char* entry_point, uint32_t* p_count, SpvReflectDescriptorSet** pp_sets) const ; + SpvReflectResult EnumerateInputVariables(uint32_t* p_count,SpvReflectInterfaceVariable** pp_variables) const; + SpvReflectResult EnumerateEntryPointInputVariables(const char* entry_point, uint32_t* p_count,SpvReflectInterfaceVariable** pp_variables) const; + SpvReflectResult EnumerateOutputVariables(uint32_t* p_count,SpvReflectInterfaceVariable** pp_variables) const; + SpvReflectResult EnumerateEntryPointOutputVariables(const char* entry_point, uint32_t* p_count,SpvReflectInterfaceVariable** pp_variables) const; + SpvReflectResult EnumeratePushConstantBlocks(uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const; + SpvReflectResult EnumerateEntryPointPushConstantBlocks(const char* entry_point, uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const; + SPV_REFLECT_DEPRECATED("Renamed to EnumeratePushConstantBlocks") + SpvReflectResult EnumeratePushConstants(uint32_t* p_count, SpvReflectBlockVariable** pp_blocks) const { + return EnumeratePushConstantBlocks(p_count, pp_blocks); + } + + const SpvReflectDescriptorBinding* GetDescriptorBinding(uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result = nullptr) const; + const SpvReflectDescriptorBinding* GetEntryPointDescriptorBinding(const char* entry_point, uint32_t binding_number, uint32_t set_number, SpvReflectResult* p_result = nullptr) const; + const SpvReflectDescriptorSet* GetDescriptorSet(uint32_t set_number, SpvReflectResult* p_result = nullptr) const; + const SpvReflectDescriptorSet* GetEntryPointDescriptorSet(const char* entry_point, uint32_t set_number, SpvReflectResult* p_result = nullptr) const; + const SpvReflectInterfaceVariable* GetInputVariableByLocation(uint32_t location, SpvReflectResult* p_result = nullptr) const; + SPV_REFLECT_DEPRECATED("Renamed to GetInputVariableByLocation") + const SpvReflectInterfaceVariable* GetInputVariable(uint32_t location, SpvReflectResult* p_result = nullptr) const { + return GetInputVariableByLocation(location, p_result); + } + const SpvReflectInterfaceVariable* GetEntryPointInputVariableByLocation(const char* entry_point, uint32_t location, SpvReflectResult* p_result = nullptr) const; + const SpvReflectInterfaceVariable* GetInputVariableBySemantic(const char* semantic, SpvReflectResult* p_result 
= nullptr) const; + const SpvReflectInterfaceVariable* GetEntryPointInputVariableBySemantic(const char* entry_point, const char* semantic, SpvReflectResult* p_result = nullptr) const; + const SpvReflectInterfaceVariable* GetOutputVariableByLocation(uint32_t location, SpvReflectResult* p_result = nullptr) const; + SPV_REFLECT_DEPRECATED("Renamed to GetOutputVariableByLocation") + const SpvReflectInterfaceVariable* GetOutputVariable(uint32_t location, SpvReflectResult* p_result = nullptr) const { + return GetOutputVariableByLocation(location, p_result); + } + const SpvReflectInterfaceVariable* GetEntryPointOutputVariableByLocation(const char* entry_point, uint32_t location, SpvReflectResult* p_result = nullptr) const; + const SpvReflectInterfaceVariable* GetOutputVariableBySemantic(const char* semantic, SpvReflectResult* p_result = nullptr) const; + const SpvReflectInterfaceVariable* GetEntryPointOutputVariableBySemantic(const char* entry_point, const char* semantic, SpvReflectResult* p_result = nullptr) const; + const SpvReflectBlockVariable* GetPushConstantBlock(uint32_t index, SpvReflectResult* p_result = nullptr) const; + SPV_REFLECT_DEPRECATED("Renamed to GetPushConstantBlock") + const SpvReflectBlockVariable* GetPushConstant(uint32_t index, SpvReflectResult* p_result = nullptr) const { + return GetPushConstantBlock(index, p_result); + } + const SpvReflectBlockVariable* GetEntryPointPushConstantBlock(const char* entry_point, SpvReflectResult* p_result = nullptr) const; + + SpvReflectResult ChangeDescriptorBindingNumbers(const SpvReflectDescriptorBinding* p_binding, + uint32_t new_binding_number = SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE, + uint32_t optional_new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE); + SPV_REFLECT_DEPRECATED("Renamed to ChangeDescriptorBindingNumbers") + SpvReflectResult ChangeDescriptorBindingNumber(const SpvReflectDescriptorBinding* p_binding, uint32_t new_binding_number = SPV_REFLECT_BINDING_NUMBER_DONT_CHANGE, + uint32_t new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE) { + return ChangeDescriptorBindingNumbers(p_binding, new_binding_number, new_set_number); + } + SpvReflectResult ChangeDescriptorSetNumber(const SpvReflectDescriptorSet* p_set, uint32_t new_set_number = SPV_REFLECT_SET_NUMBER_DONT_CHANGE); + SpvReflectResult ChangeInputVariableLocation(const SpvReflectInterfaceVariable* p_input_variable, uint32_t new_location); + SpvReflectResult ChangeOutputVariableLocation(const SpvReflectInterfaceVariable* p_output_variable, uint32_t new_location); + +private: + mutable SpvReflectResult m_result = SPV_REFLECT_RESULT_NOT_READY; + SpvReflectShaderModule m_module = {}; +}; + + +// ================================================================================================= +// ShaderModule +// ================================================================================================= + +/*! @fn ShaderModule + +*/ +inline ShaderModule::ShaderModule() {} + + +/*! @fn ShaderModule + + @param size + @param p_code + +*/ +inline ShaderModule::ShaderModule(size_t size, const void* p_code) { + m_result = spvReflectCreateShaderModule( + size, + p_code, + &m_module); +} + +/*! @fn ShaderModule + + @param code + +*/ +inline ShaderModule::ShaderModule(const std::vector& code) { + m_result = spvReflectCreateShaderModule( + code.size(), + code.data(), + &m_module); +} + +/*! 
@fn ShaderModule + + @param code + +*/ +inline ShaderModule::ShaderModule(const std::vector& code) { + m_result = spvReflectCreateShaderModule( + code.size() * sizeof(uint32_t), + code.data(), + &m_module); +} + +/*! @fn ~ShaderModule + +*/ +inline ShaderModule::~ShaderModule() { + spvReflectDestroyShaderModule(&m_module); +} + + +/*! @fn GetResult + + @return + +*/ +inline SpvReflectResult ShaderModule::GetResult() const { + return m_result; +} + + +/*! @fn GetShaderModule + + @return + +*/ +inline const SpvReflectShaderModule& ShaderModule::GetShaderModule() const { + return m_module; +} + + +/*! @fn GetCodeSize + + @return + + */ +inline uint32_t ShaderModule::GetCodeSize() const { + return spvReflectGetCodeSize(&m_module); +} + + +/*! @fn GetCode + + @return + +*/ +inline const uint32_t* ShaderModule::GetCode() const { + return spvReflectGetCode(&m_module); +} + + +/*! @fn GetEntryPoint + + @return Returns entry point + +*/ +inline const char* ShaderModule::GetEntryPointName() const { + return this->GetEntryPointName(0); +} + +/*! @fn GetEntryPoint + + @return Returns entry point + +*/ +inline const char* ShaderModule::GetSourceFile() const { + return m_module.source_file; +} + +/*! @fn GetEntryPointCount + + @param + @return +*/ +inline uint32_t ShaderModule::GetEntryPointCount() const { + return m_module.entry_point_count; +} + +/*! @fn GetEntryPointName + + @param index + @return +*/ +inline const char* ShaderModule::GetEntryPointName(uint32_t index) const { + return m_module.entry_points[index].name; +} + +/*! @fn GetShaderStage + + @return Returns Vulkan shader stage + +*/ +inline SpvReflectShaderStageFlagBits ShaderModule::GetShaderStage() const { + return m_module.shader_stage; +} + +/*! @fn EnumerateDescriptorBindings + + @param count + @param p_binding_numbers + @param pp_bindings + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateDescriptorBindings( + uint32_t* p_count, + SpvReflectDescriptorBinding** pp_bindings +) const +{ + m_result = spvReflectEnumerateDescriptorBindings( + &m_module, + p_count, + pp_bindings); + return m_result; +} + +/*! @fn EnumerateEntryPointDescriptorBindings + + @param entry_point + @param count + @param pp_bindings + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateEntryPointDescriptorBindings( + const char* entry_point, + uint32_t* p_count, + SpvReflectDescriptorBinding** pp_bindings +) const +{ + m_result = spvReflectEnumerateEntryPointDescriptorBindings( + &m_module, + entry_point, + p_count, + pp_bindings); + return m_result; +} + + +/*! @fn EnumerateDescriptorSets + + @param count + @param pp_sets + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateDescriptorSets( + uint32_t* p_count, + SpvReflectDescriptorSet** pp_sets +) const +{ + m_result = spvReflectEnumerateDescriptorSets( + &m_module, + p_count, + pp_sets); + return m_result; +} + +/*! @fn EnumerateEntryPointDescriptorSets + + @param entry_point + @param count + @param pp_sets + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateEntryPointDescriptorSets( + const char* entry_point, + uint32_t* p_count, + SpvReflectDescriptorSet** pp_sets +) const +{ + m_result = spvReflectEnumerateEntryPointDescriptorSets( + &m_module, + entry_point, + p_count, + pp_sets); + return m_result; +} + + +/*! 
@fn EnumerateInputVariables + + @param count + @param pp_variables + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateInputVariables( + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) const +{ + m_result = spvReflectEnumerateInputVariables( + &m_module, + p_count, + pp_variables); + return m_result; +} + +/*! @fn EnumerateEntryPointInputVariables + + @param entry_point + @param count + @param pp_variables + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateEntryPointInputVariables( + const char* entry_point, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) const +{ + m_result = spvReflectEnumerateEntryPointInputVariables( + &m_module, + entry_point, + p_count, + pp_variables); + return m_result; +} + + +/*! @fn EnumerateOutputVariables + + @param count + @param pp_variables + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateOutputVariables( + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) const +{ + m_result = spvReflectEnumerateOutputVariables( + &m_module, + p_count, + pp_variables); + return m_result; +} + +/*! @fn EnumerateEntryPointOutputVariables + + @param entry_point + @param count + @param pp_variables + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateEntryPointOutputVariables( + const char* entry_point, + uint32_t* p_count, + SpvReflectInterfaceVariable** pp_variables +) const +{ + m_result = spvReflectEnumerateEntryPointOutputVariables( + &m_module, + entry_point, + p_count, + pp_variables); + return m_result; +} + + +/*! @fn EnumeratePushConstantBlocks + + @param count + @param pp_blocks + @return + +*/ +inline SpvReflectResult ShaderModule::EnumeratePushConstantBlocks( + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +) const +{ + m_result = spvReflectEnumeratePushConstantBlocks( + &m_module, + p_count, + pp_blocks); + return m_result; +} + +/*! @fn EnumerateEntryPointPushConstantBlocks + + @param entry_point + @param count + @param pp_blocks + @return + +*/ +inline SpvReflectResult ShaderModule::EnumerateEntryPointPushConstantBlocks( + const char* entry_point, + uint32_t* p_count, + SpvReflectBlockVariable** pp_blocks +) const +{ + m_result = spvReflectEnumerateEntryPointPushConstantBlocks( + &m_module, + entry_point, + p_count, + pp_blocks); + return m_result; +} + + +/*! @fn GetDescriptorBinding + + @param binding_number + @param set_number + @param p_result + @return + +*/ +inline const SpvReflectDescriptorBinding* ShaderModule::GetDescriptorBinding( + uint32_t binding_number, + uint32_t set_number, + SpvReflectResult* p_result +) const +{ + return spvReflectGetDescriptorBinding( + &m_module, + binding_number, + set_number, + p_result); +} + +/*! @fn GetEntryPointDescriptorBinding + + @param entry_point + @param binding_number + @param set_number + @param p_result + @return + +*/ +inline const SpvReflectDescriptorBinding* ShaderModule::GetEntryPointDescriptorBinding( + const char* entry_point, + uint32_t binding_number, + uint32_t set_number, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointDescriptorBinding( + &m_module, + entry_point, + binding_number, + set_number, + p_result); +} + + +/*! @fn GetDescriptorSet + + @param set_number + @param p_result + @return + +*/ +inline const SpvReflectDescriptorSet* ShaderModule::GetDescriptorSet( + uint32_t set_number, + SpvReflectResult* p_result +) const +{ + return spvReflectGetDescriptorSet( + &m_module, + set_number, + p_result); +} + +/*! 
@fn GetEntryPointDescriptorSet + + @param entry_point + @param set_number + @param p_result + @return + +*/ +inline const SpvReflectDescriptorSet* ShaderModule::GetEntryPointDescriptorSet( + const char* entry_point, + uint32_t set_number, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointDescriptorSet( + &m_module, + entry_point, + set_number, + p_result); +} + + +/*! @fn GetInputVariable + + @param location + @param p_result + @return + +*/ +inline const SpvReflectInterfaceVariable* ShaderModule::GetInputVariableByLocation( + uint32_t location, + SpvReflectResult* p_result +) const +{ + return spvReflectGetInputVariableByLocation( + &m_module, + location, + p_result); +} +inline const SpvReflectInterfaceVariable* ShaderModule::GetInputVariableBySemantic( + const char* semantic, + SpvReflectResult* p_result +) const +{ + return spvReflectGetInputVariableBySemantic( + &m_module, + semantic, + p_result); +} + +/*! @fn GetEntryPointInputVariable + + @param entry_point + @param location + @param p_result + @return + +*/ +inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointInputVariableByLocation( + const char* entry_point, + uint32_t location, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointInputVariableByLocation( + &m_module, + entry_point, + location, + p_result); +} +inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointInputVariableBySemantic( + const char* entry_point, + const char* semantic, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointInputVariableBySemantic( + &m_module, + entry_point, + semantic, + p_result); +} + + +/*! @fn GetOutputVariable + + @param location + @param p_result + @return + +*/ +inline const SpvReflectInterfaceVariable* ShaderModule::GetOutputVariableByLocation( + uint32_t location, + SpvReflectResult* p_result +) const +{ + return spvReflectGetOutputVariableByLocation( + &m_module, + location, + p_result); +} +inline const SpvReflectInterfaceVariable* ShaderModule::GetOutputVariableBySemantic( + const char* semantic, + SpvReflectResult* p_result +) const +{ + return spvReflectGetOutputVariableBySemantic(&m_module, + semantic, + p_result); +} + +/*! @fn GetEntryPointOutputVariable + + @param entry_point + @param location + @param p_result + @return + +*/ +inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointOutputVariableByLocation( + const char* entry_point, + uint32_t location, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointOutputVariableByLocation( + &m_module, + entry_point, + location, + p_result); +} +inline const SpvReflectInterfaceVariable* ShaderModule::GetEntryPointOutputVariableBySemantic( + const char* entry_point, + const char* semantic, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointOutputVariableBySemantic( + &m_module, + entry_point, + semantic, + p_result); +} + + +/*! @fn GetPushConstant + + @param index + @param p_result + @return + +*/ +inline const SpvReflectBlockVariable* ShaderModule::GetPushConstantBlock( + uint32_t index, + SpvReflectResult* p_result +) const +{ + return spvReflectGetPushConstantBlock( + &m_module, + index, + p_result); +} + +/*! 
@fn GetEntryPointPushConstant + + @param entry_point + @param index + @param p_result + @return + +*/ +inline const SpvReflectBlockVariable* ShaderModule::GetEntryPointPushConstantBlock( + const char* entry_point, + SpvReflectResult* p_result +) const +{ + return spvReflectGetEntryPointPushConstantBlock( + &m_module, + entry_point, + p_result); +} + + +/*! @fn ChangeDescriptorBindingNumbers + + @param p_binding + @param new_binding_number + @param new_set_number + @return + +*/ +inline SpvReflectResult ShaderModule::ChangeDescriptorBindingNumbers( + const SpvReflectDescriptorBinding* p_binding, + uint32_t new_binding_number, + uint32_t new_set_number +) +{ + return spvReflectChangeDescriptorBindingNumbers( + &m_module, + p_binding, + new_binding_number, + new_set_number); +} + + +/*! @fn ChangeDescriptorSetNumber + + @param p_set + @param new_set_number + @return + +*/ +inline SpvReflectResult ShaderModule::ChangeDescriptorSetNumber( + const SpvReflectDescriptorSet* p_set, + uint32_t new_set_number +) +{ + return spvReflectChangeDescriptorSetNumber( + &m_module, + p_set, + new_set_number); +} + + +/*! @fn ChangeInputVariableLocation + + @param p_input_variable + @param new_location + @return + +*/ +inline SpvReflectResult ShaderModule::ChangeInputVariableLocation( + const SpvReflectInterfaceVariable* p_input_variable, + uint32_t new_location) +{ + return spvReflectChangeInputVariableLocation( + &m_module, + p_input_variable, + new_location); +} + + +/*! @fn ChangeOutputVariableLocation + + @param p_input_variable + @param new_location + @return + +*/ +inline SpvReflectResult ShaderModule::ChangeOutputVariableLocation( + const SpvReflectInterfaceVariable* p_output_variable, + uint32_t new_location) +{ + return spvReflectChangeOutputVariableLocation( + &m_module, + p_output_variable, + new_location); +} + +} // namespace spv_reflect +#endif // defined(__cplusplus) +#endif // SPIRV_REFLECT_H diff --git a/src/third_party/vma/vk_mem_alloc.cpp b/src/third_party/vma/vk_mem_alloc.cpp new file mode 100644 index 0000000..9534c5a --- /dev/null +++ b/src/third_party/vma/vk_mem_alloc.cpp @@ -0,0 +1,3 @@ +#define VMA_IMPLEMENTATION +#pragma clang diagnostic ignored "-Wnullability-completeness" +#include "vk_mem_alloc.h" diff --git a/src/third_party/vma/vk_mem_alloc.h b/src/third_party/vma/vk_mem_alloc.h new file mode 100644 index 0000000..7aca6d3 --- /dev/null +++ b/src/third_party/vma/vk_mem_alloc.h @@ -0,0 +1,19229 @@ +// +// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H +#define AMD_VULKAN_MEMORY_ALLOCATOR_H + +/** \mainpage Vulkan Memory Allocator + +Version 3.0.0-development (2020-11-03) + +Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved. \n +License: MIT + +Documentation of all members: vk_mem_alloc.h + +\section main_table_of_contents Table of contents + +- User guide + - \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) + - \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - \subpage memory_mapping + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) + - \subpage resource_aliasing + - \subpage custom_memory_pools + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - [Buddy allocation algorithm](@ref buddy_algorithm) + - \subpage defragmentation + - [Defragmenting CPU memory](@ref defragmentation_cpu) + - [Defragmenting GPU memory](@ref defragmentation_gpu) + - [Additional notes](@ref defragmentation_additional_notes) + - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm) + - \subpage lost_allocations + - \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) + - \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) + - \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - \subpage record_and_replay +- \subpage usage_patterns + - [Common mistakes](@ref usage_patterns_common_mistakes) + - [Simple patterns](@ref usage_patterns_simple) + - [Advanced patterns](@ref usage_patterns_advanced) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) + - \subpage 
vk_khr_dedicated_allocation
+ - \subpage enabling_buffer_device_address
+ - \subpage vk_amd_device_coherent_memory
+- \subpage general_considerations
+ - [Thread safety](@ref general_considerations_thread_safety)
+ - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
+ - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
+ - [Features not supported](@ref general_considerations_features_not_supported)
+
+\section main_see_also See also
+
+- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
+- [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
+
+
+
+
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+You don't need to build it as a separate library project.
+You can add this file directly to your project and submit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+as it tends to be in the case of inline functions or C++ templates.
+It means that the implementation is bundled with the interface in a single file and needs to be enabled using a preprocessor macro.
+If you don't do it properly, you will get linker errors.
+
+To do it properly:
+
+-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+ This includes declarations of all members of the library.
+-# In exactly one CPP file, define the following macro before this include.
+ It also enables the internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose.
+
+Note on language: This library is written in C++, but has a C-compatible interface.
+Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+
+Please note that this library includes header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
+
+You may need to configure the way you import Vulkan functions.
+
+- By default, VMA assumes you link statically with the Vulkan API. If this is not the case,
+ `#define VMA_STATIC_VULKAN_FUNCTIONS 0` before the `#include` of the VMA implementation and use another way.
+- You can `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1` and make sure `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` globals are defined.
+ All the remaining Vulkan functions will be fetched automatically.
+- Finally, you can provide your own pointers to all Vulkan functions needed by VMA using structure member
+ VmaAllocatorCreateInfo::pVulkanFunctions, if you fetched them in some custom way, e.g. using a loader like [Volk](https://github.com/zeux/volk).
+
+
+\section quick_start_initialization Initialization
+
+At program startup:
+
+-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
+-# Fill a VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
+ calling vmaCreateAllocator().
+ +\code +VmaAllocatorCreateInfo allocatorInfo = {}; +allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2; +allocatorInfo.physicalDevice = physicalDevice; +allocatorInfo.device = device; +allocatorInfo.instance = instance; + +VmaAllocator allocator; +vmaCreateAllocator(&allocatorInfo, &allocator); +\endcode + +Only members `physicalDevice`, `device`, `instance` are required. +However, you should inform the library which Vulkan version do you use by setting +VmaAllocatorCreateInfo::vulkanApiVersion and which extensions did you enable +by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address). +Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions. + + +\section quick_start_resource_allocation Resource allocation + +When you want to create a buffer or image: + +-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure. +-# Fill VmaAllocationCreateInfo structure. +-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory + already allocated and bound to it. + +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +Don't forget to destroy your objects when no longer needed: + +\code +vmaDestroyBuffer(allocator, buffer, allocation); +vmaDestroyAllocator(allocator); +\endcode + + +\page choosing_memory_type Choosing memory type + +Physical devices in Vulkan support various combinations of memory heaps and +types. Help with choosing correct and optimal memory type for your specific +resource is one of the key features of this library. You can use it by filling +appropriate members of VmaAllocationCreateInfo structure, as described below. +You can also combine multiple methods. + +-# If you just want to find memory type index that meets your requirements, you + can use function: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(), + vmaFindMemoryTypeIndexForImageInfo(). +-# If you want to allocate a region of device memory without association with any + specific image or buffer, you can use function vmaAllocateMemory(). Usage of + this function is not recommended and usually not needed. + vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, + which may be useful for sparse binding. +-# If you already have a buffer or an image created, you want to allocate memory + for it and then you will bind it yourself, you can use function + vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). + For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() + or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). +-# If you want to create a buffer or an image, allocate memory for it and bind + them together, all in one call, you can use function vmaCreateBuffer(), + vmaCreateImage(). This is the easiest and recommended way to use this library. + +When using 3. or 4., the library internally queries Vulkan for memory types +supported for that buffer or image (function `vkGetBufferMemoryRequirements()`) +and uses only one of these types. 
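+
+As an illustrative sketch (not from the original VMA documentation), method 1. from the
+list above might look like this; `allocator` is assumed to be a valid #VmaAllocator:
+
+\code
+// Illustrative only: find a suitable memory type index without allocating anything yet.
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferInfo, &allocInfo, &memTypeIndex);
+// On success, memTypeIndex can be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
+\endcode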
+ +If no memory type can be found that meets all the requirements, these functions +return `VK_ERROR_FEATURE_NOT_PRESENT`. + +You can leave VmaAllocationCreateInfo structure completely filled with zeros. +It means no requirements are specified for memory type. +It is valid, although not very useful. + +\section choosing_memory_type_usage Usage + +The easiest way to specify memory requirements is to fill member +VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage. +It defines high level, common usage types. +For more details, see description of this enum. + +For example, if you want to create a uniform buffer that will be filled using +transfer only once or infrequently and used for rendering every frame, you can +do it using following code: + +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +\section choosing_memory_type_required_preferred_flags Required and preferred flags + +You can specify more detailed requirements by filling members +VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags +with a combination of bits from enum `VkMemoryPropertyFlags`. For example, +if you want to create a buffer that will be persistently mapped on host (so it +must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`, +use following code: + +\code +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; +allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; +allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +A memory type is chosen that has all the required flags and as many preferred +flags set as possible. + +If you use VmaAllocationCreateInfo::usage, it is just internally converted to +a set of required and preferred flags. + +\section choosing_memory_type_explicit_memory_types Explicit memory types + +If you inspected memory types available on the physical device and you have +a preference for memory types that you want to use, you can fill member +VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set +means that a memory type with that index is allowed to be used for the +allocation. Special value 0, just like `UINT32_MAX`, means there are no +restrictions to memory type index. + +Please note that this member is NOT just a memory type index. +Still you can use it to choose just one, specific memory type. 
+For example, if you already determined that your buffer should be created in +memory type 2, use following code: + +\code +uint32_t memoryTypeIndex = 2; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.memoryTypeBits = 1u << memoryTypeIndex; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + + +\section choosing_memory_type_custom_memory_pools Custom memory pools + +If you allocate from custom memory pool, all the ways of specifying memory +requirements described above are not applicable and the aforementioned members +of VmaAllocationCreateInfo structure are ignored. Memory type is selected +explicitly when creating the pool and then used to make all the allocations from +that pool. For further details, see \ref custom_memory_pools. + +\section choosing_memory_type_dedicated_allocations Dedicated allocations + +Memory for allocations is reserved out of larger block of `VkDeviceMemory` +allocated from Vulkan internally. That's the main feature of this whole library. +You can still request a separate memory block to be created for an allocation, +just like you would do in a trivial solution without using any allocator. +In that case, a buffer or image is always bound to that memory at offset 0. +This is called a "dedicated allocation". +You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +The library can also internally decide to use dedicated allocation in some cases, e.g.: + +- When the size of the allocation is large. +- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled + and it reports that dedicated allocation is required or recommended for the resource. +- When allocation of next big memory block fails due to not enough device memory, + but allocation with the exact requested size succeeds. + + +\page memory_mapping Memory mapping + +To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`, +to be able to read from it or write to it in CPU code. +Mapping is possible only of memory allocated from a memory type that has +`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. +Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose. +You can use them directly with memory allocated by this library, +but it is not recommended because of following issue: +Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed. +This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan. +Because of this, Vulkan Memory Allocator provides following facilities: + +\section memory_mapping_mapping_functions Mapping functions + +The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory(). +They are safer and more convenient to use than standard Vulkan functions. +You can map an allocation multiple times simultaneously - mapping is reference-counted internally. +You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block. +The way it's implemented is that the library always maps entire memory block, not just region of the allocation. +For further details, see description of vmaMapMemory() function. +Example: + +\code +// Having these objects initialized: + +struct ConstantBuffer +{ + ... 
+};
+ConstantBuffer constantBufferData;
+
+VmaAllocator allocator;
+VkBuffer constantBuffer;
+VmaAllocation constantBufferAllocation;
+
+// You can map and fill your buffer using following code:
+
+void* mappedData;
+vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+vmaUnmapMemory(allocator, constantBufferAllocation);
+\endcode
+
+When mapping, you may see a warning from Vulkan validation layer similar to this one:
+
+Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.
+
+It happens because the library maps entire `VkDeviceMemory` block, where different
+types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
+You can safely ignore it if you are sure you access only memory of the intended
+object that you wanted to map.
+
+
+\section memory_mapping_persistently_mapped_memory Persistently mapped memory
+
+Keeping your memory persistently mapped is generally OK in Vulkan.
+You don't need to unmap it before using its data on the GPU.
+The library provides a special feature designed for that:
+Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
+VmaAllocationCreateInfo::flags stay mapped all the time,
+so you can just access the CPU pointer to it any time
+without a need to call any "map" or "unmap" function.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+// Buffer is already mapped. You can access its memory.
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+\endcode
+
+There are some exceptions though, when you should consider mapping memory only for a short period of time:
+
+- When operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2),
+  device is discrete AMD GPU,
+  and memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory
+  (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU),
+  then whenever a memory block allocated from this memory type stays mapped
+  for the time of any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, this
+  block is migrated by WDDM to system RAM, which degrades performance. It doesn't
+  matter if that particular memory block is actually used by the command buffer
+  being submitted.
+- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175)
+  which requires unmapping before GPU can see updated texture.
+- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.
+
+\section memory_mapping_cache_control Cache flush and invalidate
+
+Memory in Vulkan doesn't need to be unmapped before using it on GPU,
+but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually **invalidate** cache before reading from a mapped pointer
+and **flush** cache after writing to a mapped pointer.
+Map/unmap operations don't do that automatically.
+Vulkan provides following functions for this purpose: `vkFlushMappedMemoryRanges()` and
+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that refer to given allocation object: vmaFlushAllocation(),
+vmaInvalidateAllocation(),
+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always a multiple of
+`nonCoherentAtomSize` and two different allocations never share same "line" of this size.
+
+Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`.
+
+Also, Windows drivers from all 3 **PC** GPU vendors (AMD, Intel, NVIDIA)
+currently provide `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on this platform you may not need to bother.
+
+\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable
+
+It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping)
+even though it wasn't explicitly requested.
+For example, application may work on integrated graphics with unified memory (like Intel) or
+allocation from video memory might have failed, so the library chose system memory as fallback.
+
+You can detect this case and map such allocation to access its memory on CPU directly,
+instead of launching a transfer operation.
+In order to do that: inspect `allocInfo.memoryType`, call vmaGetMemoryTypeProperties(),
+and look for `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag in properties of that memory type.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // Allocation ended up in mappable memory. You can map it and access it directly.
+    void* mappedData;
+    vmaMapMemory(allocator, alloc, &mappedData);
+    memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+    vmaUnmapMemory(allocator, alloc);
+}
+else
+{
+    // Allocation ended up in non-mappable memory.
+    // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+You can even use #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations
+that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY).
+If the allocation ends up in memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly.
+If not, the flag is just ignored.
+Example: + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +if(allocInfo.pMappedData != nullptr) +{ + // Allocation ended up in mappable memory. + // It's persistently mapped. You can access it directly. + memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); +} +else +{ + // Allocation ended up in non-mappable memory. + // You need to create CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer. +} +\endcode + + +\page staying_within_budget Staying within budget + +When developing a graphics-intensive game or program, it is important to avoid allocating +more GPU memory than it's physically available. When the memory is over-committed, +various bad things can happen, depending on the specific GPU, graphics driver, and +operating system: + +- It may just work without any problems. +- The application may slow down because some memory blocks are moved to system RAM + and the GPU has to access them through PCI Express bus. +- A new allocation may take very long time to complete, even few seconds, and possibly + freeze entire system. +- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST` + returned somewhere later. + +\section staying_within_budget_querying_for_budget Querying for budget + +To query for current memory usage and available budget, use function vmaGetBudget(). +Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap. + +Please note that this function returns different information and works faster than +vmaCalculateStats(). vmaGetBudget() can be called every frame or even before every +allocation, while vmaCalculateStats() is intended to be used rarely, +only to obtain statistical information, e.g. for debugging purposes. + +It is recommended to use VK_EXT_memory_budget device extension to obtain information +about the budget from Vulkan device. VMA is able to use this extension automatically. +When not enabled, the allocator behaves same way, but then it estimates current usage +and available budget based on its internal information and Vulkan memory heap sizes, +which may be less precise. In order to use this extension: + +1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2 + required by it are available and enable them. Please note that the first is a device + extension and the second is instance extension! +2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object. +3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from + Vulkan inside of it to avoid overhead of querying it with every allocation. + +\section staying_within_budget_controlling_memory_usage Controlling memory usage + +There are many ways in which you can try to stay within the budget. + +First, when making new allocation requires allocating a new memory block, the library +tries not to exceed the budget automatically. If a block with default recommended size +(e.g. 
256 MB) would go over budget, a smaller block is allocated, possibly even
+dedicated memory for just this resource.
+
+If the size of the requested resource plus current memory usage is more than the
+budget, by default the library still tries to create it, leaving it to the Vulkan
+implementation whether the allocation succeeds or fails. You can change this behavior
+by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
+not made if it would exceed the budget or if the budget is already exceeded.
+Some other allocations become lost instead to make room for it, if the mechanism of
+[lost allocations](@ref lost_allocations) is used.
+If that is not possible, the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+when creating resources that are not essential for the application (e.g. the texture
+of a specific object) and not to pass it when creating critically important resources
+(e.g. render targets).
+
+Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require to allocate a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+Please note that creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such a scenario is possible using VMA, but you need to create your images manually.
+Then you need to calculate parameters of an allocation to be made using this formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+Following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+
+\code
+// A 512x512 texture to be sampled.
+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img1CreateInfo.extent.width = 512;
+img1CreateInfo.extent.height = 512;
+img1CreateInfo.extent.depth = 1;
+img1CreateInfo.mipLevels = 10;
+img1CreateInfo.arrayLayers = 1;
+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+// A full screen texture to be used as color attachment.
+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img2CreateInfo.extent.width = 1920;
+img2CreateInfo.extent.height = 1080;
+img2CreateInfo.extent.depth = 1;
+img2CreateInfo.mipLevels = 1;
+img2CreateInfo.arrayLayers = 1;
+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VkImage img1;
+res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+VkImage img2;
+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+
+VkMemoryRequirements img1MemReq;
+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+VkMemoryRequirements img2MemReq;
+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+
+VkMemoryRequirements finalMemReq = {};
+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+// Validate if(finalMemReq.memoryTypeBits != 0)
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
+Additional considerations:
+
+- Vulkan also allows interpreting contents of memory between aliasing resources consistently in some cases.
+  See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create more complex layout where different images and buffers are bound
+  at different offsets inside one large allocation. For example, one can imagine
+  a big texture used in some render passes, aliasing with a set of many small buffers
+  used in some further passes. To bind a resource at non-zero offset of an allocation,
+  use vmaBindBufferMemory2() / vmaBindImageMemory2().
+- Before allocating memory for the resources you want to alias, check `memoryTypeBits` +returned in memory requirements of each resource to make sure the bits overlap. +Some GPUs may expose multiple memory types suitable e.g. only for buffers or +images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your +resources may be disjoint. Aliasing them is not possible in that case. + + +\page custom_memory_pools Custom memory pools + +A memory pool contains a number of `VkDeviceMemory` blocks. +The library automatically creates and manages default pool for each memory type available on the device. +Default memory pool automatically grows in size. +Size of allocated blocks is also variable and managed automatically. + +You can create custom pool and allocate memory out of it. +It can be useful if you want to: + +- Keep certain kind of allocations separate from others. +- Enforce particular, fixed size of Vulkan memory blocks. +- Limit maximum amount of Vulkan memory allocated for that pool. +- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. + +To use custom memory pools: + +-# Fill VmaPoolCreateInfo structure. +-# Call vmaCreatePool() to obtain #VmaPool handle. +-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle. + You don't need to specify any other parameters of this structure, like `usage`. + +Example: + +\code +// Create a pool that can have at most 2 blocks, 128 MiB each. +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = ... +poolCreateInfo.blockSize = 128ull * 1024 * 1024; +poolCreateInfo.maxBlockCount = 2; + +VmaPool pool; +vmaCreatePool(allocator, &poolCreateInfo, &pool); + +// Allocate a buffer out of it. +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 1024; +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.pool = pool; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); +\endcode + +You have to free all allocations made from this pool before destroying it. + +\code +vmaDestroyBuffer(allocator, buf, alloc); +vmaDestroyPool(allocator, pool); +\endcode + +\section custom_memory_pools_MemTypeIndex Choosing memory type index + +When creating a pool, you must explicitly specify memory type index. +To find the one suitable for your buffers or images, you can use helper functions +vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). +You need to provide structures with example parameters of buffers or images +that you are going to create in that pool. + +\code +VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +exampleBufCreateInfo.size = 1024; // Whatever. +exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed. + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed. + +uint32_t memTypeIndex; +vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); + +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +// ... 
+\endcode + +When creating buffers/images allocated in that pool, provide following parameters: + +- `VkBufferCreateInfo`: Prefer to pass same parameters as above. + Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior. + Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers + or the other way around. +- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member. + Other members are ignored anyway. + +\section linear_algorithm Linear allocation algorithm + +Each Vulkan memory block managed by this library has accompanying metadata that +keeps track of used and unused regions. By default, the metadata structure and +algorithm tries to find best place for new allocations among free regions to +optimize memory usage. This way you can allocate and free objects in any order. + +![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png) + +Sometimes there is a need to use simpler, linear allocation algorithm. You can +create custom pool that uses such algorithm by adding flag +#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating +#VmaPool object. Then an alternative metadata management is used. It always +creates new allocations after last one and doesn't reuse free regions after +allocations freed in the middle. It results in better allocation performance and +less memory consumed by metadata. + +![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png) + +With this one flag, you can create a custom pool that can be used in many ways: +free-at-once, stack, double stack, and ring buffer. See below for details. + +\subsection linear_algorithm_free_at_once Free-at-once + +In a pool that uses linear algorithm, you still need to free all the allocations +individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free +them in any order. New allocations are always made after last one - free space +in the middle is not reused. However, when you release all the allocation and +the pool becomes empty, allocation starts from the beginning again. This way you +can use linear algorithm to speed up creation of allocations that you are going +to release all at once. + +![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png) + +This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount +value that allows multiple memory blocks. + +\subsection linear_algorithm_stack Stack + +When you free an allocation that was created last, its space can be reused. +Thanks to this, if you always release allocations in the order opposite to their +creation (LIFO - Last In First Out), you can achieve behavior of a stack. + +![Stack](../gfx/Linear_allocator_4_stack.png) + +This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount +value that allows multiple memory blocks. + +\subsection linear_algorithm_double_stack Double stack + +The space reserved by a custom pool with linear algorithm may be used by two +stacks: + +- First, default one, growing up from offset 0. +- Second, "upper" one, growing down from the end towards lower offsets. + +To make allocation from upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT +to VmaAllocationCreateInfo::flags. + +![Double stack](../gfx/Linear_allocator_7_double_stack.png) + +Double stack is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. 
Otherwise behavior is undefined. + +When the two stacks' ends meet so there is not enough space between them for a +new allocation, such allocation fails with usual +`VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + +\subsection linear_algorithm_ring_buffer Ring buffer + +When you free some allocations from the beginning and there is not enough free space +for a new one at the end of a pool, allocator's "cursor" wraps around to the +beginning and starts allocation there. Thanks to this, if you always release +allocations in the same order as you created them (FIFO - First In First Out), +you can achieve behavior of a ring buffer / queue. + +![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png) + +Pools with linear algorithm support [lost allocations](@ref lost_allocations) when used as ring buffer. +If there is not enough free space for a new allocation, but existing allocations +from the front of the queue can become lost, they become lost and the allocation +succeeds. + +![Ring buffer with lost allocations](../gfx/Linear_allocator_6_ring_buffer_lost.png) + +Ring buffer is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. + +\section buddy_algorithm Buddy allocation algorithm + +There is another allocation algorithm that can be used with custom pools, called +"buddy". Its internal data structure is based on a tree of blocks, each having +size that is a power of two and a half of its parent's size. When you want to +allocate memory of certain size, a free node in the tree is located. If it's too +large, it is recursively split into two halves (called "buddies"). However, if +requested allocation size is not a power of two, the size of a tree node is +aligned up to the nearest power of two and the remaining space is wasted. When +two buddy nodes become free, they are merged back into one larger node. + +![Buddy allocator](../gfx/Buddy_allocator.png) + +The advantage of buddy allocation algorithm over default algorithm is faster +allocation and deallocation, as well as smaller external fragmentation. The +disadvantage is more wasted space (internal fragmentation). + +For more information, please read ["Buddy memory allocation" on Wikipedia](https://en.wikipedia.org/wiki/Buddy_memory_allocation) +or other sources that describe this concept in general. + +To use buddy allocation algorithm with a custom pool, add flag +#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating +#VmaPool object. + +Several limitations apply to pools that use buddy algorithm: + +- It is recommended to use VmaPoolCreateInfo::blockSize that is a power of two. + Otherwise, only largest power of two smaller than the size is used for + allocations. The remaining space always stays unused. +- [Margins](@ref debugging_memory_usage_margins) and + [corruption detection](@ref debugging_memory_usage_corruption_detection) + don't work in such pools. +- [Lost allocations](@ref lost_allocations) don't work in such pools. You can + use them, but they never become lost. Support may be added in the future. +- [Defragmentation](@ref defragmentation) doesn't work with allocations made from + such pool. 
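+
+As an illustration, a pool using this algorithm could be created like the following sketch
+(the block size here is just an example value, and `memTypeIndex` is assumed to have been
+chosen as described in \ref custom_memory_pools_MemTypeIndex):
+
+\code
+// memTypeIndex found earlier, e.g. using vmaFindMemoryTypeIndexForBufferInfo().
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Preferably a power of two.
+poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+
+// Allocations that set VmaAllocationCreateInfo::pool to this pool now use the buddy algorithm.
+\endcode
+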
+
+\page defragmentation Defragmentation
+
+Interleaved allocations and deallocations of many objects of varying size can
+cause fragmentation over time, which can lead to a situation where the library is unable
+to find a continuous range of free memory for a new allocation even though there is
+enough free space, just scattered across many small free ranges between existing
+allocations.
+
+To mitigate this problem, you can use the defragmentation feature:
+structure #VmaDefragmentationInfo2, functions vmaDefragmentationBegin() and vmaDefragmentationEnd().
+Given a set of allocations,
+these functions can move them to compact used memory, ensure more continuous free
+space and possibly also free some `VkDeviceMemory` blocks.
+
+What the defragmentation does is:
+
+- Updates #VmaAllocation objects to point to new `VkDeviceMemory` and offset.
+  After allocation has been moved, its VmaAllocationInfo::deviceMemory and/or
+  VmaAllocationInfo::offset changes. You must query them again using
+  vmaGetAllocationInfo() if you need them.
+- Moves actual data in memory.
+
+What it doesn't do, so you need to do it yourself:
+
+- Recreate buffers and images that were bound to allocations that were defragmented and
+  bind them to their new places in memory.
+  You must use `vkDestroyBuffer()`, `vkDestroyImage()`,
+  `vkCreateBuffer()`, `vkCreateImage()`, vmaBindBufferMemory(), vmaBindImageMemory()
+  for that purpose and NOT vmaDestroyBuffer(),
+  vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to
+  destroy or create allocation objects!
+- Recreate views and update descriptors that point to these buffers and images.
+
+\section defragmentation_cpu Defragmenting CPU memory
+
+The following example demonstrates how you can run defragmentation on CPU.
+Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented.
+Others are ignored.
+
+The way it works is:
+
+- It temporarily maps entire memory blocks when necessary.
+- It moves data using `memmove()` function.
+
+\code
+// Given following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+    if(allocationsChanged[i])
+    {
+        // Destroy buffer that is immutably bound to memory region which is no longer valid.
+        vkDestroyBuffer(device, buffers[i], nullptr);
+
+        // Create new buffer with same parameters.
+        VkBufferCreateInfo bufferInfo = ...;
+        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+        // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
+
+        // Bind new buffer to new memory region. Data contained in it is already moved.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+        vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
+    }
+}
+\endcode
+
+Setting VmaDefragmentationInfo2::pAllocationsChanged is optional.
+This output array tells whether a particular allocation in VmaDefragmentationInfo2::pAllocations at the same index
+has been modified during defragmentation.
+You can pass null, but you then need to query every allocation passed to defragmentation
+for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.
+
+If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools),
+you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools
+instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations
+to defragment all allocations in given pools.
+You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case.
+You can also combine both methods.
+
+\section defragmentation_gpu Defragmenting GPU memory
+
+It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`.
+To do that, you need to pass a command buffer that meets requirements as described in
+VmaDefragmentationInfo2::commandBuffer. The way it works is:
+
+- It creates temporary buffers and binds them to entire memory blocks when necessary.
+- It issues `vkCmdCopyBuffer()` to the passed command buffer.
+
+Example:
+
+\code
+// Given following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+VkCommandBuffer commandBuffer;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
+vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
+defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
+defragInfo.commandBuffer = commandBuffer;
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+
+vkEndCommandBuffer(commandBuffer);
+
+// Submit commandBuffer.
+// Wait for a fence that ensures commandBuffer execution finished.
+
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+    if(allocationsChanged[i])
+    {
+        // Destroy buffer that is immutably bound to memory region which is no longer valid.
+        vkDestroyBuffer(device, buffers[i], nullptr);
+
+        // Create new buffer with same parameters.
+        VkBufferCreateInfo bufferInfo = ...;
+        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+        // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
+
+        // Bind new buffer to new memory region. Data contained in it is already moved.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+        vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
+    }
+}
+\endcode
+
+You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters.
+The library automatically chooses best method to defragment each memory pool.
+
+You may try not to block your entire program to wait until defragmentation finishes,
+but do it in the background, as long as you carefully fulfill requirements described
+in function vmaDefragmentationBegin().
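+
+The "submit and wait" step mentioned in the comments of the example above could look e.g. like this
+minimal sketch (`queue` is assumed to be a queue from a family that supports the required operations;
+error checking omitted):
+
+\code
+VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+submitInfo.commandBufferCount = 1;
+submitInfo.pCommandBuffers = &commandBuffer;
+
+VkFenceCreateInfo fenceCreateInfo = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
+VkFence fence;
+vkCreateFence(device, &fenceCreateInfo, nullptr, &fence);
+
+// Submit the command buffer and wait until its execution finished.
+vkQueueSubmit(queue, 1, &submitInfo, fence);
+vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
+vkDestroyFence(device, fence, nullptr);
+\endcode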
+
+\section defragmentation_additional_notes Additional notes
+
+It is only legal to defragment allocations bound to:
+
+- buffers
+- images created with `VK_IMAGE_CREATE_ALIAS_BIT`, `VK_IMAGE_TILING_LINEAR`, and
+  being currently in `VK_IMAGE_LAYOUT_GENERAL` or `VK_IMAGE_LAYOUT_PREINITIALIZED`.
+
+Defragmentation of images created with `VK_IMAGE_TILING_OPTIMAL` or in any other
+layout may give undefined results.
+
+If you defragment allocations bound to images, new images to be bound to new
+memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED`
+and then transitioned to their original layout from before defragmentation if
+needed using an image memory barrier.
+
+While using defragmentation, you may experience validation layer warnings, which you just need to ignore.
+See [Validation layer warnings](@ref general_considerations_validation_layer_warnings).
+
+Please don't expect memory to be fully compacted after defragmentation.
+Algorithms inside are based on some heuristics that try to maximize the number of Vulkan
+memory blocks that can be made totally empty and released, as well as to maximize continuous
+empty space inside remaining blocks, while minimizing the number and size of allocations that
+need to be moved. Some fragmentation may still remain - this is normal.
+
+\section defragmentation_custom_algorithm Writing custom defragmentation algorithm
+
+If you want to implement your own, custom defragmentation algorithm,
+there is infrastructure prepared for that,
+but it is not exposed through the library API - you need to hack its source code.
+Here are steps needed to do this:
+
+-# The main thing you need to do is to define your own class derived from base abstract
+   class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods.
+   See definition and comments of this class for details.
+-# Your code needs to interact with device memory block metadata.
+   If you need more access to its data than is provided by its public interface,
+   declare your new class as a friend class e.g. in class `VmaBlockMetadata_Generic`.
+-# If you want to create a flag that would enable your algorithm or pass some additional
+   flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in
+   VmaDefragmentationInfo2::flags.
+-# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create object
+   of your new class whenever needed.
+
+
+\page lost_allocations Lost allocations
+
+If your game oversubscribes video memory, it may work OK in previous-generation
+graphics APIs (DirectX 9, 10, 11, OpenGL) because resources are automatically
+paged to system RAM. In Vulkan you can't rely on that, because when you run out of
+memory, an allocation just fails. If you have more data (e.g. textures) than can
+fit into VRAM and you don't need it all at once, you may want to upload them to
+GPU on demand and "push out" ones that are not used for a long time to make room
+for the new ones, effectively using VRAM (or a certain memory pool) as a form of
+cache. Vulkan Memory Allocator can help you with that by supporting a concept of
+"lost allocations".
+
+To create an allocation that can become lost, include #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
+flag in VmaAllocationCreateInfo::flags. Before using a buffer or image bound to
+such allocation in every new frame, you need to check whether it's not lost.
+To check it, call vmaTouchAllocation().
+If the allocation is lost, you should not use it or buffer/image bound to it.
+You mustn't forget to destroy this allocation and this buffer/image. +vmaGetAllocationInfo() can also be used for checking status of the allocation. +Allocation is lost when returned VmaAllocationInfo::deviceMemory == `VK_NULL_HANDLE`. + +To create an allocation that can make some other allocations lost to make room +for it, use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. You will +usually use both flags #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT and +#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT at the same time. + +Warning! Current implementation uses quite naive, brute force algorithm, +which can make allocation calls that use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT +flag quite slow. A new, more optimal algorithm and data structure to speed this +up is planned for the future. + +Q: When interleaving creation of new allocations with usage of existing ones, +how do you make sure that an allocation won't become lost while it's used in the +current frame? + +It is ensured because vmaTouchAllocation() / vmaGetAllocationInfo() not only returns allocation +status/parameters and checks whether it's not lost, but when it's not, it also +atomically marks it as used in the current frame, which makes it impossible to +become lost in that frame. It uses lockless algorithm, so it works fast and +doesn't involve locking any internal mutex. + +Q: What if my allocation may still be in use by the GPU when it's rendering a +previous frame while I already submit new frame on the CPU? + +You can make sure that allocations "touched" by vmaTouchAllocation() / vmaGetAllocationInfo() will not +become lost for a number of additional frames back from the current one by +specifying this number as VmaAllocatorCreateInfo::frameInUseCount (for default +memory pool) and VmaPoolCreateInfo::frameInUseCount (for custom pool). + +Q: How do you inform the library when new frame starts? + +You need to call function vmaSetCurrentFrameIndex(). + +Example code: + +\code +struct MyBuffer +{ + VkBuffer m_Buf = nullptr; + VmaAllocation m_Alloc = nullptr; + + // Called when the buffer is really needed in the current frame. + void EnsureBuffer(); +}; + +void MyBuffer::EnsureBuffer() +{ + // Buffer has been created. + if(m_Buf != VK_NULL_HANDLE) + { + // Check if its allocation is not lost + mark it as used in current frame. + if(vmaTouchAllocation(allocator, m_Alloc)) + { + // It's all OK - safe to use m_Buf. + return; + } + } + + // Buffer not yet exists or lost - destroy and recreate it. + + vmaDestroyBuffer(allocator, m_Buf, m_Alloc); + + VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; + bufCreateInfo.size = 1024; + bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + + VmaAllocationCreateInfo allocCreateInfo = {}; + allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; + allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT | + VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; + + vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr); +} +\endcode + +When using lost allocations, you may see some Vulkan validation layer warnings +about overlapping regions of memory bound to different kinds of buffers and +images. This is still valid as long as you implement proper handling of lost +allocations (like in the example above) and don't use them. + +You can create an allocation that is already in lost state from the beginning using function +vmaCreateLostAllocation(). 
+It may be useful if you need a "dummy" allocation that is not null.
+
+You can call function vmaMakePoolAllocationsLost() to set all eligible allocations
+in a specified custom pool to lost state.
+Allocations that have been "touched" in current frame or VmaPoolCreateInfo::frameInUseCount frames back
+cannot become lost.
+
+Q: Can I touch an allocation that cannot become lost?
+
+Yes, although it has no visible effect.
+Calls to vmaGetAllocationInfo() and vmaTouchAllocation() update last use frame index
+also for allocations that cannot become lost, but the only way to observe it is to dump
+internal allocator state using vmaBuildStatsString().
+You can use this feature for debugging purposes to explicitly mark allocations that you use
+in current frame and then analyze JSON dump to see for how long each allocation stays unused.
+
+
+\page statistics Statistics
+
+This library contains functions that return information about its internal state,
+especially the amount of memory allocated from Vulkan.
+Please keep in mind that these functions need to traverse all internal data structures
+to gather this information, so they may be quite time-consuming.
+Don't call them too often.
+
+\section statistics_numeric_statistics Numeric statistics
+
+You can query for overall statistics of the allocator using function vmaCalculateStats().
+Information is returned using structure #VmaStats.
+It contains #VmaStatInfo - number of allocated blocks, number of allocations
+(occupied ranges in these blocks), number of unused (free) ranges in these blocks,
+number of bytes used and unused (but still allocated from Vulkan) and other information.
+They are summed across memory heaps, memory types and total for whole allocator.
+
+You can query for statistics of a custom pool using function vmaGetPoolStats().
+Information is returned using structure #VmaPoolStats.
+
+You can query for information about specific allocation using function vmaGetAllocationInfo().
+It fills structure #VmaAllocationInfo.
+
+\section statistics_json_dump JSON dump
+
+You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
+The result is guaranteed to be correct JSON.
+It uses ANSI encoding.
+Any strings provided by user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using this encoding.
+It must be freed using function vmaFreeStatsString().
+
+The format of this JSON string is not part of official documentation of the library,
+but it will not change in backward-incompatible way without increasing library major version number
+and appropriate mention in changelog.
+
+The JSON string contains all the data that can be obtained using vmaCalculateStats().
+It can also contain detailed map of allocated memory blocks and their regions -
+free and occupied by allocations.
+This allows you e.g. to visualize the memory or assess fragmentation.
+
+
+\page allocation_annotation Allocation names and user data
+
+\section allocation_user_data Allocation user data
+
+You can annotate allocations with your own information, e.g. for debugging purposes.
+To do that, fill VmaAllocationCreateInfo::pUserData field when creating
+an allocation. It's an opaque `void*` pointer. You can use it e.g. as a pointer,
+some handle, index, key, ordinal number or any other value that would associate
+the allocation with your custom metadata.
+ +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +// Fill bufferInfo... + +MyBufferMetadata* pMetadata = CreateBufferMetadata(); + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.pUserData = pMetadata; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +\endcode + +The pointer may be later retrieved as VmaAllocationInfo::pUserData: + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; +\endcode + +It can also be changed using function vmaSetAllocationUserData(). + +Values of (non-zero) allocations' `pUserData` are printed in JSON report created by +vmaBuildStatsString(), in hexadecimal form. + +\section allocation_names Allocation names + +There is alternative mode available where `pUserData` pointer is used to point to +a null-terminated string, giving a name to the allocation. To use this mode, +set #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags. +Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or argument to +vmaSetAllocationUserData() must be either null or pointer to a null-terminated string. +The library creates internal copy of the string, so the pointer you pass doesn't need +to be valid for whole lifetime of the allocation. You can free it after the call. + +\code +VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +// Fill imageInfo... + +std::string imageName = "Texture: "; +imageName += fileName; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; +allocCreateInfo.pUserData = imageName.c_str(); + +VkImage image; +VmaAllocation allocation; +vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr); +\endcode + +The value of `pUserData` pointer of the allocation will be different than the one +you passed when setting allocation's name - pointing to a buffer managed +internally that holds copy of the string. + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +const char* imageName = (const char*)allocInfo.pUserData; +printf("Image name: %s\n", imageName); +\endcode + +That string is also printed in JSON report created by vmaBuildStatsString(). + +\note Passing string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. +You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. + + +\page debugging_memory_usage Debugging incorrect memory usage + +If you suspect a bug with memory usage, like usage of uninitialized memory or +memory being overwritten out of bounds of an allocation, +you can use debug features of this library to verify this. + +\section debugging_memory_usage_initialization Memory initialization + +If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, +you can enable automatic memory initialization to verify this. +To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. 
+
+\code
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+#include "vk_mem_alloc.h"
+\endcode
+
+It makes memory of all new allocations initialized to bit pattern `0xDCDCDCDC`.
+Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
+Memory is automatically mapped and unmapped if necessary.
+
+If you find these values while debugging your program, chances are good that you incorrectly
+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
+
+Memory initialization works only with memory types that are `HOST_VISIBLE`.
+It works also with dedicated allocations.
+It doesn't work with allocations created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+as they cannot be mapped.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
+number of bytes as a margin before and after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it means it may be caused by memory
+being overwritten outside of allocation boundaries. It is not 100% certain though.
+Change in application behavior may also be caused by different order and distribution
+of allocations across memory blocks after margins are applied.
+
+The margin is applied also before first and after last allocation in a block.
+It may occur only once between two adjacent allocations.
+
+Margins work with all types of memory.
+
+Margin is applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of specific size.
+It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or those automatically decided to be put into dedicated allocations, e.g. due to their
+large size or as recommended by VK_KHR_dedicated_allocation extension.
+Margins are also not active in custom pools created with #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag.
+
+Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) before and after every allocation is filled with a magic number.
+This idea is also known as "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it's not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or the GPU overwrote the memory outside of boundaries of the allocation,
+which indicates a serious bug.
+ +You can also explicitly request checking margins of all allocations in all memory blocks +that belong to specified memory types by using function vmaCheckCorruption(), +or in memory blocks that belong to specified custom pool, by using function +vmaCheckPoolCorruption(). + +Margin validation (corruption detection) works only for memory types that are +`HOST_VISIBLE` and `HOST_COHERENT`. + + +\page record_and_replay Record and replay + +\section record_and_replay_introduction Introduction + +While using the library, sequence of calls to its functions together with their +parameters can be recorded to a file and later replayed using standalone player +application. It can be useful to: + +- Test correctness - check if same sequence of calls will not cause crash or + failures on a target platform. +- Gather statistics - see number of allocations, peak memory usage, number of + calls etc. +- Benchmark performance - see how much time it takes to replay the whole + sequence. + +\section record_and_replay_usage Usage + +Recording functionality is disabled by default. +To enable it, define following macro before every include of this library: + +\code +#define VMA_RECORDING_ENABLED 1 +\endcode + +To record sequence of calls to a file: Fill in +VmaAllocatorCreateInfo::pRecordSettings member while creating #VmaAllocator +object. File is opened and written during whole lifetime of the allocator. + +To replay file: Use VmaReplay - standalone command-line program. +Precompiled binary can be found in "bin" directory. +Its source can be found in "src/VmaReplay" directory. +Its project is generated by Premake. +Command line syntax is printed when the program is launched without parameters. +Basic usage: + + VmaReplay.exe MyRecording.csv + +Documentation of file format can be found in file: "docs/Recording file format.md". +It's a human-readable, text file in CSV format (Comma Separated Values). + +\section record_and_replay_additional_considerations Additional considerations + +- Replaying file that was recorded on a different GPU (with different parameters + like `bufferImageGranularity`, `nonCoherentAtomSize`, and especially different + set of memory heaps and types) may give different performance and memory usage + results, as well as issue some warnings and errors. +- Current implementation of recording in VMA, as well as VmaReplay application, is + coded and tested only on Windows. Inclusion of recording code is driven by + `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to + add. Contributions are welcomed. + + +\page usage_patterns Recommended usage patterns + +See also slides from talk: +[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) + + +\section usage_patterns_common_mistakes Common mistakes + +Use of CPU_TO_GPU instead of CPU_ONLY memory + +#VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be +mapped and written by the CPU, as well as read directly by the GPU - like some +buffers or textures updated every frame (dynamic). If you create a staging copy +of a resource to be written by CPU and then used as a source of transfer to +another resource placed in the GPU memory, that staging resource should be +created with #VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these +enums carefully for details. 
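+
+For illustration, a staging buffer for such a transfer could be created like this
+(a minimal sketch; `myData` and `myDataSize` are placeholders for your own data):
+
+\code
+// Staging buffer - filled on CPU, used only as a transfer source.
+VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+stagingBufInfo.size = myDataSize;
+stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer stagingBuf;
+VmaAllocation stagingAlloc;
+VmaAllocationInfo stagingAllocInfo;
+vmaCreateBuffer(allocator, &stagingBufInfo, &stagingAllocCreateInfo, &stagingBuf, &stagingAlloc, &stagingAllocInfo);
+
+// Fill the staging buffer, then record a vkCmdCopyBuffer() / vkCmdCopyBufferToImage()
+// from stagingBuf to the final GPU_ONLY resource.
+memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+\endcode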
+
+Unnecessary use of custom pools
+
+\ref custom_memory_pools may be useful for special purposes - when you want to
+keep certain type of resources separate e.g. to reserve minimum amount of memory
+for them, limit maximum amount of memory they can occupy, or make some of them
+push out the others through the mechanism of \ref lost_allocations. For most
+resources this is not needed and so it is not recommended to create #VmaPool
+objects and allocations out of them. Allocating from the default pool is sufficient.
+
+\section usage_patterns_simple Simple patterns
+
+\subsection usage_patterns_simple_render_targets Render targets
+
+When:
+Any resources that you frequently write and read on GPU,
+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
+
+What to do:
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension
+and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+especially if they are large or if you plan to destroy and recreate them e.g. when
+display resolution changes.
+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+
+\subsection usage_patterns_simple_immutable_resources Immutable resources
+
+When:
+Any resources that you fill on CPU only once (aka "immutable") or infrequently
+and then read frequently on GPU,
+e.g. textures, vertex and index buffers, constant buffers that don't change often.
+
+What to do:
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+To initialize content of such resource, create a CPU-side (aka "staging") copy of it
+in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
+and submit a transfer from it to the GPU resource.
+You can keep the staging copy if you need it for another upload transfer in the future.
+If you don't, you can destroy it or reuse this buffer for uploading different resource
+after the transfer finishes.
+
+Prefer to create just buffers in system memory rather than images, even for uploading textures.
+Use `vkCmdCopyBufferToImage()`.
+Don't use images with `VK_IMAGE_TILING_LINEAR`.
+
+\subsection usage_patterns_dynamic_resources Dynamic resources
+
+When:
+Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
+written on CPU, read on GPU.
+
+What to do:
+Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
+You can map them and write to them directly on CPU, as well as read from them on GPU.
+
+This is a more complex situation. Different solutions are possible,
+and the best one depends on specific GPU type, but you can use this simple approach to start with.
+Prefer to write to such a resource sequentially (e.g. using `memcpy`).
+Don't perform random access or any reads from it on CPU, as it may be very slow.
+Also note that textures written directly from the host through a mapped pointer need to be in LINEAR not OPTIMAL layout.
+
+\subsection usage_patterns_readback Readback
+
+When:
+Resources that contain data written by GPU that you want to read back on CPU,
+e.g. results of some computations.
+
+What to do:
+Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
+You can write to them directly on GPU, as well as map and read them on CPU.
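+
+For illustration, such a readback buffer could be created and read e.g. like this
+(a minimal sketch; `resultSize` and `myResults` are placeholders):
+
+\code
+// Buffer that the GPU writes to (e.g. as a transfer destination) and the CPU reads back.
+VkBufferCreateInfo readbackBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+readbackBufInfo.size = resultSize;
+readbackBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo readbackAllocCreateInfo = {};
+readbackAllocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_TO_CPU;
+
+VkBuffer readbackBuf;
+VmaAllocation readbackAlloc;
+vmaCreateBuffer(allocator, &readbackBufInfo, &readbackAllocCreateInfo, &readbackBuf, &readbackAlloc, nullptr);
+
+// ... after the GPU work writing to readbackBuf has finished (e.g. after waiting on a fence):
+void* mappedData;
+vmaMapMemory(allocator, readbackAlloc, &mappedData);
+vmaInvalidateAllocation(allocator, readbackAlloc, 0, VK_WHOLE_SIZE); // Needed if the memory is not HOST_COHERENT.
+memcpy(myResults, mappedData, resultSize);
+vmaUnmapMemory(allocator, readbackAlloc);
+\endcode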
+
+\section usage_patterns_advanced Advanced patterns
+
+\subsection usage_patterns_integrated_graphics Detecting integrated graphics
+
+You can support integrated graphics (like Intel HD Graphics, AMD APU) better
+by detecting it in Vulkan.
+To do it, call `vkGetPhysicalDeviceProperties()`, inspect
+`VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.
+When you find it, you can assume that memory is unified and all memory types are comparably fast
+to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+
+You can then sum up sizes of all available memory heaps and treat them as useful for
+your GPU resources, instead of only `DEVICE_LOCAL` ones.
+You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
+directly instead of submitting an explicit transfer (see below).
+
+\subsection usage_patterns_direct_vs_transfer Direct access versus transfer
+
+For resources that you frequently write on CPU and read on GPU, many solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+   a second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit an explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
+   read it directly on GPU.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
+   read it directly on GPU.
+
+Which solution is the most efficient depends on your resource and especially on the GPU.
+It is best to measure it and then make the decision.
+Some general recommendations:
+
+- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
+  related to using a second copy and making a transfer.
+- For small resources (e.g. constant buffers) use (2).
+  Discrete AMD cards have a special 256 MiB pool of video memory that is directly mappable.
+  Even if the resource ends up in system memory, its data may be cached on GPU after the first
+  fetch over the PCIe bus.
+- For larger resources (e.g. textures), decide between (1) and (2).
+  You may want to differentiate NVIDIA and AMD, e.g. by looking for a memory type that is
+  both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
+
+Similarly, for resources that you frequently write on GPU and read on CPU, multiple
+solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+   a second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU and submit an explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
+   map it and read it on CPU.
+
+You should take some measurements to decide which option is faster in case of your specific
+resource.
+
+Note that textures accessed directly from the host through a mapped pointer need to be in LINEAR layout,
+which may slow down their usage on the device.
+Textures accessed only by the device and transfer operations can use OPTIMAL layout.
+
+If you don't want to specialize your code for specific types of GPUs, you can still make
+a simple optimization for the case when your resource ends up in mappable memory: use it
+directly instead of creating a CPU-side staging copy.
+For details see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
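+
+For illustration, such a check could look like this (a minimal sketch - `alloc` is
+assumed to be an existing #VmaAllocation and `allocator` an existing #VmaAllocator):
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, alloc, &allocInfo);
+
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // The allocation ended up in mappable memory - map it and write to it directly.
+}
+else
+{
+    // Not mappable - fall back to a staging copy and an explicit transfer.
+}
+\endcode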
+ + +\page configuration Configuration + +Please check "CONFIGURATION SECTION" in the code to find macros that you can define +before each include of this file or change directly in this file to provide +your own implementation of basic facilities like assert, `min()` and `max()` functions, +mutex, atomic etc. +The library uses its own implementation of containers by default, but you can switch to using +STL containers instead. + +For example, define `VMA_ASSERT(expr)` before including the library to provide +custom implementation of the assertion, compatible with your project. +By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration +and empty otherwise. + +\section config_Vulkan_functions Pointers to Vulkan functions + +There are multiple ways to import pointers to Vulkan functions in the library. +In the simplest case you don't need to do anything. +If the compilation or linking of your program or the initialization of the #VmaAllocator +doesn't work for you, you can try to reconfigure it. + +First, the allocator tries to fetch pointers to Vulkan functions linked statically, +like this: + +\code +m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; +\endcode + +If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`. + +Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions. +You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or +by using a helper library like [volk](https://github.com/zeux/volk). + +Third, VMA tries to fetch remaining pointers that are still null by calling +`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own. +If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`. + +Finally, all the function pointers required by the library (considering selected +Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null. + + +\section custom_memory_allocator Custom host memory allocator + +If you use custom allocator for CPU memory rather than default operator `new` +and `delete` from C++, you can make this library using your allocator as well +by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These +functions will be passed to Vulkan, as well as used by the library itself to +make any CPU-side allocations. + +\section allocation_callbacks Device memory allocation callbacks + +The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally. +You can setup callbacks to be informed about these calls, e.g. for the purpose +of gathering some statistics. To do it, fill optional member +VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. + +\section heap_memory_limit Device heap memory limit + +When device memory of certain heap runs out of free space, new allocations may +fail (returning error code) or they may succeed, silently pushing some existing +memory blocks from GPU VRAM to system RAM (which degrades performance). This +behavior is implementation-dependent - it depends on GPU vendor and graphics +driver. + +On AMD cards it can be controlled while creating Vulkan device object by using +VK_AMD_memory_overallocation_behavior extension, if available. 
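+
+For illustration, disallowing such overallocation on those cards might look like this
+(a sketch under the assumption that the extension was found among the enumerated device
+extensions; the structure and enum come from the extension's Vulkan headers):
+
+\code
+VkDeviceMemoryOverallocationCreateInfoAMD overallocInfo = {
+    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD };
+overallocInfo.overallocationBehavior = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD;
+
+VkDeviceCreateInfo deviceCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
+deviceCreateInfo.pNext = &overallocInfo;
+// Also add "VK_AMD_memory_overallocation_behavior" to ppEnabledExtensionNames
+// and fill the remaining members as usual before calling vkCreateDevice().
+\endcode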
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query
+the driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations.
+
+The extension is supported by this library. It will be used automatically when
+enabled. To enable it:
+
+1 . When creating the Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 . Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator object to inform the library that you enabled the required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That's all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or an image using vmaCreateImage().
+
+When using the extension together with the Vulkan Validation Layer, you will receive
+warnings like this:
+
+    vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
+
+It is OK, you should just ignore it. It happens because you use the function
+`vkGetBufferMemoryRequirements2KHR()` instead of the standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+
+VK_AMD_device_coherent_memory is a device extension that enables access to
+additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
+allocation of buffers intended for writing "breadcrumb markers" in between passes
+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+
+When the extension is available but has not been enabled, the Vulkan physical device
+still exposes those memory types, but their usage is forbidden. VMA automatically
+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+to allocate memory of such type is made.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_amd_device_coherent_memory_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator() inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following the steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+  `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found a memory type index to use for this purpose, force allocation
+  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
+
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_AMD_device_coherent_memory)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+The device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+It is promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures*::bufferDeviceAddress` is true.
+ +3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add +"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +to VmaAllocatorCreateInfo::flags. + +\section enabling_buffer_device_address_usage Usage + +After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. +The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to +allocated memory blocks wherever it might be needed. + +Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. +The second part of this functionality related to "capture and replay" is not supported, +as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. + +\section enabling_buffer_device_address_more_information More information + +To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. + +\page general_considerations General considerations + +\section general_considerations_thread_safety Thread safety + +- The library has no global state, so separate #VmaAllocator objects can be used + independently. + There should be no need to create multiple such objects though - one per `VkDevice` is enough. +- By default, all calls to functions that take #VmaAllocator as first parameter + are safe to call from multiple threads simultaneously because they are + synchronized internally when needed. +- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT + flag, calls to functions that take such #VmaAllocator object must be + synchronized externally. +- Access to a #VmaAllocation object must be externally synchronized. For example, + you must not call vmaGetAllocationInfo() and vmaMapMemory() from different + threads at the same time if you pass the same #VmaAllocation object to these + functions. + +\section general_considerations_validation_layer_warnings Validation layer warnings + +When using this library, you can meet following types of warnings issued by +Vulkan validation layer. They don't necessarily indicate a bug, so you may need +to just ignore them. + +- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.* + - It happens when VK_KHR_dedicated_allocation extension is enabled. + `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. +- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. 
Only GENERAL or PREINITIALIZED should be used.*
+  - It happens when you map a buffer or image, because the library maps the entire
+    `VkDeviceMemory` block, where different types of images and buffers may end
+    up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+  - It happens when you use lost allocations, and a new image or buffer is
+    created in place of an existing object that became lost.
+  - It may also happen when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find a free range of memory in existing blocks.
+-# If failed, try to create a new block of `VkDeviceMemory`, with the preferred block size.
+-# If failed, try to create such a block with size/2, size/4, size/8.
+-# If failed and #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was
+   specified, try to find space in existing blocks, possibly making some other
+   allocations lost.
+-# If failed, try to allocate a separate `VkDeviceMemory` for this allocation,
+   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If failed, choose another memory type that meets the requirements specified in
+   VmaAllocationCreateInfo and go to point 1.
+-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+- Data transfer. Uploading (streaming) and downloading data of buffers and images
+  between CPU and GPU memory and related synchronization is the responsibility of the user.
+  Defining some "texture" object that would automatically stream its data from a
+  staging copy in CPU memory to GPU memory would rather be a feature of another,
+  higher-level library implemented on top of VMA.
+- Allocations for imported/exported external memory. They tend to require an
+  explicit memory type index and a dedicated allocation anyway, so they don't
+  interact with the main features of this library. Such special purpose allocations
+  should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
+- Sub-allocation of parts of one large buffer. Although recommended as a good practice,
+  it is the user's responsibility to implement such logic on top of VMA.
+- Recreation of buffers and images. Although the library has functions for
+  buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
+  recreate these objects yourself after defragmentation. That's because the big
+  structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in the
+  #VmaAllocation object.
+- Handling CPU memory allocation failures. When dynamically creating small C++
+  objects in CPU memory (not Vulkan memory), allocation failures are not checked
+  and handled gracefully, because that would complicate code significantly and
+  is usually not needed in desktop PC applications anyway.
+  Success of an allocation is just checked with an assert.
+- Code free of any compiler warnings. Maintaining the library to compile and
+  work correctly on so many different platforms is hard enough. Being free of
+  any warnings, on any version of any compiler, is simply not feasible.
+- This is a C++ library with a C interface.
+  Bindings or ports to any other programming languages are welcome as external projects and
+  are not going to be included in this repository.
+ +*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +Define this macro to 0/1 to disable/enable support for recording functionality, +available through VmaAllocatorCreateInfo::pRecordSettings. +*/ +#ifndef VMA_RECORDING_ENABLED + #define VMA_RECORDING_ENABLED 0 +#endif + +#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION) + #define NOMINMAX // For windows.h +#endif + +#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS + extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + extern PFN_vkAllocateMemory vkAllocateMemory; + extern PFN_vkFreeMemory vkFreeMemory; + extern PFN_vkMapMemory vkMapMemory; + extern PFN_vkUnmapMemory vkUnmapMemory; + extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + extern PFN_vkBindBufferMemory vkBindBufferMemory; + extern PFN_vkBindImageMemory vkBindImageMemory; + extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; + extern PFN_vkCreateBuffer vkCreateBuffer; + extern PFN_vkDestroyBuffer vkDestroyBuffer; + extern PFN_vkCreateImage vkCreateImage; + extern PFN_vkDestroyImage vkDestroyImage; + extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + #if VMA_VULKAN_VERSION >= 1001000 + extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; + extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; + extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; + extern PFN_vkBindImageMemory2 vkBindImageMemory2; + extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; + #endif // #if VMA_VULKAN_VERSION >= 1001000 +#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES + +#ifndef VULKAN_H_ + #include "../vulkan/vulkan.h" +#endif + +// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC, +// where AAA = major, BBB = minor, CCC = patch. +// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion. +#if !defined(VMA_VULKAN_VERSION) + #if defined(VK_VERSION_1_2) + #define VMA_VULKAN_VERSION 1002000 + #elif defined(VK_VERSION_1_1) + #define VMA_VULKAN_VERSION 1001000 + #else + #define VMA_VULKAN_VERSION 1000000 + #endif +#endif + +#if !defined(VMA_DEDICATED_ALLOCATION) + #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation + #define VMA_DEDICATED_ALLOCATION 1 + #else + #define VMA_DEDICATED_ALLOCATION 0 + #endif +#endif + +#if !defined(VMA_BIND_MEMORY2) + #if VK_KHR_bind_memory2 + #define VMA_BIND_MEMORY2 1 + #else + #define VMA_BIND_MEMORY2 0 + #endif +#endif + +#if !defined(VMA_MEMORY_BUDGET) + #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. 
+#if !defined(VMA_BUFFER_DEVICE_ADDRESS) + #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 + #define VMA_BUFFER_DEVICE_ADDRESS 1 + #else + #define VMA_BUFFER_DEVICE_ADDRESS 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporting the functions when compiling VMA as a separate library. Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE +#endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST +#endif + +// Define this macro to decorate pointers with an attribute specifying the +// length of the array they point to if they are not null. +// +// The length may be one of +// - The name of another parameter in the argument list where the pointer is declared +// - The name of another member in the struct where the pointer is declared +// - The name of a member of a struct type, meaning the value of that member in +// the context of the call. For example +// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), +// this means the number of memory heaps available in the device associated +// with the VmaAllocator being dealt with. +#ifndef VMA_LEN_IF_NOT_NULL + #define VMA_LEN_IF_NOT_NULL(len) +#endif + +// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nullable +#ifndef VMA_NULLABLE + #ifdef __clang__ + #define VMA_NULLABLE _Nullable + #else + #define VMA_NULLABLE + #endif +#endif + +// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull +#ifndef VMA_NOT_NULL + #ifdef __clang__ + #define VMA_NOT_NULL _Nonnull + #else + #define VMA_NOT_NULL + #endif +#endif + +// If non-dispatchable handles are represented as pointers then we can give +// then nullability annotations +#ifndef VMA_NOT_NULL_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL + #else + #define VMA_NOT_NULL_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_NULLABLE_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE + #else + #define VMA_NULLABLE_NON_DISPATCHABLE + #endif +#endif + +/** \struct VmaAllocator +\brief Represents main object of this library initialized. + +Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. +Call function vmaDestroyAllocator() to destroy it. + +It is recommended to create just one object of this type per `VkDevice` object, +right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. +*/ +VK_DEFINE_HANDLE(VmaAllocator) + +/// Callback function called after successful vkAllocateMemory. +typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); +/// Callback function called before vkFreeMemory. 
+typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + +Provided for informative purpose, e.g. to gather statistics about number of +allocations or total amount of memory allocated in Vulkan. + +Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. +*/ +typedef struct VmaDeviceMemoryCallbacks { + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void* VMA_NULLABLE pUserData; +} VmaDeviceMemoryCallbacks; + +/// Flags for created #VmaAllocator. +typedef enum VmaAllocatorCreateFlagBits { + /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. + + Using this flag may increase performance because internal mutexes are not used. + */ + VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, + /** \brief Enables usage of VK_KHR_dedicated_allocation extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + Using this extenion will automatically allocate dedicated blocks of memory for + some buffers and images instead of suballocating place for them out of bigger + memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT + flag) when it is recommended by the driver. It may improve performance on some + GPUs. + + You may set this flag only if you found out that following device extensions are + supported, you enabled them while creating Vulkan device passed as + VmaAllocatorCreateInfo::device, and you want them to be used internally by this + library: + + - VK_KHR_get_memory_requirements2 (device extension) + - VK_KHR_dedicated_allocation (device extension) + + When this flag is set, you can experience following warnings reported by Vulkan + validation layer. You can ignore them. + + > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. + */ + VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, + /** + Enables usage of VK_KHR_bind_memory2 extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. + + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. 
+ + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. + + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. + + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + + You may set this flag only if you: + + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. + The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. + + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocatorCreateFlagBits; +typedef VkFlags VmaAllocatorCreateFlags; + +/** \brief Pointers to some Vulkan functions - a subset used by the library. + +Used in VmaAllocatorCreateInfo::pVulkanFunctions. 
+*/ +typedef struct VmaVulkanFunctions { + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; +#endif +} VmaVulkanFunctions; + +/// Flags to be used in VmaRecordSettings::flags. +typedef enum VmaRecordFlagBits { + /** \brief Enables flush after recording every function call. + + Enable it if you expect your application to crash, which may leave recording file truncated. + It may degrade performance though. + */ + VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001, + + VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaRecordFlagBits; +typedef VkFlags VmaRecordFlags; + +/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings. +typedef struct VmaRecordSettings +{ + /// Flags for recording. Use #VmaRecordFlagBits enum. + VmaRecordFlags flags; + /** \brief Path to the file that should be written by the recording. + + Suggested extension: "csv". + If the file already exists, it will be overwritten. + It will be opened for the whole time #VmaAllocator object is alive. + If opening this file fails, creation of the whole allocator object fails. + */ + const char* VMA_NOT_NULL pFilePath; +} VmaRecordSettings; + +/// Description of a Allocator to be created. +typedef struct VmaAllocatorCreateInfo +{ + /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. + VmaAllocatorCreateFlags flags; + /// Vulkan physical device. + /** It must be valid throughout whole lifetime of created allocator. */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /// Vulkan device. + /** It must be valid throughout whole lifetime of created allocator. */ + VkDevice VMA_NOT_NULL device; + /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. + /** Set to 0 to use default, which is currently 256 MiB. */ + VkDeviceSize preferredLargeHeapBlockSize; + /// Custom CPU memory allocation callbacks. Optional. + /** Optional, can be null. 
When specified, will also be used for all CPU-side memory allocations. */ + const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; + /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. + /** Optional, can be null. */ + const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks; + /** \brief Maximum number of additional frames that are in use at the same time as current frame. + + This value is used only when you make allocations with + VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become + lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. + + For example, if you double-buffer your command buffers, so resources used for + rendering in previous frame may still be in use by the GPU at the moment you + allocate resources needed for the current frame, set this value to 1. + + If you want to allow any allocations other than used in the current frame to + become lost, set this value to 0. + */ + uint32_t frameInUseCount; + /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. + + If not NULL, it must be a pointer to an array of + `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on + maximum number of bytes that can be allocated out of particular Vulkan memory + heap. + + Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that + heap. This is also the default in case of `pHeapSizeLimit` = NULL. + + If there is a limit defined for a heap: + + - If user tries to allocate more memory from that heap using this allocator, + the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the + value of this limit will be reported instead when using vmaGetMemoryProperties(). + + Warning! Using this feature may not be equivalent to installing a GPU with + smaller amount of memory, because graphics driver doesn't necessary fail new + allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is + exceeded. It may return success and just silently migrate some device memory + blocks to system RAM. This driver behavior can also be controlled using + VK_AMD_memory_overallocation_behavior extension. + */ + const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; + + /** \brief Pointers to Vulkan functions. Can be null. + + For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). + */ + const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; + /** \brief Parameters for recording of VMA calls. Can be null. + + If not null, it enables recording of calls to VMA functions to a file. + If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro, + creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + const VmaRecordSettings* VMA_NULLABLE pRecordSettings; + /** \brief Handle to Vulkan instance object. + + Starting from version 3.0.0 this member is no longer optional, it must be set! + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Optional. The highest version of Vulkan that the application is designed to use. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. 
+ It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. + Only versions 1.0, 1.1, 1.2 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + */ + uint32_t vulkanApiVersion; +} VmaAllocatorCreateInfo; + +/// Creates Allocator object. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator); + +/// Destroys allocator object. +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator VMA_NULLABLE allocator); + +/** \brief Information about existing #VmaAllocator object. +*/ +typedef struct VmaAllocatorInfo +{ + /** \brief Handle to Vulkan instance object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; +} VmaAllocatorInfo; + +/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. + +It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to +`VkPhysicalDevice`, `VkDevice` etc. every time using this function. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); + +/** +PhysicalDeviceProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties); + +/** +PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); + +/** +\brief Given Memory Type Index, returns Property Flags of this memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + +/** \brief Sets index of the current frame. + +This function must be used if you make allocations with +#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and +#VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator +when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot +become lost in the current frame. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t frameIndex); + +/** \brief Calculated statistics of memory usage in entire allocator. +*/ +typedef struct VmaStatInfo +{ + /// Number of `VkDeviceMemory` Vulkan memory blocks allocated. 
+ uint32_t blockCount; + /// Number of #VmaAllocation allocation objects allocated. + uint32_t allocationCount; + /// Number of free ranges of memory between allocations. + uint32_t unusedRangeCount; + /// Total number of bytes occupied by all allocations. + VkDeviceSize usedBytes; + /// Total number of bytes occupied by unused ranges. + VkDeviceSize unusedBytes; + VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax; + VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax; +} VmaStatInfo; + +/// General statistics from current state of Allocator. +typedef struct VmaStats +{ + VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]; + VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaStatInfo total; +} VmaStats; + +/** \brief Retrieves statistics from current state of the Allocator. + +This function is called "calculate" not "get" because it has to traverse all +internal data structures, so it may be quite slow. For faster but more brief statistics +suitable to be called every frame or every allocation, use vmaGetBudget(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( + VmaAllocator VMA_NOT_NULL allocator, + VmaStats* VMA_NOT_NULL pStats); + +/** \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap. +*/ +typedef struct VmaBudget +{ + /** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes. + */ + VkDeviceSize blockBytes; + + /** \brief Sum size of all allocations created in particular heap, in bytes. + + Usually less or equal than `blockBytes`. + Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused - + available for new allocations or wasted due to fragmentation. + + It might be greater than `blockBytes` if there are some allocations in lost state, as they account + to this value as well. + */ + VkDeviceSize allocationBytes; + + /** \brief Estimated current memory usage of the program, in bytes. + + Fetched from system using `VK_EXT_memory_budget` extension if enabled. + + It might be different than `blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + + /** \brief Estimated amount of memory available to the program, in bytes. + + Fetched from system using `VK_EXT_memory_budget` extension if enabled. + + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, like other programs also consuming system resources. + Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; +} VmaBudget; + +/** \brief Retrieves information about current memory budget for all memory heaps. + +\param[out] pBudget Must point to array with number of elements at least equal to number of memory heaps in physical device used. + +This function is called "get" not "calculate" because it is very fast, suitable to be called +every frame or every allocation. For more detailed statistics use vmaCalculateStats(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. 
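+
+A minimal usage sketch (assuming `allocator` is an existing #VmaAllocator; sizing the
+array with `VK_MAX_MEMORY_HEAPS` is always sufficient):
+
+\code
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetBudget(allocator, budgets);
+// budgets[heapIndex].usage and budgets[heapIndex].budget now describe heap heapIndex.
+\endcode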
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget( + VmaAllocator VMA_NOT_NULL allocator, + VmaBudget* VMA_NOT_NULL pBudget); + +#ifndef VMA_STATS_STRING_ENABLED +#define VMA_STATS_STRING_ENABLED 1 +#endif + +#if VMA_STATS_STRING_ENABLED + +/// Builds and returns statistics as string in JSON format. +/** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE pStatsString); + +#endif // #if VMA_STATS_STRING_ENABLED + +/** \struct VmaPool +\brief Represents custom memory pool + +Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. +Call function vmaDestroyPool() to destroy it. + +For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). +*/ +VK_DEFINE_HANDLE(VmaPool) + +typedef enum VmaMemoryUsage +{ + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** Memory will be used on device only, so fast access from the device is preferred. + It usually means device-local GPU (video) memory. + No need to be mappable on host. + It is roughly equivalent of `D3D12_HEAP_TYPE_DEFAULT`. + + Usage: + + - Resources written and read by device, e.g. images used as attachments. + - Resources transferred from host once (immutable) or infrequently and read by + device multiple times, e.g. textures to be sampled, vertex buffers, uniform + (constant) buffers, and majority of other types of resources used on GPU. + + Allocation may still end up in `HOST_VISIBLE` memory on some implementations. + In such case, you are free to map it. + You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type. + */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** Memory will be mappable on host. + It usually means CPU (system) memory. + Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`. + CPU access is typically uncached. Writes may be write-combined. + Resources created in this pool may still be accessible to the device, but access to them can be slow. + It is roughly equivalent of `D3D12_HEAP_TYPE_UPLOAD`. + + Usage: Staging copy of resources used as transfer source. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU. + CPU access is typically uncached. Writes may be write-combined. + + Usage: Resources written frequently by host (dynamic), read by device. E.g. textures (with LINEAR layout), vertex buffers, uniform buffers updated every frame or every draw call. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached. + It is roughly equivalent of `D3D12_HEAP_TYPE_READBACK`. + + Usage: + + - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping. + - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection. + */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`. 
+ + Usage: Staging copy of resources moved from GPU memory to CPU memory as part + of custom paging/residency mechanism, to be moved back to GPU memory when needed. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF +} VmaMemoryUsage; + +/// Flags to be passed as VmaAllocationCreateInfo::flags. +typedef enum VmaAllocationCreateFlagBits { + /** \brief Set this flag if the allocation should have its own memory block. + + Use it for special, big resources, like fullscreen images used as attachments. + + You should not use this flag if VmaAllocationCreateInfo::pool is not null. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. + + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + + If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). + + You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. + */ + VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, + /** Allocation created with this flag can become lost as a result of another + allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you + must check it before use. + + To check if allocation is not lost, call vmaGetAllocationInfo() and check if + VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`. + + For details about supporting lost allocations, see Lost Allocations + chapter of User Guide on Main Page. + + You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT. + */ + VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008, + /** While creating allocation using this flag, other allocations that were + created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost. + + For details about supporting lost allocations, see Lost Allocations + chapter of User Guide on Main Page. + */ + VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010, + /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + null-terminated string. 
Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pUserData`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). + */ + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, + /** Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, + /** Create both buffer/image and allocation, but don't bind them together. + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + */ + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, + + /** Allocation strategy that chooses smallest possible free range for the + allocation. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000, + /** Allocation strategy that chooses biggest possible free range for the + allocation. + */ + VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000, + /** Allocation strategy that chooses first suitable free range for the + allocation. + + "First" doesn't necessarily means the one with smallest offset in memory, + but rather the one that is easiest and fastest to find. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000, + + /** Allocation strategy that tries to minimize memory usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, + /** Allocation strategy that tries to minimize allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT, + /** Allocation strategy that tries to minimize memory fragmentation. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT, + + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocationCreateFlagBits; +typedef VkFlags VmaAllocationCreateFlags; + +typedef struct VmaAllocationCreateInfo +{ + /// Use #VmaAllocationCreateFlagBits enum. + VmaAllocationCreateFlags flags; + /** \brief Intended usage of memory. + + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ + VmaMemoryUsage usage; + /** \brief Flags that must be set in a Memory Type chosen for an allocation. + + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ + VkMemoryPropertyFlags requiredFlags; + /** \brief Flags that preferably should be set in a memory type chosen for an allocation. + + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ + VkMemoryPropertyFlags preferredFlags; + /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. 
+ + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ + uint32_t memoryTypeBits; + /** \brief Pool that this allocation should be created in. + + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). + + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void* VMA_NULLABLE pUserData; +} VmaAllocationCreateInfo; + +/** +\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. + +This algorithm tries to find a memory type that: + +- Is allowed by memoryTypeBits. +- Contains all the flags from pAllocationCreateInfo->requiredFlags. +- Matches intended usage. +- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. + +\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result +from this function or any other allocating function probably means that your +device doesn't support any memory type with requested features for the specific +type of resource you want to use it for. Please check parameters of your +resource, like image layout (OPTIMAL versus LINEAR) or mip level count. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy buffer that never has memory bound. +It is just a convenience function, equivalent to calling: + +- `vkCreateBuffer` +- `vkGetBufferMemoryRequirements` +- `vmaFindMemoryTypeIndex` +- `vkDestroyBuffer` +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy image that never has memory bound. +It is just a convenience function, equivalent to calling: + +- `vkCreateImage` +- `vkGetImageMemoryRequirements` +- `vmaFindMemoryTypeIndex` +- `vkDestroyImage` +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/// Flags to be passed as VmaPoolCreateInfo::flags. 
+typedef enum VmaPoolCreateFlagBits { + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. + + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, + + /** \brief Enables alternative, linear allocation algorithm in this pool. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. For details, see documentation chapter + \ref linear_algorithm. + + When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default). + + For more details, see [Linear allocation algorithm](@ref linear_algorithm). + */ + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, + + /** \brief Enables alternative, buddy allocation algorithm in this pool. + + It operates on a tree of blocks, each having size that is a power of two and + a half of its parent's size. Comparing to default algorithm, this one provides + faster allocation and deallocation and decreased external fragmentation, + at the expense of more memory wasted (internal fragmentation). + + For more details, see [Buddy allocation algorithm](@ref buddy_algorithm). + */ + VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008, + + /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_POOL_CREATE_ALGORITHM_MASK = + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | + VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT, + + VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaPoolCreateFlagBits; +typedef VkFlags VmaPoolCreateFlags; + +/** \brief Describes parameter of created #VmaPool. +*/ +typedef struct VmaPoolCreateInfo { + /** \brief Vulkan memory type index to allocate this pool from. + */ + uint32_t memoryTypeIndex; + /** \brief Use combination of #VmaPoolCreateFlagBits. + */ + VmaPoolCreateFlags flags; + /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. + + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + */ + VkDeviceSize blockSize; + /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. + + Set to 0 to have no preallocated blocks and allow the pool be completely empty. 
+ */ + size_t minBlockCount; + /** \brief Maximum number of blocks that can be allocated in this pool. Optional. + + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. + */ + size_t maxBlockCount; + /** \brief Maximum number of additional frames that are in use at the same time as current frame. + + This value is used only when you make allocations with + #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become + lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount. + + For example, if you double-buffer your command buffers, so resources used for + rendering in previous frame may still be in use by the GPU at the moment you + allocate resources needed for the current frame, set this value to 1. + + If you want to allow any allocations other than used in the current frame to + become lost, set this value to 0. + */ + uint32_t frameInUseCount; +} VmaPoolCreateInfo; + +/** \brief Describes parameter of existing #VmaPool. +*/ +typedef struct VmaPoolStats { + /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes. + */ + VkDeviceSize size; + /** \brief Total number of bytes in the pool not used by any #VmaAllocation. + */ + VkDeviceSize unusedSize; + /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost. + */ + size_t allocationCount; + /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation. + */ + size_t unusedRangeCount; + /** \brief Size of the largest continuous free memory region available for new allocation. + + Making a new allocation of that size is not guaranteed to succeed because of + possible additional margin required to respect alignment and buffer/image + granularity. + */ + VkDeviceSize unusedRangeSizeMax; + /** \brief Number of `VkDeviceMemory` blocks allocated for this pool. + */ + size_t blockCount; +} VmaPoolStats; + +/** \brief Allocates Vulkan device memory and creates #VmaPool object. + +@param allocator Allocator object. +@param pCreateInfo Parameters of pool to create. +@param[out] pPool Handle to created pool. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool); + +/** \brief Destroys #VmaPool object and frees Vulkan device memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); + +/** \brief Retrieves statistics of existing #VmaPool object. + +@param allocator Allocator object. +@param pool Pool object. +@param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaPoolStats* VMA_NOT_NULL pPoolStats); + +/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now. + +@param allocator Allocator object. +@param pool Pool. +@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information. 
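+
+A minimal sketch of a once-per-frame call (a non-authoritative example; `allocator` and
+`pool` are assumed to come from your own initialization code):
+
+\code
+size_t lostAllocationCount = 0;
+vmaMakePoolAllocationsLost(allocator, pool, &lostAllocationCount);
+\endcode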
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + size_t* VMA_NULLABLE pLostAllocationCount); + +/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE * VMA_NOT_NULL ppName); + +/** \brief Sets name of a custom pool. + +`pName` can be either null or pointer to a null-terminated string with new name for the pool. +Function makes internal copy of the string, so it can be changed or freed immediately after this call. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE pName); + +/** \struct VmaAllocation +\brief Represents single memory allocation. + +It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type +plus unique offset. + +There are multiple ways to create such object. +You need to fill structure VmaAllocationCreateInfo. +For more information see [Choosing memory type](@ref choosing_memory_type). + +Although the library provides convenience functions that create Vulkan buffer or image, +allocate memory for it and bind them together, +binding of the allocation to a buffer or an image is out of scope of the allocation itself. +Allocation object can exist without buffer/image bound, +binding can be done manually by the user, and destruction of it can be done +independently of destruction of the allocation. + +The object also remembers its size and some other information. +To retrieve this information, use function vmaGetAllocationInfo() and inspect +returned structure VmaAllocationInfo. + +Some kinds allocations can be in lost state. +For more information, see [Lost allocations](@ref lost_allocations). +*/ +VK_DEFINE_HANDLE(VmaAllocation) + +/** \brief Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). +*/ +typedef struct VmaAllocationInfo { + /** \brief Memory type index that this allocation was allocated from. + + It never changes. + */ + uint32_t memoryType; + /** \brief Handle to Vulkan memory object. 
+ + Same memory object can be shared by multiple allocations. + + It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost. + + If the allocation is lost, it is equal to `VK_NULL_HANDLE`. + */ + VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; + /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. + + You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function + vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, + not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation + and apply this offset automatically. + + It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost. + */ + VkDeviceSize offset; + /** \brief Size of this allocation, in bytes. + + It never changes, unless allocation is lost. + + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. + + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. + + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after call to vmaDefragment() if this allocation is passed to the function. + */ + void* VMA_NULLABLE pMappedData; + /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). + + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void* VMA_NULLABLE pUserData; +} VmaAllocationInfo; + +/** \brief General purpose memory allocation. + +@param[out] pAllocation Handle to allocated memory. +@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), +vmaCreateBuffer(), vmaCreateImage() instead whenever possible. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief General purpose memory allocation for multiple allocation objects at once. + +@param allocator Allocator object. +@param pVkMemoryRequirements Memory requirements for each allocation. +@param pCreateInfo Creation parameters for each alloction. +@param allocationCount Number of allocations to make. +@param[out] pAllocations Pointer to array that will be filled with handles to created allocations. +@param[out] pAllocationInfo Optional. 
Pointer to array that will be filled with parameters of created allocations. + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. +It is just a general purpose allocation function able to make multiple allocations at once. +It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + +All allocations are made using same parameters. All of them are created out of the same memory pool and type. +If any allocation fails, all allocations already made within this function call are also freed, so that when +returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, + size_t allocationCount, + VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, + VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); + +/** +@param[out] pAllocation Handle to allocated memory. +@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +You should free the memory using vmaFreeMemory(). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/// Function similar to vmaAllocateMemoryForBuffer(). +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). + +Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VmaAllocation VMA_NULLABLE allocation); + +/** \brief Frees memory and destroys multiple allocations. + +Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. +It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), +vmaAllocateMemoryPages() and other functions. +It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. + +Allocations in `pAllocations` array can come from any memory pools and types. +Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + size_t allocationCount, + const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations); + +/** \brief Deprecated. 
+
+\deprecated
+In version 2.2.0 it used to try to change allocation's size without moving or reallocating it.
+In the current version it returns `VK_SUCCESS` only if `newSize` equals current allocation's size.
+Otherwise returns `VK_ERROR_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize newSize);
+
+/** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+
+This function also atomically "touches" allocation - marks it as used in current frame,
+just like vmaTouchAllocation().
+If the allocation is in lost state, `pAllocationInfo->deviceMemory == VK_NULL_HANDLE`.
+
+Although this function uses atomics and doesn't lock any mutex, so it should be quite efficient,
+you can still avoid calling it too often:
+
+- You can retrieve same VmaAllocationInfo structure while creating your resource, from function
+  vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
+  (e.g. due to defragmentation or allocation becoming lost).
+- If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
+
+If the allocation has been created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function returns `VK_TRUE` if it's not in lost state, so it can still be used.
+It then also atomically "touches" the allocation - marks it as used in current frame,
+so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
+
+If the allocation is in lost state, the function returns `VK_FALSE`.
+Memory of such allocation, as well as buffer or image bound to it, should not be used.
+Lost allocation and the buffer/image still need to be destroyed.
+
+If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function always returns `VK_TRUE`.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
+pUserData must be either null, or pointer to a null-terminated string. The function
+makes local copy of the string and sets it as allocation's `pUserData`. String
+passed as pUserData doesn't need to be valid for whole lifetime of the allocation -
+you can free it after this call. String previously pointed by allocation's
+pUserData is freed from memory.
+
+If the flag was not used, the value of pointer `pUserData` is just copied to
+allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Creates new allocation that is in lost state from the beginning.
+
+It can be useful if you need a dummy, non-null allocation.
+
+You still need to destroy created object using vmaFreeMemory().
+
+Returned allocation is not tied to any specific memory pool or memory type and
+not bound to any image or buffer. It has size = 0. It cannot be turned into
+a real, non-empty allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by given allocation to make it accessible to CPU code.
+On success, `*ppData` contains a pointer to the first byte of this memory.
+If the allocation is part of a bigger `VkDeviceMemory` block, the pointer is
+correctly offset to the beginning of the region assigned to this particular
+allocation.
+
+Mapping is internally reference-counted and synchronized, so although the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on first
+mapping and unmapped on last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on an allocation created with the
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() the same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function always fails when called for an allocation that was created with the
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
+mapped.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE * VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
+Unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
+Map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
+
+@param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations.
+  `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);
+
+/** \struct VmaDefragmentationContext
+\brief Opaque object that represents a started defragmentation process.
+
+Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it.
+Call function vmaDefragmentationEnd() to destroy it.
+*/
+VK_DEFINE_HANDLE(VmaDefragmentationContext)
+
+/// Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
+typedef enum VmaDefragmentationFlagBits {
+    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+typedef VkFlags VmaDefragmentationFlags;
+
+/** \brief Parameters for defragmentation.
+
+To be used with function vmaDefragmentationBegin().
+*/
+typedef struct VmaDefragmentationInfo2 {
+    /** \brief Reserved for future use. Should be 0.
+    */
+    VmaDefragmentationFlags flags;
+    /** \brief Number of allocations in `pAllocations` array.
+    */
+    uint32_t allocationCount;
+    /** \brief Pointer to array of allocations that can be defragmented.
+
+    The array should have `allocationCount` elements.
+    The array should not contain nulls.
+    Elements in the array should be unique - same allocation cannot occur twice.
+    It is safe to pass allocations that are in the lost state - they are ignored.
+    All allocations not present in this array are considered non-moveable during this defragmentation.
+    */
+    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
+    /** \brief Optional, output. Pointer to array that will be filled with information about whether the allocation at a certain index has been changed during defragmentation.
+
+    The array should have `allocationCount` elements.
+    You can pass null if you are not interested in this information.
+    */
+    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
+    /** \brief Number of pools in `pPools` array.
+    */
+    uint32_t poolCount;
+    /** \brief Either null or pointer to array of pools to be defragmented.
+
+    All the allocations in the specified pools can be moved during defragmentation
+    and there is no way to check if they were really moved as in `pAllocationsChanged`,
+    so you must query all the allocations in all these pools for new `VkDeviceMemory`
+    and offset using vmaGetAllocationInfo() if you might need to recreate buffers
+    and images bound to them.
+
+    The array should have `poolCount` elements.
+    The array should not contain nulls.
+    Elements in the array should be unique - same pool cannot occur twice.
+
+    Using this array is equivalent to specifying all allocations from the pools in `pAllocations`.
+    It might be more efficient.
+    */
+    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+    `VK_WHOLE_SIZE` means no limit.
+    */
+    VkDeviceSize maxCpuBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+    `UINT32_MAX` means no limit.
+    */
+    uint32_t maxCpuAllocationsToMove;
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`.
+
+    `VK_WHOLE_SIZE` means no limit.
+    */
+    VkDeviceSize maxGpuBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`.
+
+    `UINT32_MAX` means no limit.
+    */
+    uint32_t maxGpuAllocationsToMove;
+    /** \brief Optional. Command buffer where GPU copy commands will be posted.
+
+    If not null, it must be a valid command buffer handle that supports Transfer queue type.
+    It must be in the recording state and outside of a render pass instance.
+    You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd().
+
+    Passing null means that only CPU defragmentation will be performed.
+    */
+    VkCommandBuffer VMA_NULLABLE commandBuffer;
+} VmaDefragmentationInfo2;
+
+typedef struct VmaDefragmentationPassMoveInfo {
+    VmaAllocation VMA_NOT_NULL allocation;
+    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
+    VkDeviceSize offset;
+} VmaDefragmentationPassMoveInfo;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassInfo {
+    uint32_t moveCount;
+    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+} VmaDefragmentationPassInfo;
+
+/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
+*/ +typedef struct VmaDefragmentationInfo { + /** \brief Maximum total numbers of bytes that can be copied while moving allocations to different places. + + Default is `VK_WHOLE_SIZE`, which means no limit. + */ + VkDeviceSize maxBytesToMove; + /** \brief Maximum number of allocations that can be moved to different place. + + Default is `UINT32_MAX`, which means no limit. + */ + uint32_t maxAllocationsToMove; +} VmaDefragmentationInfo; + +/** \brief Statistics returned by function vmaDefragment(). */ +typedef struct VmaDefragmentationStats { + /// Total number of bytes that have been copied while moving allocations to different places. + VkDeviceSize bytesMoved; + /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. + VkDeviceSize bytesFreed; + /// Number of allocations that have been moved to different places. + uint32_t allocationsMoved; + /// Number of empty `VkDeviceMemory` objects that have been released to the system. + uint32_t deviceMemoryBlocksFreed; +} VmaDefragmentationStats; + +/** \brief Begins defragmentation process. + +@param allocator Allocator object. +@param pInfo Structure filled with parameters of defragmentation. +@param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information. +@param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation. +@return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error. + +Use this function instead of old, deprecated vmaDefragment(). + +Warning! Between the call to vmaDefragmentationBegin() and vmaDefragmentationEnd(): + +- You should not use any of allocations passed as `pInfo->pAllocations` or + any allocations that belong to pools passed as `pInfo->pPools`, + including calling vmaGetAllocationInfo(), vmaTouchAllocation(), or access + their data. +- Some mutexes protecting internal data structures may be locked, so trying to + make or free any allocations, bind buffers or images, map memory, or launch + another simultaneous defragmentation in between may cause stall (when done on + another thread) or deadlock (when done on the same thread), unless you are + 100% sure that defragmented allocations are in different pools. +- Information returned via `pStats` and `pInfo->pAllocationsChanged` are undefined. + They become valid after call to vmaDefragmentationEnd(). +- If `pInfo->commandBuffer` is not null, you must submit that command buffer + and make sure it finished execution before calling vmaDefragmentationEnd(). + +For more information and important limitations regarding defragmentation, see documentation chapter: +[Defragmentation](@ref defragmentation). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( + VmaAllocator VMA_NOT_NULL allocator, + const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo, + VmaDefragmentationStats* VMA_NULLABLE pStats, + VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext); + +/** \brief Ends defragmentation process. + +Use this function to finish defragmentation started by vmaDefragmentationBegin(). +It is safe to pass `context == null`. The function then does nothing. 
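+
+A minimal CPU-only sketch of the begin/end pair (a non-authoritative example; `allocator`,
+`allocations` and `allocationCount` are assumed to come from your own code, and error
+handling is omitted):
+
+\code
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocationCount;
+defragInfo.pAllocations = allocations;
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
+
+VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+// If any allocations were moved, buffers/images bound to them must be recreated by the user.
+vmaDefragmentationEnd(allocator, defragCtx);
+\endcode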
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NULLABLE context); + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NULLABLE context, + VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo +); +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NULLABLE context +); + +/** \brief Deprecated. Compacts memory by moving allocations. + +@param pAllocations Array of allocations that can be moved during this compation. +@param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays. +@param[out] pAllocationsChanged Array of boolean values that will indicate whether matching allocation in pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information. +@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values. +@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information. +@return `VK_SUCCESS` if completed, negative error code in case of error. + +\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead. + +This function works by moving allocations to different places (different +`VkDeviceMemory` objects and/or different offsets) in order to optimize memory +usage. Only allocations that are in `pAllocations` array can be moved. All other +allocations are considered nonmovable in this call. Basic rules: + +- Only allocations made in memory types that have + `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` + flags can be compacted. You may pass other allocations but it makes no sense - + these will never be moved. +- Custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or + #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations + passed to this function that come from such pools are ignored. +- Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or + created as dedicated allocations for any other reason are also ignored. +- Both allocations made with or without #VMA_ALLOCATION_CREATE_MAPPED_BIT + flag can be compacted. If not persistently mapped, memory will be mapped + temporarily inside this function if needed. +- You must not pass same #VmaAllocation object multiple times in `pAllocations` array. + +The function also frees empty `VkDeviceMemory` blocks. + +Warning: This function may be time-consuming, so you shouldn't call it too often +(like after every resource creation/destruction). +You can call it on special occasions (like when reloading a game level or +when you just destroyed a lot of objects). Calling it every frame may be OK, but +you should measure that on your platform. + +For more information, see [Defragmentation](@ref defragmentation) chapter. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( + VmaAllocator VMA_NOT_NULL allocator, + const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, + size_t allocationCount, + VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged, + const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo, + VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats); + +/** \brief Binds buffer to allocation. + +Binds specified buffer to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. +If you want to create a buffer, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindBufferMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateBuffer() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); + +/** \brief Binds buffer to allocation with additional parameters. + +@param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0. +@param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindBufferMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const void* VMA_NULLABLE pNext); + +/** \brief Binds image to allocation. + +Binds specified image to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. +If you want to create an image, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindImageMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateImage() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); + +/** \brief Binds image to allocation with additional parameters. + +@param allocationLocalOffset Additional offset to be added while binding, relative to the beginnig of the `allocation`. Normally it should be 0. +@param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. 
+ +This function is similar to vmaBindImageMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const void* VMA_NULLABLE pNext); + +/** +@param[out] pBuffer Buffer that was created. +@param[out] pAllocation Allocation that was created. +@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +This function automatically: + +-# Creates buffer. +-# Allocates appropriate memory for it. +-# Binds the buffer with the memory. + +If any of these operations fail, buffer and allocation are not created, +returned value is negative error code, *pBuffer and *pAllocation are null. + +If the function succeeded, you must destroy both buffer and allocation when you +no longer need them using either convenience function vmaDestroyBuffer() or +separately, using `vkDestroyBuffer()` and vmaFreeMemory(). + +If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, +VK_KHR_dedicated_allocation extension is used internally to query driver whether +it requires or prefers the new buffer to have dedicated allocation. If yes, +and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null +and #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated +allocation for this buffer, just like when using +#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + +\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, +although recommended as a good practice, is out of scope of this library and could be implemented +by the user as a higher-level logic on top of VMA. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Destroys Vulkan buffer and frees allocated memory. + +This is just a convenience function equivalent to: + +\code +vkDestroyBuffer(device, buffer, allocationCallbacks); +vmaFreeMemory(allocator, allocation); +\endcode + +It it safe to pass null as buffer and/or allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, + VmaAllocation VMA_NULLABLE allocation); + +/// Function similar to vmaCreateBuffer(). +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage, + VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief Destroys Vulkan image and frees allocated memory. 
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+    VmaAllocation VMA_NULLABLE allocation);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+
+#if VMA_RECORDING_ENABLED
+    #include <chrono>
+    #if defined(_WIN32)
+        #include <windows.h>
+    #else
+        #include <sstream>
+        #include <thread>
+    #endif
+#endif
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need behavior other than the default, depending on your environment.
+*/
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+    #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+    #if defined(VK_NO_PROTOTYPES)
+        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+    #endif
+#endif
+
+// Define this macro to 1 to make the library use STL containers instead of its own implementation.
+//#define VMA_USE_STL_CONTAINERS 1
+
+/* Set this macro to 1 to make the library include and use STL containers:
+std::pair, std::vector, std::list, std::unordered_map.
+
+Set it to 0 or leave it undefined to make the library use its own implementation of
+the containers.
+*/
+#if VMA_USE_STL_CONTAINERS
+    #define VMA_USE_STL_VECTOR 1
+    #define VMA_USE_STL_UNORDERED_MAP 1
+    #define VMA_USE_STL_LIST 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+    // Compiler conforms to C++17.
+    #if __cplusplus >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
+    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
+    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    #else
+        #define VMA_USE_STL_SHARED_MUTEX 0
+    #endif
+#endif
+
+/*
+THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
+Library has its own container implementation.
+*/
+#if VMA_USE_STL_VECTOR
+    #include <vector>
+#endif
+
+#if VMA_USE_STL_UNORDERED_MAP
+    #include <unordered_map>
+#endif
+
+#if VMA_USE_STL_LIST
+    #include <list>
+#endif
+
+/*
+Following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#include <cassert> // for assert
+#include <algorithm> // for min, max
+#include <mutex>
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+    #define VMA_NULL nullptr
+#endif
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <malloc.h> // for memalign()
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib> // for posix_memalign()
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
+    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+    // MAC_OS_X_VERSION_10_16), even though the function is marked
+    // available for 10.15. That's why the preprocessor checks for 10.16 but
+    // the __builtin_available checks for 10.15.
+    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+    if (__builtin_available(macOS 10.15, iOS 13, *))
+        return aligned_alloc(alignment, size);
+#endif
+#endif
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    void *pointer;
+    if(posix_memalign(&pointer, alignment, size) == 0)
+        return pointer;
+    return VMA_NULL;
+}
+#elif defined(_WIN32)
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    return _aligned_malloc(size, alignment);
+}
+#else
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    return aligned_alloc(alignment, size);
+}
+#endif
+
+#if defined(_WIN32)
+static void vma_aligned_free(void* ptr)
+{
+    _aligned_free(ptr);
+}
+#else
+static void vma_aligned_free(void* ptr)
+{
+    free(ptr);
+}
+#endif
+
+// If your compiler is not compatible with C++11 and definition of
+// aligned_alloc() function is missing, uncommenting the following line may help:
+
+//#include <malloc.h>
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+    #ifdef NDEBUG
+        #define VMA_ASSERT(expr)
+    #else
+        #define VMA_ASSERT(expr) assert(expr)
+    #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make program slow.
+#ifndef VMA_HEAVY_ASSERT
+    #ifdef NDEBUG
+        #define VMA_HEAVY_ASSERT(expr)
+    #else
+        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+    #endif
+#endif
+
+#ifndef VMA_ALIGN_OF
+    #define VMA_ALIGN_OF(type) (__alignof(type))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_MALLOC
+    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_FREE
+    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
+    #if defined(VMA_SYSTEM_FREE)
+        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
+    #else
+        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
+    #endif
+#endif
+
+#ifndef VMA_MIN
+    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
+#endif
+
+#ifndef VMA_MAX
+    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
+#endif
+
+#ifndef VMA_SWAP
+    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+#endif
+
+#ifndef VMA_SORT
+    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+#endif
+
+#ifndef VMA_DEBUG_LOG
+    #define VMA_DEBUG_LOG(format, ...)
+    /*
+    #define VMA_DEBUG_LOG(format, ...) do { \
do { \ + printf(format, __VA_ARGS__); \ + printf("\n"); \ + } while(false) + */ +#endif + +// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. +#if VMA_STATS_STRING_ENABLED + static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num) + { + snprintf(outStr, strLen, "%u", static_cast(num)); + } + static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num) + { + snprintf(outStr, strLen, "%llu", static_cast(num)); + } + static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr) + { + snprintf(outStr, strLen, "%p", ptr); + } +#endif + +#ifndef VMA_MUTEX + class VmaMutex + { + public: + void Lock() { m_Mutex.lock(); } + void Unlock() { m_Mutex.unlock(); } + bool TryLock() { return m_Mutex.try_lock(); } + private: + std::mutex m_Mutex; + }; + #define VMA_MUTEX VmaMutex +#endif + +// Read-write mutex, where "read" is shared access, "write" is exclusive access. +#ifndef VMA_RW_MUTEX + #if VMA_USE_STL_SHARED_MUTEX + // Use std::shared_mutex from C++17. + #include + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.lock_shared(); } + void UnlockRead() { m_Mutex.unlock_shared(); } + bool TryLockRead() { return m_Mutex.try_lock_shared(); } + void LockWrite() { m_Mutex.lock(); } + void UnlockWrite() { m_Mutex.unlock(); } + bool TryLockWrite() { return m_Mutex.try_lock(); } + private: + std::shared_mutex m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600 + // Use SRWLOCK from WinAPI. + // Minimum supported client = Windows Vista, server = Windows Server 2008. + class VmaRWMutex + { + public: + VmaRWMutex() { InitializeSRWLock(&m_Lock); } + void LockRead() { AcquireSRWLockShared(&m_Lock); } + void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } + bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } + void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } + void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } + bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } + private: + SRWLOCK m_Lock; + }; + #define VMA_RW_MUTEX VmaRWMutex + #else + // Less efficient fallback: Use normal mutex. + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.Lock(); } + void UnlockRead() { m_Mutex.Unlock(); } + bool TryLockRead() { return m_Mutex.TryLock(); } + void LockWrite() { m_Mutex.Lock(); } + void UnlockWrite() { m_Mutex.Unlock(); } + bool TryLockWrite() { return m_Mutex.TryLock(); } + private: + VMA_MUTEX m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #endif // #if VMA_USE_STL_SHARED_MUTEX +#endif // #ifndef VMA_RW_MUTEX + +/* +If providing your own implementation, you need to implement a subset of std::atomic. +*/ +#ifndef VMA_ATOMIC_UINT32 + #include + #define VMA_ATOMIC_UINT32 std::atomic +#endif + +#ifndef VMA_ATOMIC_UINT64 + #include + #define VMA_ATOMIC_UINT64 std::atomic +#endif + +#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY + /** + Every allocation will have its own memory block. + Define to 1 for debugging purposes only. + */ + #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) +#endif + +#ifndef VMA_DEBUG_ALIGNMENT + /** + Minimum alignment of all allocations, in bytes. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_ALIGNMENT (1) +#endif + +#ifndef VMA_DEBUG_MARGIN + /** + Minimum margin before and after every allocation, in bytes. + Set nonzero for debugging purposes only. 
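+    Combine with VMA_DEBUG_DETECT_CORRUPTION = 1 (see below) to have these margins
+    filled with a magic value and validated, so out-of-bounds writes are detected.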
+ */ + #define VMA_DEBUG_MARGIN (0) +#endif + +#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) +#endif + +#ifndef VMA_DEBUG_DETECT_CORRUPTION + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin before and after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) +#endif + +#ifndef VMA_DEBUG_GLOBAL_MUTEX + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) +#endif + +#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +#endif + +#ifndef VMA_SMALL_HEAP_MAX_SIZE + /// Maximum size of a memory heap in Vulkan to consider it "small". + #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) +#endif + +#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) +#endif + +#ifndef VMA_CLASS_NO_COPY + #define VMA_CLASS_NO_COPY(className) \ + private: \ + className(const className&) = delete; \ + className& operator=(const className&) = delete; +#endif + +static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX; + +// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. +static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; + +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; + +/******************************************************************************* +END OF CONFIGURATION +*/ + +// # Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. + +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; +static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; + +static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; + +static VkAllocationCallbacks VmaEmptyAllocationCallbacks = { + VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; + +// Returns number of bits set to 1 in (v). +static inline uint32_t VmaCountBitsSet(uint32_t v) +{ + uint32_t c = v - ((v >> 1) & 0x55555555); + c = ((c >> 2) & 0x33333333) + (c & 0x33333333); + c = ((c >> 4) + c) & 0x0F0F0F0F; + c = ((c >> 8) + c) & 0x00FF00FF; + c = ((c >> 16) + c) & 0x0000FFFF; + return c; +} + +/* +Returns true if given number is a power of two. +T must be unsigned integer number or signed integer but always nonnegative. +For 0 returns true. +*/ +template +inline bool VmaIsPow2(T x) +{ + return (x & (x-1)) == 0; +} + +// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. +// Use types like uint32_t, uint64_t as T. 
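+// The bit trick below assumes the alignment is a power of two (checked by VMA_HEAVY_ASSERT):
+// e.g. VmaAlignUp(11, 8) = (11 + 7) & ~7 = 16, and VmaAlignDown(11, 8) = 11 & ~7 = 8.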
+template +static inline T VmaAlignUp(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return (val + alignment - 1) & ~(alignment - 1); +} +// Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8. +// Use types like uint32_t, uint64_t as T. +template +static inline T VmaAlignDown(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return val & ~(alignment - 1); +} + +// Division with mathematical rounding to nearest number. +template +static inline T VmaRoundDiv(T x, T y) +{ + return (x + (y / (T)2)) / y; +} + +// Returns smallest power of 2 greater or equal to v. +static inline uint32_t VmaNextPow2(uint32_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v++; + return v; +} +static inline uint64_t VmaNextPow2(uint64_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v++; + return v; +} + +// Returns largest power of 2 less or equal to v. +static inline uint32_t VmaPrevPow2(uint32_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v = v ^ (v >> 1); + return v; +} +static inline uint64_t VmaPrevPow2(uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v = v ^ (v >> 1); + return v; +} + +static inline bool VmaStrIsEmpty(const char* pStr) +{ + return pStr == VMA_NULL || *pStr == '\0'; +} + +#if VMA_STATS_STRING_ENABLED + +static const char* VmaAlgorithmToStr(uint32_t algorithm) +{ + switch(algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + return "Linear"; + case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: + return "Buddy"; + case 0: + return "Default"; + default: + VMA_ASSERT(0); + return ""; + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +#ifndef VMA_SORT + +template +Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp) +{ + Iterator centerValue = end; --centerValue; + Iterator insertIndex = beg; + for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex) + { + if(cmp(*memTypeIndex, *centerValue)) + { + if(insertIndex != memTypeIndex) + { + VMA_SWAP(*memTypeIndex, *insertIndex); + } + ++insertIndex; + } + } + if(insertIndex != centerValue) + { + VMA_SWAP(*insertIndex, *centerValue); + } + return insertIndex; +} + +template +void VmaQuickSort(Iterator beg, Iterator end, Compare cmp) +{ + if(beg < end) + { + Iterator it = VmaQuickSortPartition(beg, end, cmp); + VmaQuickSort(beg, it, cmp); + VmaQuickSort(it + 1, end, cmp); + } +} + +#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp) + +#endif // #ifndef VMA_SORT + +/* +Returns true if two memory blocks occupy overlapping pages. +ResourceA must be in less memory offset than ResourceB. + +Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" +chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". 
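+
+For example, with pageSize = 0x1000: a resource ending at byte 0x0FFF and a second
+resource starting at offset 0x1000 fall on different pages, so the function returns
+false; if the second resource instead started at offset 0x0FF0 (with the first ending
+at byte 0x0FEF), both would fall on page 0 and the function would return true.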
+*/ +static inline bool VmaBlocksOnSamePage( + VkDeviceSize resourceAOffset, + VkDeviceSize resourceASize, + VkDeviceSize resourceBOffset, + VkDeviceSize pageSize) +{ + VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); + VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; + VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); + VkDeviceSize resourceBStart = resourceBOffset; + VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); + return resourceAEndPage == resourceBStartPage; +} + +enum VmaSuballocationType +{ + VMA_SUBALLOCATION_TYPE_FREE = 0, + VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, + VMA_SUBALLOCATION_TYPE_BUFFER = 2, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4, + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5, + VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +}; + +/* +Returns true if given suballocation types could conflict and must respect +VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer +or linear image and another one is optimal image. If type is unknown, behave +conservatively. +*/ +static inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if(suballocType1 > suballocType2) + { + VMA_SWAP(suballocType1, suballocType2); + } + + switch(suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for(size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for(size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. +*/ +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. 
+} + +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->Lock(); } } + ~VmaMutexLock() + { if(m_pMutex) { m_pMutex->Unlock(); } } +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->LockRead(); } } + ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } } +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. +struct VmaMutexLockWrite +{ + VMA_CLASS_NO_COPY(VmaMutexLockWrite) +public: + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->LockWrite(); } } + ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } } +private: + VMA_RW_MUTEX* m_pMutex; +}; + +#if VMA_DEBUG_GLOBAL_MUTEX + static VMA_MUTEX gDebugGlobalMutex; + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); +#else + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK +#endif + +// Minimum size of a free suballocation to register it in the free suballocation collection. +static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16; + +/* +Performs binary search and returns iterator to first element that is greater or +equal to (key), according to comparison (cmp). + +Cmp should return true if first argument is less than second argument. + +Returned value is the found element, if present in the collection or place where +new element with value (key) should be inserted. +*/ +template +static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp) +{ + size_t down = 0, up = (end - beg); + while(down < up) + { + const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation + if(cmp(*(beg+mid), key)) + { + down = mid + 1; + } + else + { + up = mid; + } + } + return beg + down; +} + +template +IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) +{ + IterT it = VmaBinaryFindFirstNotLess( + beg, end, value, cmp); + if(it == end || + (!cmp(*it, value) && !cmp(value, *it))) + { + return it; + } + return end; +} + +/* +Returns true if all pointers in the array are not-null and unique. +Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. +T must be pointer type, e.g. VmaAllocation, VmaPool. 
+*/ +template +static bool VmaValidatePointerArray(uint32_t count, const T* arr) +{ + for(uint32_t i = 0; i < count; ++i) + { + const T iPtr = arr[i]; + if(iPtr == VMA_NULL) + { + return false; + } + for(uint32_t j = i + 1; j < count; ++j) + { + if(iPtr == arr[j]) + { + return false; + } + } + } + return true; +} + +template +static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) +{ + newStruct->pNext = mainStruct->pNext; + mainStruct->pNext = newStruct; +} + +//////////////////////////////////////////////////////////////////////////////// +// Memory allocation + +static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) +{ + void* result = VMA_NULL; + if((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnAllocation != VMA_NULL)) + { + result = (*pAllocationCallbacks->pfnAllocation)( + pAllocationCallbacks->pUserData, + size, + alignment, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + else + { + result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); + } + VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed."); + return result; +} + +static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) +{ + if((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnFree != VMA_NULL)) + { + (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); + } + else + { + VMA_SYSTEM_ALIGNED_FREE(ptr); + } +} + +template +static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +#define vma_new(allocator, type) new(VmaAllocate(allocator))(type) + +#define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) + +template +static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) +{ + ptr->~T(); + VmaFree(pAllocationCallbacks, ptr); +} + +template +static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + { + ptr[i].~T(); + } + VmaFree(pAllocationCallbacks, ptr); + } +} + +static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) +{ + if(srcStr != VMA_NULL) + { + const size_t len = strlen(srcStr); + char* const result = vma_new_array(allocs, char, len + 1); + memcpy(result, srcStr, len + 1); + return result; + } + else + { + return VMA_NULL; + } +} + +static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) +{ + if(str != VMA_NULL) + { + const size_t len = strlen(str); + vma_delete_array(allocs, str, len + 1); + } +} + +// STL-compatible allocator. 
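+// Illustrative use only (pCallbacks below stands for any VkAllocationCallbacks*):
+//     VmaVector< int, VmaStlAllocator<int> > v(VmaStlAllocator<int>(pCallbacks));
+// Every allocation made through this allocator goes via VmaMalloc/VmaFree above and
+// therefore honors the user-provided VkAllocationCallbacks, also when the allocator
+// is plugged into std::vector/std::list with the VMA_USE_STL_* macros enabled.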
+template +class VmaStlAllocator +{ +public: + const VkAllocationCallbacks* const m_pCallbacks; + typedef T value_type; + + VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { } + template VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) { } + + T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } + void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } + + template + bool operator==(const VmaStlAllocator& rhs) const + { + return m_pCallbacks == rhs.m_pCallbacks; + } + template + bool operator!=(const VmaStlAllocator& rhs) const + { + return m_pCallbacks != rhs.m_pCallbacks; + } + + VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete; +}; + +#if VMA_USE_STL_VECTOR + +#define VmaVector std::vector + +template +static void VmaVectorInsert(std::vector& vec, size_t index, const T& item) +{ + vec.insert(vec.begin() + index, item); +} + +template +static void VmaVectorRemove(std::vector& vec, size_t index) +{ + vec.erase(vec.begin() + index); +} + +#else // #if VMA_USE_STL_VECTOR + +/* Class with interface compatible with subset of std::vector. +T must be POD because constructors and destructors are not called and memcpy is +used for these objects. */ +template +class VmaVector +{ +public: + typedef T value_type; + + VmaVector(const AllocatorT& allocator) : + m_Allocator(allocator), + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) + { + } + + VmaVector(size_t count, const AllocatorT& allocator) : + m_Allocator(allocator), + m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) + { + } + + // This version of the constructor is here for compatibility with pre-C++14 std::vector. + // value is unused. + VmaVector(size_t count, const T& value, const AllocatorT& allocator) + : VmaVector(count, allocator) {} + + VmaVector(const VmaVector& src) : + m_Allocator(src.m_Allocator), + m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) + { + if(m_Count != 0) + { + memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); + } + } + + ~VmaVector() + { + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + } + + VmaVector& operator=(const VmaVector& rhs) + { + if(&rhs != this) + { + resize(rhs.m_Count); + if(m_Count != 0) + { + memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); + } + } + return *this; + } + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_pArray; } + const T* data() const { return m_pArray; } + + T& operator[](size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + return m_pArray[index]; + } + const T& operator[](size_t index) const + { + VMA_HEAVY_ASSERT(index < m_Count); + return m_pArray[index]; + } + + T& front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[0]; + } + const T& front() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[0]; + } + T& back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[m_Count - 1]; + } + const T& back() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[m_Count - 1]; + } + + void reserve(size_t newCapacity, bool freeMemory = false) + { + newCapacity = VMA_MAX(newCapacity, m_Count); + + if((newCapacity < m_Capacity) && !freeMemory) + { + newCapacity = m_Capacity; + } + + if(newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; + if(m_Count != 0) + { + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } + } + + void resize(size_t newCount, bool freeMemory = false) + { + size_t newCapacity = m_Capacity; + if(newCount > m_Capacity) + { + newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); + } + else if(freeMemory) + { + newCapacity = newCount; + } + + if(newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; + const size_t elementsToCopy = VMA_MIN(m_Count, newCount); + if(elementsToCopy != 0) + { + memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } + + m_Count = newCount; + } + + void clear(bool freeMemory = false) + { + resize(0, freeMemory); + } + + void insert(size_t index, const T& src) + { + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + if(index < oldCount) + { + memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); + } + m_pArray[index] = src; + } + + void remove(size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if(index < oldCount - 1) + { + memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); + } + + void push_back(const T& src) + { + const size_t newIndex = size(); + resize(newIndex + 1); + m_pArray[newIndex] = src; + } + + void pop_back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + resize(size() - 1); + } + + void push_front(const T& src) + { + insert(0, src); + } + + void pop_front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + remove(0); + } + + typedef T* iterator; + + iterator begin() { return m_pArray; } + iterator end() { return m_pArray + m_Count; } + +private: + AllocatorT m_Allocator; + T* m_pArray; + size_t m_Count; + size_t m_Capacity; +}; + +template +static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) +{ + vec.insert(index, item); +} + +template +static void VmaVectorRemove(VmaVector& vec, size_t index) +{ + vec.remove(index); +} + +#endif // #if VMA_USE_STL_VECTOR + +template +size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + vector.data(), + vector.data() + vector.size(), + value, + CmpLess()) - vector.data(); + VmaVectorInsert(vector, indexToInsert, value); + return indexToInsert; +} + +template +bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + CmpLess comparator; + typename VectorT::iterator it = VmaBinaryFindFirstNotLess( + vector.begin(), + vector.end(), + value, + comparator); + if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { + size_t indexToRemove = it - vector.begin(); + VmaVectorRemove(vector, indexToRemove); + return true; + } + return false; +} + +//////////////////////////////////////////////////////////////////////////////// +// class VmaSmallVector + +/* +This is a vector (a variable-sized array), optimized for the case when the array is small. + +It contains some number of elements in-place, which allows it to avoid heap allocation +when the actual number of elements is below that threshold. 
This allows normal "small" +cases to be fast without losing generality for large inputs. +*/ + +template +class VmaSmallVector +{ +public: + typedef T value_type; + + VmaSmallVector(const AllocatorT& allocator) : + m_Count(0), + m_DynamicArray(allocator) + { + } + VmaSmallVector(size_t count, const AllocatorT& allocator) : + m_Count(count), + m_DynamicArray(count > N ? count : 0, allocator) + { + } + template + VmaSmallVector(const VmaSmallVector& src) = delete; + template + VmaSmallVector& operator=(const VmaSmallVector& rhs) = delete; + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + + T& operator[](size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + return data()[index]; + } + const T& operator[](size_t index) const + { + VMA_HEAVY_ASSERT(index < m_Count); + return data()[index]; + } + + T& front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[0]; + } + const T& front() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[0]; + } + T& back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[m_Count - 1]; + } + const T& back() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[m_Count - 1]; + } + + void resize(size_t newCount, bool freeMemory = false) + { + if(newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount, freeMemory); + } + else if(newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount, freeMemory); + if(m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if(newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if(newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0, freeMemory); + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; + } + + void clear(bool freeMemory = false) + { + m_DynamicArray.clear(freeMemory); + m_Count = 0; + } + + void insert(size_t index, const T& src) + { + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if(index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; + } + + void remove(size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if(index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
+ T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); + } + + void push_back(const T& src) + { + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; + } + + void pop_back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + resize(size() - 1); + } + + void push_front(const T& src) + { + insert(0, src); + } + + void pop_front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + remove(0); + } + + typedef T* iterator; + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +//////////////////////////////////////////////////////////////////////////////// +// class VmaPoolAllocator + +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. Number of elements that can be allocated is not bounded because +allocator can create multiple blocks. +*/ +template +class VmaPoolAllocator +{ + VMA_CLASS_NO_COPY(VmaPoolAllocator) +public: + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + ~VmaPoolAllocator(); + template T* Alloc(Types... args); + void Free(T* ptr); + +private: + union Item + { + uint32_t NextFreeIndex; + alignas(T) char Value[sizeof(T)]; + }; + + struct ItemBlock + { + Item* pItems; + uint32_t Capacity; + uint32_t FirstFreeIndex; + }; + + const VkAllocationCallbacks* m_pAllocationCallbacks; + const uint32_t m_FirstBlockCapacity; + VmaVector< ItemBlock, VmaStlAllocator > m_ItemBlocks; + + ItemBlock& CreateNewBlock(); +}; + +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) : + m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) +{ + VMA_ASSERT(m_FirstBlockCapacity > 1); +} + +template +VmaPoolAllocator::~VmaPoolAllocator() +{ + for(size_t i = m_ItemBlocks.size(); i--; ) + vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); + m_ItemBlocks.clear(); +} + +template +template T* VmaPoolAllocator::Alloc(Types... args) +{ + for(size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // This block has some free items: Use first one. + if(block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; + block.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; + } + } + + // No block has free item: Create new one and use it. + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; + newBlock.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; +} + +template +void VmaPoolAllocator::Free(T* ptr) +{ + // Search all memory blocks to find ptr. + for(size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + + // Casting to union. + Item* pItemPtr; + memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); + + // Check if pItemPtr is in address range of this block. + if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. 
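+            // Push the freed item back onto the front of this block's singly-linked free list.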
+ const uint32_t index = static_cast(pItemPtr - block.pItems); + pItemPtr->NextFreeIndex = block.FirstFreeIndex; + block.FirstFreeIndex = index; + return; + } + } + VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); +} + +template +typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +{ + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? + m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + + const ItemBlock newBlock = { + vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), + newBlockCapacity, + 0 }; + + m_ItemBlocks.push_back(newBlock); + + // Setup singly-linked list of all free items in this block. + for(uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} + +//////////////////////////////////////////////////////////////////////////////// +// class VmaRawList, VmaList + +#if VMA_USE_STL_LIST + +#define VmaList std::list + +#else // #if VMA_USE_STL_LIST + +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY(VmaRawList) +public: + typedef VmaListItem ItemType; + + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + ~VmaRawList(); + void Clear(); + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + const ItemType* Front() const { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushBack(); + ItemType* PushFront(); + ItemType* PushBack(const T& value); + ItemType* PushFront(const T& value); + void PopBack(); + void PopFront(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. + ItemType* InsertAfter(ItemType* pItem); + + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) : + m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) +{ +} + +template +VmaRawList::~VmaRawList() +{ + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. 
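+    // m_ItemAllocator releases its item blocks wholesale in its own destructor anyway.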
+} + +template +void VmaRawList::Clear() +{ + if(IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while(pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if(pNextItem != VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const 
newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} + +template +class VmaList +{ + VMA_CLASS_NO_COPY(VmaList) +public: + class iterator + { + public: + iterator() : + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { + } + + T& operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + T* operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } + + iterator& operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + iterator& operator--() + { + if(m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; + } + + iterator operator++(int) + { + iterator result = *this; + ++*this; + return result; + } + iterator operator--(int) + { + iterator result = *this; + --*this; + return result; + } + + bool operator==(const iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : + m_pList(pList), + m_pItem(pItem) + { + } + + friend class VmaList; + }; + + class const_iterator + { + public: + const_iterator() : + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { + } + + const_iterator(const iterator& src) : + m_pList(src.m_pList), + m_pItem(src.m_pItem) + { + } + + const T& operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + const T* operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } + + const_iterator& operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + const_iterator& operator--() + { + if(m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; + } + + const_iterator operator++(int) + { + const_iterator result = *this; + ++*this; + return result; + } + const_iterator operator--(int) + { + const_iterator result = *this; + --*this; + return result; + } + + bool operator==(const const_iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const const_iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } + + private: + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : + m_pList(pList), + m_pItem(pItem) + { + } + + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + friend class VmaList; + }; + + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { } + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + void clear() { m_RawList.Clear(); } + void 
push_back(const T& value) { m_RawList.PushBack(value); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + +private: + VmaRawList m_RawList; +}; + +#endif // #if VMA_USE_STL_LIST + +//////////////////////////////////////////////////////////////////////////////// +// class VmaMap + +// Unused in this version. +#if 0 + +#if VMA_USE_STL_UNORDERED_MAP + +#define VmaPair std::pair + +#define VMA_MAP_TYPE(KeyT, ValueT) \ + std::unordered_map< KeyT, ValueT, std::hash, std::equal_to, VmaStlAllocator< std::pair > > + +#else // #if VMA_USE_STL_UNORDERED_MAP + +template +struct VmaPair +{ + T1 first; + T2 second; + + VmaPair() : first(), second() { } + VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { } +}; + +/* Class compatible with subset of interface of std::unordered_map. +KeyT, ValueT must be POD because they will be stored in VmaVector. +*/ +template +class VmaMap +{ +public: + typedef VmaPair PairType; + typedef PairType* iterator; + + VmaMap(const VmaStlAllocator& allocator) : m_Vector(allocator) { } + + iterator begin() { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + + void insert(const PairType& pair); + iterator find(const KeyT& key); + void erase(iterator it); + +private: + VmaVector< PairType, VmaStlAllocator > m_Vector; +}; + +#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap + +template +struct VmaPairFirstLess +{ + bool operator()(const VmaPair& lhs, const VmaPair& rhs) const + { + return lhs.first < rhs.first; + } + bool operator()(const VmaPair& lhs, const FirstT& rhsFirst) const + { + return lhs.first < rhsFirst; + } +}; + +template +void VmaMap::insert(const PairType& pair) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + pair, + VmaPairFirstLess()) - m_Vector.data(); + VmaVectorInsert(m_Vector, indexToInsert, pair); +} + +template +VmaPair* VmaMap::find(const KeyT& key) +{ + PairType* it = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + key, + VmaPairFirstLess()); + if((it != m_Vector.end()) && (it->first == key)) + { + return it; + } + else + { + return m_Vector.end(); + } +} + +template +void VmaMap::erase(iterator it) +{ + VmaVectorRemove(m_Vector, it - m_Vector.begin()); +} + +#endif // #if VMA_USE_STL_UNORDERED_MAP + +#endif // #if 0 + +//////////////////////////////////////////////////////////////////////////////// + +class VmaDeviceMemoryBlock; + +enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE }; + +struct VmaAllocation_T +{ +private: + static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80; + + enum FLAGS + { + FLAG_USER_DATA_STRING = 0x01, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + /* + This struct is allocated using VmaPoolAllocator. + */ + + VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) : + m_Alignment{1}, + m_Size{0}, + m_pUserData{VMA_NULL}, + m_LastUseFrameIndex{currentFrameIndex}, + m_MemoryTypeIndex{0}, + m_Type{(uint8_t)ALLOCATION_TYPE_NONE}, + m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN}, + m_MapCount{0}, + m_Flags{userDataString ? 
(uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0} + { +#if VMA_STATS_STRING_ENABLED + m_CreationFrameIndex = currentFrameIndex; + m_BufferImageUsage = 0; +#endif + } + + ~VmaAllocation_T() + { + VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pUserData == VMA_NULL); + } + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VkDeviceSize offset, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped, + bool canBecomeLost) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_Offset = offset; + m_BlockAllocation.m_CanBecomeLost = canBecomeLost; + } + + void InitLost() + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_MemoryTypeIndex = 0; + m_BlockAllocation.m_Block = VMA_NULL; + m_BlockAllocation.m_Offset = 0; + m_BlockAllocation.m_CanBecomeLost = true; + } + + void ChangeBlockAllocation( + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset); + + void ChangeOffset(VkDeviceSize newOffset); + + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + m_MapCount = (pMappedData != VMA_NULL) ? 
MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_pMappedData = pMappedData; + } + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; } + void* GetUserData() const { return m_pUserData; } + void SetUserData(VmaAllocator hAllocator, void* pUserData); + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + return m_BlockAllocation.m_Block; + } + VkDeviceSize GetOffset() const; + VkDeviceMemory GetMemory() const; + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; } + void* GetMappedData() const; + bool CanBecomeLost() const; + + uint32_t GetLastUseFrameIndex() const + { + return m_LastUseFrameIndex.load(); + } + bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired) + { + return m_LastUseFrameIndex.compare_exchange_weak(expected, desired); + } + /* + - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex, + makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true. + - Else, returns false. + + If hAllocation is already lost, assert - you should not call it then. + If hAllocation was not created with CAN_BECOME_LOST_BIT, assert. + */ + bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED); + outInfo.blockCount = 1; + outInfo.allocationCount = 1; + outInfo.unusedRangeCount = 0; + outInfo.usedBytes = m_Size; + outInfo.unusedBytes = 0; + outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size; + outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMax = 0; + } + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; } + uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } + + void InitBufferImageUsage(uint32_t bufferImageUsage) + { + VMA_ASSERT(m_BufferImageUsage == 0); + m_BufferImageUsage = bufferImageUsage; + } + + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + VMA_ATOMIC_UINT32 m_LastUseFrameIndex; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT. + // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS + + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VkDeviceSize m_Offset; + bool m_CanBecomeLost; + }; + + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VkDeviceMemory m_hMemory; + void* m_pMappedData; // Not null means memory is mapped. 
+ }; + + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. + DedicatedAllocation m_DedicatedAllocation; + }; + +#if VMA_STATS_STRING_ENABLED + uint32_t m_CreationFrameIndex; + uint32_t m_BufferImageUsage; // 0 if unknown. +#endif + + void FreeUserDataString(VmaAllocator hAllocator); +}; + +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + VmaAllocation hAllocation; + VmaSuballocationType type; +}; + +// Comparator for offsets. +struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +typedef VmaList< VmaSuballocation, VmaStlAllocator > VmaSuballocationList; + +// Cost of one additional allocation lost, as equivalent in bytes. +static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576; + +enum class VmaAllocationRequestType +{ + Normal, + // Used by "Linear" algorithm. + UpperAddress, + EndOf1st, + EndOf2nd, +}; + +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. + +If canMakeOtherLost was false: +- item points to a FREE suballocation. +- itemsToMakeLostCount is 0. + +If canMakeOtherLost was true: +- item points to first of sequence of suballocations, which are either FREE, + or point to VmaAllocations that can become lost. +- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for + the requested allocation to succeed. +*/ +struct VmaAllocationRequest +{ + VkDeviceSize offset; + VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation. + VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation. + VmaSuballocationList::iterator item; + size_t itemsToMakeLostCount; + void* customData; + VmaAllocationRequestType type; + + VkDeviceSize CalcCost() const + { + return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST; + } +}; + +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ +public: + VmaBlockMetadata(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata() { } + virtual void Init(VkDeviceSize size) { m_Size = size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + VkDeviceSize GetSize() const { return m_Size; } + virtual size_t GetAllocationCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0; + // Shouldn't modify blockCount. + virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. 
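+    // When canMakeOtherLost is true, the request may also name existing allocations
+    // that have to be made lost first (see VmaAllocationRequest above); the caller is
+    // then expected to apply MakeRequestedAllocationsLost() before calling Alloc().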
+ virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. Request must already be checked and valid. + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) = 0; + + // Frees suballocation assigned to given memory region. + virtual void Free(const VmaAllocation allocation) = 0; + virtual void FreeAtOffset(VkDeviceSize offset) = 0; + +protected: + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, + VmaAllocation hAllocation) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Size; + const VkAllocationCallbacks* m_pAllocationCallbacks; +}; + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +class VmaBlockMetadata_Generic : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) +public: + VmaBlockMetadata_Generic(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Generic(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; } + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const; + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData); + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + 
VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation); + virtual void FreeAtOffset(VkDeviceSize offset); + + //////////////////////////////////////////////////////////////////////////////// + // For defragmentation + + bool IsBufferImageGranularityConflictPossible( + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const; + +private: + friend class VmaDefragmentationAlgorithm_Generic; + friend class VmaDefragmentationAlgorithm_Fast; + + uint32_t m_FreeCount; + VkDeviceSize m_SumFreeSize; + VmaSuballocationList m_Suballocations; + // Suballocations that are free and have size greater than certain threshold. + // Sorted by size, ascending. + VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize; + + bool ValidateFreeSuballocationList() const; + + // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. + // If yes, fills pOffset and returns true. If no, returns false. + bool CheckAllocation( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const; + // Given free suballocation, it merges it with following one, which must also be free. + void MergeFreeWithNext(VmaSuballocationList::iterator item); + // Releases given suballocation, making it free. + // Merges it with adjacent free suballocations if applicable. + // Returns iterator to new free suballocation at this place. + VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); + // Given free suballocation, it inserts it into sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void RegisterFreeSuballocation(VmaSuballocationList::iterator item); + // Given free suballocation, it removes it from sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); +}; + +/* +Allocations and their references in internal data structure look like this: + +if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): + + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] +GetSize() +-------+ + +*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Linear(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const; + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const { return GetAllocationCount() == 0; } + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData); + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation); + virtual void FreeAtOffset(VkDeviceSize offset); + +private: + /* + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector< VmaSuballocation, VmaStlAllocator > SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. 
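
The linear metadata above keeps two suballocation vectors whose "1st"/"2nd" roles are swapped by flipping a single index (see AccessSuballocations1st/2nd). A minimal standalone sketch of that ping-pong indexing over plain std::vector<int>; PingPongVectors and its members are illustrative names:

#include <cstdint>
#include <vector>

// Two buffers whose roles ("1st" and "2nd") swap in O(1) by flipping one index,
// mirroring the m_1stVectorIndex / XOR trick used by AccessSuballocations1st/2nd.
struct PingPongVectors
{
    std::vector<int> buffers[2];
    uint32_t firstIndex = 0; // 0 or 1.

    std::vector<int>& First()  { return buffers[firstIndex]; }
    std::vector<int>& Second() { return buffers[firstIndex ^ 1]; }

    // After compaction, the freshly built "2nd" vector becomes the new "1st".
    void SwapRoles() { firstIndex ^= 1; }
};
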
+ size_t m_1stNullItemsMiddleCount; + // Number of items in 2nd vector with hAllocation = null. + size_t m_2ndNullItemsCount; + + bool ShouldCompact1st() const; + void CleanupAfterFree(); + + bool CreateAllocationRequest_LowerAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + bool CreateAllocationRequest_UpperAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); +}; + +/* +- GetSize() is the original size of allocated memory block. +- m_UsableSize is this size aligned down to a power of two. + All allocations and calculations happen relative to m_UsableSize. +- GetUnusableSize() is the difference between them. + It is repoted as separate, unused range, not available for allocations. + +Node at level 0 has size = m_UsableSize. +Each next level contains nodes with size 2 times smaller than current level. +m_LevelCount is the maximum number of levels to use in the current object. +*/ +class VmaBlockMetadata_Buddy : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy) +public: + VmaBlockMetadata_Buddy(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Buddy(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const { return m_AllocationCount; } + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; } + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; } + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); } + virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); } + +private: + static const VkDeviceSize MIN_NODE_SIZE = 32; + static const size_t MAX_LEVELS = 30; + + struct ValidationContext + { + size_t calculatedAllocationCount; + size_t calculatedFreeCount; + VkDeviceSize calculatedSumFreeSize; + + ValidationContext() : + calculatedAllocationCount(0), + calculatedFreeCount(0), + calculatedSumFreeSize(0) { 
} + }; + + struct Node + { + VkDeviceSize offset; + enum TYPE + { + TYPE_FREE, + TYPE_ALLOCATION, + TYPE_SPLIT, + TYPE_COUNT + } type; + Node* parent; + Node* buddy; + + union + { + struct + { + Node* prev; + Node* next; + } free; + struct + { + VmaAllocation alloc; + } allocation; + struct + { + Node* leftChild; + } split; + }; + }; + + // Size of the memory block aligned down to a power of two. + VkDeviceSize m_UsableSize; + uint32_t m_LevelCount; + + Node* m_Root; + struct { + Node* front; + Node* back; + } m_FreeList[MAX_LEVELS]; + // Number of nodes in the tree with type == TYPE_ALLOCATION. + size_t m_AllocationCount; + // Number of nodes in the tree with type == TYPE_FREE. + size_t m_FreeCount; + // This includes space wasted due to internal fragmentation. Doesn't include unusable size. + VkDeviceSize m_SumFreeSize; + + VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; } + void DeleteNode(Node* node); + bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const; + uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const; + inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; } + // Alloc passed just for validation. Can be null. + void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset); + void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const; + // Adds node to the front of FreeList at given level. + // node->type must be FREE. + // node->free.prev, next can be undefined. + void AddToFreeListFront(uint32_t level, Node* node); + // Removes node from FreeList at given level. + // node->type must be FREE. + // node->free.prev, next stay untouched. + void RemoveFromFreeList(uint32_t level, Node* node); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const; +#endif +}; + +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: This class must be externally synchronized. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + + ~VmaDeviceMemoryBlock() + { + VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + } + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. 
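
The buddy metadata above works on m_UsableSize (the block size rounded down to a power of two), with level 0 spanning the whole usable size and each deeper level halving the node size. A standalone sketch of that level arithmetic; PrevPow2, LevelToNodeSize and AllocSizeToLevel are simplified analogs of the members declared above, not the actual VMA implementations:

#include <cstdint>

// Largest power of two not greater than v (v > 0) - the "usable size" of a buddy block.
static uint64_t PrevPow2(uint64_t v)
{
    uint64_t result = 1;
    while(result * 2 <= v)
        result *= 2;
    return result;
}

// Node size at a given level: level 0 spans the whole usable size, each level halves it.
static uint64_t LevelToNodeSize(uint64_t usableSize, uint32_t level)
{
    return usableSize >> level;
}

// Deepest level whose node size still fits the requested allocation,
// bounded by the number of levels the metadata object maintains.
static uint32_t AllocSizeToLevel(uint64_t usableSize, uint32_t levelCount, uint64_t allocSize)
{
    uint32_t level = 0;
    while(level + 1 < levelCount && LevelToNodeSize(usableSize, level + 1) >= allocSize)
        ++level;
    return level;
}
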
+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. + */ + VMA_MUTEX m_Mutex; + uint32_t m_MapCount; + void* m_pMappedData; +}; + +struct VmaPointerLess +{ + bool operator()(const void* lhs, const void* rhs) const + { + return lhs < rhs; + } +}; + +struct VmaDefragmentationMove +{ + size_t srcBlockIndex; + size_t dstBlockIndex; + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; + VmaAllocation hAllocation; + VmaDeviceMemoryBlock* pSrcBlock; + VmaDeviceMemoryBlock* pDstBlock; +}; + +class VmaDefragmentationAlgorithm; + +/* +Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific +Vulkan memory type. + +Synchronized internally with a mutex. +*/ +struct VmaBlockVector +{ + VMA_CLASS_NO_COPY(VmaBlockVector) +public: + VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + uint32_t frameInUseCount, + bool explicitBlockSize, + uint32_t algorithm); + ~VmaBlockVector(); + + VkResult CreateMinBlocks(); + + VmaAllocator GetAllocator() const { return m_hAllocator; } + VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; } + uint32_t GetAlgorithm() const { return m_Algorithm; } + + void GetPoolStats(VmaPoolStats* pStats); + + bool IsEmpty(); + bool IsCorruptionDetectionEnabled() const; + + VkResult Allocate( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + void Free(const VmaAllocation hAllocation); + + // Adds statistics of this BlockVector to pStats. + void AddStats(VmaStats* pStats); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void MakePoolAllocationsLost( + uint32_t currentFrameIndex, + size_t* pLostAllocationCount); + VkResult CheckCorruption(); + + // Saves results in pCtx->res. 
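
The block's Map/Unmap above share one persistent mapping between all callers via m_MapCount. A standalone sketch of that reference counting; MappedBlock is an illustrative type and the mapFn/unmapFn callbacks merely stand in for vkMapMemory/vkUnmapMemory:

#include <cassert>
#include <cstdint>

// Reference-counted mapping: the underlying memory is mapped on the 0 -> 1 transition
// and unmapped on the 1 -> 0 transition; intermediate calls just reuse the pointer.
class MappedBlock
{
public:
    MappedBlock(void* (*mapFn)(), void (*unmapFn)()) :
        m_MapFn(mapFn), m_UnmapFn(unmapFn) {}

    void* Map()
    {
        if(m_MapCount++ == 0)
            m_pMappedData = m_MapFn();
        return m_pMappedData;
    }

    void Unmap()
    {
        assert(m_MapCount > 0 && "Unmapping a block that is not mapped.");
        if(--m_MapCount == 0)
        {
            m_UnmapFn();
            m_pMappedData = nullptr;
        }
    }

private:
    void* (*m_MapFn)();
    void (*m_UnmapFn)();
    uint32_t m_MapCount = 0;
    void* m_pMappedData = nullptr;
};
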
+ void Defragment( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer); + void DefragmentationEnd( + class VmaBlockVectorDefragmentationContext* pCtx, + uint32_t flags, + VmaDefragmentationStats* pStats); + + uint32_t ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves); + + void CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats); + + //////////////////////////////////////////////////////////////////////////////// + // To be used only while the m_Mutex is locked. Used during defragmentation. + + size_t GetBlockCount() const { return m_Blocks.size(); } + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + size_t CalcAllocationCount() const; + bool IsBufferImageGranularityConflictPossible() const; + +private: + friend class VmaDefragmentationAlgorithm_Generic; + + const VmaAllocator m_hAllocator; + const VmaPool m_hParentPool; + const uint32_t m_MemoryTypeIndex; + const VkDeviceSize m_PreferredBlockSize; + const size_t m_MinBlockCount; + const size_t m_MaxBlockCount; + const VkDeviceSize m_BufferImageGranularity; + const uint32_t m_FrameInUseCount; + const bool m_ExplicitBlockSize; + const uint32_t m_Algorithm; + VMA_RW_MUTEX m_Mutex; + + /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) - + a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */ + bool m_HasEmptyBlock; + // Incrementally sorted by sumFreeSize, ascending. + VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator > m_Blocks; + uint32_t m_NextBlockId; + + VkDeviceSize CalcMaxBlockSize() const; + + // Finds and removes given block from vector. + void Remove(VmaDeviceMemoryBlock* pBlock); + + // Performs single step in sorting m_Blocks. They may not be fully sorted + // after this call. + void IncrementallySortBlocks(); + + VkResult AllocatePage( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + // To be used only without CAN_MAKE_OTHER_LOST flag. + VkResult AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + + // Saves result to pCtx->res. + void ApplyDefragmentationMovesCpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + const VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves); + // Saves result to pCtx->res. + void ApplyDefragmentationMovesGpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkCommandBuffer commandBuffer); + + /* + Used during defragmentation. pDefragmentationStats is optional. It's in/out + - updated with new data. 
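
IncrementallySortBlocks above performs only a single step per call, so m_Blocks converges toward ascending sumFreeSize order over many allocations without ever paying for a full sort. A standalone sketch of that single-step idea over plain integers (the keys stand in for sumFreeSize; IncrementalSortStep is an illustrative name):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// One incremental sorting step: find the first adjacent pair that is out of
// ascending order and swap it. Repeated calls eventually yield a sorted vector,
// but any single call is cheap and leaves the vector only "more sorted".
static void IncrementalSortStep(std::vector<uint64_t>& keys)
{
    for(size_t i = 1; i < keys.size(); ++i)
    {
        if(keys[i - 1] > keys[i])
        {
            std::swap(keys[i - 1], keys[i]);
            return; // At most one swap per call.
        }
    }
}
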
+ */ + void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats); + + void UpdateHasEmptyBlock(); +}; + +struct VmaPool_T +{ + VMA_CLASS_NO_COPY(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; +}; + +/* +Performs defragmentation: + +- Updates `pBlockVector->m_pMetadata`. +- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset(). +- Does not move actual data, only returns requested moves as `moves`. +*/ +class VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm) +public: + VmaDefragmentationAlgorithm( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex) : + m_hAllocator(hAllocator), + m_pBlockVector(pBlockVector), + m_CurrentFrameIndex(currentFrameIndex) + { + } + virtual ~VmaDefragmentationAlgorithm() + { + } + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0; + virtual void AddAll() = 0; + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) = 0; + + virtual VkDeviceSize GetBytesMoved() const = 0; + virtual uint32_t GetAllocationsMoved() const = 0; + +protected: + VmaAllocator const m_hAllocator; + VmaBlockVector* const m_pBlockVector; + const uint32_t m_CurrentFrameIndex; + + struct AllocationInfo + { + VmaAllocation m_hAllocation; + VkBool32* m_pChanged; + + AllocationInfo() : + m_hAllocation(VK_NULL_HANDLE), + m_pChanged(VMA_NULL) + { + } + AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) : + m_hAllocation(hAlloc), + m_pChanged(pChanged) + { + } + }; +}; + +class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic) +public: + VmaDefragmentationAlgorithm_Generic( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); + virtual ~VmaDefragmentationAlgorithm_Generic(); + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); + virtual void AddAll() { m_AllAllocations = true; } + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); + + virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } + virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } + +private: + uint32_t m_AllocationCount; + bool m_AllAllocations; + + VkDeviceSize m_BytesMoved; + uint32_t m_AllocationsMoved; + + struct AllocationInfoSizeGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { + return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize(); + } + }; + + struct AllocationInfoOffsetGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { + return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset(); + } + }; + + struct BlockInfo + { + size_t m_OriginalBlockIndex; + 
VmaDeviceMemoryBlock* m_pBlock; + bool m_HasNonMovableAllocations; + VmaVector< AllocationInfo, VmaStlAllocator > m_Allocations; + + BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) : + m_OriginalBlockIndex(SIZE_MAX), + m_pBlock(VMA_NULL), + m_HasNonMovableAllocations(true), + m_Allocations(pAllocationCallbacks) + { + } + + void CalcHasNonMovableAllocations() + { + const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount(); + const size_t defragmentAllocCount = m_Allocations.size(); + m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount; + } + + void SortAllocationsBySizeDescending() + { + VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater()); + } + + void SortAllocationsByOffsetDescending() + { + VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater()); + } + }; + + struct BlockPointerLess + { + bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const + { + return pLhsBlockInfo->m_pBlock < pRhsBlock; + } + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock; + } + }; + + // 1. Blocks with some non-movable allocations go first. + // 2. Blocks with smaller sumFreeSize go first. + struct BlockInfoCompareMoveDestination + { + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) + { + return true; + } + if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) + { + return false; + } + if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) + { + return true; + } + return false; + } + }; + + typedef VmaVector< BlockInfo*, VmaStlAllocator > BlockInfoVector; + BlockInfoVector m_Blocks; + + VkResult DefragmentRound( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations); + + size_t CalcBlocksWithNonMovableCount() const; + + static bool MoveMakesSense( + size_t dstBlockIndex, VkDeviceSize dstOffset, + size_t srcBlockIndex, VkDeviceSize srcOffset); +}; + +class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast) +public: + VmaDefragmentationAlgorithm_Fast( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); + virtual ~VmaDefragmentationAlgorithm_Fast(); + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; } + virtual void AddAll() { m_AllAllocations = true; } + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); + + virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } + virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } + +private: + struct BlockInfo + { + size_t origBlockIndex; + }; + + class FreeSpaceDatabase + { + public: + FreeSpaceDatabase() + { + FreeSpace s = {}; + s.blockInfoIndex = SIZE_MAX; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + m_FreeSpaces[i] = s; + } + } + + void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) + { + if(size < 
VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + return; + } + + // Find first invalid or the smallest structure. + size_t bestIndex = SIZE_MAX; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + // Empty structure. + if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX) + { + bestIndex = i; + break; + } + if(m_FreeSpaces[i].size < size && + (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size)) + { + bestIndex = i; + } + } + + if(bestIndex != SIZE_MAX) + { + m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex; + m_FreeSpaces[bestIndex].offset = offset; + m_FreeSpaces[bestIndex].size = size; + } + } + + bool Fetch(VkDeviceSize alignment, VkDeviceSize size, + size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset) + { + size_t bestIndex = SIZE_MAX; + VkDeviceSize bestFreeSpaceAfter = 0; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + // Structure is valid. + if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX) + { + const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment); + // Allocation fits into this structure. + if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size) + { + const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) - + (dstOffset + size); + if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter) + { + bestIndex = i; + bestFreeSpaceAfter = freeSpaceAfter; + } + } + } + } + + if(bestIndex != SIZE_MAX) + { + outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex; + outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment); + + if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + // Leave this structure for remaining empty space. + const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size; + m_FreeSpaces[bestIndex].offset += alignmentPlusSize; + m_FreeSpaces[bestIndex].size -= alignmentPlusSize; + } + else + { + // This structure becomes invalid. + m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX; + } + + return true; + } + + return false; + } + + private: + static const size_t MAX_COUNT = 4; + + struct FreeSpace + { + size_t blockInfoIndex; // SIZE_MAX means this structure is invalid. + VkDeviceSize offset; + VkDeviceSize size; + } m_FreeSpaces[MAX_COUNT]; + }; + + const bool m_OverlappingMoveSupported; + + uint32_t m_AllocationCount; + bool m_AllAllocations; + + VkDeviceSize m_BytesMoved; + uint32_t m_AllocationsMoved; + + VmaVector< BlockInfo, VmaStlAllocator > m_BlockInfos; + + void PreprocessMetadata(); + void PostprocessMetadata(); + void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc); +}; + +struct VmaBlockDefragmentationContext +{ + enum BLOCK_FLAG + { + BLOCK_FLAG_USED = 0x00000001, + }; + uint32_t flags; + VkBuffer hBuffer; +}; + +class VmaBlockVectorDefragmentationContext +{ + VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext) +public: + VkResult res; + bool mutexLocked; + VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator > blockContexts; + VmaVector< VmaDefragmentationMove, VmaStlAllocator > defragmentationMoves; + uint32_t defragmentationMovesProcessed; + uint32_t defragmentationMovesCommitted; + bool hasDefragmentationPlan; + + VmaBlockVectorDefragmentationContext( + VmaAllocator hAllocator, + VmaPool hCustomPool, // Optional. 
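
Fetch() above aligns the candidate offset up, checks that the allocation still fits in the free range, and then maximizes the space left over. A standalone sketch of that arithmetic; AlignUp and FitsInFreeSpace are illustrative helpers playing the role that VmaAlignUp plays in the code above:

#include <cstdint>

// Round offset up to the next multiple of alignment (alignment must be non-zero).
static uint64_t AlignUp(uint64_t offset, uint64_t alignment)
{
    return ((offset + alignment - 1) / alignment) * alignment;
}

// Returns true if an allocation of allocSize/allocAlignment fits into the free range
// [freeOffset, freeOffset + freeSize), reporting the aligned destination offset and
// the free space left after the allocation - the quantity Fetch() maximizes above.
static bool FitsInFreeSpace(uint64_t freeOffset, uint64_t freeSize,
                            uint64_t allocSize, uint64_t allocAlignment,
                            uint64_t* outDstOffset, uint64_t* outSpaceAfter)
{
    const uint64_t dstOffset = AlignUp(freeOffset, allocAlignment);
    if(dstOffset + allocSize > freeOffset + freeSize)
        return false;
    *outDstOffset = dstOffset;
    *outSpaceAfter = (freeOffset + freeSize) - (dstOffset + allocSize);
    return true;
}
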
+ VmaBlockVector* pBlockVector, + uint32_t currFrameIndex); + ~VmaBlockVectorDefragmentationContext(); + + VmaPool GetCustomPool() const { return m_hCustomPool; } + VmaBlockVector* GetBlockVector() const { return m_pBlockVector; } + VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; } + + void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); + void AddAll() { m_AllAllocations = true; } + + void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags); + +private: + const VmaAllocator m_hAllocator; + // Null if not from custom pool. + const VmaPool m_hCustomPool; + // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors. + VmaBlockVector* const m_pBlockVector; + const uint32_t m_CurrFrameIndex; + // Owner of this object. + VmaDefragmentationAlgorithm* m_pAlgorithm; + + struct AllocInfo + { + VmaAllocation hAlloc; + VkBool32* pChanged; + }; + // Used between constructor and Begin. + VmaVector< AllocInfo, VmaStlAllocator > m_Allocations; + bool m_AllAllocations; +}; + +struct VmaDefragmentationContext_T +{ +private: + VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) +public: + VmaDefragmentationContext_T( + VmaAllocator hAllocator, + uint32_t currFrameIndex, + uint32_t flags, + VmaDefragmentationStats* pStats); + ~VmaDefragmentationContext_T(); + + void AddPools(uint32_t poolCount, const VmaPool* pPools); + void AddAllocations( + uint32_t allocationCount, + const VmaAllocation* pAllocations, + VkBool32* pAllocationsChanged); + + /* + Returns: + - `VK_SUCCESS` if succeeded and object can be destroyed immediately. + - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd(). + - Negative value if error occured and object can be destroyed immediately. + */ + VkResult Defragment( + VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, + VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags); + + VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo); + VkResult DefragmentPassEnd(); + +private: + const VmaAllocator m_hAllocator; + const uint32_t m_CurrFrameIndex; + const uint32_t m_Flags; + VmaDefragmentationStats* const m_pStats; + + VkDeviceSize m_MaxCpuBytesToMove; + uint32_t m_MaxCpuAllocationsToMove; + VkDeviceSize m_MaxGpuBytesToMove; + uint32_t m_MaxGpuAllocationsToMove; + + // Owner of these objects. + VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES]; + // Owner of these objects. 
+ VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator > m_CustomPoolContexts; +}; + +#if VMA_RECORDING_ENABLED + +class VmaRecorder +{ +public: + VmaRecorder(); + VkResult Init(const VmaRecordSettings& settings, bool useMutex); + void WriteConfiguration( + const VkPhysicalDeviceProperties& devProps, + const VkPhysicalDeviceMemoryProperties& memProps, + uint32_t vulkanApiVersion, + bool dedicatedAllocationExtensionEnabled, + bool bindMemory2ExtensionEnabled, + bool memoryBudgetExtensionEnabled, + bool deviceCoherentMemoryExtensionEnabled); + ~VmaRecorder(); + + void RecordCreateAllocator(uint32_t frameIndex); + void RecordDestroyAllocator(uint32_t frameIndex); + void RecordCreatePool(uint32_t frameIndex, + const VmaPoolCreateInfo& createInfo, + VmaPool pool); + void RecordDestroyPool(uint32_t frameIndex, VmaPool pool); + void RecordAllocateMemory(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation); + void RecordAllocateMemoryPages(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations); + void RecordAllocateMemoryForBuffer(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation); + void RecordAllocateMemoryForImage(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation); + void RecordFreeMemory(uint32_t frameIndex, + VmaAllocation allocation); + void RecordFreeMemoryPages(uint32_t frameIndex, + uint64_t allocationCount, + const VmaAllocation* pAllocations); + void RecordSetAllocationUserData(uint32_t frameIndex, + VmaAllocation allocation, + const void* pUserData); + void RecordCreateLostAllocation(uint32_t frameIndex, + VmaAllocation allocation); + void RecordMapMemory(uint32_t frameIndex, + VmaAllocation allocation); + void RecordUnmapMemory(uint32_t frameIndex, + VmaAllocation allocation); + void RecordFlushAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); + void RecordInvalidateAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size); + void RecordCreateBuffer(uint32_t frameIndex, + const VkBufferCreateInfo& bufCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation); + void RecordCreateImage(uint32_t frameIndex, + const VkImageCreateInfo& imageCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation); + void RecordDestroyBuffer(uint32_t frameIndex, + VmaAllocation allocation); + void RecordDestroyImage(uint32_t frameIndex, + VmaAllocation allocation); + void RecordTouchAllocation(uint32_t frameIndex, + VmaAllocation allocation); + void RecordGetAllocationInfo(uint32_t frameIndex, + VmaAllocation allocation); + void RecordMakePoolAllocationsLost(uint32_t frameIndex, + VmaPool pool); + void RecordDefragmentationBegin(uint32_t frameIndex, + const VmaDefragmentationInfo2& info, + VmaDefragmentationContext ctx); + void RecordDefragmentationEnd(uint32_t frameIndex, + VmaDefragmentationContext ctx); + void RecordSetPoolName(uint32_t frameIndex, + VmaPool pool, + const char* name); + +private: + struct CallParams + { + uint32_t threadId; + 
double time; + }; + + class UserDataString + { + public: + UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData); + const char* GetString() const { return m_Str; } + + private: + char m_PtrStr[17]; + const char* m_Str; + }; + + bool m_UseMutex; + VmaRecordFlags m_Flags; + FILE* m_File; + VMA_MUTEX m_FileMutex; + std::chrono::time_point m_RecordingStartTime; + + void GetBasicParams(CallParams& outParams); + + // T must be a pointer type, e.g. VmaAllocation, VmaPool. + template + void PrintPointerList(uint64_t count, const T* pItems) + { + if(count) + { + fprintf(m_File, "%p", pItems[0]); + for(uint64_t i = 1; i < count; ++i) + { + fprintf(m_File, " %p", pItems[i]); + } + } + } + + void PrintPointerList(uint64_t count, const VmaAllocation* pItems); + void Flush(); +}; + +#endif // #if VMA_RECORDING_ENABLED + +/* +Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. +*/ +class VmaAllocationObjectAllocator +{ + VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator) +public: + VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks); + + template VmaAllocation Allocate(Types... args); + void Free(VmaAllocation hAlloc); + +private: + VMA_MUTEX m_Mutex; + VmaPoolAllocator m_Allocator; +}; + +struct VmaCurrentBudgetData +{ + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // #if VMA_MEMORY_BUDGET + + VmaCurrentBudgetData() + { + for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif + } + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) + { + m_AllocationBytes[heapIndex] += allocationSize; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif + } + + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) + { + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME + m_AllocationBytes[heapIndex] -= allocationSize; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif + } +}; + +// Main allocator object. +struct VmaAllocator_T +{ + VMA_CLASS_NO_COPY(VmaAllocator_T) +public: + bool m_UseMutex; + uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; + bool m_UseKhrBufferDeviceAddress; + VkDevice m_hDevice; + VkInstance m_hInstance; + bool m_AllocationCallbacksSpecified; + VkAllocationCallbacks m_AllocationCallbacks; + VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; + VmaAllocationObjectAllocator m_AllocationObjectAllocator; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. 
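
VmaCurrentBudgetData above keeps per-heap byte counters as atomics so usage can be tracked without taking the allocator's mutexes. A standalone sketch using std::atomic directly; BudgetCounters is an illustrative type and kHeapCount is an arbitrary stand-in for VK_MAX_MEMORY_HEAPS:

#include <atomic>
#include <cassert>
#include <cstdint>

static const uint32_t kHeapCount = 16; // Stand-in for VK_MAX_MEMORY_HEAPS.

// Lock-free byte counters per memory heap, updated on every allocation and free.
struct BudgetCounters
{
    std::atomic<uint64_t> allocationBytes[kHeapCount];

    BudgetCounters()
    {
        for(uint32_t i = 0; i < kHeapCount; ++i)
            allocationBytes[i].store(0, std::memory_order_relaxed);
    }

    void AddAllocation(uint32_t heapIndex, uint64_t size)
    {
        allocationBytes[heapIndex].fetch_add(size, std::memory_order_relaxed);
    }

    void RemoveAllocation(uint32_t heapIndex, uint64_t size)
    {
        assert(allocationBytes[heapIndex].load(std::memory_order_relaxed) >= size);
        allocationBytes[heapIndex].fetch_sub(size, std::memory_order_relaxed);
    }
};
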
+ uint32_t m_HeapSizeLimitMask; + + VkPhysicalDeviceProperties m_PhysicalDeviceProperties; + VkPhysicalDeviceMemoryProperties m_MemProps; + + // Default pools. + VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + + // Each vector is sorted by memory (handle value). + typedef VmaVector< VmaAllocation, VmaStlAllocator > AllocationVectorType; + AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES]; + VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES]; + + VmaCurrentBudgetData m_Budget; + + VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); + ~VmaAllocator_T(); + + const VkAllocationCallbacks* GetAllocationCallbacks() const + { + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0; + } + const VmaVulkanFunctions& GetVulkanFunctions() const + { + return m_VulkanFunctions; + } + + VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } + + VkDeviceSize GetBufferImageGranularity() const + { + return VMA_MAX( + static_cast(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), + m_PhysicalDeviceProperties.limits.bufferImageGranularity); + } + + uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } + uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } + + uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const + { + VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); + return m_MemProps.memoryTypes[memTypeIndex].heapIndex; + } + // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. + bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const + { + return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + // Minimum alignment for all allocations in specific memory type. + VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const + { + return IsMemoryTypeNonCoherent(memTypeIndex) ? + VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : + (VkDeviceSize)VMA_DEBUG_ALIGNMENT; + } + + bool IsIntegratedGpu() const + { + return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; + } + + uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } + +#if VMA_RECORDING_ENABLED + VmaRecorder* GetRecorder() const { return m_pRecorder; } +#endif + + void GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + void GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + + // Main allocation function. + VkResult AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown. + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Main deallocation function. 
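
IsMemoryTypeNonCoherent and GetMemoryTypeMinAlignment above encode the rule that HOST_VISIBLE-but-not-HOST_COHERENT memory needs explicit flush/invalidate and nonCoherentAtomSize alignment. A standalone sketch of that check; the two local bit constants mirror the values of VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT and VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, and the function names are illustrative:

#include <algorithm>
#include <cstdint>

// Bit values mirror VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT / _HOST_COHERENT_BIT.
static const uint32_t kHostVisibleBit  = 0x00000002;
static const uint32_t kHostCoherentBit = 0x00000004;

// Host-visible but not host-coherent memory needs explicit flush/invalidate,
// and mapped ranges must respect nonCoherentAtomSize.
static bool IsNonCoherent(uint32_t propertyFlags)
{
    return (propertyFlags & (kHostVisibleBit | kHostCoherentBit)) == kHostVisibleBit;
}

static uint64_t MinAllocationAlignment(uint32_t propertyFlags,
                                       uint64_t debugAlignment,
                                       uint64_t nonCoherentAtomSize)
{
    return IsNonCoherent(propertyFlags)
        ? std::max(debugAlignment, nonCoherentAtomSize)
        : debugAlignment;
}
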
+ void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + VkResult ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize); + + void CalculateStats(VmaStats* pStats); + + void GetBudget( + VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + VkResult DefragmentationBegin( + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext); + VkResult DefragmentationEnd( + VmaDefragmentationContext context); + + VkResult DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context); + VkResult DefragmentationPassEnd( + VmaDefragmentationContext context); + + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + bool TouchAllocation(VmaAllocation hAllocation); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + void MakePoolAllocationsLost( + VmaPool hPool, + size_t* pLostAllocationCount); + VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + void CreateLostAllocation(VmaAllocation* pAllocation); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. + + VMA_RW_MUTEX m_PoolsMutex; + // Protected by m_PoolsMutex. Sorted by pointer value. 
+ VmaVector > m_Pools; + uint32_t m_NextPoolId; + + VmaVulkanFunctions m_VulkanFunctions; + + // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. + uint32_t m_GlobalMemoryTypeBits; + +#if VMA_RECORDING_ENABLED + VmaRecorder* m_pRecorder; +#endif + + void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Static(); +#endif + + void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Dynamic(); +#endif + + void ValidateVulkanFunctions(); + + VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); + + VkResult AllocateMemoryOfType( + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedAllocation, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Helper function only to be used inside AllocateDedicatedMemory. + VkResult AllocateDedicatedMemoryPage( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, + VmaAllocation* pAllocation); + + // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. + VkResult AllocateDedicatedMemory( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + bool withinBudget, + bool map, + bool isUserDataString, + void* pUserData, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + size_t allocationCount, + VmaAllocation* pAllocations); + + void FreeDedicatedMemory(const VmaAllocation allocation); + + /* + Calculates and returns bit mask of memory types that can support defragmentation + on GPU as they support creation of required buffer for copy operations. 
+ */ + uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; + + uint32_t CalculateGlobalMemoryTypeBits() const; + + bool GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const; + +#if VMA_MEMORY_BUDGET + void UpdateVulkanBudget(); +#endif // #if VMA_MEMORY_BUDGET +}; + +//////////////////////////////////////////////////////////////////////////////// +// Memory allocation #2 after VmaAllocator_T definition + +static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +{ + return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); +} + +static void VmaFree(VmaAllocator hAllocator, void* ptr) +{ + VmaFree(&hAllocator->m_AllocationCallbacks, ptr); +} + +template +static T* VmaAllocate(VmaAllocator hAllocator) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +template +static void vma_delete(VmaAllocator hAllocator, T* ptr) +{ + if(ptr != VMA_NULL) + { + ptr->~T(); + VmaFree(hAllocator, ptr); + } +} + +template +static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + ptr[i].~T(); + VmaFree(hAllocator, ptr); + } +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaStringBuilder + +#if VMA_STATS_STRING_ENABLED + +class VmaStringBuilder +{ +public: + VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator(alloc->GetAllocationCallbacks())) { } + size_t GetLength() const { return m_Data.size(); } + const char* GetData() const { return m_Data.data(); } + + void Add(char ch) { m_Data.push_back(ch); } + void Add(const char* pStr); + void AddNewLine() { Add('\n'); } + void AddNumber(uint32_t num); + void AddNumber(uint64_t num); + void AddPointer(const void* ptr); + +private: + VmaVector< char, VmaStlAllocator > m_Data; +}; + +void VmaStringBuilder::Add(const char* pStr) +{ + const size_t strLen = strlen(pStr); + if(strLen > 0) + { + const size_t oldCount = m_Data.size(); + m_Data.resize(oldCount + strLen); + memcpy(m_Data.data() + oldCount, pStr, strLen); + } +} + +void VmaStringBuilder::AddNumber(uint32_t num) +{ + char buf[11]; + buf[10] = '\0'; + char *p = &buf[10]; + do + { + *--p = '0' + (num % 10); + num /= 10; + } + while(num); + Add(p); +} + +void VmaStringBuilder::AddNumber(uint64_t num) +{ + char buf[21]; + buf[20] = '\0'; + char *p = &buf[20]; + do + { + *--p = '0' + (num % 10); + num /= 10; + } + while(num); + Add(p); +} + +void VmaStringBuilder::AddPointer(const void* ptr) +{ + char buf[21]; + VmaPtrToStr(buf, sizeof(buf), ptr); + Add(buf); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// VmaJsonWriter + +#if VMA_STATS_STRING_ENABLED + +class VmaJsonWriter +{ + VMA_CLASS_NO_COPY(VmaJsonWriter) +public: + VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); + ~VmaJsonWriter(); + + void BeginObject(bool singleLine = false); + void EndObject(); + + void BeginArray(bool singleLine = false); + void EndArray(); + + void WriteString(const char* pStr); + void BeginString(const char* pStr = VMA_NULL); + void ContinueString(const char* pStr); + void ContinueString(uint32_t n); + void ContinueString(uint64_t n); + void ContinueString_Pointer(const void* ptr); + 
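
VmaStringBuilder::AddNumber above converts integers without sprintf by writing decimal digits backwards from the end of a small stack buffer. A standalone sketch of the same technique; Uint64ToDecimal is an illustrative helper, with the 21-byte buffer sized for the 20 digits of a maximal uint64_t plus the terminator:

#include <cstdint>
#include <cstdio>

// Writes the decimal digits of num backwards from the end of a local buffer and
// returns a pointer to the first digit. The buffer must outlive the returned
// pointer, so callers typically copy or print the result immediately.
static const char* Uint64ToDecimal(uint64_t num, char (&buf)[21])
{
    buf[20] = '\0';
    char* p = &buf[20];
    do
    {
        *--p = static_cast<char>('0' + (num % 10));
        num /= 10;
    } while(num);
    return p;
}

int main()
{
    char buf[21];
    printf("%s\n", Uint64ToDecimal(18446744073709551615ull, buf)); // Max uint64_t: 20 digits.
    return 0;
}
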
void EndString(const char* pStr = VMA_NULL); + + void WriteNumber(uint32_t n); + void WriteNumber(uint64_t n); + void WriteBool(bool b); + void WriteNull(); + +private: + static const char* const INDENT; + + enum COLLECTION_TYPE + { + COLLECTION_TYPE_OBJECT, + COLLECTION_TYPE_ARRAY, + }; + struct StackItem + { + COLLECTION_TYPE type; + uint32_t valueCount; + bool singleLineMode; + }; + + VmaStringBuilder& m_SB; + VmaVector< StackItem, VmaStlAllocator > m_Stack; + bool m_InsideString; + + void BeginValue(bool isString); + void WriteIndent(bool oneLess = false); +}; + +const char* const VmaJsonWriter::INDENT = " "; + +VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) : + m_SB(sb), + m_Stack(VmaStlAllocator(pAllocationCallbacks)), + m_InsideString(false) +{ +} + +VmaJsonWriter::~VmaJsonWriter() +{ + VMA_ASSERT(!m_InsideString); + VMA_ASSERT(m_Stack.empty()); +} + +void VmaJsonWriter::BeginObject(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('{'); + + StackItem item; + item.type = COLLECTION_TYPE_OBJECT; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndObject() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add('}'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); + m_Stack.pop_back(); +} + +void VmaJsonWriter::BeginArray(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('['); + + StackItem item; + item.type = COLLECTION_TYPE_ARRAY; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndArray() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add(']'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); + m_Stack.pop_back(); +} + +void VmaJsonWriter::WriteString(const char* pStr) +{ + BeginString(pStr); + EndString(); +} + +void VmaJsonWriter::BeginString(const char* pStr) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(true); + m_SB.Add('"'); + m_InsideString = true; + if(pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } +} + +void VmaJsonWriter::ContinueString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + + const size_t strLen = strlen(pStr); + for(size_t i = 0; i < strLen; ++i) + { + char ch = pStr[i]; + if(ch == '\\') + { + m_SB.Add("\\\\"); + } + else if(ch == '"') + { + m_SB.Add("\\\""); + } + else if(ch >= 32) + { + m_SB.Add(ch); + } + else switch(ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r': + m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + break; + } + } +} + +void VmaJsonWriter::ContinueString(uint32_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString(uint64_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddPointer(ptr); +} + +void VmaJsonWriter::EndString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + if(pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } + m_SB.Add('"'); + m_InsideString = false; +} + +void VmaJsonWriter::WriteNumber(uint32_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void 
VmaJsonWriter::WriteNumber(uint64_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? "true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if(!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if(currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if(currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if(currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if(!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if(count > 0 && oneLess) + { + --count; + } + for(size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// + +void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData) +{ + if(IsUserDataString()) + { + VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData); + + FreeUserDataString(hAllocator); + + if(pUserData != VMA_NULL) + { + m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData); + } + } + else + { + m_pUserData = pUserData; + } +} + +void VmaAllocation_T::ChangeBlockAllocation( + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset) +{ + VMA_ASSERT(block != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + + // Move mapping reference counter from old block to new block. 
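
BeginValue above emits valid JSON incrementally by tracking how many values the current object has seen: even positions must be string keys, odd positions are values and get a ":" prefix, and subsequent keys get a "," separator. A minimal standalone sketch of that alternation rule; TinyObjectWriter is an illustrative type, ignoring indentation and arrays:

#include <cstdint>
#include <string>

// Inside an object, values at even positions are keys and values at odd positions
// follow their key, so separators can be emitted without buffering whole pairs.
struct TinyObjectWriter
{
    std::string out = "{";
    uint32_t valueCount = 0;

    void AddValue(const std::string& v)
    {
        if(valueCount % 2 != 0)
            out += ":";   // This value follows its key.
        else if(valueCount > 0)
            out += ",";   // New key after a previous key/value pair.
        out += v;
        ++valueCount;
    }

    std::string Finish() { return out + "}"; }
};

// TinyObjectWriter w; w.AddValue("\"Size\""); w.AddValue("4096");
// w.Finish() == "{\"Size\":4096}"
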
+ if(block != m_BlockAllocation.m_Block) + { + uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; + if(IsPersistentMap()) + ++mapRefCount; + m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount); + block->Map(hAllocator, mapRefCount, VMA_NULL); + } + + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_Offset = offset; +} + +void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + m_BlockAllocation.m_Offset = newOffset; +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Offset; + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if(m_MapCount != 0) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + m_BlockAllocation.m_Offset; + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0)); + return m_DedicatedAllocation.m_pMappedData; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +bool VmaAllocation_T::CanBecomeLost() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_CanBecomeLost; + case ALLOCATION_TYPE_DEDICATED: + return false; + default: + VMA_ASSERT(0); + return false; + } +} + +bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + VMA_ASSERT(CanBecomeLost()); + + /* + Warning: This is a carefully designed algorithm. + Do not modify unless you really know what you're doing :) + */ + uint32_t localLastUseFrameIndex = GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + VMA_ASSERT(0); + return false; + } + else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex) + { + return false; + } + else // Last use time earlier than current time. + { + if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST)) + { + // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST. + // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock. + return true; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED + +// Correspond to values of enum VmaSuballocationType. 
+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = { + "FREE", + "UNKNOWN", + "BUFFER", + "IMAGE_UNKNOWN", + "IMAGE_LINEAR", + "IMAGE_OPTIMAL", +}; + +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + + if(m_pUserData != VMA_NULL) + { + json.WriteString("UserData"); + if(IsUserDataString()) + { + json.WriteString((const char*)m_pUserData); + } + else + { + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); + } + } + + json.WriteString("CreationFrameIndex"); + json.WriteNumber(m_CreationFrameIndex); + + json.WriteString("LastUseFrameIndex"); + json.WriteNumber(GetLastUseFrameIndex()); + + if(m_BufferImageUsage != 0) + { + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage); + } +} + +#endif + +void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator) +{ + VMA_ASSERT(IsUserDataString()); + VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData); + m_pUserData = VMA_NULL; +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if(m_MapCount != 0) + { + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + { + VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + else + { + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if(result == VK_SUCCESS) + { + m_DedicatedAllocation.m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; + } +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + { + --m_MapCount; + if(m_MapCount == 0) + { + m_DedicatedAllocation.m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED + +static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat) +{ + json.BeginObject(); + + json.WriteString("Blocks"); + json.WriteNumber(stat.blockCount); + + json.WriteString("Allocations"); + json.WriteNumber(stat.allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteNumber(stat.unusedRangeCount); + + json.WriteString("UsedBytes"); + json.WriteNumber(stat.usedBytes); + + json.WriteString("UnusedBytes"); + json.WriteNumber(stat.unusedBytes); + + 
if(stat.allocationCount > 1) + { + json.WriteString("AllocationSize"); + json.BeginObject(true); + json.WriteString("Min"); + json.WriteNumber(stat.allocationSizeMin); + json.WriteString("Avg"); + json.WriteNumber(stat.allocationSizeAvg); + json.WriteString("Max"); + json.WriteNumber(stat.allocationSizeMax); + json.EndObject(); + } + + if(stat.unusedRangeCount > 1) + { + json.WriteString("UnusedRangeSize"); + json.BeginObject(true); + json.WriteString("Min"); + json.WriteNumber(stat.unusedRangeSizeMin); + json.WriteString("Avg"); + json.WriteNumber(stat.unusedRangeSizeAvg); + json.WriteString("Max"); + json.WriteNumber(stat.unusedRangeSizeMax); + json.EndObject(); + } + + json.EndObject(); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +struct VmaSuballocationItemSizeLess +{ + bool operator()( + const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { + return lhs->size < rhs->size; + } + bool operator()( + const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { + return lhs->size < rhsSize; + } +}; + + +//////////////////////////////////////////////////////////////////////////////// +// class VmaBlockMetadata + +VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) : + m_Size(0), + m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks()) +{ +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const +{ + json.BeginObject(); + + json.WriteString("TotalBytes"); + json.WriteNumber(GetSize()); + + json.WriteString("UnusedBytes"); + json.WriteNumber(unusedBytes); + + json.WriteString("Allocations"); + json.WriteNumber((uint64_t)allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteNumber((uint64_t)unusedRangeCount); + + json.WriteString("Suballocations"); + json.BeginArray(); +} + +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, + VmaAllocation hAllocation) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + hAllocation->PrintParameters(json); + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); + + json.WriteString("Size"); + json.WriteNumber(size); + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const +{ + json.EndArray(); + json.EndObject(); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// class VmaBlockMetadata_Generic + +VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) : + VmaBlockMetadata(hAllocator), + m_FreeCount(0), + m_SumFreeSize(0), + m_Suballocations(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_FreeSuballocationsBySize(VmaStlAllocator(hAllocator->GetAllocationCallbacks())) +{ +} + +VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic() +{ +} + +void VmaBlockMetadata_Generic::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + m_FreeCount = 1; + m_SumFreeSize = size; + + VmaSuballocation suballoc = {}; + suballoc.offset = 0; + suballoc.size = size; + suballoc.type = 
VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + + VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER); + m_Suballocations.push_back(suballoc); + VmaSuballocationList::iterator suballocItem = m_Suballocations.end(); + --suballocItem; + m_FreeSuballocationsBySize.push_back(suballocItem); +} + +bool VmaBlockMetadata_Generic::Validate() const +{ + VMA_VALIDATE(!m_Suballocations.empty()); + + // Expected offset of new suballocation as calculated from previous ones. + VkDeviceSize calculatedOffset = 0; + // Expected number of free suballocations as calculated from traversing their list. + uint32_t calculatedFreeCount = 0; + // Expected sum size of free suballocations as calculated from traversing their list. + VkDeviceSize calculatedSumFreeSize = 0; + // Expected number of free suballocations that should be registered in + // m_FreeSuballocationsBySize calculated from traversing their list. + size_t freeSuballocationsToRegister = 0; + // True if previous visited suballocation was free. + bool prevFree = false; + + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem) + { + const VmaSuballocation& subAlloc = *suballocItem; + + // Actual offset of this suballocation doesn't match expected one. + VMA_VALIDATE(subAlloc.offset == calculatedOffset); + + const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Two adjacent free suballocations are invalid. They should be merged. + VMA_VALIDATE(!prevFree || !currFree); + + VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE)); + + if(currFree) + { + calculatedSumFreeSize += subAlloc.size; + ++calculatedFreeCount; + if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + ++freeSuballocationsToRegister; + } + + // Margin required between allocations - every free space must be at least that large. + VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN); + } + else + { + VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset); + VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size); + + // Margin required between allocations - previous allocation must be free. + VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree); + } + + calculatedOffset += subAlloc.size; + prevFree = currFree; + } + + // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't + // match expected one. + VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister); + + VkDeviceSize lastSize = 0; + for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) + { + VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i]; + + // Only free suballocations can be registered in m_FreeSuballocationsBySize. + VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE); + // They must be sorted by size ascending. + VMA_VALIDATE(suballocItem->size >= lastSize); + + lastSize = suballocItem->size; + } + + // Check if totals match calculacted values. 
+ VMA_VALIDATE(ValidateFreeSuballocationList()); + VMA_VALIDATE(calculatedOffset == GetSize()); + VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize); + VMA_VALIDATE(calculatedFreeCount == m_FreeCount); + + return true; +} + +VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const +{ + if(!m_FreeSuballocationsBySize.empty()) + { + return m_FreeSuballocationsBySize.back()->size; + } + else + { + return 0; + } +} + +bool VmaBlockMetadata_Generic::IsEmpty() const +{ + return (m_Suballocations.size() == 1) && (m_FreeCount == 1); +} + +void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + outInfo.blockCount = 1; + + const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); + outInfo.allocationCount = rangeCount - m_FreeCount; + outInfo.unusedRangeCount = m_FreeCount; + + outInfo.unusedBytes = m_SumFreeSize; + outInfo.usedBytes = GetSize() - outInfo.unusedBytes; + + outInfo.allocationSizeMin = UINT64_MAX; + outInfo.allocationSizeMax = 0; + outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMax = 0; + + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem) + { + const VmaSuballocation& suballoc = *suballocItem; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); + outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size); + } + else + { + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size); + outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size); + } + } +} + +void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); + + inoutStats.size += GetSize(); + inoutStats.unusedSize += m_SumFreeSize; + inoutStats.allocationCount += rangeCount - m_FreeCount; + inoutStats.unusedRangeCount += m_FreeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax()); +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const +{ + PrintDetailedMap_Begin(json, + m_SumFreeSize, // unusedBytes + m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount + m_FreeCount); // unusedRangeCount + + size_t i = 0; + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem, ++i) + { + if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size); + } + else + { + PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation); + } + } + + PrintDetailedMap_End(json); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Generic::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(!upperAddress); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + pAllocationRequest->type = VmaAllocationRequestType::Normal; + + // There is not enough total 
free space in this block to fullfill the request: Early return. + if(canMakeOtherLost == false && + m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN) + { + return false; + } + + // New algorithm, efficiently searching freeSuballocationsBySize. + const size_t freeSuballocCount = m_FreeSuballocationsBySize.size(); + if(freeSuballocCount > 0) + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN. + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + freeSuballocCount, + allocSize + 2 * VMA_DEBUG_MARGIN, + VmaSuballocationItemSizeLess()); + size_t index = it - m_FreeSuballocationsBySize.data(); + for(; index < freeSuballocCount; ++index) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + false, // canMakeOtherLost + &pAllocationRequest->offset, + &pAllocationRequest->itemsToMakeLostCount, + &pAllocationRequest->sumFreeSize, + &pAllocationRequest->sumItemSize)) + { + pAllocationRequest->item = m_FreeSuballocationsBySize[index]; + return true; + } + } + } + else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) + { + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + it, + false, // canMakeOtherLost + &pAllocationRequest->offset, + &pAllocationRequest->itemsToMakeLostCount, + &pAllocationRequest->sumFreeSize, + &pAllocationRequest->sumItemSize)) + { + pAllocationRequest->item = it; + return true; + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Search staring from biggest suballocations. + for(size_t index = freeSuballocCount; index--; ) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + false, // canMakeOtherLost + &pAllocationRequest->offset, + &pAllocationRequest->itemsToMakeLostCount, + &pAllocationRequest->sumFreeSize, + &pAllocationRequest->sumItemSize)) + { + pAllocationRequest->item = m_FreeSuballocationsBySize[index]; + return true; + } + } + } + } + + if(canMakeOtherLost) + { + // Brute-force algorithm. TODO: Come up with something better. 
+ + bool found = false; + VmaAllocationRequest tmpAllocRequest = {}; + tmpAllocRequest.type = VmaAllocationRequestType::Normal; + for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin(); + suballocIt != m_Suballocations.end(); + ++suballocIt) + { + if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE || + suballocIt->hAllocation->CanBecomeLost()) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + suballocIt, + canMakeOtherLost, + &tmpAllocRequest.offset, + &tmpAllocRequest.itemsToMakeLostCount, + &tmpAllocRequest.sumFreeSize, + &tmpAllocRequest.sumItemSize)) + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + *pAllocationRequest = tmpAllocRequest; + pAllocationRequest->item = suballocIt; + break; + } + if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost()) + { + *pAllocationRequest = tmpAllocRequest; + pAllocationRequest->item = suballocIt; + found = true; + } + } + } + } + + return found; + } + + return false; +} + +bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal); + + while(pAllocationRequest->itemsToMakeLostCount > 0) + { + if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++pAllocationRequest->item; + } + VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end()); + VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost()); + if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item); + --pAllocationRequest->itemsToMakeLostCount; + } + else + { + return false; + } + } + + VMA_HEAVY_ASSERT(Validate()); + VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end()); + VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE); + + return true; +} + +uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + uint32_t lostAllocationCount = 0; + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE && + it->hAllocation->CanBecomeLost() && + it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + it = FreeSuballocation(it); + ++lostAllocationCount; + } + } + return lostAllocationCount; +} + +VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) +{ + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, it->offset + it->size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Generic::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + VMA_ASSERT(request.type == 
VmaAllocationRequestType::Normal); + VMA_ASSERT(request.item != m_Suballocations.end()); + VmaSuballocation& suballoc = *request.item; + // Given suballocation is a free block. + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Given offset is inside this suballocation. + VMA_ASSERT(request.offset >= suballoc.offset); + const VkDeviceSize paddingBegin = request.offset - suballoc.offset; + VMA_ASSERT(suballoc.size >= paddingBegin + allocSize); + const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize; + + // Unregister this free suballocation from m_FreeSuballocationsBySize and update + // it to become used. + UnregisterFreeSuballocation(request.item); + + suballoc.offset = request.offset; + suballoc.size = allocSize; + suballoc.type = type; + suballoc.hAllocation = hAllocation; + + // If there are any free bytes remaining at the end, insert new free suballocation after current one. + if(paddingEnd) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = request.offset + allocSize; + paddingSuballoc.size = paddingEnd; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + VmaSuballocationList::iterator next = request.item; + ++next; + const VmaSuballocationList::iterator paddingEndItem = + m_Suballocations.insert(next, paddingSuballoc); + RegisterFreeSuballocation(paddingEndItem); + } + + // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. + if(paddingBegin) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = request.offset - paddingBegin; + paddingSuballoc.size = paddingBegin; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + const VmaSuballocationList::iterator paddingBeginItem = + m_Suballocations.insert(request.item, paddingSuballoc); + RegisterFreeSuballocation(paddingBeginItem); + } + + // Update totals. 
+ m_FreeCount = m_FreeCount - 1; + if(paddingBegin > 0) + { + ++m_FreeCount; + } + if(paddingEnd > 0) + { + ++m_FreeCount; + } + m_SumFreeSize -= allocSize; +} + +void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation) +{ + for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.hAllocation == allocation) + { + FreeSuballocation(suballocItem); + VMA_HEAVY_ASSERT(Validate()); + return; + } + } + VMA_ASSERT(0 && "Not found!"); +} + +void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) +{ + for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.offset == offset) + { + FreeSuballocation(suballocItem); + return; + } + } + VMA_ASSERT(0 && "Not found!"); +} + +bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const +{ + VkDeviceSize lastSize = 0; + for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) + { + const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; + + VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER); + VMA_VALIDATE(it->size >= lastSize); + lastSize = it->size; + } + return true; +} + +bool VmaBlockMetadata_Generic::CheckAllocation( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(suballocItem != m_Suballocations.cend()); + VMA_ASSERT(pOffset != VMA_NULL); + + *itemsToMakeLostCount = 0; + *pSumFreeSize = 0; + *pSumItemSize = 0; + + if(canMakeOtherLost) + { + if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + *pSumFreeSize = suballocItem->size; + } + else + { + if(suballocItem->hAllocation->CanBecomeLost() && + suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++*itemsToMakeLostCount; + *pSumItemSize = suballocItem->size; + } + else + { + return false; + } + } + + // Remaining size is too small for this request: Early return. + if(GetSize() - suballocItem->offset < allocSize) + { + return false; + } + + // Start from offset equal to beginning of this suballocation. + *pOffset = suballocItem->offset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + *pOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + *pOffset = VmaAlignUp(*pOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. 
+ if(bufferImageGranularity > 1) + { + bool bufferImageGranularityConflict = false; + VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; + while(prevSuballocItem != m_Suballocations.cbegin()) + { + --prevSuballocItem; + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); + } + } + + // Now that we have final *pOffset, check if we are past suballocItem. + // If yes, return false - this function should be called for another suballocItem as starting point. + if(*pOffset >= suballocItem->offset + suballocItem->size) + { + return false; + } + + // Calculate padding at the beginning based on current offset. + const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset; + + // Calculate required margin at the end. + const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN; + + const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin; + // Another early return check. + if(suballocItem->offset + totalSize > GetSize()) + { + return false; + } + + // Advance lastSuballocItem until desired size is reached. + // Update itemsToMakeLostCount. + VmaSuballocationList::const_iterator lastSuballocItem = suballocItem; + if(totalSize > suballocItem->size) + { + VkDeviceSize remainingSize = totalSize - suballocItem->size; + while(remainingSize > 0) + { + ++lastSuballocItem; + if(lastSuballocItem == m_Suballocations.cend()) + { + return false; + } + if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + *pSumFreeSize += lastSuballocItem->size; + } + else + { + VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE); + if(lastSuballocItem->hAllocation->CanBecomeLost() && + lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++*itemsToMakeLostCount; + *pSumItemSize += lastSuballocItem->size; + } + else + { + return false; + } + } + remainingSize = (lastSuballocItem->size < remainingSize) ? + remainingSize - lastSuballocItem->size : 0; + } + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, we must mark more allocations lost or fail. + if(bufferImageGranularity > 1) + { + VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem; + ++nextSuballocItem; + while(nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE); + if(nextSuballoc.hAllocation->CanBecomeLost() && + nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++*itemsToMakeLostCount; + } + else + { + return false; + } + } + } + else + { + // Already on next page. + break; + } + ++nextSuballocItem; + } + } + } + else + { + const VmaSuballocation& suballoc = *suballocItem; + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + *pSumFreeSize = suballoc.size; + + // Size of this suballocation is too small for this request: Early return. 
+ if(suballoc.size < allocSize) + { + return false; + } + + // Start from offset equal to beginning of this suballocation. + *pOffset = suballoc.offset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + *pOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + *pOffset = VmaAlignUp(*pOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1) + { + bool bufferImageGranularityConflict = false; + VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; + while(prevSuballocItem != m_Suballocations.cbegin()) + { + --prevSuballocItem; + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); + } + } + + // Calculate padding at the beginning based on current offset. + const VkDeviceSize paddingBegin = *pOffset - suballoc.offset; + + // Calculate required margin at the end. + const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN; + + // Fail if requested size plus margin before and after is bigger than size of this suballocation. + if(paddingBegin + allocSize + requiredEndMargin > suballoc.size) + { + return false; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1) + { + VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; + ++nextSuballocItem; + while(nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + ++nextSuballocItem; + } + } + } + + // All tests passed: Success. pOffset is already filled. + return true; +} + +void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item != m_Suballocations.end()); + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaSuballocationList::iterator nextItem = item; + ++nextItem; + VMA_ASSERT(nextItem != m_Suballocations.end()); + VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE); + + item->size += nextItem->size; + --m_FreeCount; + m_Suballocations.erase(nextItem); +} + +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) +{ + // Change this suballocation to be marked as free. + VmaSuballocation& suballoc = *suballocItem; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + + // Update totals. + ++m_FreeCount; + m_SumFreeSize += suballoc.size; + + // Merge with previous and/or next suballocation if it's also free. 
+ bool mergeWithNext = false; + bool mergeWithPrev = false; + + VmaSuballocationList::iterator nextItem = suballocItem; + ++nextItem; + if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) + { + mergeWithNext = true; + } + + VmaSuballocationList::iterator prevItem = suballocItem; + if(suballocItem != m_Suballocations.begin()) + { + --prevItem; + if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + mergeWithPrev = true; + } + } + + if(mergeWithNext) + { + UnregisterFreeSuballocation(nextItem); + MergeFreeWithNext(suballocItem); + } + + if(mergeWithPrev) + { + UnregisterFreeSuballocation(prevItem); + MergeFreeWithNext(prevItem); + RegisterFreeSuballocation(prevItem); + return prevItem; + } + else + { + RegisterFreeSuballocation(suballocItem); + return suballocItem; + } +} + +void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(item->size > 0); + + // You may want to enable this validation at the beginning or at the end of + // this function, depending on what do you want to check. + VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + if(m_FreeSuballocationsBySize.empty()) + { + m_FreeSuballocationsBySize.push_back(item); + } + else + { + VmaVectorInsertSorted(m_FreeSuballocationsBySize, item); + } + } + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} + + +void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(item->size > 0); + + // You may want to enable this validation at the beginning or at the end of + // this function, depending on what do you want to check. 
+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), + item, + VmaSuballocationItemSizeLess()); + for(size_t index = it - m_FreeSuballocationsBySize.data(); + index < m_FreeSuballocationsBySize.size(); + ++index) + { + if(m_FreeSuballocationsBySize[index] == item) + { + VmaVectorRemove(m_FreeSuballocationsBySize, index); + return; + } + VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found."); + } + VMA_ASSERT(0 && "Not found."); + } + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} + +bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible( + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const +{ + if(bufferImageGranularity == 1 || IsEmpty()) + { + return false; + } + + VkDeviceSize minAlignment = VK_WHOLE_SIZE; + bool typeConflictFound = false; + for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin(); + it != m_Suballocations.cend(); + ++it) + { + const VmaSuballocationType suballocType = it->type; + if(suballocType != VMA_SUBALLOCATION_TYPE_FREE) + { + minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment()); + if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType)) + { + typeConflictFound = true; + } + inOutPrevSuballocType = suballocType; + } + } + + return typeConflictFound || minAlignment >= bufferImageGranularity; +} + +//////////////////////////////////////////////////////////////////////////////// +// class VmaBlockMetadata_Linear + +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) : + VmaBlockMetadata(hAllocator), + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_Suballocations1(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) +{ +} + +VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear() +{ +} + +void VmaBlockMetadata_Linear::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + m_SumFreeSize = size; +} + +bool VmaBlockMetadata_Linear::Validate() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); + VMA_VALIDATE(!suballocations1st.empty() || + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + + if(!suballocations1st.empty()) + { + // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. + VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE); + // Null item at the end should be just pop_back(). + VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE); + } + if(!suballocations2nd.empty()) + { + // Null item at the end should be just pop_back(). 
+ VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + VkDeviceSize offset = VMA_DEBUG_MARGIN; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for(size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation == VK_NULL_HANDLE); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for(size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const +{ + const VkDeviceSize size = GetSize(); + + /* + We don't consider gaps inside allocation 
vectors with freed allocations because + they are not suitable for reuse in linear allocator. We consider only space that + is available for new allocations. + */ + if(IsEmpty()) + { + return size; + } + + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + switch(m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + /* + Available space is after end of 1st, as well as before beginning of 1st (which + whould make it a ring buffer). + */ + { + const size_t suballocations1stCount = suballocations1st.size(); + VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount); + const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1]; + return VMA_MAX( + firstSuballoc.offset, + size - (lastSuballoc.offset + lastSuballoc.size)); + } + break; + + case SECOND_VECTOR_RING_BUFFER: + /* + Available space is only between end of 2nd and beginning of 1st. + */ + { + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back(); + const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount]; + return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size); + } + break; + + case SECOND_VECTOR_DOUBLE_STACK: + /* + Available space is only between end of 1st and top of 2nd. + */ + { + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VmaSuballocation& topSuballoc2nd = suballocations2nd.back(); + const VmaSuballocation& lastSuballoc1st = suballocations1st.back(); + return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size); + } + break; + + default: + VMA_ASSERT(0); + return 0; + } +} + +void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + outInfo.blockCount = 1; + outInfo.allocationCount = (uint32_t)GetAllocationCount(); + outInfo.unusedRangeCount = 0; + outInfo.usedBytes = 0; + outInfo.allocationSizeMin = UINT64_MAX; + outInfo.allocationSizeMax = 0; + outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMax = 0; + + VkDeviceSize lastOffset = 0; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusedRangeSize; + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); + outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + outInfo.usedBytes += suballoc.size; + outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); + outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + if(lastOffset < freeSpace2ndTo1stEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusedRangeSize; + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); + outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusedRangeSize; + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); + outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + outInfo.usedBytes += suballoc.size; + outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); + outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + if(lastOffset < freeSpace1stTo2ndEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusedRangeSize; + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); + outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAllocIndex to the end. 
+ while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusedRangeSize; + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); + outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + outInfo.usedBytes += suballoc.size; + outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); + outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to size. + if(lastOffset < size) + { + const VkDeviceSize unusedRangeSize = size - lastOffset; + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusedRangeSize; + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize); + outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + outInfo.unusedBytes = size - outInfo.usedBytes; +} + +void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VkDeviceSize size = GetSize(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.size += size; + + VkDeviceSize lastOffset = 0; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. 
+ else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. 
+ const VkDeviceSize unusedRangeSize = size - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. + const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + return upperAddress ? 
+ CreateAllocationRequest_UpperAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize size = GetSize(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if(allocSize > size) + { + return false; + } + VkDeviceSize resultBaseOffset = size - allocSize; + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if(allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the end. + if(VMA_DEBUG_MARGIN > 0) + { + if(resultOffset < VMA_DEBUG_MARGIN) + { + return false; + } + resultOffset -= VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1) + { + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->offset = resultOffset; + pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st; + pAllocationRequest->sumItemSize = 0; + // pAllocationRequest->item unused. + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize size = GetSize(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if(!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + resultOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : size; + + // There is enough free space at the end after alignment. + if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->offset = resultOffset; + pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset; + pAllocationRequest->sumItemSize = 0; + // pAllocationRequest->item, customData unused. 
+ pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + pAllocationRequest->itemsToMakeLostCount = 0; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + resultOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->sumItemSize = 0; + size_t index1st = m_1stNullItemsBeginCount; + + if(canMakeOtherLost) + { + while(index1st < suballocations1st.size() && + resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset) + { + // Next colliding allocation at the beginning of 1st vector found. Try to make it lost. + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) + { + // No problem. + } + else + { + VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE); + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++pAllocationRequest->itemsToMakeLostCount; + pAllocationRequest->sumItemSize += suballoc.size; + } + else + { + return false; + } + } + ++index1st; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, we must mark more allocations lost or fail. + if(bufferImageGranularity > 1) + { + while(index1st < suballocations1st.size()) + { + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity)) + { + if(suballoc.hAllocation != VK_NULL_HANDLE) + { + // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type). + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++pAllocationRequest->itemsToMakeLostCount; + pAllocationRequest->sumItemSize += suballoc.size; + } + else + { + return false; + } + } + } + else + { + // Already on next page. 
+                        break;
+                    }
+                    ++index1st;
+                }
+            }
+
+            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
+            if(index1st == suballocations1st.size() &&
+                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
+            {
+                // TODO: Proper handling of this case is not implemented yet, so the allocation simply fails.
+                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
+            }
+        }
+
+        // There is enough free space at the end after alignment.
+        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
+            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
+        {
+            // Check next suballocations for BufferImageGranularity conflicts.
+            // If conflict exists, allocation cannot be made here.
+            if(bufferImageGranularity > 1)
+            {
+                for(size_t nextSuballocIndex = index1st;
+                    nextSuballocIndex < suballocations1st.size();
+                    nextSuballocIndex++)
+                {
+                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
+                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                    {
+                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                        {
+                            return false;
+                        }
+                    }
+                    else
+                    {
+                        // Already on next page.
+                        break;
+                    }
+                }
+            }
+
+            // All tests passed: Success.
+            pAllocationRequest->offset = resultOffset;
+            pAllocationRequest->sumFreeSize =
+                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
+                - resultBaseOffset
+                - pAllocationRequest->sumItemSize;
+            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
+            // pAllocationRequest->item, customData unused.
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
+    uint32_t currentFrameIndex,
+    uint32_t frameInUseCount,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    if(pAllocationRequest->itemsToMakeLostCount == 0)
+    {
+        return true;
+    }
+
+    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
+
+    // We always start from 1st.
+    SuballocationVectorType* suballocations = &AccessSuballocations1st();
+    size_t index = m_1stNullItemsBeginCount;
+    size_t madeLostCount = 0;
+    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
+    {
+        if(index == suballocations->size())
+        {
+            index = 0;
+            // If we reach the end of 1st, we wrap around to the beginning of 2nd.
+            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+            {
+                suballocations = &AccessSuballocations2nd();
+            }
+            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
+            // suballocations continues pointing at AccessSuballocations1st().
+            VMA_ASSERT(!suballocations->empty());
+        }
+        VmaSuballocation& suballoc = (*suballocations)[index];
+        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
+            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+            {
+                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+                suballoc.hAllocation = VK_NULL_HANDLE;
+                m_SumFreeSize += suballoc.size;
+                if(suballocations == &AccessSuballocations1st())
+                {
+                    ++m_1stNullItemsMiddleCount;
+                }
+                else
+                {
+                    ++m_2ndNullItemsCount;
+                }
+                ++madeLostCount;
+            }
+            else
+            {
+                return false;
+            }
+        }
+        ++index;
+    }
+
+    CleanupAfterFree();
+    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
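+    // At this point all requested allocations have been marked lost and the metadata has been cleaned up by CleanupAfterFree().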
+ + return true; +} + +uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + uint32_t lostAllocationCount = 0; + + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += suballoc.size; + ++lostAllocationCount; + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + ++m_2ndNullItemsCount; + m_SumFreeSize += suballoc.size; + ++lostAllocationCount; + } + } + + if(lostAllocationCount) + { + CleanupAfterFree(); + } + + return lostAllocationCount; +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type }; + + switch(request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } 
+ break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + request.offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(request.offset + allocSize <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. + VMA_ASSERT(!suballocations1st.empty() && + request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch(m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation) +{ + FreeAtOffset(allocation->GetOffset()); +} + +void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if(firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.hAllocation = VK_NULL_HANDLE; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if(lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if(lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + // Item from the middle of 1st vector. + { + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. 
+ SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if(it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->hAllocation = VK_NULL_HANDLE; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if(m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if(it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->hAllocation = VK_NULL_HANDLE; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +bool VmaBlockMetadata_Linear::ShouldCompact1st() const +{ + const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + const size_t suballocCount = AccessSuballocations1st().size(); + return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; +} + +void VmaBlockMetadata_Linear::CleanupAfterFree() +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(IsEmpty()) + { + suballocations1st.clear(); + suballocations2nd.clear(); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + else + { + const size_t suballoc1stCount = suballocations1st.size(); + const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + VMA_ASSERT(nullItem1stCount <= suballoc1stCount); + + // Find more null items at the beginning of 1st vector. + while(m_1stNullItemsBeginCount < suballoc1stCount && + suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + + // Find more null items at the end of 1st vector. + while(m_1stNullItemsMiddleCount > 0 && + suballocations1st.back().hAllocation == VK_NULL_HANDLE) + { + --m_1stNullItemsMiddleCount; + suballocations1st.pop_back(); + } + + // Find more null items at the end of 2nd vector. + while(m_2ndNullItemsCount > 0 && + suballocations2nd.back().hAllocation == VK_NULL_HANDLE) + { + --m_2ndNullItemsCount; + suballocations2nd.pop_back(); + } + + // Find more null items at the beginning of 2nd vector. 
+        while(m_2ndNullItemsCount > 0 &&
+            suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
+        {
+            --m_2ndNullItemsCount;
+            VmaVectorRemove(suballocations2nd, 0);
+        }
+
+        if(ShouldCompact1st())
+        {
+            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+            size_t srcIndex = m_1stNullItemsBeginCount;
+            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+            {
+                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
+                {
+                    ++srcIndex;
+                }
+                if(dstIndex != srcIndex)
+                {
+                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
+                }
+                ++srcIndex;
+            }
+            suballocations1st.resize(nonNullItemCount);
+            m_1stNullItemsBeginCount = 0;
+            m_1stNullItemsMiddleCount = 0;
+        }
+
+        // 2nd vector became empty.
+        if(suballocations2nd.empty())
+        {
+            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+        }
+
+        // 1st vector became empty.
+        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
+        {
+            suballocations1st.clear();
+            m_1stNullItemsBeginCount = 0;
+
+            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+            {
+                // Swap 1st with 2nd. Now 2nd is empty.
+                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
+                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
+                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
+                {
+                    ++m_1stNullItemsBeginCount;
+                    --m_1stNullItemsMiddleCount;
+                }
+                m_2ndNullItemsCount = 0;
+                m_1stVectorIndex ^= 1;
+            }
+        }
+    }
+
+    VMA_HEAVY_ASSERT(Validate());
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Buddy
+
+VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
+    VmaBlockMetadata(hAllocator),
+    m_Root(VMA_NULL),
+    m_AllocationCount(0),
+    m_FreeCount(1),
+    m_SumFreeSize(0)
+{
+    memset(m_FreeList, 0, sizeof(m_FreeList));
+}
+
+VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
+{
+    DeleteNode(m_Root);
+}
+
+void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+
+    m_UsableSize = VmaPrevPow2(size);
+    m_SumFreeSize = m_UsableSize;
+
+    // Calculate m_LevelCount.
+    m_LevelCount = 1;
+    while(m_LevelCount < MAX_LEVELS &&
+        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
+    {
+        ++m_LevelCount;
+    }
+
+    Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
+    rootNode->offset = 0;
+    rootNode->type = Node::TYPE_FREE;
+    rootNode->parent = VMA_NULL;
+    rootNode->buddy = VMA_NULL;
+
+    m_Root = rootNode;
+    AddToFreeListFront(0, rootNode);
+}
+
+bool VmaBlockMetadata_Buddy::Validate() const
+{
+    // Validate tree.
+    ValidationContext ctx;
+    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
+    {
+        VMA_VALIDATE(false && "ValidateNode failed.");
+    }
+    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+
+    // Validate free node lists.
+    for(uint32_t level = 0; level < m_LevelCount; ++level)
+    {
+        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+            m_FreeList[level].front->free.prev == VMA_NULL);
+
+        for(Node* node = m_FreeList[level].front;
+            node != VMA_NULL;
+            node = node->free.next)
+        {
+            VMA_VALIDATE(node->type == Node::TYPE_FREE);
+
+            if(node->free.next == VMA_NULL)
+            {
+                VMA_VALIDATE(m_FreeList[level].back == node);
+            }
+            else
+            {
+                VMA_VALIDATE(node->free.next->free.prev == node);
+            }
+        }
+    }
+
+    // Validate that free lists at higher levels are empty.
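+    // Only levels [0, m_LevelCount) are ever used; a non-empty list above that range would indicate corruption.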
+ for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) + { + VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL); + } + + return true; +} + +VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const +{ + for(uint32_t level = 0; level < m_LevelCount; ++level) + { + if(m_FreeList[level].front != VMA_NULL) + { + return LevelToNodeSize(level); + } + } + return 0; +} + +void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + const VkDeviceSize unusableSize = GetUnusableSize(); + + outInfo.blockCount = 1; + + outInfo.allocationCount = outInfo.unusedRangeCount = 0; + outInfo.usedBytes = outInfo.unusedBytes = 0; + + outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0; + outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused. + + CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0)); + + if(unusableSize > 0) + { + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusableSize; + outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize); + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize); + } +} + +void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const VkDeviceSize unusableSize = GetUnusableSize(); + + inoutStats.size += GetSize(); + inoutStats.unusedSize += m_SumFreeSize + unusableSize; + inoutStats.allocationCount += m_AllocationCount; + inoutStats.unusedRangeCount += m_FreeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax()); + + if(unusableSize > 0) + { + ++inoutStats.unusedRangeCount; + // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations. + } +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const +{ + // TODO optimize + VmaStatInfo stat; + CalcAllocationStatInfo(stat); + + PrintDetailedMap_Begin( + json, + stat.unusedBytes, + stat.allocationCount, + stat.unusedRangeCount); + + PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); + + const VkDeviceSize unusableSize = GetUnusableSize(); + if(unusableSize > 0) + { + PrintDetailedMap_UnusedRange(json, + m_UsableSize, // offset + unusableSize); // size + } + + PrintDetailedMap_End(json); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Buddy::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); + + // Simple way to respect bufferImageGranularity. May be optimized some day. + // Whenever it might be an OPTIMAL image... 
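+    // The check below rounds both the alignment and the size of any allocation that may contain an
+    // optimal-tiling image up to bufferImageGranularity, which in practice keeps such allocations on
+    // separate granularity pages at the cost of some padding.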
+ if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity); + allocSize = VMA_MAX(allocSize, bufferImageGranularity); + } + + if(allocSize > m_UsableSize) + { + return false; + } + + const uint32_t targetLevel = AllocSizeToLevel(allocSize); + for(uint32_t level = targetLevel + 1; level--; ) + { + for(Node* freeNode = m_FreeList[level].front; + freeNode != VMA_NULL; + freeNode = freeNode->free.next) + { + if(freeNode->offset % allocAlignment == 0) + { + pAllocationRequest->type = VmaAllocationRequestType::Normal; + pAllocationRequest->offset = freeNode->offset; + pAllocationRequest->sumFreeSize = LevelToNodeSize(level); + pAllocationRequest->sumItemSize = 0; + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->customData = (void*)(uintptr_t)level; + return true; + } + } + } + + return false; +} + +bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) +{ + /* + Lost allocations are not supported in buddy allocator at the moment. + Support might be added in the future. + */ + return pAllocationRequest->itemsToMakeLostCount == 0; +} + +uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + /* + Lost allocations are not supported in buddy allocator at the moment. + Support might be added in the future. + */ + return 0; +} + +void VmaBlockMetadata_Buddy::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); + + const uint32_t targetLevel = AllocSizeToLevel(allocSize); + uint32_t currLevel = (uint32_t)(uintptr_t)request.customData; + + Node* currNode = m_FreeList[currLevel].front; + VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); + while(currNode->offset != request.offset) + { + currNode = currNode->free.next; + VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); + } + + // Go down, splitting free nodes. + while(currLevel < targetLevel) + { + // currNode is already first free node at currLevel. + // Remove it from list of free nodes at this currLevel. + RemoveFromFreeList(currLevel, currNode); + + const uint32_t childrenLevel = currLevel + 1; + + // Create two free sub-nodes. + Node* leftChild = vma_new(GetAllocationCallbacks(), Node)(); + Node* rightChild = vma_new(GetAllocationCallbacks(), Node)(); + + leftChild->offset = currNode->offset; + leftChild->type = Node::TYPE_FREE; + leftChild->parent = currNode; + leftChild->buddy = rightChild; + + rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel); + rightChild->type = Node::TYPE_FREE; + rightChild->parent = currNode; + rightChild->buddy = leftChild; + + // Convert current currNode to split type. + currNode->type = Node::TYPE_SPLIT; + currNode->split.leftChild = leftChild; + + // Add child nodes to free list. Order is important! + AddToFreeListFront(childrenLevel, rightChild); + AddToFreeListFront(childrenLevel, leftChild); + + ++m_FreeCount; + //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2. 
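+        // Descend to the left child: it was pushed to the free list last, so it is now at the front of the child level's list.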
+        ++currLevel;
+        currNode = m_FreeList[currLevel].front;
+
+        /*
+        We can be sure that currNode, as left child of node previously split,
+        also fulfills the alignment requirement.
+        */
+    }
+
+    // Remove from free list.
+    VMA_ASSERT(currLevel == targetLevel &&
+        currNode != VMA_NULL &&
+        currNode->type == Node::TYPE_FREE);
+    RemoveFromFreeList(currLevel, currNode);
+
+    // Convert to allocation node.
+    currNode->type = Node::TYPE_ALLOCATION;
+    currNode->allocation.alloc = hAllocation;
+
+    ++m_AllocationCount;
+    --m_FreeCount;
+    m_SumFreeSize -= allocSize;
+}
+
+void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
+{
+    if(node->type == Node::TYPE_SPLIT)
+    {
+        DeleteNode(node->split.leftChild->buddy);
+        DeleteNode(node->split.leftChild);
+    }
+
+    vma_delete(GetAllocationCallbacks(), node);
+}
+
+bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
+{
+    VMA_VALIDATE(level < m_LevelCount);
+    VMA_VALIDATE(curr->parent == parent);
+    VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
+    VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
+    switch(curr->type)
+    {
+    case Node::TYPE_FREE:
+        // curr->free.prev, next are validated separately.
+        ctx.calculatedSumFreeSize += levelNodeSize;
+        ++ctx.calculatedFreeCount;
+        break;
+    case Node::TYPE_ALLOCATION:
+        ++ctx.calculatedAllocationCount;
+        ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
+        VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
+        break;
+    case Node::TYPE_SPLIT:
+        {
+            const uint32_t childrenLevel = level + 1;
+            const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
+            const Node* const leftChild = curr->split.leftChild;
+            VMA_VALIDATE(leftChild != VMA_NULL);
+            VMA_VALIDATE(leftChild->offset == curr->offset);
+            if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
+            {
+                VMA_VALIDATE(false && "ValidateNode for left child failed.");
+            }
+            const Node* const rightChild = leftChild->buddy;
+            VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
+            if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
+            {
+                VMA_VALIDATE(false && "ValidateNode for right child failed.");
+            }
+        }
+        break;
+    default:
+        return false;
+    }
+
+    return true;
+}
+
+uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
+{
+    // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
+    uint32_t level = 0;
+    VkDeviceSize currLevelNodeSize = m_UsableSize;
+    VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
+    while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
+    {
+        ++level;
+        currLevelNodeSize = nextLevelNodeSize;
+        nextLevelNodeSize = currLevelNodeSize >> 1;
+    }
+    return level;
+}
+
+void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
+{
+    // Find node and level.
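+    // Walk down from the root, at each split choosing the child whose range contains 'offset', until the allocation node is reached.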
+    Node* node = m_Root;
+    VkDeviceSize nodeOffset = 0;
+    uint32_t level = 0;
+    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
+    while(node->type == Node::TYPE_SPLIT)
+    {
+        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
+        if(offset < nodeOffset + nextLevelSize)
+        {
+            node = node->split.leftChild;
+        }
+        else
+        {
+            node = node->split.leftChild->buddy;
+            nodeOffset += nextLevelSize;
+        }
+        ++level;
+        levelNodeSize = nextLevelSize;
+    }
+
+    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
+    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
+
+    ++m_FreeCount;
+    --m_AllocationCount;
+    m_SumFreeSize += alloc->GetSize();
+
+    node->type = Node::TYPE_FREE;
+
+    // Join free nodes if possible.
+    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
+    {
+        RemoveFromFreeList(level, node->buddy);
+        Node* const parent = node->parent;
+
+        vma_delete(GetAllocationCallbacks(), node->buddy);
+        vma_delete(GetAllocationCallbacks(), node);
+        parent->type = Node::TYPE_FREE;
+
+        node = parent;
+        --level;
+        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
+        --m_FreeCount;
+    }
+
+    AddToFreeListFront(level, node);
+}
+
+void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
+{
+    switch(node->type)
+    {
+    case Node::TYPE_FREE:
+        ++outInfo.unusedRangeCount;
+        outInfo.unusedBytes += levelNodeSize;
+        outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
+        outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
+        break;
+    case Node::TYPE_ALLOCATION:
+        {
+            const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+            ++outInfo.allocationCount;
+            outInfo.usedBytes += allocSize;
+            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
+            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
+
+            const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
+            if(unusedRangeSize > 0)
+            {
+                ++outInfo.unusedRangeCount;
+                outInfo.unusedBytes += unusedRangeSize;
+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+            }
+        }
+        break;
+    case Node::TYPE_SPLIT:
+        {
+            const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+            const Node* const leftChild = node->split.leftChild;
+            CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
+            const Node* const rightChild = leftChild->buddy;
+            CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
+        }
+        break;
+    default:
+        VMA_ASSERT(0);
+    }
+}
+
+void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
+{
+    VMA_ASSERT(node->type == Node::TYPE_FREE);
+
+    // List is empty.
+    Node* const frontNode = m_FreeList[level].front;
+    if(frontNode == VMA_NULL)
+    {
+        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+        node->free.prev = node->free.next = VMA_NULL;
+        m_FreeList[level].front = m_FreeList[level].back = node;
+    }
+    else
+    {
+        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+        node->free.prev = VMA_NULL;
+        node->free.next = frontNode;
+        frontNode->free.prev = node;
+        m_FreeList[level].front = node;
+    }
+}
+
+void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
+{
+    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+
+    // It is at the front.
+ if(node->free.prev == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].front == node); + m_FreeList[level].front = node->free.next; + } + else + { + Node* const prevFreeNode = node->free.prev; + VMA_ASSERT(prevFreeNode->free.next == node); + prevFreeNode->free.next = node->free.next; + } + + // It is at the back. + if(node->free.next == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].back == node); + m_FreeList[level].back = node->free.prev; + } + else + { + Node* const nextFreeNode = node->free.next; + VMA_ASSERT(nextFreeNode->free.prev == node); + nextFreeNode->free.prev = node->free.prev; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch(node->type) + { + case Node::TYPE_FREE: + PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + { + PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc); + const VkDeviceSize allocSize = node->allocation.alloc->GetSize(); + if(allocSize < levelNodeSize) + { + PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize); + } + } + break; + case Node::TYPE_SPLIT: + { + const VkDeviceSize childrenNodeSize = levelNodeSize / 2; + const Node* const leftChild = node->split.leftChild; + PrintDetailedMapNode(json, leftChild, childrenNodeSize); + const Node* const rightChild = leftChild->buddy; + PrintDetailedMapNode(json, rightChild, childrenNodeSize); + } + break; + default: + VMA_ASSERT(0); + } +} +#endif // #if VMA_STATS_STRING_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// +// class VmaDeviceMemoryBlock + +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : + m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) +{ +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch(algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator); + break; + case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator); + break; + default: + VMA_ASSERT(0); + // Fall-through. + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator); + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
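+    // Typically this means vmaDestroyBuffer(), vmaDestroyImage() or vmaFreeMemory() was not called for every allocation made from this block.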
+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = nullptr; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if(count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + if(m_MapCount != 0) + { + m_MapCount += count; + VMA_ASSERT(m_pMappedData != VMA_NULL); + if(ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if(result == VK_SUCCESS) + { + if(ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + m_MapCount = count; + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if(count == 0) + { + return; + } + + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + if(m_MapCount >= count) + { + m_MapCount -= count; + if(m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN); + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!"); + } + else if(!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == 
VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} + +static void InitStatInfo(VmaStatInfo& outInfo) +{ + memset(&outInfo, 0, sizeof(outInfo)); + outInfo.allocationSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMin = UINT64_MAX; +} + +// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo. +static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo) +{ + inoutInfo.blockCount += srcInfo.blockCount; + inoutInfo.allocationCount += srcInfo.allocationCount; + inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount; + inoutInfo.usedBytes += srcInfo.usedBytes; + inoutInfo.unusedBytes += srcInfo.unusedBytes; + inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin); + inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax); + inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin); + inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax); +} + +static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo) +{ + inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ? + VmaRoundDiv(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0; + inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ? + VmaRoundDiv(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0; +} + +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) : + m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
+            1 : hAllocator->GetBufferImageGranularity(),
+        createInfo.frameInUseCount,
+        createInfo.blockSize != 0, // explicitBlockSize
+        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
+    m_Id(0),
+    m_Name(VMA_NULL)
+{
+}
+
+VmaPool_T::~VmaPool_T()
+{
+}
+
+void VmaPool_T::SetName(const char* pName)
+{
+    const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
+    VmaFreeString(allocs, m_Name);
+
+    if(pName != VMA_NULL)
+    {
+        m_Name = VmaCreateStringCopy(allocs, pName);
+    }
+    else
+    {
+        m_Name = VMA_NULL;
+    }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+VmaBlockVector::VmaBlockVector(
+    VmaAllocator hAllocator,
+    VmaPool hParentPool,
+    uint32_t memoryTypeIndex,
+    VkDeviceSize preferredBlockSize,
+    size_t minBlockCount,
+    size_t maxBlockCount,
+    VkDeviceSize bufferImageGranularity,
+    uint32_t frameInUseCount,
+    bool explicitBlockSize,
+    uint32_t algorithm) :
+    m_hAllocator(hAllocator),
+    m_hParentPool(hParentPool),
+    m_MemoryTypeIndex(memoryTypeIndex),
+    m_PreferredBlockSize(preferredBlockSize),
+    m_MinBlockCount(minBlockCount),
+    m_MaxBlockCount(maxBlockCount),
+    m_BufferImageGranularity(bufferImageGranularity),
+    m_FrameInUseCount(frameInUseCount),
+    m_ExplicitBlockSize(explicitBlockSize),
+    m_Algorithm(algorithm),
+    m_HasEmptyBlock(false),
+    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
+    m_NextBlockId(0)
+{
+}
+
+VmaBlockVector::~VmaBlockVector()
+{
+    for(size_t i = m_Blocks.size(); i--; )
+    {
+        m_Blocks[i]->Destroy(m_hAllocator);
+        vma_delete(m_hAllocator, m_Blocks[i]);
+    }
+}
+
+VkResult VmaBlockVector::CreateMinBlocks()
+{
+    for(size_t i = 0; i < m_MinBlockCount; ++i)
+    {
+        VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+        if(res != VK_SUCCESS)
+        {
+            return res;
+        }
+    }
+    return VK_SUCCESS;
+}
+
+void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+    const size_t blockCount = m_Blocks.size();
+
+    pStats->size = 0;
+    pStats->unusedSize = 0;
+    pStats->allocationCount = 0;
+    pStats->unusedRangeCount = 0;
+    pStats->unusedRangeSizeMax = 0;
+    pStats->blockCount = blockCount;
+
+    for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+    {
+        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+        VMA_ASSERT(pBlock);
+        VMA_HEAVY_ASSERT(pBlock->Validate());
+        pBlock->m_pMetadata->AddPoolStats(*pStats);
+    }
+}
+
+bool VmaBlockVector::IsEmpty()
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+    return m_Blocks.empty();
+}
+
+bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+{
+    const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+    return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+        (VMA_DEBUG_MARGIN > 0) &&
+        (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+        (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+}
+
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+
+VkResult VmaBlockVector::Allocate(
+    uint32_t currentFrameIndex,
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    const VmaAllocationCreateInfo& createInfo,
+    VmaSuballocationType suballocType,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    size_t allocIndex;
+    VkResult res = VK_SUCCESS;
+
+    if(IsCorruptionDetectionEnabled())
+    {
+        size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+        alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + } + + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocatePage( + currentFrameIndex, + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + } + + if(res != VK_SUCCESS) + { + // Free all already created allocations. + while(allocIndex--) + { + Free(pAllocations[allocIndex]); + } + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaBlockVector::AllocatePage( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0; + const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + + VkDeviceSize freeMemory; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetBudget(&heapBudget, heapIndex, 1); + freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; + } + + const bool canFallbackToDedicated = !IsCustomPool(); + const bool canCreateNewBlock = + ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && + (m_Blocks.size() < m_MaxBlockCount) && + (freeMemory >= size || !canFallbackToDedicated); + uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; + + // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer. + // Which in turn is available only when maxBlockCount = 1. + if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1) + { + canMakeOtherLost = false; + } + + // Upper address can only be used with linear allocator and within single memory block. + if(isUpperAddress && + (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + // Validate strategy. + switch(strategy) + { + case 0: + strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; + break; + case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT: + case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT: + case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT: + break; + default: + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + // Early reject: requested allocation size is larger that maximum block size for this block vector. + if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + /* + Under certain condition, this whole section can be skipped for optimization, so + we move on directly to trying to allocate with canMakeOtherLost. That's the case + e.g. for custom pools with linear algorithm. + */ + if(!canMakeOtherLost || canCreateNewBlock) + { + // 1. Search existing allocations. Try to allocate without making other allocations lost. + VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags; + allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; + + if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. 
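// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// batch path in VmaBlockVector::Allocate earlier in this hunk is all-or-nothing --
// pages are allocated one by one and, on the first failure, everything already
// created is freed again and the output array is cleared. A minimal generic
// version of that pattern, with hypothetical allocOne/freeOne callables:
#include <cstddef>
#include <cstring>

template<typename AllocFn, typename FreeFn>
bool AllocatePagesAllOrNothing(size_t count, void** outHandles, AllocFn allocOne, FreeFn freeOne)
{
    size_t i = 0;
    for(; i < count; ++i)
    {
        if(!allocOne(i, outHandles + i))
            break; // First failure stops the batch.
    }
    if(i < count)
    {
        while(i--) // Roll back the pages that were already created.
            freeOne(outHandles[i]);
        memset(outHandles, 0, sizeof(void*) * count);
        return false;
    }
    return true;
}
// End of sketch.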
+ if(!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if(canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if(!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if(!m_ExplicitBlockSize) + { + while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if(smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if(res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); + return VK_SUCCESS; + } + else + { + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + } + + // 3. Try to allocate from existing blocks with making other allocations lost. + if(canMakeOtherLost) + { + uint32_t tryIndex = 0; + for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex) + { + VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL; + VmaAllocationRequest bestRequest = {}; + VkDeviceSize bestRequestCost = VK_WHOLE_SIZE; + + // 1. Search existing allocations. + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VmaAllocationRequest currRequest = {}; + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { + const VkDeviceSize currRequestCost = currRequest.CalcCost(); + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost) + { + pBestRequestBlock = pCurrBlock; + bestRequest = currRequest; + bestRequestCost = currRequestCost; + + if(bestRequestCost == 0) + { + break; + } + } + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VmaAllocationRequest currRequest = {}; + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { + const VkDeviceSize currRequestCost = currRequest.CalcCost(); + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + pBestRequestBlock = pCurrBlock; + bestRequest = currRequest; + bestRequestCost = currRequestCost; + + if(bestRequestCost == 0 || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + break; + } + } + } + } + } + + if(pBestRequestBlock != VMA_NULL) + { + if(mapped) + { + VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL); + if(res != VK_SUCCESS) + { + return res; + } + } + + if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost( + currentFrameIndex, + m_FrameInUseCount, + &bestRequest)) + { + // Allocate from this pBlock. 
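// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// new-block sizing above starts at m_PreferredBlockSize, optionally drops to 1/2,
// 1/4, 1/8 for the first blocks of a vector, and halves again after each failed
// device memory allocation, but never below the requested size. A simplified
// version that just lists the sizes which would be tried, plus a worked example:
#include <cstdint>
#include <vector>

static std::vector<uint64_t> CandidateBlockSizes(uint64_t preferredBlockSize,
                                                 uint64_t maxExistingBlockSize,
                                                 uint64_t requestedSize)
{
    const uint32_t kMaxShift = 3; // NEW_BLOCK_SIZE_SHIFT_MAX above.
    uint64_t size = preferredBlockSize;
    uint32_t shift = 0;
    // Initial downsizing for the first blocks of the vector.
    while(shift < kMaxShift)
    {
        const uint64_t smaller = size / 2;
        if(smaller > maxExistingBlockSize && smaller >= requestedSize * 2)
        {
            size = smaller;
            ++shift;
        }
        else
            break;
    }
    std::vector<uint64_t> candidates{ size };
    // Fallback sizes, tried only after an allocation failure.
    while(shift < kMaxShift && size / 2 >= requestedSize)
    {
        size /= 2;
        ++shift;
        candidates.push_back(size);
    }
    return candidates;
}
// Example: preferred 256 MiB, empty block vector, request 40 MiB -> {128 MiB, 64 MiB};
// 32 MiB is never tried because it could not hold the 40 MiB request.
// End of sketch.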
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString); + pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation); + UpdateHasEmptyBlock(); + (*pAllocation)->InitBlockAllocation( + pBestRequestBlock, + bestRequest.offset, + alignment, + size, + m_MemoryTypeIndex, + suballocType, + mapped, + (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); + VMA_HEAVY_ASSERT(pBestRequestBlock->Validate()); + VMA_DEBUG_LOG(" Returned from existing block"); + (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if(IsCorruptionDetectionEnabled()) + { + VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; + } + // else: Some allocations must have been touched while we are here. Next try. + } + else + { + // Could not find place in any of the blocks - break outer loop. + break; + } + } + /* Maximum number of tries exceeded - a very unlike event when many other + threads are simultaneously touching allocations making it impossible to make + lost at the same time as we try to allocate. */ + if(tryIndex == VMA_ALLOCATION_TRY_COUNT) + { + return VK_ERROR_TOO_MANY_OBJECTS; + } + } + + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaBlockVector::Free( + const VmaAllocation hAllocation) +{ + VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + + bool budgetExceeded = false; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetBudget(&heapBudget, heapIndex, 1); + budgetExceeded = heapBudget.usage >= heapBudget.budget; + } + + // Scope for lock. + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + + if(IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); + } + + if(hAllocation->IsPersistentMap()) + { + pBlock->Unmap(m_hAllocator, 1); + } + + pBlock->m_pMetadata->Free(hAllocation); + VMA_HEAVY_ASSERT(pBlock->Validate()); + + VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex); + + const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; + // pBlock became empty after this deallocation. + if(pBlock->m_pMetadata->IsEmpty()) + { + // Already has empty block. We don't want to have two, so delete this one. + if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock) + { + pBlockToDelete = pBlock; + Remove(pBlock); + } + // else: We now have an empty block - leave it. + } + // pBlock didn't become empty, but we have another empty block - find and free that one. + // (This is optional, heuristics.) + else if(m_HasEmptyBlock && canDeleteBlock) + { + VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + if(pLastBlock->m_pMetadata->IsEmpty()) + { + pBlockToDelete = pLastBlock; + m_Blocks.pop_back(); + } + } + + UpdateHasEmptyBlock(); + IncrementallySortBlocks(); + } + + // Destruction of a free block. 
Deferred until this point, outside of mutex + // lock, for performance reason. + if(pBlockToDelete != VMA_NULL) + { + VMA_DEBUG_LOG(" Deleted empty block"); + pBlockToDelete->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlockToDelete); + } +} + +VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const +{ + VkDeviceSize result = 0; + for(size_t i = m_Blocks.size(); i--; ) + { + result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); + if(result >= m_PreferredBlockSize) + { + break; + } + } + return result; +} + +void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +{ + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + if(m_Blocks[blockIndex] == pBlock) + { + VmaVectorRemove(m_Blocks, blockIndex); + return; + } + } + VMA_ASSERT(0); +} + +void VmaBlockVector::IncrementallySortBlocks() +{ + if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Bubble sort only until first swap. + for(size_t i = 1; i < m_Blocks.size(); ++i) + { + if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) + { + VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]); + return; + } + } + } +} + +VkResult VmaBlockVector::AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation) +{ + VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0); + const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + + VmaAllocationRequest currRequest = {}; + if(pBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + isUpperAddress, + suballocType, + false, // canMakeOtherLost + strategy, + &currRequest)) + { + // Allocate from pCurrBlock. 
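// Illustrative sketch (standalone, not from the VMA sources in this patch):
// IncrementallySortBlocks above keeps m_Blocks roughly ordered by free size
// without paying for a full sort after every free -- each call performs at most
// one adjacent swap. Repeated calls converge towards ascending order, which is
// what the best-fit search (forward iteration) relies on. Minimal version, with
// 'freeSize' standing in for pMetadata->GetSumFreeSize():
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

static void IncrementallySort(std::vector<uint64_t>& freeSize)
{
    for(size_t i = 1; i < freeSize.size(); ++i)
    {
        if(freeSize[i - 1] > freeSize[i])
        {
            std::swap(freeSize[i - 1], freeSize[i]);
            return; // Bubble sort, but only until the first swap.
        }
    }
}
// End of sketch.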
+ VMA_ASSERT(currRequest.itemsToMakeLostCount == 0); + + if(mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if(res != VK_SUCCESS) + { + return res; + } + } + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString); + pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation); + UpdateHasEmptyBlock(); + (*pAllocation)->InitBlockAllocation( + pBlock, + currRequest.offset, + alignment, + size, + m_MemoryTypeIndex, + suballocType, + mapped, + (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); + VMA_HEAVY_ASSERT(pBlock->Validate()); + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if(IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = m_MemoryTypeIndex; + allocInfo.allocationSize = blockSize; + +#if VMA_BUFFER_DEVICE_ADDRESS + // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_hAllocator->m_UseKhrBufferDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + + VkDeviceMemory mem = VK_NULL_HANDLE; + VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); + if(res < 0) + { + return res; + } + + // New VkDeviceMemory successfully created. + + // Create new Allocation for it. + VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + pBlock->Init( + m_hAllocator, + m_hParentPool, + m_MemoryTypeIndex, + mem, + allocInfo.allocationSize, + m_NextBlockId++, + m_Algorithm); + + m_Blocks.push_back(pBlock); + if(pNewBlockIndex != VMA_NULL) + { + *pNewBlockIndex = m_Blocks.size() - 1; + } + + return VK_SUCCESS; +} + +void VmaBlockVector::ApplyDefragmentationMovesCpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + const VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves) +{ + const size_t blockCount = m_Blocks.size(); + const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex); + + enum BLOCK_FLAG + { + BLOCK_FLAG_USED = 0x00000001, + BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002, + }; + + struct BlockInfo + { + uint32_t flags; + void* pMappedData; + }; + VmaVector< BlockInfo, VmaStlAllocator > + blockInfo(blockCount, BlockInfo(), VmaStlAllocator(m_hAllocator->GetAllocationCallbacks())); + memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo)); + + // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED. 
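// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// VMA_BUFFER_DEVICE_ADDRESS branch in CreateBlock above chains a
// VkMemoryAllocateFlagsInfo into the VkMemoryAllocateInfo::pNext list
// (VmaPnextChainPushFront) so every block can back buffers created with
// VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT. Spelled out with core Vulkan 1.2
// names (the hunk uses the equivalent _KHR aliases); error handling omitted:
#include <vulkan/vulkan.h>

static VkResult AllocateBlockWithDeviceAddress(VkDevice device, uint32_t memoryTypeIndex,
                                               VkDeviceSize blockSize, VkDeviceMemory* outMem)
{
    VkMemoryAllocateFlagsInfo flagsInfo = {};
    flagsInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
    flagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.pNext = &flagsInfo; // The extension struct rides on the pNext chain.
    allocInfo.allocationSize = blockSize;
    allocInfo.memoryTypeIndex = memoryTypeIndex;

    return vkAllocateMemory(device, &allocInfo, /*pAllocator=*/nullptr, outMem);
}
// End of sketch.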
+ const size_t moveCount = moves.size(); + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; + blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED; + blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED; + } + + VMA_ASSERT(pDefragCtx->res == VK_SUCCESS); + + // Go over all blocks. Get mapped pointer or map if necessary. + for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) + { + BlockInfo& currBlockInfo = blockInfo[blockIndex]; + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0) + { + currBlockInfo.pMappedData = pBlock->GetMappedData(); + // It is not originally mapped - map it. + if(currBlockInfo.pMappedData == VMA_NULL) + { + pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData); + if(pDefragCtx->res == VK_SUCCESS) + { + currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION; + } + } + } + } + + // Go over all moves. Do actual data transfer. + if(pDefragCtx->res == VK_SUCCESS) + { + const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE }; + + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; + + const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex]; + const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex]; + + VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData); + + // Invalidate source. + if(isNonCoherent) + { + VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex]; + memRange.memory = pSrcBlock->GetDeviceMemory(); + memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize); + memRange.size = VMA_MIN( + VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize), + pSrcBlock->m_pMetadata->GetSize() - memRange.offset); + (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange); + } + + // THE PLACE WHERE ACTUAL DATA COPY HAPPENS. + memmove( + reinterpret_cast(dstBlockInfo.pMappedData) + move.dstOffset, + reinterpret_cast(srcBlockInfo.pMappedData) + move.srcOffset, + static_cast(move.size)); + + if(IsCorruptionDetectionEnabled()) + { + VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN); + VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size); + } + + // Flush destination. + if(isNonCoherent) + { + VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex]; + memRange.memory = pDstBlock->GetDeviceMemory(); + memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize); + memRange.size = VMA_MIN( + VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize), + pDstBlock->m_pMetadata->GetSize() - memRange.offset); + (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange); + } + } + } + + // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation. + // Regardless of pCtx->res == VK_SUCCESS. 
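// Illustrative sketch (standalone, not from the VMA sources in this patch): for
// non-coherent memory the CPU copy loop above must invalidate the source range
// before reading and flush the destination range after writing, with the offset
// rounded down and the size rounded up to nonCoherentAtomSize, clamped to the end
// of the block. The arithmetic, plus a worked example:
#include <algorithm>
#include <cstdint>

struct MappedRange { uint64_t offset; uint64_t size; };

static MappedRange AlignRangeForFlush(uint64_t offset, uint64_t size,
                                      uint64_t nonCoherentAtomSize, uint64_t blockSize)
{
    const uint64_t rangeOffset = offset / nonCoherentAtomSize * nonCoherentAtomSize; // align down
    const uint64_t padded = size + (offset - rangeOffset);
    const uint64_t roundedUp =
        (padded + nonCoherentAtomSize - 1) / nonCoherentAtomSize * nonCoherentAtomSize; // align up
    return { rangeOffset, std::min(roundedUp, blockSize - rangeOffset) };
}
// Example: offset 1000, size 300, atom 256, block 4096
//   -> rangeOffset 768, rangeSize min(768, 3328) = 768.
// End of sketch.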
+ for(size_t blockIndex = blockCount; blockIndex--; ) + { + const BlockInfo& currBlockInfo = blockInfo[blockIndex]; + if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0) + { + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + pBlock->Unmap(m_hAllocator, 1); + } + } +} + +void VmaBlockVector::ApplyDefragmentationMovesGpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkCommandBuffer commandBuffer) +{ + const size_t blockCount = m_Blocks.size(); + + pDefragCtx->blockContexts.resize(blockCount); + memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext)); + + // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED. + const size_t moveCount = moves.size(); + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; + + //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN) + { + // Old school move still require us to map the whole block + pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; + pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED; + } + } + + VMA_ASSERT(pDefragCtx->res == VK_SUCCESS); + + // Go over all blocks. Create and bind buffer for whole block if necessary. + { + VkBufferCreateInfo bufCreateInfo; + VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo); + + for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex) + { + VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex]; + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0) + { + bufCreateInfo.size = pBlock->m_pMetadata->GetSize(); + pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)( + m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer); + if(pDefragCtx->res == VK_SUCCESS) + { + pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)( + m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0); + } + } + } + } + + // Go over all moves. Post data transfer commands to command buffer. + if(pDefragCtx->res == VK_SUCCESS) + { + for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex) + { + const VmaDefragmentationMove& move = moves[moveIndex]; + + const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex]; + const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex]; + + VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer); + + VkBufferCopy region = { + move.srcOffset, + move.dstOffset, + move.size }; + (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)( + commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, ®ion); + } + } + + // Save buffers to defrag context for later destruction. 
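// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// GPU path above creates one temporary VkBuffer spanning each touched block and
// binds it at offset 0, so every defragmentation move becomes a plain
// vkCmdCopyBuffer region. VmaFillGpuDefragmentationBufferCreateInfo is not shown
// in this hunk; a transfer-src/dst buffer as below is an assumption:
#include <vulkan/vulkan.h>

static VkResult CreateWholeBlockBuffer(VkDevice device, VkDeviceMemory blockMemory,
                                       VkDeviceSize blockSize, VkBuffer* outBuffer)
{
    VkBufferCreateInfo bufInfo = {};
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.size = blockSize;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VkResult res = vkCreateBuffer(device, &bufInfo, /*pAllocator=*/nullptr, outBuffer);
    if(res == VK_SUCCESS)
        res = vkBindBufferMemory(device, *outBuffer, blockMemory, 0);
    return res;
}
// A move is then recorded as:
//   VkBufferCopy region = { move.srcOffset, move.dstOffset, move.size };
//   vkCmdCopyBuffer(cmd, srcBlockBuffer, dstBlockBuffer, 1, &region);
// End of sketch.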
+ if(pDefragCtx->res == VK_SUCCESS && moveCount > 0) + { + pDefragCtx->res = VK_NOT_READY; + } +} + +void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats) +{ + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex]; + if(pBlock->m_pMetadata->IsEmpty()) + { + if(m_Blocks.size() > m_MinBlockCount) + { + if(pDefragmentationStats != VMA_NULL) + { + ++pDefragmentationStats->deviceMemoryBlocksFreed; + pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize(); + } + + VmaVectorRemove(m_Blocks, blockIndex); + pBlock->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlock); + } + else + { + break; + } + } + } + UpdateHasEmptyBlock(); +} + +void VmaBlockVector::UpdateHasEmptyBlock() +{ + m_HasEmptyBlock = false; + for(size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if(pBlock->m_pMetadata->IsEmpty()) + { + m_HasEmptyBlock = true; + break; + } + } +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + json.BeginObject(); + + if(IsCustomPool()) + { + const char* poolName = m_hParentPool->GetName(); + if(poolName != VMA_NULL && poolName[0] != '\0') + { + json.WriteString("Name"); + json.WriteString(poolName); + } + + json.WriteString("MemoryTypeIndex"); + json.WriteNumber(m_MemoryTypeIndex); + + json.WriteString("BlockSize"); + json.WriteNumber(m_PreferredBlockSize); + + json.WriteString("BlockCount"); + json.BeginObject(true); + if(m_MinBlockCount > 0) + { + json.WriteString("Min"); + json.WriteNumber((uint64_t)m_MinBlockCount); + } + if(m_MaxBlockCount < SIZE_MAX) + { + json.WriteString("Max"); + json.WriteNumber((uint64_t)m_MaxBlockCount); + } + json.WriteString("Cur"); + json.WriteNumber((uint64_t)m_Blocks.size()); + json.EndObject(); + + if(m_FrameInUseCount > 0) + { + json.WriteString("FrameInUseCount"); + json.WriteNumber(m_FrameInUseCount); + } + + if(m_Algorithm != 0) + { + json.WriteString("Algorithm"); + json.WriteString(VmaAlgorithmToStr(m_Algorithm)); + } + } + else + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(m_PreferredBlockSize); + } + + json.WriteString("Blocks"); + json.BeginObject(); + for(size_t i = 0; i < m_Blocks.size(); ++i) + { + json.BeginString(); + json.ContinueString(m_Blocks[i]->GetId()); + json.EndString(); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + } + json.EndObject(); + + json.EndObject(); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +void VmaBlockVector::Defragment( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer) +{ + pCtx->res = VK_SUCCESS; + + const VkMemoryPropertyFlags memPropFlags = + m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags; + const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + + const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 && + isHostVisible; + const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 && + !IsCorruptionDetectionEnabled() && + ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0; + + // There are options to defragment 
this memory type. + if(canDefragmentOnCpu || canDefragmentOnGpu) + { + bool defragmentOnGpu; + // There is only one option to defragment this memory type. + if(canDefragmentOnGpu != canDefragmentOnCpu) + { + defragmentOnGpu = canDefragmentOnGpu; + } + // Both options are available: Heuristics to choose the best one. + else + { + defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || + m_hAllocator->IsIntegratedGpu(); + } + + bool overlappingMoveSupported = !defragmentOnGpu; + + if(m_hAllocator->m_UseMutex) + { + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(!m_Mutex.TryLockWrite()) + { + pCtx->res = VK_ERROR_INITIALIZATION_FAILED; + return; + } + } + else + { + m_Mutex.LockWrite(); + pCtx->mutexLocked = true; + } + } + + pCtx->Begin(overlappingMoveSupported, flags); + + // Defragment. + + const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove; + const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove; + pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags); + + // Accumulate statistics. + if(pStats != VMA_NULL) + { + const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved(); + const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved(); + pStats->bytesMoved += bytesMoved; + pStats->allocationsMoved += allocationsMoved; + VMA_ASSERT(bytesMoved <= maxBytesToMove); + VMA_ASSERT(allocationsMoved <= maxAllocationsToMove); + if(defragmentOnGpu) + { + maxGpuBytesToMove -= bytesMoved; + maxGpuAllocationsToMove -= allocationsMoved; + } + else + { + maxCpuBytesToMove -= bytesMoved; + maxCpuAllocationsToMove -= allocationsMoved; + } + } + + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(m_hAllocator->m_UseMutex) + m_Mutex.UnlockWrite(); + + if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty()) + pCtx->res = VK_NOT_READY; + + return; + } + + if(pCtx->res >= VK_SUCCESS) + { + if(defragmentOnGpu) + { + ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer); + } + else + { + ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves); + } + } + } +} + +void VmaBlockVector::DefragmentationEnd( + class VmaBlockVectorDefragmentationContext* pCtx, + uint32_t flags, + VmaDefragmentationStats* pStats) +{ + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex) + { + VMA_ASSERT(pCtx->mutexLocked == false); + + // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any + // lock protecting us. Since we mutate state here, we have to take the lock out now + m_Mutex.LockWrite(); + pCtx->mutexLocked = true; + } + + // If the mutex isn't locked we didn't do any work and there is nothing to delete. + if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex) + { + // Destroy buffers. 
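// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// decision in VmaBlockVector::Defragment above between the CPU path (memmove on
// mapped memory) and the GPU path (vkCmdCopyBuffer), reduced to a pure function.
// It is only consulted when at least one of the two paths is possible:
static bool ChooseGpuDefrag(bool canDefragmentOnCpu, bool canDefragmentOnGpu,
                            bool isDeviceLocal, bool isIntegratedGpu)
{
    if(canDefragmentOnGpu != canDefragmentOnCpu)
        return canDefragmentOnGpu; // Only one option is available.
    // Both are possible: prefer the GPU for DEVICE_LOCAL memory, or on integrated
    // GPUs where device-local and host-visible memory are typically the same heap.
    return isDeviceLocal || isIntegratedGpu;
}
// End of sketch.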
+ for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;) + { + VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex]; + if(blockCtx.hBuffer) + { + (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks()); + } + } + + if(pCtx->res >= VK_SUCCESS) + { + FreeEmptyBlocks(pStats); + } + } + + if(pCtx->mutexLocked) + { + VMA_ASSERT(m_hAllocator->m_UseMutex); + m_Mutex.UnlockWrite(); + } +} + +uint32_t VmaBlockVector::ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves); + + for(uint32_t i = 0; i < moveCount; ++ i) + { + VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i]; + + pMove->allocation = move.hAllocation; + pMove->memory = move.pDstBlock->GetDeviceMemory(); + pMove->offset = move.dstOffset; + + ++ pMove; + } + + pCtx->defragmentationMovesProcessed += moveCount; + + return moveCount; +} + +void VmaBlockVector::CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i) + { + const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i]; + + move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset); + move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset); + } + + pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed; + FreeEmptyBlocks(pStats); +} + +size_t VmaBlockVector::CalcAllocationCount() const +{ + size_t result = 0; + for(size_t i = 0; i < m_Blocks.size(); ++i) + { + result += m_Blocks[i]->m_pMetadata->GetAllocationCount(); + } + return result; +} + +bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const +{ + if(m_BufferImageGranularity == 1) + { + return false; + } + VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE; + for(size_t i = 0, count = m_Blocks.size(); i < count; ++i) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[i]; + VMA_ASSERT(m_Algorithm == 0); + VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata; + if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType)) + { + return true; + } + } + return false; +} + +void VmaBlockVector::MakePoolAllocationsLost( + uint32_t currentFrameIndex, + size_t* pLostAllocationCount) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + size_t lostAllocationCount = 0; + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount); + } + if(pLostAllocationCount != VMA_NULL) + { + *pLostAllocationCount = lostAllocationCount; + } +} + +VkResult VmaBlockVector::CheckCorruption() +{ + if(!IsCorruptionDetectionEnabled()) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + 
VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VkResult res = pBlock->CheckCorruption(m_hAllocator); + if(res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +void VmaBlockVector::AddStats(VmaStats* pStats) +{ + const uint32_t memTypeIndex = m_MemoryTypeIndex; + const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex); + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + VmaStatInfo allocationStatInfo; + pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo); + VmaAddStatInfo(pStats->total, allocationStatInfo); + VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo); + VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo); + } +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaDefragmentationAlgorithm_Generic members definition + +VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported) : + VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), + m_AllocationCount(0), + m_AllAllocations(false), + m_BytesMoved(0), + m_AllocationsMoved(0), + m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())) +{ + // Create block info for each block. + const size_t blockCount = m_pBlockVector->m_Blocks.size(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks()); + pBlockInfo->m_OriginalBlockIndex = blockIndex; + pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex]; + m_Blocks.push_back(pBlockInfo); + } + + // Sort them by m_pBlock pointer value. + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess()); +} + +VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic() +{ + for(size_t i = m_Blocks.size(); i--; ) + { + vma_delete(m_hAllocator, m_Blocks[i]); + } +} + +void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) +{ + // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost. + if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST) + { + VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock(); + BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess()); + if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock) + { + AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged); + (*it)->m_Allocations.push_back(allocInfo); + } + else + { + VMA_ASSERT(0); + } + + ++m_AllocationCount; + } +} + +VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations) +{ + if(m_Blocks.empty()) + { + return VK_SUCCESS; + } + + // This is a choice based on research. 
+ // Option 1: + uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT; + // Option 2: + //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT; + // Option 3: + //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT; + + size_t srcBlockMinIndex = 0; + // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations. + /* + if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT) + { + const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount(); + if(blocksWithNonMovableCount > 0) + { + srcBlockMinIndex = blocksWithNonMovableCount - 1; + } + } + */ + + size_t srcBlockIndex = m_Blocks.size() - 1; + size_t srcAllocIndex = SIZE_MAX; + for(;;) + { + // 1. Find next allocation to move. + // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source". + // 1.2. Then start from last to first m_Allocations. + while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) + { + if(m_Blocks[srcBlockIndex]->m_Allocations.empty()) + { + // Finished: no more allocations to process. + if(srcBlockIndex == srcBlockMinIndex) + { + return VK_SUCCESS; + } + else + { + --srcBlockIndex; + srcAllocIndex = SIZE_MAX; + } + } + else + { + srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1; + } + } + + BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex]; + AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex]; + + const VkDeviceSize size = allocInfo.m_hAllocation->GetSize(); + const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset(); + const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment(); + const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType(); + + // 2. Try to find new place for this allocation in preceding or current block. + for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) + { + BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex]; + VmaAllocationRequest dstAllocRequest; + if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest( + m_CurrentFrameIndex, + m_pBlockVector->GetFrameInUseCount(), + m_pBlockVector->GetBufferImageGranularity(), + size, + alignment, + false, // upperAddress + suballocType, + false, // canMakeOtherLost + strategy, + &dstAllocRequest) && + MoveMakesSense( + dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) + { + VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0); + + // Reached limit on number of allocations or bytes to move. 
+ if((m_AllocationsMoved + 1 > maxAllocationsToMove) || + (m_BytesMoved + size > maxBytesToMove)) + { + return VK_SUCCESS; + } + + VmaDefragmentationMove move = {}; + move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex; + move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex; + move.srcOffset = srcOffset; + move.dstOffset = dstAllocRequest.offset; + move.size = size; + move.hAllocation = allocInfo.m_hAllocation; + move.pSrcBlock = pSrcBlockInfo->m_pBlock; + move.pDstBlock = pDstBlockInfo->m_pBlock; + + moves.push_back(move); + + pDstBlockInfo->m_pBlock->m_pMetadata->Alloc( + dstAllocRequest, + suballocType, + size, + allocInfo.m_hAllocation); + + if(freeOldAllocations) + { + pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset); + allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset); + } + + if(allocInfo.m_pChanged != VMA_NULL) + { + *allocInfo.m_pChanged = VK_TRUE; + } + + ++m_AllocationsMoved; + m_BytesMoved += size; + + VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex); + + break; + } + } + + // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round. + + if(srcAllocIndex > 0) + { + --srcAllocIndex; + } + else + { + if(srcBlockIndex > 0) + { + --srcBlockIndex; + srcAllocIndex = SIZE_MAX; + } + else + { + return VK_SUCCESS; + } + } + } +} + +size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const +{ + size_t result = 0; + for(size_t i = 0; i < m_Blocks.size(); ++i) + { + if(m_Blocks[i]->m_HasNonMovableAllocations) + { + ++result; + } + } + return result; +} + +VkResult VmaDefragmentationAlgorithm_Generic::Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) +{ + if(!m_AllAllocations && m_AllocationCount == 0) + { + return VK_SUCCESS; + } + + const size_t blockCount = m_Blocks.size(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + BlockInfo* pBlockInfo = m_Blocks[blockIndex]; + + if(m_AllAllocations) + { + VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata; + for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE) + { + AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL); + pBlockInfo->m_Allocations.push_back(allocInfo); + } + } + } + + pBlockInfo->CalcHasNonMovableAllocations(); + + // This is a choice based on research. + // Option 1: + pBlockInfo->SortAllocationsByOffsetDescending(); + // Option 2: + //pBlockInfo->SortAllocationsBySizeDescending(); + } + + // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks. + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination()); + + // This is a choice based on research. + const uint32_t roundCount = 2; + + // Execute defragmentation rounds (the main part). 
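// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// generic algorithm above prepares two orderings before the rounds run --
// allocations inside each block are visited from the highest offset down, and
// blocks are sorted from the most "destination"-like to the most "source"-like.
// BlockInfoCompareMoveDestination is not shown in this hunk; treating blocks that
// contain non-movable allocations, and blocks with little free space, as better
// destinations is an assumption about its criterion. Simplified stand-in types:
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct AllocEntry { uint64_t offset; uint64_t size; };
struct BlockEntry
{
    std::vector<AllocEntry> allocs;
    bool hasNonMovable;
    uint64_t sumFreeSize;
};

static void PrepareDefragOrdering(std::vector<BlockEntry>& blocks)
{
    for(BlockEntry& b : blocks)
    {
        std::sort(b.allocs.begin(), b.allocs.end(),
            [](const AllocEntry& l, const AllocEntry& r) { return l.offset > r.offset; });
    }
    std::sort(blocks.begin(), blocks.end(),
        [](const BlockEntry& l, const BlockEntry& r)
        {
            return std::make_pair(!l.hasNonMovable, l.sumFreeSize) <
                   std::make_pair(!r.hasNonMovable, r.sumFreeSize);
        });
}
// End of sketch.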
+ VkResult result = VK_SUCCESS; + for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round) + { + result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)); + } + + return result; +} + +bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense( + size_t dstBlockIndex, VkDeviceSize dstOffset, + size_t srcBlockIndex, VkDeviceSize srcOffset) +{ + if(dstBlockIndex < srcBlockIndex) + { + return true; + } + if(dstBlockIndex > srcBlockIndex) + { + return false; + } + if(dstOffset < srcOffset) + { + return true; + } + return false; +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaDefragmentationAlgorithm_Fast + +VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported) : + VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex), + m_OverlappingMoveSupported(overlappingMoveSupported), + m_AllocationCount(0), + m_AllAllocations(false), + m_BytesMoved(0), + m_AllocationsMoved(0), + m_BlockInfos(VmaStlAllocator(hAllocator->GetAllocationCallbacks())) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN == 0); + +} + +VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast() +{ +} + +VkResult VmaDefragmentationAlgorithm_Fast::Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) +{ + VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount); + + const size_t blockCount = m_pBlockVector->GetBlockCount(); + if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0) + { + return VK_SUCCESS; + } + + PreprocessMetadata(); + + // Sort blocks in order from most destination. 
+ + m_BlockInfos.resize(blockCount); + for(size_t i = 0; i < blockCount; ++i) + { + m_BlockInfos[i].origBlockIndex = i; + } + + VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool { + return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() < + m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize(); + }); + + // THE MAIN ALGORITHM + + FreeSpaceDatabase freeSpaceDb; + + size_t dstBlockInfoIndex = 0; + size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; + VkDeviceSize dstBlockSize = pDstMetadata->GetSize(); + VkDeviceSize dstOffset = 0; + + bool end = false; + for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) + { + const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex); + VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata; + for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin(); + !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); ) + { + VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation; + const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment(); + const VkDeviceSize srcAllocSize = srcSuballocIt->size; + if(m_AllocationsMoved == maxAllocationsToMove || + m_BytesMoved + srcAllocSize > maxBytesToMove) + { + end = true; + break; + } + const VkDeviceSize srcAllocOffset = srcSuballocIt->offset; + + VmaDefragmentationMove move = {}; + // Try to place it in one of free spaces from the database. + size_t freeSpaceInfoIndex; + VkDeviceSize dstAllocOffset; + if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize, + freeSpaceInfoIndex, dstAllocOffset)) + { + size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex); + VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata; + + // Same block + if(freeSpaceInfoIndex == srcBlockInfoIndex) + { + VMA_ASSERT(dstAllocOffset <= srcAllocOffset); + + // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeOffset(dstAllocOffset); + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + InsertSuballoc(pFreeSpaceMetadata, suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + // Different block + else + { + // MOVE OPTION 2: Move the allocation to a different block. 
+ + VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex); + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset); + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + InsertSuballoc(pFreeSpaceMetadata, suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + else + { + dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment); + + // If the allocation doesn't fit before the end of dstBlock, forward to next block. + while(dstBlockInfoIndex < srcBlockInfoIndex && + dstAllocOffset + srcAllocSize > dstBlockSize) + { + // But before that, register remaining free space at the end of dst block. + freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset); + + ++dstBlockInfoIndex; + dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; + pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; + dstBlockSize = pDstMetadata->GetSize(); + dstOffset = 0; + dstAllocOffset = 0; + } + + // Same block + if(dstBlockInfoIndex == srcBlockInfoIndex) + { + VMA_ASSERT(dstAllocOffset <= srcAllocOffset); + + const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset; + + bool skipOver = overlap; + if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) + { + // If destination and source place overlap, skip if it would move it + // by only < 1/64 of its size. + skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize; + } + + if(skipOver) + { + freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset); + + dstOffset = srcAllocOffset + srcAllocSize; + ++srcSuballocIt; + } + // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. + else + { + srcSuballocIt->offset = dstAllocOffset; + srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset); + dstOffset = dstAllocOffset + srcAllocSize; + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + ++srcSuballocIt; + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + // Different block + else + { + // MOVE OPTION 2: Move the allocation to a different block. 
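// Illustrative sketch (standalone, not from the VMA sources in this patch): the
// "skip over" heuristic above. When the destination of an in-place move overlaps
// its source, the move is only worth doing if the allocation travels at least
// 1/64 of its own size; otherwise it stays put and the gap in front of it is
// registered as free space instead.
#include <cstdint>

static bool ShouldSkipOverlappingMove(uint64_t srcOffset, uint64_t dstOffset,
                                      uint64_t size, bool overlappingMoveSupported)
{
    const bool overlap = dstOffset + size > srcOffset; // dstOffset <= srcOffset here.
    if(!overlap)
        return false;
    if(!overlappingMoveSupported || dstOffset >= srcOffset)
        return true;
    return (srcOffset - dstOffset) * 64 < size;
}
// Example: a 64 MiB allocation that would only move 512 KiB towards the start of
// the block is skipped, because 512 KiB * 64 = 32 MiB < 64 MiB.
// End of sketch.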
+ + VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex); + VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize); + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset); + dstOffset = dstAllocOffset + srcAllocSize; + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + pDstMetadata->m_Suballocations.push_back(suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + } + } + + m_BlockInfos.clear(); + + PostprocessMetadata(); + + return VK_SUCCESS; +} + +void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() +{ + const size_t blockCount = m_pBlockVector->GetBlockCount(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + pMetadata->m_FreeCount = 0; + pMetadata->m_SumFreeSize = pMetadata->GetSize(); + pMetadata->m_FreeSuballocationsBySize.clear(); + for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); ) + { + if(it->type == VMA_SUBALLOCATION_TYPE_FREE) + { + VmaSuballocationList::iterator nextIt = it; + ++nextIt; + pMetadata->m_Suballocations.erase(it); + it = nextIt; + } + else + { + ++it; + } + } + } +} + +void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() +{ + const size_t blockCount = m_pBlockVector->GetBlockCount(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + const VkDeviceSize blockSize = pMetadata->GetSize(); + + // No allocations in this block - entire area is free. + if(pMetadata->m_Suballocations.empty()) + { + pMetadata->m_FreeCount = 1; + //pMetadata->m_SumFreeSize is already set to blockSize. + VmaSuballocation suballoc = { + 0, // offset + blockSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + pMetadata->m_Suballocations.push_back(suballoc); + pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin()); + } + // There are some allocations in this block. + else + { + VkDeviceSize offset = 0; + VmaSuballocationList::iterator it; + for(it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); + ++it) + { + VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(it->offset >= offset); + + // Need to insert preceding free space. + if(it->offset > offset) + { + ++pMetadata->m_FreeCount; + const VkDeviceSize freeSize = it->offset - offset; + VmaSuballocation suballoc = { + offset, // offset + freeSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); + if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt); + } + } + + pMetadata->m_SumFreeSize -= it->size; + offset = it->offset + it->size; + } + + // Need to insert trailing free space. 
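// Illustrative sketch (standalone, not from the VMA sources in this patch):
// PostprocessMetadata above rebuilds the FREE suballocations that
// PreprocessMetadata stripped out -- one for every gap between used ranges and one
// for the trailing space -- and recomputes the total free size. The gap
// computation on stand-in types (the real code also re-registers large free
// ranges by size):
#include <cstdint>
#include <vector>

struct Range { uint64_t offset; uint64_t size; };

static std::vector<Range> ComputeFreeRanges(const std::vector<Range>& usedSortedByOffset,
                                            uint64_t blockSize, uint64_t& outSumFreeSize)
{
    std::vector<Range> freeRanges;
    uint64_t cursor = 0;
    outSumFreeSize = 0;
    for(const Range& used : usedSortedByOffset)
    {
        if(used.offset > cursor) // Gap in front of this allocation.
        {
            freeRanges.push_back({ cursor, used.offset - cursor });
            outSumFreeSize += used.offset - cursor;
        }
        cursor = used.offset + used.size;
    }
    if(cursor < blockSize) // Trailing free space.
    {
        freeRanges.push_back({ cursor, blockSize - cursor });
        outSumFreeSize += blockSize - cursor;
    }
    return freeRanges;
}
// End of sketch.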
+ if(offset < blockSize) + { + ++pMetadata->m_FreeCount; + const VkDeviceSize freeSize = blockSize - offset; + VmaSuballocation suballoc = { + offset, // offset + freeSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + VMA_ASSERT(it == pMetadata->m_Suballocations.end()); + VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); + if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt); + } + } + + VMA_SORT( + pMetadata->m_FreeSuballocationsBySize.begin(), + pMetadata->m_FreeSuballocationsBySize.end(), + VmaSuballocationItemSizeLess()); + } + + VMA_HEAVY_ASSERT(pMetadata->Validate()); + } +} + +void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc) +{ + // TODO: Optimize somehow. Remember iterator instead of searching for it linearly. + VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); + while(it != pMetadata->m_Suballocations.end()) + { + if(it->offset < suballoc.offset) + { + ++it; + } + } + pMetadata->m_Suballocations.insert(it, suballoc); +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaBlockVectorDefragmentationContext + +VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext( + VmaAllocator hAllocator, + VmaPool hCustomPool, + VmaBlockVector* pBlockVector, + uint32_t currFrameIndex) : + res(VK_SUCCESS), + mutexLocked(false), + blockContexts(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + defragmentationMoves(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + defragmentationMovesProcessed(0), + defragmentationMovesCommitted(0), + hasDefragmentationPlan(0), + m_hAllocator(hAllocator), + m_hCustomPool(hCustomPool), + m_pBlockVector(pBlockVector), + m_CurrFrameIndex(currFrameIndex), + m_pAlgorithm(VMA_NULL), + m_Allocations(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_AllAllocations(false) +{ +} + +VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext() +{ + vma_delete(m_hAllocator, m_pAlgorithm); +} + +void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) +{ + AllocInfo info = { hAlloc, pChanged }; + m_Allocations.push_back(info); +} + +void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags) +{ + const bool allAllocations = m_AllAllocations || + m_Allocations.size() == m_pBlockVector->CalcAllocationCount(); + + /******************************** + HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM. + ********************************/ + + /* + Fast algorithm is supported only when certain criteria are met: + - VMA_DEBUG_MARGIN is 0. + - All allocations in this block vector are moveable. + - There is no possibility of image/buffer granularity conflict. 
+ - The defragmentation is not incremental + */ + if(VMA_DEBUG_MARGIN == 0 && + allAllocations && + !m_pBlockVector->IsBufferImageGranularityConflictPossible() && + !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)) + { + m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)( + m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported); + } + else + { + m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)( + m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported); + } + + if(allAllocations) + { + m_pAlgorithm->AddAll(); + } + else + { + for(size_t i = 0, count = m_Allocations.size(); i < count; ++i) + { + m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged); + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaDefragmentationContext + +VmaDefragmentationContext_T::VmaDefragmentationContext_T( + VmaAllocator hAllocator, + uint32_t currFrameIndex, + uint32_t flags, + VmaDefragmentationStats* pStats) : + m_hAllocator(hAllocator), + m_CurrFrameIndex(currFrameIndex), + m_Flags(flags), + m_pStats(pStats), + m_CustomPoolContexts(VmaStlAllocator(hAllocator->GetAllocationCallbacks())) +{ + memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts)); +} + +VmaDefragmentationContext_T::~VmaDefragmentationContext_T() +{ + for(size_t i = m_CustomPoolContexts.size(); i--; ) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i]; + pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats); + vma_delete(m_hAllocator, pBlockVectorCtx); + } + for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; ) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i]; + if(pBlockVectorCtx) + { + pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats); + vma_delete(m_hAllocator, pBlockVectorCtx); + } + } +} + +void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools) +{ + for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + { + VmaPool pool = pPools[poolIndex]; + VMA_ASSERT(pool); + // Pools with algorithm other than default are not defragmented. + if(pool->m_BlockVector.GetAlgorithm() == 0) + { + VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; + + for(size_t i = m_CustomPoolContexts.size(); i--; ) + { + if(m_CustomPoolContexts[i]->GetCustomPool() == pool) + { + pBlockVectorDefragCtx = m_CustomPoolContexts[i]; + break; + } + } + + if(!pBlockVectorDefragCtx) + { + pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( + m_hAllocator, + pool, + &pool->m_BlockVector, + m_CurrFrameIndex); + m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); + } + + pBlockVectorDefragCtx->AddAll(); + } + } +} + +void VmaDefragmentationContext_T::AddAllocations( + uint32_t allocationCount, + const VmaAllocation* pAllocations, + VkBool32* pAllocationsChanged) +{ + // Dispatch pAllocations among defragmentators. Create them when necessary. + for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + const VmaAllocation hAlloc = pAllocations[allocIndex]; + VMA_ASSERT(hAlloc); + // DedicatedAlloc cannot be defragmented. + if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) && + // Lost allocation cannot be defragmented. 
+ (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)) + { + VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL; + + const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool(); + // This allocation belongs to custom pool. + if(hAllocPool != VK_NULL_HANDLE) + { + // Pools with algorithm other than default are not defragmented. + if(hAllocPool->m_BlockVector.GetAlgorithm() == 0) + { + for(size_t i = m_CustomPoolContexts.size(); i--; ) + { + if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool) + { + pBlockVectorDefragCtx = m_CustomPoolContexts[i]; + break; + } + } + if(!pBlockVectorDefragCtx) + { + pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( + m_hAllocator, + hAllocPool, + &hAllocPool->m_BlockVector, + m_CurrFrameIndex); + m_CustomPoolContexts.push_back(pBlockVectorDefragCtx); + } + } + } + // This allocation belongs to default pool. + else + { + const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex(); + pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex]; + if(!pBlockVectorDefragCtx) + { + pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)( + m_hAllocator, + VMA_NULL, // hCustomPool + m_hAllocator->m_pBlockVectors[memTypeIndex], + m_CurrFrameIndex); + m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx; + } + } + + if(pBlockVectorDefragCtx) + { + VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ? + &pAllocationsChanged[allocIndex] : VMA_NULL; + pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged); + } + } + } +} + +VkResult VmaDefragmentationContext_T::Defragment( + VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove, + VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags) +{ + if(pStats) + { + memset(pStats, 0, sizeof(VmaDefragmentationStats)); + } + + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + // For incremental defragmetnations, we just earmark how much we can move + // The real meat is in the defragmentation steps + m_MaxCpuBytesToMove = maxCpuBytesToMove; + m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove; + + m_MaxGpuBytesToMove = maxGpuBytesToMove; + m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove; + + if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 && + m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0) + return VK_SUCCESS; + + return VK_NOT_READY; + } + + if(commandBuffer == VK_NULL_HANDLE) + { + maxGpuBytesToMove = 0; + maxGpuAllocationsToMove = 0; + } + + VkResult res = VK_SUCCESS; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS; + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + pStats, flags, + maxCpuBytesToMove, maxCpuAllocationsToMove, + maxGpuBytesToMove, maxGpuAllocationsToMove, + commandBuffer); + if(pBlockVectorCtx->res != VK_SUCCESS) + { + res = pBlockVectorCtx->res; + } + } + } + + // Process custom pools. 
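+    // (Custom pools are processed the same way as the default pools above: each
+    // per-pool context runs Defragment() on its block vector under the shared
+    // CPU/GPU byte and allocation limits, and a failing VkResult stops further
+    // processing and is returned to the caller.)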
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount && res >= VK_SUCCESS; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + pStats, flags, + maxCpuBytesToMove, maxCpuAllocationsToMove, + maxGpuBytesToMove, maxGpuAllocationsToMove, + commandBuffer); + if(pBlockVectorCtx->res != VK_SUCCESS) + { + res = pBlockVectorCtx->res; + } + } + + return res; +} + +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo) +{ + VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves; + uint32_t movesLeft = pInfo->moveCount; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + } + + // Process custom pools. + for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + + pInfo->moveCount = pInfo->moveCount - movesLeft; + + return VK_SUCCESS; +} +VkResult VmaDefragmentationContext_T::DefragmentPassEnd() +{ + VkResult res = VK_SUCCESS; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + } + + // Process custom pools. 
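+    // (Custom pools are committed the same way as the default pools above. As a
+    // rough, illustrative sketch (not part of this file), an application drives
+    // this incremental path through the public entry points that forward to
+    // DefragmentPassBegin/DefragmentPassEnd, e.g.:
+    //
+    //     VmaDefragmentationPassMoveInfo moves[64];
+    //     VkResult passRes;
+    //     do {
+    //         VmaDefragmentationPassInfo pass = {};
+    //         pass.moveCount = 64;
+    //         pass.pMoves = moves;
+    //         vmaBeginDefragmentationPass(allocator, ctx, &pass);
+    //         // ...perform the pass.moveCount copies, recreate/rebind the resources...
+    //         passRes = vmaEndDefragmentationPass(allocator, ctx);
+    //     } while(passRes == VK_NOT_READY);
+    //
+    // The exact return-value handling is an assumption; see the library documentation.)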
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + + return res; +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaRecorder + +#if VMA_RECORDING_ENABLED + +VmaRecorder::VmaRecorder() : + m_UseMutex(true), + m_Flags(0), + m_File(VMA_NULL), + m_RecordingStartTime(std::chrono::high_resolution_clock::now()) +{ +} + +VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) +{ + m_UseMutex = useMutex; + m_Flags = settings.flags; + +#if defined(_WIN32) + // Open file for writing. + errno_t err = fopen_s(&m_File, settings.pFilePath, "wb"); + + if(err != 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } +#else + // Open file for writing. + m_File = fopen(settings.pFilePath, "wb"); + + if(m_File == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } +#endif + + // Write header. + fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); + fprintf(m_File, "%s\n", "1,8"); + + return VK_SUCCESS; +} + +VmaRecorder::~VmaRecorder() +{ + if(m_File != VMA_NULL) + { + fclose(m_File); + } +} + +void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex); + Flush(); +} + +void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex); + Flush(); +} + +void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex, + createInfo.memoryTypeIndex, + createInfo.flags, + createInfo.blockSize, + (uint64_t)createInfo.minBlockCount, + (uint64_t)createInfo.maxBlockCount, + createInfo.frameInUseCount, + pool); + Flush(); +} + +void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex, + pool); + Flush(); +} + +void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + 
vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, ",%s\n", userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 
1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, "\n"); + Flush(); +} + +void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex, + VmaAllocation allocation, + const void* pUserData) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr( + allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0, + pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordMapMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, + offset, + size); + Flush(); +} + +void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, + offset, + size); + Flush(); +} + +void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex, + const VkBufferCreateInfo& bufCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock 
lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + bufCreateInfo.flags, + bufCreateInfo.size, + bufCreateInfo.usage, + bufCreateInfo.sharingMode, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordCreateImage(uint32_t frameIndex, + const VkImageCreateInfo& imageCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + imageCreateInfo.flags, + imageCreateInfo.imageType, + imageCreateInfo.format, + imageCreateInfo.extent.width, + imageCreateInfo.extent.height, + imageCreateInfo.extent.depth, + imageCreateInfo.mipLevels, + imageCreateInfo.arrayLayers, + imageCreateInfo.samples, + imageCreateInfo.tiling, + imageCreateInfo.usage, + imageCreateInfo.sharingMode, + imageCreateInfo.initialLayout, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordDestroyImage(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex, + VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex, + pool); + Flush(); +} + +void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex, + const VmaDefragmentationInfo2& info, + VmaDefragmentationContext ctx) +{ + CallParams callParams; + 
GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex, + info.flags); + PrintPointerList(info.allocationCount, info.pAllocations); + fprintf(m_File, ","); + PrintPointerList(info.poolCount, info.pPools); + fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n", + info.maxCpuBytesToMove, + info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, + info.maxGpuAllocationsToMove, + info.commandBuffer, + ctx); + Flush(); +} + +void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex, + VmaDefragmentationContext ctx) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex, + ctx); + Flush(); +} + +void VmaRecorder::RecordSetPoolName(uint32_t frameIndex, + VmaPool pool, + const char* name) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + pool, name != VMA_NULL ? name : ""); + Flush(); +} + +VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData) +{ + if(pUserData != VMA_NULL) + { + if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0) + { + m_Str = (const char*)pUserData; + } + else + { + // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, convert the string's memory address to a string and store it. + snprintf(m_PtrStr, 17, "%p", pUserData); + m_Str = m_PtrStr; + } + } + else + { + m_Str = ""; + } +} + +void VmaRecorder::WriteConfiguration( + const VkPhysicalDeviceProperties& devProps, + const VkPhysicalDeviceMemoryProperties& memProps, + uint32_t vulkanApiVersion, + bool dedicatedAllocationExtensionEnabled, + bool bindMemory2ExtensionEnabled, + bool memoryBudgetExtensionEnabled, + bool deviceCoherentMemoryExtensionEnabled) +{ + fprintf(m_File, "Config,Begin\n"); + + fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion)); + + fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion); + fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion); + fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID); + fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID); + fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType); + fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName); + + fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount); + fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity); + fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize); + + fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount); + for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i) + { + fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size); + fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags); + } + fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount); + for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i) + { + fprintf(m_File, 
"PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex); + fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags); + } + + fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0); + + fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT); + fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN); + fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY); + fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE); + fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + fprintf(m_File, "Config,End\n"); +} + +void VmaRecorder::GetBasicParams(CallParams& outParams) +{ + #if defined(_WIN32) + outParams.threadId = GetCurrentThreadId(); + #else + // Use C++11 features to get thread id and convert it to uint32_t. + // There is room for optimization since sstream is quite slow. + // Is there a better way to convert std::this_thread::get_id() to uint32_t? + std::thread::id thread_id = std::this_thread::get_id(); + std::stringstream thread_id_to_string_converter; + thread_id_to_string_converter << thread_id; + std::string thread_id_as_string = thread_id_to_string_converter.str(); + outParams.threadId = static_cast(std::stoi(thread_id_as_string.c_str())); + #endif + + auto current_time = std::chrono::high_resolution_clock::now(); + + outParams.time = std::chrono::duration(current_time - m_RecordingStartTime).count(); +} + +void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems) +{ + if(count) + { + fprintf(m_File, "%p", pItems[0]); + for(uint64_t i = 1; i < count; ++i) + { + fprintf(m_File, " %p", pItems[i]); + } + } +} + +void VmaRecorder::Flush() +{ + if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) + { + fflush(m_File); + } +} + +#endif // #if VMA_RECORDING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// VmaAllocationObjectAllocator + +VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) : + m_Allocator(pAllocationCallbacks, 1024) +{ +} + +template VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... 
args)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    return m_Allocator.Alloc(std::forward<Types>(args)...);
+}
+
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    m_Allocator.Free(hAlloc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaAllocator_T
+
+VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
+    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+    m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+    m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
+    m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+    m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
+    m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
+    m_hDevice(pCreateInfo->device),
+    m_hInstance(pCreateInfo->instance),
+    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+    m_AllocationObjectAllocator(&m_AllocationCallbacks),
+    m_HeapSizeLimitMask(0),
+    m_PreferredLargeHeapBlockSize(0),
+    m_PhysicalDevice(pCreateInfo->physicalDevice),
+    m_CurrentFrameIndex(0),
+    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
+    m_NextPoolId(0),
+    m_GlobalMemoryTypeBits(UINT32_MAX)
+#if VMA_RECORDING_ENABLED
+    ,m_pRecorder(VMA_NULL)
+#endif
+{
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        m_UseKhrDedicatedAllocation = false;
+        m_UseKhrBindMemory2 = false;
+    }
+
+    if(VMA_DEBUG_DETECT_CORRUPTION)
+    {
+        // Needs to be multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
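+        // (The VMA_DEBUG_MARGIN region placed around each allocation is filled with
+        // 32-bit copies of VMA_CORRUPTION_DETECTION_MAGIC_VALUE and re-checked later,
+        // so it must be a whole number of uint32_t words; a 16-byte margin, for
+        // example, holds four copies of the magic value.)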
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); + } + + VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); + + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { +#if !(VMA_DEDICATED_ALLOCATION) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if !(VMA_BUFFER_DEVICE_ADDRESS) + if(m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if VMA_VULKAN_VERSION < 1002000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); + } +#endif + + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); + memset(&m_MemProps, 0, sizeof(m_MemProps)); + + memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); + memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); + + if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { + m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; + m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; + m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; + } + + ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); + + (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); + (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); + + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT)); + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); + + m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? 
+ pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); + + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1u << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { + m_MemProps.memoryHeaps[heapIndex].size = limit; + } + } + } + } + + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); + + m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( + this, + VK_NULL_HANDLE, // hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + pCreateInfo->frameInUseCount, + false, // explicitBlockSize + false); // linearAlgorithm + // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, + // becase minBlockCount is 0. + m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator(GetAllocationCallbacks())); + + } +} + +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ + VkResult res = VK_SUCCESS; + + if(pCreateInfo->pRecordSettings != VMA_NULL && + !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath)) + { +#if VMA_RECORDING_ENABLED + m_pRecorder = vma_new(this, VmaRecorder)(); + res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex); + if(res != VK_SUCCESS) + { + return res; + } + m_pRecorder->WriteConfiguration( + m_PhysicalDeviceProperties, + m_MemProps, + m_VulkanApiVersion, + m_UseKhrDedicatedAllocation, + m_UseKhrBindMemory2, + m_UseExtMemoryBudget, + m_UseAmdDeviceCoherentMemory); + m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex()); +#else + VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1."); + return VK_ERROR_FEATURE_NOT_PRESENT; +#endif + } + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + + return res; +} + +VmaAllocator_T::~VmaAllocator_T() +{ +#if VMA_RECORDING_ENABLED + if(m_pRecorder != VMA_NULL) + { + m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex()); + vma_delete(this, m_pRecorder); + } +#endif + + VMA_ASSERT(m_Pools.empty()); + + for(size_t i = GetMemoryTypeCount(); i--; ) + { + if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty()) + { + VMA_ASSERT(0 && "Unfreed dedicated allocations found."); + } + + vma_delete(this, m_pDedicatedAllocations[i]); + vma_delete(this, m_pBlockVectors[i]); + } +} + +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Static(); +#endif + + if(pVulkanFunctions != VMA_NULL) + { + ImportVulkanFunctions_Custom(pVulkanFunctions); + } + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Dynamic(); +#endif + + ValidateVulkanFunctions(); +} + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Static() +{ + // Vulkan 1.0 + m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = 
(PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif +} + +#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET + 
VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + 
VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ? 
(heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); +} + +VkResult VmaAllocator_T::AllocateMemoryOfType( + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedAllocation, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations != VMA_NULL); + VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size); + + VmaAllocationCreateInfo finalCreateInfo = createInfo; + + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + // If memory is lazily allocated, it should be always dedicated. + if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector); + + const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize(); + bool preferDedicatedMemory = + VMA_DEBUG_ALWAYS_DEDICATED_MEMORY || + dedicatedAllocation || + // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. + size > preferredBlockSize / 2; + + if(preferDedicatedMemory && + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && + finalCreateInfo.pool == VK_NULL_HANDLE) + { + finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + return AllocateDedicatedMemory( + size, + suballocType, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + finalCreateInfo.pUserData, + dedicatedBuffer, + dedicatedBufferUsage, + dedicatedImage, + allocationCount, + pAllocations); + } + } + else + { + VkResult res = blockVector->Allocate( + m_CurrentFrameIndex.load(), + size, + alignment, + finalCreateInfo, + suballocType, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + { + return res; + } + + // 5. Try dedicated memory. + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + res = AllocateDedicatedMemory( + size, + suballocType, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + finalCreateInfo.pUserData, + dedicatedBuffer, + dedicatedBufferUsage, + dedicatedImage, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. 
+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + else + { + // Everything failed: Return error code. + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + } + } +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + bool withinBudget, + bool map, + bool isUserDataString, + void* pUserData, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + if(withinBudget) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetBudget(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown + (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + + size_t allocIndex; + VkResult res = VK_SUCCESS; + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + // Register them in m_pDedicatedAllocations. + { + VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocations); + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + VmaVectorInsertSorted(*pDedicatedAllocations, pAllocations[allocIndex]); + } + } + + VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex); + } + else + { + // Free all already created allocations. 
+ while(allocIndex--) + { + VmaAllocation currAlloc = pAllocations[allocIndex]; + VkDeviceMemory hMemory = currAlloc->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); + currAlloc->SetUserData(this, VMA_NULL); + m_AllocationObjectAllocator.Free(currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + void* pUserData, + VmaAllocation* pAllocation) +{ + VkDeviceMemory hMemory = VK_NULL_HANDLE; + VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); + if(res < 0) + { + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + + void* pMappedData = VMA_NULL; + if(map) + { + res = (*m_VulkanFunctions.vkMapMemory)( + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { + VMA_DEBUG_LOG(" vkMapMemory FAILED"); + FreeVulkanMemory(memTypeIndex, size, hMemory); + return res; + } + } + + *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString); + (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size); + (*pAllocation)->SetUserData(this, pUserData); + m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.buffer = hBuffer; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { 
+ VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + if(vkMemReq.size == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(requiresDedicatedAllocation) + { + if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(createInfo.pool != VK_NULL_HANDLE) + { + VMA_ASSERT(0 && "Pool specified while dedicated allocation is required."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + if((createInfo.pool != VK_NULL_HANDLE) && + ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0)) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + if(createInfo.pool != VK_NULL_HANDLE) + { + const VkDeviceSize alignmentForPool = VMA_MAX( + vkMemReq.alignment, + GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex())); + + VmaAllocationCreateInfo createInfoForPool = createInfo; + // If memory type is not HOST_VISIBLE, disable MAPPED. 
+ if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + + return createInfo.pool->m_BlockVector.Allocate( + m_CurrentFrameIndex.load(), + vkMemReq.size, + alignmentForPool, + createInfoForPool, + suballocType, + allocationCount, + pAllocations); + } + else + { + // Bit mask of memory Vulkan types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex); + if(res == VK_SUCCESS) + { + VkDeviceSize alignmentForMemType = VMA_MAX( + vkMemReq.alignment, + GetMemoryTypeMinAlignment(memTypeIndex)); + + res = AllocateMemoryOfType( + vkMemReq.size, + alignmentForMemType, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedBufferUsage, + dedicatedImage, + createInfo, + memTypeIndex, + suballocType, + allocationCount, + pAllocations); + // Succeeded on first try. + if(res == VK_SUCCESS) + { + return res; + } + // Allocation from this memory type failed. Try other compatible memory types. + else + { + for(;;) + { + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1u << memTypeIndex); + // Find alternative memTypeIndex. + res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex); + if(res == VK_SUCCESS) + { + alignmentForMemType = VMA_MAX( + vkMemReq.alignment, + GetMemoryTypeMinAlignment(memTypeIndex)); + + res = AllocateMemoryOfType( + vkMemReq.size, + alignmentForMemType, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedBufferUsage, + dedicatedImage, + createInfo, + memTypeIndex, + suballocType, + allocationCount, + pAllocations); + // Allocation from this alternative memory type succeeded. + if(res == VK_SUCCESS) + { + return res; + } + // else: Allocation from this memory type failed. Try next one - next loop iteration. + } + // No other matching memory type index could be found. + else + { + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + } + // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. 
+ else + return res; + } +} + +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations); + + for(size_t allocIndex = allocationCount; allocIndex--; ) + { + VmaAllocation allocation = pAllocations[allocIndex]; + + if(allocation != VK_NULL_HANDLE) + { + if(TouchAllocation(allocation)) + { + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); + } + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetBlock()->GetParentPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); + } + } + + // Do this regardless of whether the allocation is lost. Lost allocations still account to Budget.AllocationBytes. + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); + allocation->SetUserData(this, VMA_NULL); + m_AllocationObjectAllocator.Free(allocation); + } + } +} + +VkResult VmaAllocator_T::ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize) +{ + // This function is deprecated and so it does nothing. It's left for backward compatibility. + if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(newSize == alloc->GetSize()) + { + return VK_SUCCESS; + } + return VK_ERROR_OUT_OF_POOL_MEMORY; +} + +void VmaAllocator_T::CalculateStats(VmaStats* pStats) +{ + // Initialize. + InitStatInfo(pStats->total); + for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + InitStatInfo(pStats->memoryType[i]); + for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + InitStatInfo(pStats->memoryHeap[i]); + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector); + pBlockVector->AddStats(pStats); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) + { + m_Pools[poolIndex]->m_BlockVector.AddStats(pStats); + } + } + + // Process dedicated allocations. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocVector); + for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex) + { + VmaStatInfo allocationStatInfo; + (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo); + VmaAddStatInfo(pStats->total, allocationStatInfo); + VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo); + VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo); + } + } + + // Postprocess. 
+ VmaPostprocessCalcStatInfo(pStats->total); + for(size_t i = 0; i < GetMemoryTypeCount(); ++i) + VmaPostprocessCalcStatInfo(pStats->memoryType[i]); + for(size_t i = 0; i < GetMemoryHeapCount(); ++i) + VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]); +} + +void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudget->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudget->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetBudget(outBudget, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudget->usage = outBudget->blockBytes; + outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. + } + } +} + +static const uint32_t VMA_VENDOR_ID_AMD = 4098; + +VkResult VmaAllocator_T::DefragmentationBegin( + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext) +{ + if(info.pAllocationsChanged != VMA_NULL) + { + memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32)); + } + + *pContext = vma_new(this, VmaDefragmentationContext_T)( + this, m_CurrentFrameIndex.load(), info.flags, pStats); + + (*pContext)->AddPools(info.poolCount, info.pPools); + (*pContext)->AddAllocations( + info.allocationCount, info.pAllocations, info.pAllocationsChanged); + + VkResult res = (*pContext)->Defragment( + info.maxCpuBytesToMove, info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, info.maxGpuAllocationsToMove, + info.commandBuffer, pStats, info.flags); + + if(res != VK_NOT_READY) + { + vma_delete(this, *pContext); + *pContext = VMA_NULL; + } + + return res; +} + +VkResult VmaAllocator_T::DefragmentationEnd( + VmaDefragmentationContext context) +{ + vma_delete(this, context); + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context) +{ + return context->DefragmentPassBegin(pInfo); +} +VkResult VmaAllocator_T::DefragmentationPassEnd( + VmaDefragmentationContext context) +{ + return context->DefragmentPassEnd(); + +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + if(hAllocation->CanBecomeLost()) + { + /* + Warning: This is a carefully designed algorithm. 
+ Do not modify unless you really know what you're doing :) + */ + const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + pAllocationInfo->memoryType = UINT32_MAX; + pAllocationInfo->deviceMemory = VK_NULL_HANDLE; + pAllocationInfo->offset = 0; + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = VMA_NULL; + pAllocationInfo->pUserData = hAllocation->GetUserData(); + return; + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = VMA_NULL; + pAllocationInfo->pUserData = hAllocation->GetUserData(); + return; + } + else // Last use time earlier than current time. + { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } + } + else + { +#if VMA_STATS_STRING_ENABLED + uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST); + if(localLastUseFrameIndex == localCurrFrameIndex) + { + break; + } + else // Last use time earlier than current time. + { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } +#endif + + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + } +} + +bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation) +{ + // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo. + if(hAllocation->CanBecomeLost()) + { + uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + return false; + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { + return true; + } + else // Last use time earlier than current time. + { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } + } + else + { +#if VMA_STATS_STRING_ENABLED + uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST); + if(localLastUseFrameIndex == localCurrFrameIndex) + { + break; + } + else // Last use time earlier than current time. 
+ {
+ if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
+ {
+ localLastUseFrameIndex = localCurrFrameIndex;
+ }
+ }
+ }
+#endif
+
+ return true;
+ }
+}
+
+VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
+{
+ VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
+
+ VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
+
+ if(newCreateInfo.maxBlockCount == 0)
+ {
+ newCreateInfo.maxBlockCount = SIZE_MAX;
+ }
+ if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
+ {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+ // Memory type index out of range or forbidden.
+ if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
+ ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
+ {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
+
+ *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+
+ VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
+ if(res != VK_SUCCESS)
+ {
+ vma_delete(this, *pPool);
+ *pPool = VMA_NULL;
+ return res;
+ }
+
+ // Add to m_Pools.
+ {
+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+ (*pPool)->SetId(m_NextPoolId++);
+ VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
+ }
+
+ return VK_SUCCESS;
+}
+
+void VmaAllocator_T::DestroyPool(VmaPool pool)
+{
+ // Remove from m_Pools.
+ {
+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
+ bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
+ VMA_ASSERT(success && "Pool not found in Allocator.");
+ }
+
+ vma_delete(this, pool);
+}
+
+void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
+{
+ pool->m_BlockVector.GetPoolStats(pPoolStats);
+}
+
+void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
+{
+ m_CurrentFrameIndex.store(frameIndex);
+
+#if VMA_MEMORY_BUDGET
+ if(m_UseExtMemoryBudget)
+ {
+ UpdateVulkanBudget();
+ }
+#endif // #if VMA_MEMORY_BUDGET
+}
+
+void VmaAllocator_T::MakePoolAllocationsLost(
+ VmaPool hPool,
+ size_t* pLostAllocationCount)
+{
+ hPool->m_BlockVector.MakePoolAllocationsLost(
+ m_CurrentFrameIndex.load(),
+ pLostAllocationCount);
+}
+
+VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
+{
+ return hPool->m_BlockVector.CheckCorruption();
+}
+
+VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
+{
+ VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
+
+ // Process default pools.
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if(((1u << memTypeIndex) & memoryTypeBits) != 0)
+ {
+ VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+ VMA_ASSERT(pBlockVector);
+ VkResult localRes = pBlockVector->CheckCorruption();
+ switch(localRes)
+ {
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ break;
+ case VK_SUCCESS:
+ finalRes = VK_SUCCESS;
+ break;
+ default:
+ return localRes;
+ }
+ }
+ }
+
+ // Process custom pools.
+ { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) + { + if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + { + VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + } + + return finalRes; +} + +void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation) +{ + *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false); + (*pAllocation)->InitLost(); +} + +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); + + // HeapSizeLimit is in effect for this heap. + if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) + { + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; + for(;;) + { + const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; + if(blockBytesAfterAllocation > heapSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + { + break; + } + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; + } + + // VULKAN CALL vkAllocateMemory. + VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); + + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; + } + + return res; +} + +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); + } + + // VULKAN CALL vkFreeMemory. 
+ (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); + + m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + if(hAllocation->CanBecomeLost()) + { + return VK_ERROR_MEMORY_MAP_FAILED; + } + + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + hAllocation->BlockAllocMap(); + } + return res; + } + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return hAllocation->DedicatedAllocMap(this, ppData); + default: + VMA_ASSERT(0); + return VK_ERROR_MEMORY_MAP_FAILED; + } +} + +void VmaAllocator_T::Unmap(VmaAllocation hAllocation) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + pBlock->Unmap(this, 1); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VkResult res = VK_SUCCESS; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); + 
break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+ VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
+ res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return res;
+}
+
+VkResult VmaAllocator_T::BindImageMemory(
+ VmaAllocation hAllocation,
+ VkDeviceSize allocationLocalOffset,
+ VkImage hImage,
+ const void* pNext)
+{
+ VkResult res = VK_SUCCESS;
+ switch(hAllocation->GetType())
+ {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+ VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
+ res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+ break;
+ }
+ default:
+ VMA_ASSERT(0);
+ }
+ return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+ VmaAllocation hAllocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VMA_CACHE_OPERATION op)
+{
+ VkResult res = VK_SUCCESS;
+
+ VkMappedMemoryRange memRange = {};
+ if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+ {
+ switch(op)
+ {
+ case VMA_CACHE_FLUSH:
+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ break;
+ case VMA_CACHE_INVALIDATE:
+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+ }
+ // else: Just ignore this call.
+ return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+ uint32_t allocationCount,
+ const VmaAllocation* allocations,
+ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+ VMA_CACHE_OPERATION op)
+{
+ typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+ typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+ RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
+ for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+ {
+ const VmaAllocation alloc = allocations[allocIndex];
+ const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+ const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+ VkMappedMemoryRange newRange;
+ if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+ {
+ ranges.push_back(newRange);
+ }
+ }
+
+ VkResult res = VK_SUCCESS;
+ if(!ranges.empty())
+ {
+ switch(op)
+ {
+ case VMA_CACHE_FLUSH:
+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+ break;
+ case VMA_CACHE_INVALIDATE:
+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+ }
+ // else: Just ignore this call.
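+ // [Editor's note, not part of the original source] A minimal, hypothetical sketch of the
+ // public wrapper that reaches this batched path. vmaFlushAllocations is declared later in
+ // this header; `allocator` and the two allocations are assumed to exist on the caller's side:
+ //
+ //     VmaAllocation allocs[2] = { allocA, allocB };
+ //     VkDeviceSize offsets[2] = { 0, 0 };
+ //     VkDeviceSize sizes[2] = { VK_WHOLE_SIZE, VK_WHOLE_SIZE };
+ //     VkResult r = vmaFlushAllocations(allocator, 2, allocs, offsets, sizes);
+ //
+ // Ranges in non-coherent memory are aligned to nonCoherentAtomSize and flushed with a single
+ // vkFlushMappedMemoryRanges call; allocations in HOST_COHERENT memory types are skipped.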
+ return res;
+}
+
+void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+{
+ VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+ {
+ VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+ AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+ VMA_ASSERT(pDedicatedAllocations);
+ bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
+ VMA_ASSERT(success);
+ }
+
+ VkDeviceMemory hMemory = allocation->GetMemory();
+
+ /*
+ There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
+ before vkFreeMemory.
+
+ if(allocation->GetMappedData() != VMA_NULL)
+ {
+ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+ }
+ */
+
+ FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+
+ VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+}
+
+uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
+{
+ VkBufferCreateInfo dummyBufCreateInfo;
+ VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+
+ uint32_t memoryTypeBits = 0;
+
+ // Create buffer.
+ VkBuffer buf = VK_NULL_HANDLE;
+ VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+ m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+ if(res == VK_SUCCESS)
+ {
+ // Query for supported memory types.
+ VkMemoryRequirements memReq;
+ (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+ memoryTypeBits = memReq.memoryTypeBits;
+
+ // Destroy buffer.
+ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+ }
+
+ return memoryTypeBits;
+}
+
+uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
+{
+ // Make sure memory information is already fetched.
+ VMA_ASSERT(GetMemoryTypeCount() > 0);
+
+ uint32_t memoryTypeBits = UINT32_MAX;
+
+ if(!m_UseAmdDeviceCoherentMemory)
+ {
+ // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
+ {
+ memoryTypeBits &= ~(1u << memTypeIndex);
+ }
+ }
+ }
+
+ return memoryTypeBits;
+}
+
+bool VmaAllocator_T::GetFlushOrInvalidateRange(
+ VmaAllocation allocation,
+ VkDeviceSize offset, VkDeviceSize size,
+ VkMappedMemoryRange& outRange) const
+{
+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+ if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
+ {
+ const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ const VkDeviceSize allocationSize = allocation->GetSize();
+ VMA_ASSERT(offset <= allocationSize);
+
+ outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
+ outRange.pNext = VMA_NULL;
+ outRange.memory = allocation->GetMemory();
+
+ switch(allocation->GetType())
+ {
+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
+ if(size == VK_WHOLE_SIZE)
+ {
+ outRange.size = allocationSize - outRange.offset;
+ }
+ else
+ {
+ VMA_ASSERT(offset + size <= allocationSize);
+ outRange.size = VMA_MIN(
+ VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
+ allocationSize - outRange.offset);
+ }
+ break;
+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+ {
+ // 1. Still within this allocation.
+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. + const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET + +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
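+ // [Editor's note, not part of the original source] Worked example of the fallback above,
+ // with hypothetical numbers: for an 8 GiB heap, size * 8 / 10 gives roughly 6.4 GiB, i.e.
+ // the allocator assumes about 80% of the heap is usable when the driver reports a budget of 0.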
+ } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} + +#endif // #if VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + !hAllocation->CanBecomeLost() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED + +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + bool dedicatedAllocationsStarted = false; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocVector); + if(pDedicatedAllocVector->empty() == false) + { + if(dedicatedAllocationsStarted == false) + { + dedicatedAllocationsStarted = true; + json.WriteString("DedicatedAllocations"); + json.BeginObject(); + } + + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + + json.BeginArray(); + + for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i) + { + json.BeginObject(true); + const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i]; + hAlloc->PrintParameters(json); + json.EndObject(); + } + + json.EndArray(); + } + } + if(dedicatedAllocationsStarted) + { + json.EndObject(); + } + + { + bool allocationsStarted = false; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false) + { + if(allocationsStarted == false) + { + allocationsStarted = true; + json.WriteString("DefaultPools"); + json.BeginObject(); + } + + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + + m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json); + } + } + if(allocationsStarted) + { + json.EndObject(); + } + } + + // Custom pools + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + const size_t poolCount = m_Pools.size(); + if(poolCount > 0) + { + json.WriteString("Pools"); + json.BeginObject(); + for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + { + json.BeginString(); + json.ContinueString(m_Pools[poolIndex]->GetId()); + json.EndString(); + + m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json); + } + json.EndObject(); + } + } +} + 
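+// [Editor's note, not part of the original source] A minimal, hypothetical sketch of how the
+// detailed map above is normally obtained through the public API defined later in this header;
+// `allocator` is assumed to be a valid VmaAllocator:
+//
+//     char* statsJson = VMA_NULL;
+//     vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE includes the PrintDetailedMap output
+//     // ... log or save statsJson ...
+//     vmaFreeStatsString(allocator, statsJson);
+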
+#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// Public interface + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + return (*pAllocator)->Init(pCreateInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; + vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +{ + VMA_ASSERT(allocator && pAllocatorInfo); + pAllocatorInfo->instance = allocator->m_hInstance; + pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); + pAllocatorInfo->device = allocator->m_hDevice; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( + VmaAllocator allocator, + VmaStats* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStats(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget( + VmaAllocator allocator, + VmaBudget* pBudget) +{ + VMA_ASSERT(allocator && pBudget); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount()); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator); + { + VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaBudget budget[VK_MAX_MEMORY_HEAPS]; + allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount()); + + VmaStats stats; + allocator->CalculateStats(&stats); + + json.WriteString("Total"); + VmaPrintStatInfo(json, stats.total); + + for(uint32_t heapIndex = 0; 
heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + + json.WriteString("Size"); + json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size); + + json.WriteString("Flags"); + json.BeginArray(true); + if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) + { + json.WriteString("DEVICE_LOCAL"); + } + json.EndArray(); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BlockBytes"); + json.WriteNumber(budget[heapIndex].blockBytes); + json.WriteString("AllocationBytes"); + json.WriteNumber(budget[heapIndex].allocationBytes); + json.WriteString("Usage"); + json.WriteNumber(budget[heapIndex].usage); + json.WriteString("Budget"); + json.WriteNumber(budget[heapIndex].budget); + } + json.EndObject(); + + if(stats.memoryHeap[heapIndex].blockCount > 0) + { + json.WriteString("Stats"); + VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]); + } + + for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + + json.BeginObject(); + + json.WriteString("Flags"); + json.BeginArray(true); + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) + { + json.WriteString("DEVICE_LOCAL"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + json.WriteString("HOST_VISIBLE"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) + { + json.WriteString("HOST_COHERENT"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) + { + json.WriteString("HOST_CACHED"); + } + if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) + { + json.WriteString("LAZILY_ALLOCATED"); + } + if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0) + { + json.WriteString(" PROTECTED"); + } + if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + json.WriteString(" DEVICE_COHERENT"); + } + if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0) + { + json.WriteString(" DEVICE_UNCACHED"); + } + json.EndArray(); + + if(stats.memoryType[typeIndex].blockCount > 0) + { + json.WriteString("Stats"); + VmaPrintStatInfo(json, stats.memoryType[typeIndex]); + } + + json.EndObject(); + } + } + + json.EndObject(); + } + if(detailedMap == VK_TRUE) + { + allocator->PrintDetailedMap(json); + } + + json.EndObject(); + } + + const size_t len = sb.GetLength(); + char* const pChars = vma_new_array(allocator, char, len + 1); + if(len > 0) + { + memcpy(pChars, sb.GetData(), len); + } + pChars[len] = '\0'; + *ppStatsString = pChars; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + size_t len = strlen(pStatsString); + vma_delete_array(allocator, pStatsString, len + 1); + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. 
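+
+[Editor's note, not part of the original header] A minimal, hypothetical usage sketch; the
+`allocator` handle and `memoryTypeBits` value (typically VkMemoryRequirements::memoryTypeBits)
+are assumed to come from the caller:
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+    uint32_t memTypeIndex = UINT32_MAX;
+    VkResult res = vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, &memTypeIndex);
+
+On success memTypeIndex holds the lowest-cost matching memory type; if no type satisfies
+requiredFlags the function returns VK_ERROR_FEATURE_NOT_PRESENT.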
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + memoryTypeBits &= allocator->GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags; + uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags; + uint32_t notPreferredFlags = 0; + + // Convert usage to requiredFlags and preferredFlags. + switch(pAllocationCreateInfo->usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + default: + VMA_ASSERT(0); + break; + } + + // Avoid DEVICE_COHERENT unless explicitly requested. + if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < allocator->GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + + VmaCountBitsSet(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. + if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? 
VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + VkBuffer hBuffer = VK_NULL_HANDLE; + VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements( + hDev, hBuffer, &memReq); + + res = vmaFindMemoryTypeIndex( + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); + + allocator->GetVulkanFunctions().vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + VkImage hImage = VK_NULL_HANDLE; + VkResult res = allocator->GetVulkanFunctions().vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + allocator->GetVulkanFunctions().vkGetImageMemoryRequirements( + hDev, hImage, &memReq); + + res = vmaFindMemoryTypeIndex( + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); + + allocator->GetVulkanFunctions().vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->CreatePool(pCreateInfo, pPool); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool); + } +#endif + + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool); + } +#endif + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator allocator, + VmaPool pool, + VmaPoolStats* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStats(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator allocator, + VmaPool pool, + size_t* pLostAllocationCount) +{ + VMA_ASSERT(allocator && pool); + + 
VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool); + } +#endif + + allocator->MakePoolAllocationsLost(pool, pLostAllocationCount); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemory( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryPages( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + (uint64_t)allocationCount, + pAllocations); + } +#endif + + if(pAllocationInfo != VMA_NULL && 
result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForBuffer( + allocator->GetCurrentFrameIndex(), + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + image, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForImage( + allocator->GetCurrentFrameIndex(), + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t 
allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemoryPages( + allocator->GetCurrentFrameIndex(), + (uint64_t)allocationCount, + pAllocations); + } +#endif + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaResizeAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->ResizeAllocation(allocation, newSize); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordGetAllocationInfo( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordTouchAllocation( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + return allocator->TouchAllocation(allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetAllocationUserData( + allocator->GetCurrentFrameIndex(), + allocation, + pUserData); + } +#endif +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation( + VmaAllocator allocator, + VmaAllocation* pAllocation) +{ + VMA_ASSERT(allocator && pAllocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + allocator->CreateLostAllocation(pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateLostAllocation( + allocator->GetCurrentFrameIndex(), + *pAllocation); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->Map(allocation, ppData); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordMapMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordUnmapMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult 
VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFlushAllocation( + allocator->GetCurrentFrameIndex(), + allocation, offset, size); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordInvalidateAllocation( + allocator->GetCurrentFrameIndex(), + allocation, offset, size); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + //TODO + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + //TODO + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( + VmaAllocator allocator, + const VmaAllocation* pAllocations, + size_t allocationCount, + VkBool32* pAllocationsChanged, + const VmaDefragmentationInfo *pDefragmentationInfo, + VmaDefragmentationStats* pDefragmentationStats) +{ + // Deprecated interface, reimplemented using new one. 
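/* A minimal caller-side sketch of the migration this deprecated wrapper performs:
   instead of vmaDefragment(), newer code fills VmaDefragmentationInfo2 and uses the
   vmaDefragmentationBegin()/vmaDefragmentationEnd() pair defined below. `allocs`,
   `allocCount` and `changed` are hypothetical caller-owned variables; the struct
   fields and functions are the ones used by this wrapper.

       VmaDefragmentationInfo2 info2 = {};
       info2.allocationCount = allocCount;
       info2.pAllocations = allocs;               // VmaAllocation array to defragment
       info2.pAllocationsChanged = changed;       // optional VkBool32 array, may be NULL
       info2.maxCpuAllocationsToMove = UINT32_MAX;
       info2.maxCpuBytesToMove = VK_WHOLE_SIZE;   // CPU-side moves only, like the old API

       VmaDefragmentationContext ctx = VK_NULL_HANDLE;
       VmaDefragmentationStats stats = {};
       VkResult res = vmaDefragmentationBegin(allocator, &info2, &stats, &ctx);
       if(res == VK_NOT_READY)
           res = vmaDefragmentationEnd(allocator, ctx); // a CPU-only pass completes here
*/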
+ + VmaDefragmentationInfo2 info2 = {}; + info2.allocationCount = (uint32_t)allocationCount; + info2.pAllocations = pAllocations; + info2.pAllocationsChanged = pAllocationsChanged; + if(pDefragmentationInfo != VMA_NULL) + { + info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove; + info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove; + } + else + { + info2.maxCpuAllocationsToMove = UINT32_MAX; + info2.maxCpuBytesToMove = VK_WHOLE_SIZE; + } + // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero. + + VmaDefragmentationContext ctx; + VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx); + if(res == VK_NOT_READY) + { + res = vmaDefragmentationEnd( allocator, ctx); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( + VmaAllocator allocator, + const VmaDefragmentationInfo2* pInfo, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext *pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + // Degenerate case: Nothing to defragment. + if(pInfo->allocationCount == 0 && pInfo->poolCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL); + VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations)); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools)); + + VMA_DEBUG_LOG("vmaDefragmentationBegin"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDefragmentationBegin( + allocator->GetCurrentFrameIndex(), *pInfo, *pContext); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaDefragmentationEnd"); + + if(context != VK_NULL_HANDLE) + { + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDefragmentationEnd( + allocator->GetCurrentFrameIndex(), context); + } +#endif + + return allocator->DefragmentationEnd(context); + } + else + { + return VK_SUCCESS; + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationPassInfo* pInfo + ) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(pInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + { + pInfo->moveCount = 0; + return VK_SUCCESS; + } + + return allocator->DefragmentationPassBegin(pInfo, context); +} +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + return VK_SUCCESS; + + return allocator->DefragmentationPassEnd(context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, 
VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + pBufferCreateInfo->usage, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateBuffer( + allocator->GetCurrentFrameIndex(), + *pBufferCreateInfo, + *pAllocationCreateInfo, + *pAllocation); + } +#endif + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
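/* A minimal usage sketch for vmaCreateBuffer(), assuming VMA 2.x defaults; the
   buffer size, usage flags and the VMA_MEMORY_USAGE_GPU_ONLY choice are illustrative,
   the call signatures mirror the definitions in this file.

       VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
       bufCreateInfo.size = 65536;
       bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

       VmaAllocationCreateInfo allocCreateInfo = {};
       allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

       VkBuffer buf = VK_NULL_HANDLE;
       VmaAllocation alloc = VK_NULL_HANDLE;
       VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
                                      &buf, &alloc, NULL);  // pAllocationInfo is optional
       // ... use buf ...
       vmaDestroyBuffer(allocator, buf, alloc);             // destroys the buffer and frees its memory
*/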
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyBuffer( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + *pImage, // dedicatedImage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateImage( + allocator->GetCurrentFrameIndex(), + *pImageCreateInfo, + *pAllocationCreateInfo, + *pAllocation); + } +#endif + + if(res >= 0) + { + // 3. Bind image with memory. 
+ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator allocator, + VkImage image, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyImage( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +#endif // #ifdef VMA_IMPLEMENTATION \ No newline at end of file diff --git a/src/third_party/vulkan/loader/adapters.h b/src/third_party/vulkan/loader/adapters.h new file mode 100644 index 0000000..ef97d66 --- /dev/null +++ b/src/third_party/vulkan/loader/adapters.h @@ -0,0 +1,80 @@ +/* +* Copyright (c) 2019 The Khronos Group Inc. +* Copyright (c) 2019 Valve Corporation +* Copyright (c) 2019 LunarG, Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+* +* Author: Lenny Komow +*/ + +typedef struct LoaderEnumAdapters2 { + ULONG adapter_count; + struct { + UINT handle; + LUID luid; + ULONG source_count; + BOOL present_move_regions_preferred; + } * adapters; +} LoaderEnumAdapters2; + +typedef _Check_return_ NTSTATUS(APIENTRY *PFN_LoaderEnumAdapters2)(const LoaderEnumAdapters2 *); + +typedef enum AdapterInfoType { + LOADER_QUERY_TYPE_REGISTRY = 48, +} AdapterInfoType; + +typedef struct LoaderQueryAdapterInfo { + UINT handle; + AdapterInfoType type; + VOID *private_data; + UINT private_data_size; +} LoaderQueryAdapterInfo; + +typedef _Check_return_ NTSTATUS(APIENTRY *PFN_LoaderQueryAdapterInfo)(const LoaderQueryAdapterInfo *); + +typedef enum LoaderQueryRegistryType { + LOADER_QUERY_REGISTRY_ADAPTER_KEY = 1, +} LoaderQueryRegistryType; + +typedef enum LoaderQueryRegistryStatus { + LOADER_QUERY_REGISTRY_STATUS_SUCCESS = 0, + LOADER_QUERY_REGISTRY_STATUS_BUFFER_OVERFLOW = 1, +} LoaderQueryRegistryStatus; + +typedef struct LoaderQueryRegistryFlags { + union { + struct { + UINT translate_path : 1; + UINT mutable_value : 1; + UINT reserved : 30; + }; + UINT value; + }; +} LoaderQueryRegistryFlags; + +typedef struct LoaderQueryRegistryInfo { + LoaderQueryRegistryType query_type; + LoaderQueryRegistryFlags query_flags; + WCHAR value_name[MAX_PATH]; + ULONG value_type; + ULONG physical_adapter_index; + ULONG output_value_size; + LoaderQueryRegistryStatus status; + union { + DWORD output_dword; + UINT64 output_qword; + WCHAR output_string[1]; + BYTE output_binary[1]; + }; +} LoaderQueryRegistryInfo; diff --git a/src/third_party/vulkan/loader/asm_offset.c b/src/third_party/vulkan/loader/asm_offset.c new file mode 100644 index 0000000..97832af --- /dev/null +++ b/src/third_party/vulkan/loader/asm_offset.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2017-2018 The Khronos Group Inc. + * Copyright (c) 2017-2018 Valve Corporation + * Copyright (c) 2017-2018 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Lenny Komow + */ + +// This code generates an assembly file which provides offsets to get struct members from assembly code. 
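/* A hedged illustration of the intended output, assuming a 64-bit build targeting GAS:
   each entry of the `values` table below becomes one symbolic constant in the generated
   gen_defines.asm, roughly of the form

       .set VK_DEBUG_REPORT_ERROR_BIT_EXT    , 8       # The numerical value of the enum value 'VK_DEBUG_REPORT_ERROR_BIT_EXT'
       .set PTR_SIZE                         , 8       # The size of a pointer

   (MASM builds emit `NAME equ VALUE ; comment` lines instead). The struct offsets
   depend on the target ABI and the loader struct layouts, so the numbers shown are
   only indicative. */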
+ +#include +#include "loader.h" + +#if !defined(_MSC_VER) || (_MSC_VER >= 1900) +#define SIZE_T_FMT "%-8zu" +#else +#define SIZE_T_FMT "%-8lu" +#endif + +struct ValueInfo +{ + const char *name; + size_t value; + const char *comment; +}; + +int main(int argc, char **argv) { + const char *assembler = NULL; + for (int i = 0; i < argc; ++i) { + if (!strcmp(argv[i], "MASM")) { + assembler = "MASM"; + } else if (!strcmp(argv[i], "GAS")) { + assembler = "GAS"; + } + } + if (assembler == NULL) { + return 1; + } + + struct ValueInfo values[] = { + { .name = "VK_DEBUG_REPORT_ERROR_BIT_EXT", .value = (size_t) VK_DEBUG_REPORT_ERROR_BIT_EXT, + .comment = "The numerical value of the enum value 'VK_DEBUG_REPORT_ERROR_BIT_EXT'" }, + { .name = "PTR_SIZE", .value = sizeof(void*), + .comment = "The size of a pointer" }, + { .name = "HASH_SIZE", .value = sizeof(struct loader_dispatch_hash_entry), + .comment = "The size of a 'loader_dispatch_hash_entry' struct" }, + { .name = "HASH_OFFSET_INSTANCE", .value = offsetof(struct loader_instance, phys_dev_ext_disp_hash), + .comment = "The offset of 'phys_dev_ext_disp_hash' within a 'loader_instance' struct" }, + { .name = "PHYS_DEV_OFFSET_INST_DISPATCH", .value = offsetof(struct loader_instance_dispatch_table, phys_dev_ext), + .comment = "The offset of 'phys_dev_ext' within in 'loader_instance_dispatch_table' struct" }, + { .name = "PHYS_DEV_OFFSET_PHYS_DEV_TRAMP", .value = offsetof(struct loader_physical_device_tramp, phys_dev), + .comment = "The offset of 'phys_dev' within a 'loader_physical_device_tramp' struct" }, + { .name = "ICD_TERM_OFFSET_PHYS_DEV_TERM", .value = offsetof(struct loader_physical_device_term, this_icd_term), + .comment = "The offset of 'this_icd_term' within a 'loader_physical_device_term' struct" }, + { .name = "PHYS_DEV_OFFSET_PHYS_DEV_TERM", .value = offsetof(struct loader_physical_device_term, phys_dev), + .comment = "The offset of 'phys_dev' within a 'loader_physical_device_term' struct" }, + { .name = "INSTANCE_OFFSET_ICD_TERM", .value = offsetof(struct loader_icd_term, this_instance), + .comment = "The offset of 'this_instance' within a 'loader_icd_term' struct" }, + { .name = "DISPATCH_OFFSET_ICD_TERM", .value = offsetof(struct loader_icd_term, phys_dev_ext), + .comment = "The offset of 'phys_dev_ext' within a 'loader_icd_term' struct" }, + { .name = "FUNC_NAME_OFFSET_HASH", .value = offsetof(struct loader_dispatch_hash_entry, func_name), + .comment = "The offset of 'func_name' within a 'loader_dispatch_hash_entry' struct" }, + { .name = "EXT_OFFSET_DEVICE_DISPATCH", .value = offsetof(struct loader_dev_dispatch_table, ext_dispatch), + .comment = "The offset of 'ext_dispatch' within a 'loader_dev_dispatch_table' struct" }, + }; + + FILE *file = fopen("gen_defines.asm", "w"); + fprintf(file, "\n"); + if (!strcmp(assembler, "MASM")) { + for (size_t i = 0; i < sizeof(values)/sizeof(values[0]); ++i) { + fprintf(file, "%-32s equ " SIZE_T_FMT "; %s\n", values[i].name, values[i].value, values[i].comment); + } + } else if (!strcmp(assembler, "GAS")) { +#ifdef __x86_64__ + fprintf(file, ".set X86_64, 1\n"); +#endif // __x86_64__ + for (size_t i = 0; i < sizeof(values)/sizeof(values[0]); ++i) { + fprintf(file, ".set %-32s, " SIZE_T_FMT "# %s\n", values[i].name, values[i].value, values[i].comment); + } + } + return fclose(file); +} diff --git a/src/third_party/vulkan/loader/cJSON.c b/src/third_party/vulkan/loader/cJSON.c new file mode 100644 index 0000000..8da6d83 --- /dev/null +++ b/src/third_party/vulkan/loader/cJSON.c @@ -0,0 +1,1215 @@ +/* + 
Copyright (c) 2009 Dave Gamble + Copyright (c) 2015-2016 The Khronos Group Inc. + Copyright (c) 2015-2016 Valve Corporation + Copyright (c) 2015-2016 LunarG, Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. +*/ + +/* cJSON */ +/* JSON parser in C. */ + +#include +#include +#include +#include +#include +#include +#include +#include "cJSON.h" + +static const char *ep; + +const char *cJSON_GetErrorPtr(void) { return ep; } + +static void *(*cJSON_malloc)(size_t sz) = malloc; +static void (*cJSON_free)(void *ptr) = free; + +static char *cJSON_strdup(const char *str) { + size_t len; + char *copy; + + len = strlen(str) + 1; + if (!(copy = (char *)cJSON_malloc(len))) return 0; + memcpy(copy, str, len); + return copy; +} + +void cJSON_InitHooks(cJSON_Hooks *hooks) { + if (!hooks) { /* Reset hooks */ + cJSON_malloc = malloc; + cJSON_free = free; + return; + } + + cJSON_malloc = (hooks->malloc_fn) ? hooks->malloc_fn : malloc; + cJSON_free = (hooks->free_fn) ? hooks->free_fn : free; +} + +/* Internal constructor. */ +static cJSON *cJSON_New_Item(void) { + cJSON *node = (cJSON *)cJSON_malloc(sizeof(cJSON)); + if (node) memset(node, 0, sizeof(cJSON)); + return node; +} + +/* Delete a cJSON structure. */ +void cJSON_Delete(cJSON *c) { + cJSON *next; + while (c) { + next = c->next; + if (!(c->type & cJSON_IsReference) && c->child) cJSON_Delete(c->child); + if (!(c->type & cJSON_IsReference) && c->valuestring) cJSON_free(c->valuestring); + if (!(c->type & cJSON_StringIsConst) && c->string) cJSON_free(c->string); + cJSON_free(c); + c = next; + } +} + +void cJSON_Free(void *p) { cJSON_free(p); } + +/* Parse the input text to generate a number, and populate the result into item. + */ +static const char *parse_number(cJSON *item, const char *num) { + double n = 0, sign = 1, scale = 0; + int subscale = 0, signsubscale = 1; + + if (*num == '-') sign = -1, num++; /* Has sign? */ + if (*num == '0') num++; /* is zero */ + if (*num >= '1' && *num <= '9') do + n = (n * 10.0) + (*num++ - '0'); + while (*num >= '0' && *num <= '9'); /* Number? */ + if (*num == '.' && num[1] >= '0' && num[1] <= '9') { + num++; + do + n = (n * 10.0) + (*num++ - '0'), scale--; + while (*num >= '0' && *num <= '9'); + } /* Fractional part? */ + if (*num == 'e' || *num == 'E') /* Exponent? */ + { + num++; + if (*num == '+') + num++; + else if (*num == '-') + signsubscale = -1, num++; /* With sign? */ + while (*num >= '0' && *num <= '9') subscale = (subscale * 10) + (*num++ - '0'); /* Number? 
*/ + } + + n = sign * n * pow(10.0, (scale + subscale * signsubscale)); /* number = +/- + number.fraction * + 10^+/- exponent */ + + item->valuedouble = n; + item->valueint = (int)n; + item->type = cJSON_Number; + return num; +} + +static size_t pow2gt(size_t x) { + --x; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + return x + 1; +} + +typedef struct { + char *buffer; + size_t length; + size_t offset; +} printbuffer; + +static char *ensure(printbuffer *p, size_t needed) { + char *newbuffer; + size_t newsize; + if (!p || !p->buffer) return 0; + needed += p->offset; + if (needed <= p->length) return p->buffer + p->offset; + + newsize = pow2gt(needed); + newbuffer = (char *)cJSON_malloc(newsize); + if (!newbuffer) { + cJSON_free(p->buffer); + p->length = 0, p->buffer = 0; + return 0; + } + if (newbuffer) memcpy(newbuffer, p->buffer, p->length); + cJSON_free(p->buffer); + p->length = newsize; + p->buffer = newbuffer; + return newbuffer + p->offset; +} + +static size_t update(printbuffer *p) { + char *str; + if (!p || !p->buffer) return 0; + str = p->buffer + p->offset; + return p->offset + strlen(str); +} + +/* Render the number nicely from the given item into a string. */ +static char *print_number(cJSON *item, printbuffer *p) { + char *str = 0; + double d = item->valuedouble; + if (d == 0) { + if (p) + str = ensure(p, 2); + else + str = (char *)cJSON_malloc(2); /* special case for 0. */ + if (str) strcpy(str, "0"); + } else if (fabs(((double)item->valueint) - d) <= DBL_EPSILON && d <= INT_MAX && d >= INT_MIN) { + if (p) + str = ensure(p, 21); + else + str = (char *)cJSON_malloc(21); /* 2^64+1 can be represented in 21 chars. */ + if (str) sprintf(str, "%d", item->valueint); + } else { + if (p) + str = ensure(p, 64); + else + str = (char *)cJSON_malloc(64); /* This is a nice tradeoff. */ + if (str) { + if (fabs(floor(d) - d) <= DBL_EPSILON && fabs(d) < 1.0e60) + sprintf(str, "%.0f", d); + else if (fabs(d) < 1.0e-6 || fabs(d) > 1.0e9) + sprintf(str, "%e", d); + else + sprintf(str, "%f", d); + } + } + return str; +} + +static unsigned parse_hex4(const char *str) { + unsigned h = 0; + if (*str >= '0' && *str <= '9') + h += (*str) - '0'; + else if (*str >= 'A' && *str <= 'F') + h += 10 + (*str) - 'A'; + else if (*str >= 'a' && *str <= 'f') + h += 10 + (*str) - 'a'; + else + return 0; + h = h << 4; + str++; + if (*str >= '0' && *str <= '9') + h += (*str) - '0'; + else if (*str >= 'A' && *str <= 'F') + h += 10 + (*str) - 'A'; + else if (*str >= 'a' && *str <= 'f') + h += 10 + (*str) - 'a'; + else + return 0; + h = h << 4; + str++; + if (*str >= '0' && *str <= '9') + h += (*str) - '0'; + else if (*str >= 'A' && *str <= 'F') + h += 10 + (*str) - 'A'; + else if (*str >= 'a' && *str <= 'f') + h += 10 + (*str) - 'a'; + else + return 0; + h = h << 4; + str++; + if (*str >= '0' && *str <= '9') + h += (*str) - '0'; + else if (*str >= 'A' && *str <= 'F') + h += 10 + (*str) - 'A'; + else if (*str >= 'a' && *str <= 'f') + h += 10 + (*str) - 'a'; + else + return 0; + return h; +} + +/* Parse the input text into an unescaped cstring, and populate item. */ +static const unsigned char firstByteMark[7] = {0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC}; +static const char *parse_string(cJSON *item, const char *str) { + const char *ptr = str + 1; + char *ptr2; + char *out; + int len = 0; + unsigned uc, uc2; + if (*str != '\"') { + ep = str; + return 0; + } /* not a string! */ + + while (*ptr != '\"' && *ptr && ++len) + if (*ptr++ == '\\') ptr++; /* Skip escaped quotes. 
*/ + + out = (char *)cJSON_malloc(len + 1); /* This is how long we need for the string, roughly. */ + if (!out) return 0; + + ptr = str + 1; + ptr2 = out; + while (*ptr != '\"' && *ptr) { + if (*ptr != '\\') + *ptr2++ = *ptr++; + else { + ptr++; + switch (*ptr) { + case 'b': + *ptr2++ = '\b'; + break; + case 'f': + *ptr2++ = '\f'; + break; + case 'n': + *ptr2++ = '\n'; + break; + case 'r': + *ptr2++ = '\r'; + break; + case 't': + *ptr2++ = '\t'; + break; + case 'u': /* transcode utf16 to utf8. */ + uc = parse_hex4(ptr + 1); + ptr += 4; /* get the unicode char. */ + + if ((uc >= 0xDC00 && uc <= 0xDFFF) || uc == 0) break; /* check for invalid. */ + + if (uc >= 0xD800 && uc <= 0xDBFF) /* UTF16 surrogate pairs. */ + { + if (ptr[1] != '\\' || ptr[2] != 'u') break; /* missing second-half of surrogate. */ + uc2 = parse_hex4(ptr + 3); + ptr += 6; + if (uc2 < 0xDC00 || uc2 > 0xDFFF) break; /* invalid second-half of surrogate. */ + uc = 0x10000 + (((uc & 0x3FF) << 10) | (uc2 & 0x3FF)); + } + + len = 4; + if (uc < 0x80) + len = 1; + else if (uc < 0x800) + len = 2; + else if (uc < 0x10000) + len = 3; + ptr2 += len; + + switch (len) { + case 4: + *--ptr2 = ((uc | 0x80) & 0xBF); + uc >>= 6; + // fall through + case 3: + *--ptr2 = ((uc | 0x80) & 0xBF); + uc >>= 6; + // fall through + case 2: + *--ptr2 = ((uc | 0x80) & 0xBF); + uc >>= 6; + // fall through + case 1: + *--ptr2 = ((unsigned char)uc | firstByteMark[len]); + } + ptr2 += len; + break; + default: + *ptr2++ = *ptr; + break; + } + ptr++; + } + } + *ptr2 = 0; + if (*ptr == '\"') ptr++; + item->valuestring = out; + item->type = cJSON_String; + return ptr; +} + +/* Render the cstring provided to an escaped version that can be printed. */ +static char *print_string_ptr(const char *str, printbuffer *p) { + const char *ptr; + char *ptr2; + char *out; + size_t len = 0, flag = 0; + unsigned char token; + + for (ptr = str; *ptr; ptr++) flag |= ((*ptr > 0 && *ptr < 32) || (*ptr == '\"') || (*ptr == '\\')) ? 1 : 0; + if (!flag) { + len = ptr - str; + if (p) + out = ensure(p, len + 3); + else + out = (char *)cJSON_malloc(len + 3); + if (!out) return 0; + ptr2 = out; + *ptr2++ = '\"'; + strcpy(ptr2, str); + ptr2[len] = '\"'; + ptr2[len + 1] = 0; + return out; + } + + if (!str) { + if (p) + out = ensure(p, 3); + else + out = (char *)cJSON_malloc(3); + if (!out) return 0; + strcpy(out, "\"\""); + return out; + } + ptr = str; + while ((token = *ptr) && ++len) { + if (strchr("\"\\\b\f\n\r\t", token)) + len++; + else if (token < 32) + len += 5; + ptr++; + } + + if (p) + out = ensure(p, len + 3); + else + out = (char *)cJSON_malloc(len + 3); + if (!out) return 0; + + ptr2 = out; + ptr = str; + *ptr2++ = '\"'; + while (*ptr) { + if ((unsigned char)*ptr > 31 && *ptr != '\"' && *ptr != '\\') + *ptr2++ = *ptr++; + else { + switch (token = *ptr++) { + case '\\': + *ptr2++ = '\\'; + break; + case '\"': + *ptr2++ = '\"'; + break; + case '\b': + *ptr2++ = '\b'; + break; + case '\f': + *ptr2++ = '\f'; + break; + case '\n': + *ptr2++ = '\n'; + break; + case '\r': + *ptr2++ = '\r'; + break; + case '\t': + *ptr2++ = '\t'; + break; + default: + sprintf(ptr2, "u%04x", token); + ptr2 += 5; + break; /* escape and print */ + } + } + } + *ptr2++ = '\"'; + *ptr2++ = 0; + return out; +} +/* Invote print_string_ptr (which is useful) on an item. */ +static char *print_string(cJSON *item, printbuffer *p) { return print_string_ptr(item->valuestring, p); } + +/* Predeclare these prototypes. 
*/ +static const char *parse_value(cJSON *item, const char *value); +static char *print_value(cJSON *item, int depth, int fmt, printbuffer *p); +static const char *parse_array(cJSON *item, const char *value); +static char *print_array(cJSON *item, int depth, int fmt, printbuffer *p); +static const char *parse_object(cJSON *item, const char *value); +static char *print_object(cJSON *item, int depth, int fmt, printbuffer *p); + +/* Utility to jump whitespace and cr/lf */ +static const char *skip(const char *in) { + while (in && *in && (unsigned char)*in <= 32) in++; + return in; +} + +/* Parse an object - create a new root, and populate. */ +cJSON *cJSON_ParseWithOpts(const char *value, const char **return_parse_end, int require_null_terminated) { + const char *end = 0; + cJSON *c = cJSON_New_Item(); + ep = 0; + if (!c) return 0; /* memory fail */ + + end = parse_value(c, skip(value)); + if (!end) { + cJSON_Delete(c); + return 0; + } /* parse failure. ep is set. */ + + /* if we require null-terminated JSON without appended garbage, skip and + * then check for a null terminator */ + if (require_null_terminated) { + end = skip(end); + if (*end) { + cJSON_Delete(c); + ep = end; + return 0; + } + } + if (return_parse_end) *return_parse_end = end; + return c; +} +/* Default options for cJSON_Parse */ +cJSON *cJSON_Parse(const char *value) { return cJSON_ParseWithOpts(value, 0, 0); } + +/* Render a cJSON item/entity/structure to text. */ +char *cJSON_Print(cJSON *item) { return print_value(item, 0, 1, 0); } +char *cJSON_PrintUnformatted(cJSON *item) { return print_value(item, 0, 0, 0); } + +char *cJSON_PrintBuffered(cJSON *item, int prebuffer, int fmt) { + printbuffer p; + p.buffer = (char *)cJSON_malloc(prebuffer); + p.length = prebuffer; + p.offset = 0; + return print_value(item, 0, fmt, &p); +} + +/* Parser core - when encountering text, process appropriately. */ +static const char *parse_value(cJSON *item, const char *value) { + if (!value) return 0; /* Fail on null. */ + if (!strncmp(value, "null", 4)) { + item->type = cJSON_NULL; + return value + 4; + } + if (!strncmp(value, "false", 5)) { + item->type = cJSON_False; + return value + 5; + } + if (!strncmp(value, "true", 4)) { + item->type = cJSON_True; + item->valueint = 1; + return value + 4; + } + if (*value == '\"') { + return parse_string(item, value); + } + if (*value == '-' || (*value >= '0' && *value <= '9')) { + return parse_number(item, value); + } + if (*value == '[') { + return parse_array(item, value); + } + if (*value == '{') { + return parse_object(item, value); + } + + ep = value; + return 0; /* failure. */ +} + +/* Render a value to text. 
*/ +static char *print_value(cJSON *item, int depth, int fmt, printbuffer *p) { + char *out = 0; + if (!item) return 0; + if (p) { + switch ((item->type) & 255) { + case cJSON_NULL: { + out = ensure(p, 5); + if (out) strcpy(out, "null"); + break; + } + case cJSON_False: { + out = ensure(p, 6); + if (out) strcpy(out, "false"); + break; + } + case cJSON_True: { + out = ensure(p, 5); + if (out) strcpy(out, "true"); + break; + } + case cJSON_Number: + out = print_number(item, p); + break; + case cJSON_String: + out = print_string(item, p); + break; + case cJSON_Array: + out = print_array(item, depth, fmt, p); + break; + case cJSON_Object: + out = print_object(item, depth, fmt, p); + break; + } + } else { + switch ((item->type) & 255) { + case cJSON_NULL: + out = cJSON_strdup("null"); + break; + case cJSON_False: + out = cJSON_strdup("false"); + break; + case cJSON_True: + out = cJSON_strdup("true"); + break; + case cJSON_Number: + out = print_number(item, 0); + break; + case cJSON_String: + out = print_string(item, 0); + break; + case cJSON_Array: + out = print_array(item, depth, fmt, 0); + break; + case cJSON_Object: + out = print_object(item, depth, fmt, 0); + break; + } + } + return out; +} + +/* Build an array from input text. */ +static const char *parse_array(cJSON *item, const char *value) { + cJSON *child; + if (*value != '[') { + ep = value; + return 0; + } /* not an array! */ + + item->type = cJSON_Array; + value = skip(value + 1); + if (*value == ']') return value + 1; /* empty array. */ + + item->child = child = cJSON_New_Item(); + if (!item->child) return 0; /* memory fail */ + value = skip(parse_value(child, skip(value))); /* skip any spacing, get the value. */ + if (!value) return 0; + + while (*value == ',') { + cJSON *new_item; + if (!(new_item = cJSON_New_Item())) return 0; /* memory fail */ + child->next = new_item; + new_item->prev = child; + child = new_item; + value = skip(parse_value(child, skip(value + 1))); + if (!value) return 0; /* memory fail */ + } + + if (*value == ']') return value + 1; /* end of array */ + ep = value; + return 0; /* malformed. */ +} + +/* Render an array to text */ +static char *print_array(cJSON *item, int depth, int fmt, printbuffer *p) { + char **entries; + char *out = 0, *ptr, *ret; + size_t len = 5; + cJSON *child = item->child; + int numentries = 0, fail = 0, j = 0; + size_t tmplen = 0, i = 0; + + /* How many entries in the array? */ + while (child) numentries++, child = child->next; + /* Explicitly handle numentries==0 */ + if (!numentries) { + if (p) + out = ensure(p, 3); + else + out = (char *)cJSON_malloc(3); + if (out) strcpy(out, "[]"); + return out; + } + + if (p) { + /* Compose the output array. */ + i = p->offset; + ptr = ensure(p, 1); + if (!ptr) return 0; + *ptr = '['; + p->offset++; + child = item->child; + while (child && !fail) { + print_value(child, depth + 1, fmt, p); + p->offset = update(p); + if (child->next) { + len = fmt ? 
2 : 1; + ptr = ensure(p, len + 1); + if (!ptr) return 0; + *ptr++ = ','; + if (fmt) *ptr++ = ' '; + *ptr = 0; + p->offset += len; + } + child = child->next; + } + ptr = ensure(p, 2); + if (!ptr) return 0; + *ptr++ = ']'; + *ptr = 0; + out = (p->buffer) + i; + } else { + /* Allocate an array to hold the values for each */ + entries = (char **)cJSON_malloc(numentries * sizeof(char *)); + if (!entries) return 0; + memset(entries, 0, numentries * sizeof(char *)); + /* Retrieve all the results: */ + child = item->child; + while (child && !fail) { + ret = print_value(child, depth + 1, fmt, 0); + entries[i++] = ret; + if (ret) + len += strlen(ret) + 2 + (fmt ? 1 : 0); + else + fail = 1; + child = child->next; + } + + /* If we didn't fail, try to malloc the output string */ + if (!fail) out = (char *)cJSON_malloc(len); + /* If that fails, we fail. */ + if (!out) fail = 1; + + /* Handle failure. */ + if (fail) { + for (j = 0; j < numentries; j++) + if (entries[j]) cJSON_free(entries[j]); + cJSON_free(entries); + return 0; + } + + /* Compose the output array. */ + *out = '['; + ptr = out + 1; + *ptr = 0; + for (j = 0; j < numentries; j++) { + tmplen = strlen(entries[j]); + memcpy(ptr, entries[j], tmplen); + ptr += tmplen; + if (j != numentries - 1) { + *ptr++ = ','; + if (fmt) *ptr++ = ' '; + *ptr = 0; + } + cJSON_free(entries[j]); + } + cJSON_free(entries); + *ptr++ = ']'; + *ptr++ = 0; + } + return out; +} + +/* Build an object from the text. */ +static const char *parse_object(cJSON *item, const char *value) { + cJSON *child; + if (*value != '{') { + ep = value; + return 0; + } /* not an object! */ + + item->type = cJSON_Object; + value = skip(value + 1); + if (*value == '}') return value + 1; /* empty array. */ + + item->child = child = cJSON_New_Item(); + if (!item->child) return 0; + value = skip(parse_string(child, skip(value))); + if (!value) return 0; + child->string = child->valuestring; + child->valuestring = 0; + if (*value != ':') { + ep = value; + return 0; + } /* fail! */ + value = skip(parse_value(child, skip(value + 1))); /* skip any spacing, get the value. */ + if (!value) return 0; + + while (*value == ',') { + cJSON *new_item; + if (!(new_item = cJSON_New_Item())) return 0; /* memory fail */ + child->next = new_item; + new_item->prev = child; + child = new_item; + value = skip(parse_string(child, skip(value + 1))); + if (!value) return 0; + child->string = child->valuestring; + child->valuestring = 0; + if (*value != ':') { + ep = value; + return 0; + } /* fail! */ + value = skip(parse_value(child, skip(value + 1))); /* skip any spacing, get the value. */ + if (!value) return 0; + } + + if (*value == '}') return value + 1; /* end of array */ + ep = value; + return 0; /* malformed. */ +} + +/* Render an object to text. */ +static char *print_object(cJSON *item, int depth, int fmt, printbuffer *p) { + char **entries = 0, **names = 0; + char *out = 0, *ptr, *ret, *str; + int j; + cJSON *child = item->child; + int numentries = 0, fail = 0, k; + size_t tmplen = 0, i = 0, len = 7; + /* Count the number of entries. */ + while (child) numentries++, child = child->next; + /* Explicitly handle empty object case */ + if (!numentries) { + if (p) + out = ensure(p, fmt ? depth + 4 : 3); + else + out = (char *)cJSON_malloc(fmt ? depth + 4 : 3); + if (!out) return 0; + ptr = out; + *ptr++ = '{'; + if (fmt) { + *ptr++ = '\n'; + for (j = 0; j < depth - 1; j++) *ptr++ = '\t'; + } + *ptr++ = '}'; + *ptr++ = 0; + return out; + } + if (p) { + /* Compose the output: */ + i = p->offset; + len = fmt ? 
2 : 1; + ptr = ensure(p, len + 1); + if (!ptr) return 0; + *ptr++ = '{'; + if (fmt) *ptr++ = '\n'; + *ptr = 0; + p->offset += len; + child = item->child; + depth++; + while (child) { + if (fmt) { + ptr = ensure(p, depth); + if (!ptr) return 0; + for (j = 0; j < depth; j++) *ptr++ = '\t'; + p->offset += depth; + } + print_string_ptr(child->string, p); + p->offset = update(p); + + len = fmt ? 2 : 1; + ptr = ensure(p, len); + if (!ptr) return 0; + *ptr++ = ':'; + if (fmt) *ptr++ = '\t'; + p->offset += len; + + print_value(child, depth, fmt, p); + p->offset = update(p); + + len = (fmt ? 1 : 0) + (child->next ? 1 : 0); + ptr = ensure(p, len + 1); + if (!ptr) return 0; + if (child->next) *ptr++ = ','; + if (fmt) *ptr++ = '\n'; + *ptr = 0; + p->offset += len; + child = child->next; + } + ptr = ensure(p, fmt ? (depth + 1) : 2); + if (!ptr) return 0; + if (fmt) + for (j = 0; j < depth - 1; j++) *ptr++ = '\t'; + *ptr++ = '}'; + *ptr = 0; + out = (p->buffer) + i; + } else { + /* Allocate space for the names and the objects */ + entries = (char **)cJSON_malloc(numentries * sizeof(char *)); + if (!entries) return 0; + names = (char **)cJSON_malloc(numentries * sizeof(char *)); + if (!names) { + cJSON_free(entries); + return 0; + } + memset(entries, 0, sizeof(char *) * numentries); + memset(names, 0, sizeof(char *) * numentries); + + /* Collect all the results into our arrays: */ + child = item->child; + depth++; + if (fmt) len += depth; + while (child) { + names[i] = str = print_string_ptr(child->string, 0); + entries[i++] = ret = print_value(child, depth, fmt, 0); + if (str && ret) + len += strlen(ret) + strlen(str) + 2 + (fmt ? 2 + depth : 0); + else + fail = 1; + child = child->next; + } + + /* Try to allocate the output string */ + if (!fail) out = (char *)cJSON_malloc(len); + if (!out) fail = 1; + + /* Handle failure */ + if (fail) { + for (j = 0; j < numentries; j++) { + if (names[i]) cJSON_free(names[j]); + if (entries[j]) cJSON_free(entries[j]); + } + cJSON_free(names); + cJSON_free(entries); + return 0; + } + + /* Compose the output: */ + *out = '{'; + ptr = out + 1; + if (fmt) *ptr++ = '\n'; + *ptr = 0; + for (j = 0; j < numentries; j++) { + if (fmt) + for (k = 0; k < depth; k++) *ptr++ = '\t'; + tmplen = strlen(names[j]); + memcpy(ptr, names[j], tmplen); + ptr += tmplen; + *ptr++ = ':'; + if (fmt) *ptr++ = '\t'; + strcpy(ptr, entries[j]); + ptr += strlen(entries[j]); + if (j != numentries - 1) *ptr++ = ','; + if (fmt) *ptr++ = '\n'; + *ptr = 0; + cJSON_free(names[j]); + cJSON_free(entries[j]); + } + + cJSON_free(names); + cJSON_free(entries); + if (fmt) + for (j = 0; j < depth - 1; j++) *ptr++ = '\t'; + *ptr++ = '}'; + *ptr++ = 0; + } + return out; +} + +/* Get Array size/item / object item. */ +int cJSON_GetArraySize(cJSON *array) { + cJSON *c = array->child; + int i = 0; + while (c) i++, c = c->next; + return i; +} +cJSON *cJSON_GetArrayItem(cJSON *array, int item) { + cJSON *c = array->child; + while (c && item > 0) item--, c = c->next; + return c; +} +cJSON *cJSON_GetObjectItem(cJSON *object, const char *string) { + cJSON *c = object->child; + while (c && strcmp(c->string, string)) c = c->next; + return c; +} + +/* Utility for array list handling. */ +static void suffix_object(cJSON *prev, cJSON *item) { + prev->next = item; + item->prev = prev; +} +/* Utility for handling references. 
*/ +static cJSON *create_reference(cJSON *item) { + cJSON *ref = cJSON_New_Item(); + if (!ref) return 0; + memcpy(ref, item, sizeof(cJSON)); + ref->string = 0; + ref->type |= cJSON_IsReference; + ref->next = ref->prev = 0; + return ref; +} + +/* Add item to array/object. */ +void cJSON_AddItemToArray(cJSON *array, cJSON *item) { + cJSON *c = array->child; + if (!item) return; + if (!c) { + array->child = item; + } else { + while (c && c->next) c = c->next; + suffix_object(c, item); + } +} +void cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) { + if (!item) return; + if (item->string) cJSON_free(item->string); + item->string = cJSON_strdup(string); + cJSON_AddItemToArray(object, item); +} +void cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) { + if (!item) return; + if (!(item->type & cJSON_StringIsConst) && item->string) cJSON_free(item->string); + item->string = (char *)string; + item->type |= cJSON_StringIsConst; + cJSON_AddItemToArray(object, item); +} +void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) { cJSON_AddItemToArray(array, create_reference(item)); } +void cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) { + cJSON_AddItemToObject(object, string, create_reference(item)); +} + +cJSON *cJSON_DetachItemFromArray(cJSON *array, int which) { + cJSON *c = array->child; + while (c && which > 0) c = c->next, which--; + if (!c) return 0; + if (c->prev) c->prev->next = c->next; + if (c->next) c->next->prev = c->prev; + if (c == array->child) array->child = c->next; + c->prev = c->next = 0; + return c; +} +void cJSON_DeleteItemFromArray(cJSON *array, int which) { cJSON_Delete(cJSON_DetachItemFromArray(array, which)); } +cJSON *cJSON_DetachItemFromObject(cJSON *object, const char *string) { + int i = 0; + cJSON *c = object->child; + while (c && strcmp(c->string, string)) i++, c = c->next; + if (c) return cJSON_DetachItemFromArray(object, i); + return 0; +} +void cJSON_DeleteItemFromObject(cJSON *object, const char *string) { cJSON_Delete(cJSON_DetachItemFromObject(object, string)); } + +/* Replace array/object items with new ones. 
*/ +void cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) { + cJSON *c = array->child; + while (c && which > 0) c = c->next, which--; + if (!c) { + cJSON_AddItemToArray(array, newitem); + return; + } + newitem->next = c; + newitem->prev = c->prev; + c->prev = newitem; + if (c == array->child) + array->child = newitem; + else + newitem->prev->next = newitem; +} +void cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) { + cJSON *c = array->child; + while (c && which > 0) c = c->next, which--; + if (!c) return; + newitem->next = c->next; + newitem->prev = c->prev; + if (newitem->next) newitem->next->prev = newitem; + if (c == array->child) + array->child = newitem; + else + newitem->prev->next = newitem; + c->next = c->prev = 0; + cJSON_Delete(c); +} +void cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) { + int i = 0; + cJSON *c = object->child; + while (c && strcmp(c->string, string)) i++, c = c->next; + if (c) { + newitem->string = cJSON_strdup(string); + cJSON_ReplaceItemInArray(object, i, newitem); + } +} + +/* Create basic types: */ +cJSON *cJSON_CreateNull(void) { + cJSON *item = cJSON_New_Item(); + if (item) item->type = cJSON_NULL; + return item; +} +cJSON *cJSON_CreateTrue(void) { + cJSON *item = cJSON_New_Item(); + if (item) item->type = cJSON_True; + return item; +} +cJSON *cJSON_CreateFalse(void) { + cJSON *item = cJSON_New_Item(); + if (item) item->type = cJSON_False; + return item; +} +cJSON *cJSON_CreateBool(int b) { + cJSON *item = cJSON_New_Item(); + if (item) item->type = b ? cJSON_True : cJSON_False; + return item; +} +cJSON *cJSON_CreateNumber(double num) { + cJSON *item = cJSON_New_Item(); + if (item) { + item->type = cJSON_Number; + item->valuedouble = num; + item->valueint = (int)num; + } + return item; +} +cJSON *cJSON_CreateString(const char *string) { + cJSON *item = cJSON_New_Item(); + if (item) { + item->type = cJSON_String; + item->valuestring = cJSON_strdup(string); + } + return item; +} +cJSON *cJSON_CreateArray(void) { + cJSON *item = cJSON_New_Item(); + if (item) item->type = cJSON_Array; + return item; +} +cJSON *cJSON_CreateObject(void) { + cJSON *item = cJSON_New_Item(); + if (item) item->type = cJSON_Object; + return item; +} + +/* Create Arrays: */ +cJSON *cJSON_CreateIntArray(const int *numbers, int count) { + int i; + cJSON *n = 0, *p = 0, *a = cJSON_CreateArray(); + for (i = 0; a && i < count; i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!i) + a->child = n; + else + suffix_object(p, n); + p = n; + } + return a; +} +cJSON *cJSON_CreateFloatArray(const float *numbers, int count) { + int i; + cJSON *n = 0, *p = 0, *a = cJSON_CreateArray(); + for (i = 0; a && i < count; i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!i) + a->child = n; + else + suffix_object(p, n); + p = n; + } + return a; +} +cJSON *cJSON_CreateDoubleArray(const double *numbers, int count) { + int i; + cJSON *n = 0, *p = 0, *a = cJSON_CreateArray(); + for (i = 0; a && i < count; i++) { + n = cJSON_CreateNumber(numbers[i]); + if (!i) + a->child = n; + else + suffix_object(p, n); + p = n; + } + return a; +} +cJSON *cJSON_CreateStringArray(const char **strings, int count) { + int i; + cJSON *n = 0, *p = 0, *a = cJSON_CreateArray(); + for (i = 0; a && i < count; i++) { + n = cJSON_CreateString(strings[i]); + if (!i) + a->child = n; + else + suffix_object(p, n); + p = n; + } + return a; +} + +/* Duplication */ +cJSON *cJSON_Duplicate(cJSON *item, int recurse) { + cJSON *newitem, *cptr, *nptr = 0, *newchild; + /* Bail on bad 
ptr */ + if (!item) return 0; + /* Create new item */ + newitem = cJSON_New_Item(); + if (!newitem) return 0; + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference), newitem->valueint = item->valueint, newitem->valuedouble = item->valuedouble; + if (item->valuestring) { + newitem->valuestring = cJSON_strdup(item->valuestring); + if (!newitem->valuestring) { + cJSON_Delete(newitem); + return 0; + } + } + if (item->string) { + newitem->string = cJSON_strdup(item->string); + if (!newitem->string) { + cJSON_Delete(newitem); + return 0; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) return newitem; + /* Walk the ->next chain for the child. */ + cptr = item->child; + while (cptr) { + newchild = cJSON_Duplicate(cptr, 1); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) { + cJSON_Delete(newitem); + return 0; + } + if (nptr) { + nptr->next = newchild, newchild->prev = nptr; + nptr = newchild; + } /* If newitem->child already set, then crosswire ->prev and ->next and + move on */ + else { + newitem->child = newchild; + nptr = newchild; + } /* Set newitem->child and move to it */ + cptr = cptr->next; + } + return newitem; +} + +void cJSON_Minify(char *json) { + char *into = json; + while (*json) { + if (*json == ' ') + json++; + else if (*json == '\t') + json++; /* Whitespace characters. */ + else if (*json == '\r') + json++; + else if (*json == '\n') + json++; + else if (*json == '/' && json[1] == '/') + while (*json && *json != '\n') json++; /* double-slash comments, to end of line. */ + else if (*json == '/' && json[1] == '*') { + while (*json && !(*json == '*' && json[1] == '/')) json++; + json += 2; + } /* multiline comments. */ + else if (*json == '\"') { + *into++ = *json++; + while (*json && *json != '\"') { + if (*json == '\\') *into++ = *json++; + *into++ = *json++; + } + *into++ = *json++; + } /* string literals, which are \" sensitive. */ + else + *into++ = *json++; /* All other characters. */ + } + *into = 0; /* and null-terminate. */ +} diff --git a/src/third_party/vulkan/loader/cJSON.h b/src/third_party/vulkan/loader/cJSON.h new file mode 100644 index 0000000..f0059ab --- /dev/null +++ b/src/third_party/vulkan/loader/cJSON.h @@ -0,0 +1,174 @@ +/* + Copyright (c) 2009 Dave Gamble + Copyright (c) 2015-2016 The Khronos Group Inc. + Copyright (c) 2015-2016 Valve Corporation + Copyright (c) 2015-2016 LunarG, Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. 
+*/ + +#ifndef cJSON__h +#define cJSON__h + +#ifdef __cplusplus +extern "C" { +#endif + +/* cJSON Types: */ +#define cJSON_False 0 +#define cJSON_True 1 +#define cJSON_NULL 2 +#define cJSON_Number 3 +#define cJSON_String 4 +#define cJSON_Array 5 +#define cJSON_Object 6 + +#define cJSON_IsReference 256 +#define cJSON_StringIsConst 512 + +/* The cJSON structure: */ +typedef struct cJSON { + struct cJSON *next, *prev; /* next/prev allow you to walk array/object + chains. Alternatively, use + GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *child; /* An array or object item will have a child pointer + pointing to a chain of the items in the + array/object. */ + + int type; /* The type of the item, as above. */ + + char *valuestring; /* The item's string, if type==cJSON_String */ + int valueint; /* The item's number, if type==cJSON_Number */ + double valuedouble; /* The item's number, if type==cJSON_Number */ + + char *string; /* The item's name string, if this item is the child of, or is + in the list of subitems of an object. */ +} cJSON; + +typedef struct cJSON_Hooks { + void *(*malloc_fn)(size_t sz); + void (*free_fn)(void *ptr); +} cJSON_Hooks; + +/* Supply malloc, realloc and free functions to cJSON */ +extern void cJSON_InitHooks(cJSON_Hooks *hooks); + +/* Supply a block of JSON, and this returns a cJSON object you can interrogate. + * Call cJSON_Delete when finished. */ +extern cJSON *cJSON_Parse(const char *value); +/* Render a cJSON entity to text for transfer/storage. Free the char* when + * finished. */ +extern char *cJSON_Print(cJSON *item); +/* Render a cJSON entity to text for transfer/storage without any formatting. + * Free the char* when finished. */ +extern char *cJSON_PrintUnformatted(cJSON *item); +/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess + * at the final size. guessing well reduces reallocation. fmt=0 gives + * unformatted, =1 gives formatted */ +extern char *cJSON_PrintBuffered(cJSON *item, int prebuffer, int fmt); +/* Delete a cJSON entity and all subentities. */ +extern void cJSON_Delete(cJSON *c); +/* Delete an item allocated inside the JSON parser*/ +extern void cJSON_Free(void *p); + +/* Returns the number of items in an array (or object). */ +extern int cJSON_GetArraySize(cJSON *array); +/* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. + */ +extern cJSON *cJSON_GetArrayItem(cJSON *array, int item); +/* Get item "string" from object. Case insensitive. */ +extern cJSON *cJSON_GetObjectItem(cJSON *object, const char *string); + +/* For analysing failed parses. This returns a pointer to the parse error. + * You'll probably need to look a few chars back to make sense of it. Defined + * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ +extern const char *cJSON_GetErrorPtr(void); + +/* These calls create a cJSON item of the appropriate type. */ +extern cJSON *cJSON_CreateNull(void); +extern cJSON *cJSON_CreateTrue(void); +extern cJSON *cJSON_CreateFalse(void); +extern cJSON *cJSON_CreateBool(int b); +extern cJSON *cJSON_CreateNumber(double num); +extern cJSON *cJSON_CreateString(const char *string); +extern cJSON *cJSON_CreateArray(void); +extern cJSON *cJSON_CreateObject(void); + +/* These utilities create an Array of count items. 
*/ +extern cJSON *cJSON_CreateIntArray(const int *numbers, int count); +extern cJSON *cJSON_CreateFloatArray(const float *numbers, int count); +extern cJSON *cJSON_CreateDoubleArray(const double *numbers, int count); +extern cJSON *cJSON_CreateStringArray(const char **strings, int count); + +/* Append item to the specified array/object. */ +extern void cJSON_AddItemToArray(cJSON *array, cJSON *item); +extern void cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); +extern void cJSON_AddItemToObjectCS(cJSON *object, const char *string, + cJSON *item); /* Use this when string is definitely const (i.e. a literal, + or as good as), and will definitely survive the cJSON + object */ +/* Append reference to item to the specified array/object. Use this when you + * want to add an existing cJSON to a new cJSON, but don't want to corrupt your + * existing cJSON. */ +extern void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); +extern void cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + +/* Remove/Detatch items from Arrays/Objects. */ +extern cJSON *cJSON_DetachItemFromArray(cJSON *array, int which); +extern void cJSON_DeleteItemFromArray(cJSON *array, int which); +extern cJSON *cJSON_DetachItemFromObject(cJSON *object, const char *string); +extern void cJSON_DeleteItemFromObject(cJSON *object, const char *string); + +/* Update array items. */ +extern void cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ +extern void cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); +extern void cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem); + +/* Duplicate a cJSON item */ +extern cJSON *cJSON_Duplicate(cJSON *item, int recurse); +/* Duplicate will create a new, identical cJSON item to the one you pass, in new +memory that will +need to be released. With recurse!=0, it will duplicate any children connected +to the item. +The item->next and ->prev pointers are always zero on return from Duplicate. */ + +/* ParseWithOpts allows you to require (and check) that the JSON is null + * terminated, and to retrieve the pointer to the final byte parsed. */ +extern cJSON *cJSON_ParseWithOpts(const char *value, const char **return_parse_end, int require_null_terminated); + +extern void cJSON_Minify(char *json); + +/* Macros for creating things quickly. */ +#define cJSON_AddNullToObject(object, name) cJSON_AddItemToObject(object, name, cJSON_CreateNull()) +#define cJSON_AddTrueToObject(object, name) cJSON_AddItemToObject(object, name, cJSON_CreateTrue()) +#define cJSON_AddFalseToObject(object, name) cJSON_AddItemToObject(object, name, cJSON_CreateFalse()) +#define cJSON_AddBoolToObject(object, name, b) cJSON_AddItemToObject(object, name, cJSON_CreateBool(b)) +#define cJSON_AddNumberToObject(object, name, n) cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n)) +#define cJSON_AddStringToObject(object, name, s) cJSON_AddItemToObject(object, name, cJSON_CreateString(s)) + +/* When assigning an integer value, it needs to be propagated to valuedouble + * too. */ +#define cJSON_SetIntValue(object, val) ((object) ? (object)->valueint = (object)->valuedouble = (val) : (val)) +#define cJSON_SetNumberValue(object, val) ((object) ? 
(object)->valueint = (object)->valuedouble = (val) : (val)) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/loader/debug_utils.c b/src/third_party/vulkan/loader/debug_utils.c new file mode 100644 index 0000000..fa27985 --- /dev/null +++ b/src/third_party/vulkan/loader/debug_utils.c @@ -0,0 +1,996 @@ +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * Copyright (C) 2015-2016 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Courtney Goeltzenleuchter + * Author: Jon Ashburn + * Author: Mark Young + * + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include <string.h> +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> +#ifndef WIN32 +#include <signal.h> +#else +#endif +#include "vk_loader_platform.h" +#include "debug_utils.h" +#include "../vk_layer.h" +#include "vk_object_types.h" + +// VK_EXT_debug_utils related items + +VkResult util_CreateDebugUtilsMessenger(struct loader_instance *inst, const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDebugUtilsMessengerEXT messenger) { + VkLayerDbgFunctionNode *pNewDbgFuncNode = NULL; + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(VkLayerDbgFunctionNode), + sizeof(int *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } else { +#endif + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)loader_instance_heap_alloc(inst, sizeof(VkLayerDbgFunctionNode), + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + if (!pNewDbgFuncNode) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + memset(pNewDbgFuncNode, 0, sizeof(VkLayerDbgFunctionNode)); + + pNewDbgFuncNode->is_messenger = true; + pNewDbgFuncNode->messenger.messenger = messenger; + pNewDbgFuncNode->messenger.pfnUserCallback = pCreateInfo->pfnUserCallback; + pNewDbgFuncNode->messenger.messageSeverity = pCreateInfo->messageSeverity; + pNewDbgFuncNode->messenger.messageType = pCreateInfo->messageType; + pNewDbgFuncNode->pUserData = pCreateInfo->pUserData; + pNewDbgFuncNode->pNext = inst->DbgFunctionHead; + inst->DbgFunctionHead = pNewDbgFuncNode; + + return VK_SUCCESS; +} + +static VKAPI_ATTR VkResult VKAPI_CALL +debug_utils_CreateDebugUtilsMessengerEXT(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDebugUtilsMessengerEXT *pMessenger) { + struct loader_instance *inst = loader_get_instance(instance); + loader_platform_thread_lock_mutex(&loader_lock); + VkResult result = inst->disp->layer_inst_disp.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger); + loader_platform_thread_unlock_mutex(&loader_lock); + return result; +} + +VkBool32 util_SubmitDebugUtilsMessageEXT(const struct loader_instance *inst, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const
VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) { + VkBool32 bail = false; + + if (NULL != pCallbackData) { + VkLayerDbgFunctionNode *pTrav = inst->DbgFunctionHead; + VkDebugReportObjectTypeEXT object_type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT; + VkDebugReportFlagsEXT object_flags = 0; + uint64_t object_handle = 0; + + debug_utils_AnnotFlagsToReportFlags(messageSeverity, messageTypes, &object_flags); + if (0 < pCallbackData->objectCount) { + debug_utils_AnnotObjectToDebugReportObject(pCallbackData->pObjects, &object_type, &object_handle); + } + + while (pTrav) { + if (pTrav->is_messenger && (pTrav->messenger.messageSeverity & messageSeverity) && + (pTrav->messenger.messageType & messageTypes)) { + if (pTrav->messenger.pfnUserCallback(messageSeverity, messageTypes, pCallbackData, pTrav->pUserData)) { + bail = true; + } + } + if (!pTrav->is_messenger && pTrav->report.msgFlags & object_flags) { + if (pTrav->report.pfnMsgCallback(object_flags, object_type, object_handle, 0, pCallbackData->messageIdNumber, + pCallbackData->pMessageIdName, pCallbackData->pMessage, pTrav->pUserData)) { + bail = true; + } + } + + pTrav = pTrav->pNext; + } + } + + return bail; +} + +void util_DestroyDebugUtilsMessenger(struct loader_instance *inst, VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks *pAllocator) { + VkLayerDbgFunctionNode *pTrav = inst->DbgFunctionHead; + VkLayerDbgFunctionNode *pPrev = pTrav; + + while (pTrav) { + if (pTrav->is_messenger && pTrav->messenger.messenger == messenger) { + pPrev->pNext = pTrav->pNext; + if (inst->DbgFunctionHead == pTrav) inst->DbgFunctionHead = pTrav->pNext; +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pAllocator->pfnFree(pAllocator->pUserData, pTrav); + } else { +#endif + loader_instance_heap_free(inst, pTrav); + } + break; + } + pPrev = pTrav; + pTrav = pTrav->pNext; + } +} + +// This utility (used by vkInstanceCreateInfo(), looks at a pNext chain. It +// counts any VkDebugUtilsMessengerCreateInfoEXT structs that it finds. It +// then allocates array that can hold that many structs, as well as that many +// VkDebugUtilsMessengerEXT handles. It then copies each +// VkDebugUtilsMessengerCreateInfoEXT, and initializes each handle. 
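+// Note: each temporary handle returned in *messengers is simply the address of its copied create-info struct; + // both arrays are released again through util_FreeDebugUtilsMessengerCreateInfos.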
+VkResult util_CopyDebugUtilsMessengerCreateInfos(const void *pChain, const VkAllocationCallbacks *pAllocator, + uint32_t *num_messengers, VkDebugUtilsMessengerCreateInfoEXT **infos, + VkDebugUtilsMessengerEXT **messengers) { + uint32_t n = *num_messengers = 0; + VkDebugUtilsMessengerCreateInfoEXT *pInfos = NULL; + VkDebugUtilsMessengerEXT *pMessengers = NULL; + + const void *pNext = pChain; + while (pNext) { + // 1st, count the number VkDebugUtilsMessengerCreateInfoEXT: + if (((VkDebugUtilsMessengerCreateInfoEXT *)pNext)->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT) { + n++; + } + pNext = (void *)((VkDebugUtilsMessengerCreateInfoEXT *)pNext)->pNext; + } + if (n == 0) { + return VK_SUCCESS; + } + +// 2nd, allocate memory for each VkDebugUtilsMessengerCreateInfoEXT: +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pInfos = *infos = ((VkDebugUtilsMessengerCreateInfoEXT *)pAllocator->pfnAllocation( + pAllocator->pUserData, n * sizeof(VkDebugUtilsMessengerCreateInfoEXT), sizeof(void *), + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)); + } else { +#endif + pInfos = *infos = ((VkDebugUtilsMessengerCreateInfoEXT *)malloc(n * sizeof(VkDebugUtilsMessengerCreateInfoEXT))); + } + if (!pInfos) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } +// 3rd, allocate memory for a unique handle for each callback: +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pMessengers = *messengers = ((VkDebugUtilsMessengerEXT *)pAllocator->pfnAllocation( + pAllocator->pUserData, n * sizeof(VkDebugUtilsMessengerEXT), sizeof(void *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)); + if (NULL == pMessengers) { + pAllocator->pfnFree(pAllocator->pUserData, pInfos); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } else { +#endif + pMessengers = *messengers = ((VkDebugUtilsMessengerEXT *)malloc(n * sizeof(VkDebugUtilsMessengerEXT))); + if (NULL == pMessengers) { + free(pInfos); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } + // 4th, copy each VkDebugUtilsMessengerCreateInfoEXT for use by + // vkDestroyInstance, and assign a unique handle to each messenger (just + // use the address of the copied VkDebugUtilsMessengerCreateInfoEXT): + pNext = pChain; + while (pNext) { + if (((VkInstanceCreateInfo *)pNext)->sType == VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT) { + memcpy(pInfos, pNext, sizeof(VkDebugUtilsMessengerCreateInfoEXT)); + *pMessengers++ = (VkDebugUtilsMessengerEXT)(uintptr_t)pInfos++; + } + pNext = (void *)((VkInstanceCreateInfo *)pNext)->pNext; + } + + *num_messengers = n; + return VK_SUCCESS; +} + +void util_FreeDebugUtilsMessengerCreateInfos(const VkAllocationCallbacks *pAllocator, VkDebugUtilsMessengerCreateInfoEXT *infos, + VkDebugUtilsMessengerEXT *messengers) { +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pAllocator->pfnFree(pAllocator->pUserData, infos); + pAllocator->pfnFree(pAllocator->pUserData, messengers); + } else { +#endif + free(infos); + free(messengers); + } +} + +VkResult util_CreateDebugUtilsMessengers(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, + uint32_t num_messengers, VkDebugUtilsMessengerCreateInfoEXT *infos, + VkDebugUtilsMessengerEXT *messengers) { + VkResult rtn = VK_SUCCESS; + for (uint32_t i = 0; i < num_messengers; i++) { + rtn = util_CreateDebugUtilsMessenger(inst, &infos[i], pAllocator, messengers[i]); + if (rtn != VK_SUCCESS) { + for (uint32_t j = 0; j < i; j++) { + util_DestroyDebugUtilsMessenger(inst, messengers[j], pAllocator); + } + return rtn; + } + 
} + return rtn; +} + +void util_DestroyDebugUtilsMessengers(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, + uint32_t num_messengers, VkDebugUtilsMessengerEXT *messengers) { + for (uint32_t i = 0; i < num_messengers; i++) { + util_DestroyDebugUtilsMessenger(inst, messengers[i], pAllocator); + } +} + +static VKAPI_ATTR void VKAPI_CALL debug_utils_SubmitDebugUtilsMessageEXT( + VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) { + struct loader_instance *inst = loader_get_instance(instance); + + inst->disp->layer_inst_disp.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData); +} + +static VKAPI_ATTR void VKAPI_CALL debug_utils_DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks *pAllocator) { + struct loader_instance *inst = loader_get_instance(instance); + loader_platform_thread_lock_mutex(&loader_lock); + + inst->disp->layer_inst_disp.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator); + + loader_platform_thread_unlock_mutex(&loader_lock); +} + +// This is the instance chain terminator function for CreateDebugUtilsMessenger +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDebugUtilsMessengerEXT(VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDebugUtilsMessengerEXT *pMessenger) { + VkDebugUtilsMessengerEXT *icd_info = NULL; + const struct loader_icd_term *icd_term; + struct loader_instance *inst = (struct loader_instance *)instance; + VkResult res = VK_SUCCESS; + uint32_t storage_idx; + VkLayerDbgFunctionNode *pNewDbgFuncNode = NULL; + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + icd_info = ((VkDebugUtilsMessengerEXT *)pAllocator->pfnAllocation(pAllocator->pUserData, + inst->total_icd_count * sizeof(VkDebugUtilsMessengerEXT), + sizeof(void *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)); + if (icd_info) { + memset(icd_info, 0, inst->total_icd_count * sizeof(VkDebugUtilsMessengerEXT)); + } + } else { +#endif + icd_info = calloc(sizeof(VkDebugUtilsMessengerEXT), inst->total_icd_count); + } + if (!icd_info) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + storage_idx = 0; + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (!icd_term->dispatch.CreateDebugUtilsMessengerEXT) { + continue; + } + + res = icd_term->dispatch.CreateDebugUtilsMessengerEXT(icd_term->instance, pCreateInfo, pAllocator, &icd_info[storage_idx]); + + if (res != VK_SUCCESS) { + goto out; + } + storage_idx++; + } + +// Setup the debug report callback in the terminator since a layer may want +// to grab the information itself (RenderDoc) and then return back to the +// user callback a sub-set of the messages. 
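+// In addition to the per-ICD messenger handles created above, the loader records this messenger in its own + // DbgFunctionHead list below so that util_SubmitDebugUtilsMessageEXT can reach the application's callback.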
+#if (DEBUG_DISABLE_APP_ALLOCATORS == 0) + if (pAllocator != NULL) { + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(VkLayerDbgFunctionNode), + sizeof(int *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } else { +#else + { +#endif + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)loader_instance_heap_alloc(inst, sizeof(VkLayerDbgFunctionNode), + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + if (!pNewDbgFuncNode) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(pNewDbgFuncNode, 0, sizeof(VkLayerDbgFunctionNode)); + + pNewDbgFuncNode->is_messenger = true; + pNewDbgFuncNode->messenger.pfnUserCallback = pCreateInfo->pfnUserCallback; + pNewDbgFuncNode->messenger.messageSeverity = pCreateInfo->messageSeverity; + pNewDbgFuncNode->messenger.messageType = pCreateInfo->messageType; + pNewDbgFuncNode->pUserData = pCreateInfo->pUserData; + pNewDbgFuncNode->pNext = inst->DbgFunctionHead; + inst->DbgFunctionHead = pNewDbgFuncNode; + + *(VkDebugUtilsMessengerEXT **)pMessenger = icd_info; + pNewDbgFuncNode->messenger.messenger = *pMessenger; + +out: + + // Roll back on errors + if (VK_SUCCESS != res) { + storage_idx = 0; + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (NULL == icd_term->dispatch.DestroyDebugUtilsMessengerEXT) { + continue; + } + + if (icd_info && icd_info[storage_idx]) { + icd_term->dispatch.DestroyDebugUtilsMessengerEXT(icd_term->instance, icd_info[storage_idx], pAllocator); + } + storage_idx++; + } + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + if (NULL != pNewDbgFuncNode) { + pAllocator->pfnFree(pAllocator->pUserData, pNewDbgFuncNode); + } + if (NULL != icd_info) { + pAllocator->pfnFree(pAllocator->pUserData, icd_info); + } + } else { +#endif + if (NULL != pNewDbgFuncNode) { + free(pNewDbgFuncNode); + } + if (NULL != icd_info) { + free(icd_info); + } + } + } + + return res; +} + +// This is the instance chain terminator function for DestroyDebugUtilsMessenger +VKAPI_ATTR void VKAPI_CALL terminator_DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks *pAllocator) { + uint32_t storage_idx; + VkDebugUtilsMessengerEXT *icd_info; + const struct loader_icd_term *icd_term; + + struct loader_instance *inst = (struct loader_instance *)instance; + icd_info = *(VkDebugUtilsMessengerEXT **)&messenger; + storage_idx = 0; + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (NULL == icd_term->dispatch.DestroyDebugUtilsMessengerEXT) { + continue; + } + + if (icd_info[storage_idx]) { + icd_term->dispatch.DestroyDebugUtilsMessengerEXT(icd_term->instance, icd_info[storage_idx], pAllocator); + } + storage_idx++; + } + + util_DestroyDebugUtilsMessenger(inst, messenger, pAllocator); + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pAllocator->pfnFree(pAllocator->pUserData, icd_info); + } else { +#endif + free(icd_info); + } +} + +// This is the instance chain terminator function for SubmitDebugUtilsMessageEXT +VKAPI_ATTR void VKAPI_CALL terminator_SubmitDebugUtilsMessageEXT(VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) { + loader_platform_thread_lock_mutex(&loader_lock); + // NOTE: Just make the callback ourselves because there could be one or more ICDs that support this extension + // and each one will trigger the callback 
to the user. This would result in multiple callback triggers + // per message. Instead, if we get a messaged up to here, then just trigger the message ourselves and + // return. This would still allow the ICDs to trigger their own messages, but won't get any external ones. + struct loader_instance *inst = (struct loader_instance *)instance; + util_SubmitDebugUtilsMessageEXT(inst, messageSeverity, messageTypes, pCallbackData); + loader_platform_thread_unlock_mutex(&loader_lock); +} + +// VK_EXT_debug_report related items + +VkResult util_CreateDebugReportCallback(struct loader_instance *inst, VkDebugReportCallbackCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT callback) { + VkLayerDbgFunctionNode *pNewDbgFuncNode = NULL; + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(VkLayerDbgFunctionNode), + sizeof(int *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } else { +#endif + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)loader_instance_heap_alloc(inst, sizeof(VkLayerDbgFunctionNode), + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + if (!pNewDbgFuncNode) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + memset(pNewDbgFuncNode, 0, sizeof(VkLayerDbgFunctionNode)); + + pNewDbgFuncNode->is_messenger = false; + pNewDbgFuncNode->report.msgCallback = callback; + pNewDbgFuncNode->report.pfnMsgCallback = pCreateInfo->pfnCallback; + pNewDbgFuncNode->report.msgFlags = pCreateInfo->flags; + pNewDbgFuncNode->pUserData = pCreateInfo->pUserData; + pNewDbgFuncNode->pNext = inst->DbgFunctionHead; + inst->DbgFunctionHead = pNewDbgFuncNode; + + return VK_SUCCESS; +} + +static VKAPI_ATTR VkResult VKAPI_CALL +debug_utils_CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pCallback) { + struct loader_instance *inst = loader_get_instance(instance); + loader_platform_thread_lock_mutex(&loader_lock); + VkResult result = inst->disp->layer_inst_disp.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback); + loader_platform_thread_unlock_mutex(&loader_lock); + return result; +} + +// Utility function to handle reporting +VkBool32 util_DebugReportMessage(const struct loader_instance *inst, VkFlags msgFlags, VkDebugReportObjectTypeEXT objectType, + uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { + VkBool32 bail = false; + VkLayerDbgFunctionNode *pTrav = inst->DbgFunctionHead; + VkDebugUtilsMessageSeverityFlagBitsEXT severity; + VkDebugUtilsMessageTypeFlagsEXT types; + VkDebugUtilsMessengerCallbackDataEXT callback_data; + VkDebugUtilsObjectNameInfoEXT object_name; + + debug_utils_ReportFlagsToAnnotFlags(msgFlags, false, &severity, &types); + debug_utils_ReportObjectToAnnotObject(objectType, srcObject, &object_name); + + callback_data.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT; + callback_data.pNext = NULL; + callback_data.flags = 0; + callback_data.pMessageIdName = pLayerPrefix; + callback_data.messageIdNumber = msgCode; + callback_data.pMessage = pMsg; + callback_data.cmdBufLabelCount = 0; + callback_data.pCmdBufLabels = NULL; + callback_data.queueLabelCount = 0; + callback_data.pQueueLabels = NULL; + callback_data.objectCount = 1; + callback_data.pObjects = &object_name; + + while (pTrav) { + if (!pTrav->is_messenger && pTrav->report.msgFlags & 
msgFlags) { + if (pTrav->report.pfnMsgCallback(msgFlags, objectType, srcObject, location, msgCode, pLayerPrefix, pMsg, + pTrav->pUserData)) { + bail = true; + } + } + if (pTrav->is_messenger && (pTrav->messenger.messageSeverity & severity) && (pTrav->messenger.messageType & types)) { + if (pTrav->messenger.pfnUserCallback(severity, types, &callback_data, pTrav->pUserData)) { + bail = true; + } + } + + pTrav = pTrav->pNext; + } + + return bail; +} + +void util_DestroyDebugReportCallback(struct loader_instance *inst, VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks *pAllocator) { + VkLayerDbgFunctionNode *pTrav = inst->DbgFunctionHead; + VkLayerDbgFunctionNode *pPrev = pTrav; + + while (pTrav) { + if (!pTrav->is_messenger && pTrav->report.msgCallback == callback) { + pPrev->pNext = pTrav->pNext; + if (inst->DbgFunctionHead == pTrav) inst->DbgFunctionHead = pTrav->pNext; +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pAllocator->pfnFree(pAllocator->pUserData, pTrav); + } else { +#endif + loader_instance_heap_free(inst, pTrav); + } + break; + } + pPrev = pTrav; + pTrav = pTrav->pNext; + } +} + +// This utility (used by vkInstanceCreateInfo(), looks at a pNext chain. It +// counts any VkDebugReportCallbackCreateInfoEXT structs that it finds. It +// then allocates array that can hold that many structs, as well as that many +// VkDebugReportCallbackEXT handles. It then copies each +// VkDebugReportCallbackCreateInfoEXT, and initializes each handle. +VkResult util_CopyDebugReportCreateInfos(const void *pChain, const VkAllocationCallbacks *pAllocator, uint32_t *num_callbacks, + VkDebugReportCallbackCreateInfoEXT **infos, VkDebugReportCallbackEXT **callbacks) { + uint32_t n = *num_callbacks = 0; + VkDebugReportCallbackCreateInfoEXT *pInfos = NULL; + VkDebugReportCallbackEXT *pCallbacks = NULL; + + const void *pNext = pChain; + while (pNext) { + // 1st, count the number VkDebugReportCallbackCreateInfoEXT: + if (((VkDebugReportCallbackCreateInfoEXT *)pNext)->sType == VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT) { + n++; + } + pNext = (void *)((VkDebugReportCallbackCreateInfoEXT *)pNext)->pNext; + } + if (n == 0) { + return VK_SUCCESS; + } + +// 2nd, allocate memory for each VkDebugReportCallbackCreateInfoEXT: +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pInfos = *infos = ((VkDebugReportCallbackCreateInfoEXT *)pAllocator->pfnAllocation( + pAllocator->pUserData, n * sizeof(VkDebugReportCallbackCreateInfoEXT), sizeof(void *), + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)); + } else { +#endif + pInfos = *infos = ((VkDebugReportCallbackCreateInfoEXT *)malloc(n * sizeof(VkDebugReportCallbackCreateInfoEXT))); + } + if (!pInfos) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } +// 3rd, allocate memory for a unique handle for each callback: +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pCallbacks = *callbacks = ((VkDebugReportCallbackEXT *)pAllocator->pfnAllocation( + pAllocator->pUserData, n * sizeof(VkDebugReportCallbackEXT), sizeof(void *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)); + if (!pCallbacks) { + pAllocator->pfnFree(pAllocator->pUserData, pInfos); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } else { +#endif + pCallbacks = *callbacks = ((VkDebugReportCallbackEXT *)malloc(n * sizeof(VkDebugReportCallbackEXT))); + if (!pCallbacks) { + free(pInfos); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } + // 4th, copy each VkDebugReportCallbackCreateInfoEXT for use by + // vkDestroyInstance, and assign a 
unique handle to each callback (just + // use the address of the copied VkDebugReportCallbackCreateInfoEXT): + pNext = pChain; + while (pNext) { + if (((VkInstanceCreateInfo *)pNext)->sType == VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT) { + memcpy(pInfos, pNext, sizeof(VkDebugReportCallbackCreateInfoEXT)); + *pCallbacks++ = (VkDebugReportCallbackEXT)(uintptr_t)pInfos++; + } + pNext = (void *)((VkInstanceCreateInfo *)pNext)->pNext; + } + + *num_callbacks = n; + return VK_SUCCESS; +} + +void util_FreeDebugReportCreateInfos(const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackCreateInfoEXT *infos, + VkDebugReportCallbackEXT *callbacks) { +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pAllocator->pfnFree(pAllocator->pUserData, infos); + pAllocator->pfnFree(pAllocator->pUserData, callbacks); + } else { +#endif + free(infos); + free(callbacks); + } +} + +VkResult util_CreateDebugReportCallbacks(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, + uint32_t num_callbacks, VkDebugReportCallbackCreateInfoEXT *infos, + VkDebugReportCallbackEXT *callbacks) { + VkResult rtn = VK_SUCCESS; + for (uint32_t i = 0; i < num_callbacks; i++) { + rtn = util_CreateDebugReportCallback(inst, &infos[i], pAllocator, callbacks[i]); + if (rtn != VK_SUCCESS) { + for (uint32_t j = 0; j < i; j++) { + util_DestroyDebugReportCallback(inst, callbacks[j], pAllocator); + } + return rtn; + } + } + return rtn; +} + +void util_DestroyDebugReportCallbacks(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, uint32_t num_callbacks, + VkDebugReportCallbackEXT *callbacks) { + for (uint32_t i = 0; i < num_callbacks; i++) { + util_DestroyDebugReportCallback(inst, callbacks[i], pAllocator); + } +} + +static VKAPI_ATTR void VKAPI_CALL debug_utils_DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks *pAllocator) { + struct loader_instance *inst = loader_get_instance(instance); + loader_platform_thread_lock_mutex(&loader_lock); + + inst->disp->layer_inst_disp.DestroyDebugReportCallbackEXT(instance, callback, pAllocator); + + loader_platform_thread_unlock_mutex(&loader_lock); +} + +static VKAPI_ATTR void VKAPI_CALL debug_utils_DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objType, uint64_t object, + size_t location, int32_t msgCode, const char *pLayerPrefix, + const char *pMsg) { + struct loader_instance *inst = loader_get_instance(instance); + + inst->disp->layer_inst_disp.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg); +} + +// This is the instance chain terminator function +// for CreateDebugReportCallback +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDebugReportCallbackEXT(VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDebugReportCallbackEXT *pCallback) { + VkDebugReportCallbackEXT *icd_info = NULL; + const struct loader_icd_term *icd_term; + struct loader_instance *inst = (struct loader_instance *)instance; + VkResult res = VK_SUCCESS; + uint32_t storage_idx; + VkLayerDbgFunctionNode *pNewDbgFuncNode = NULL; + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + icd_info = ((VkDebugReportCallbackEXT *)pAllocator->pfnAllocation(pAllocator->pUserData, + inst->total_icd_count * sizeof(VkDebugReportCallbackEXT), + sizeof(void *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)); + if (icd_info) { 
+ memset(icd_info, 0, inst->total_icd_count * sizeof(VkDebugReportCallbackEXT)); + } + } else { +#endif + icd_info = calloc(sizeof(VkDebugReportCallbackEXT), inst->total_icd_count); + } + if (!icd_info) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + storage_idx = 0; + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (!icd_term->dispatch.CreateDebugReportCallbackEXT) { + continue; + } + + res = icd_term->dispatch.CreateDebugReportCallbackEXT(icd_term->instance, pCreateInfo, pAllocator, &icd_info[storage_idx]); + + if (res != VK_SUCCESS) { + goto out; + } + storage_idx++; + } + +// Setup the debug report callback in the terminator since a layer may want +// to grab the information itself (RenderDoc) and then return back to the +// user callback a sub-set of the messages. +#if (DEBUG_DISABLE_APP_ALLOCATORS == 0) + if (pAllocator != NULL) { + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(VkLayerDbgFunctionNode), + sizeof(int *), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } else { +#else + { +#endif + pNewDbgFuncNode = (VkLayerDbgFunctionNode *)loader_instance_heap_alloc(inst, sizeof(VkLayerDbgFunctionNode), + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + if (!pNewDbgFuncNode) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(pNewDbgFuncNode, 0, sizeof(VkLayerDbgFunctionNode)); + + pNewDbgFuncNode->is_messenger = false; + pNewDbgFuncNode->report.pfnMsgCallback = pCreateInfo->pfnCallback; + pNewDbgFuncNode->report.msgFlags = pCreateInfo->flags; + pNewDbgFuncNode->pUserData = pCreateInfo->pUserData; + pNewDbgFuncNode->pNext = inst->DbgFunctionHead; + inst->DbgFunctionHead = pNewDbgFuncNode; + + *(VkDebugReportCallbackEXT **)pCallback = icd_info; + pNewDbgFuncNode->report.msgCallback = *pCallback; + +out: + + // Roll back on errors + if (VK_SUCCESS != res) { + storage_idx = 0; + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (NULL == icd_term->dispatch.DestroyDebugReportCallbackEXT) { + continue; + } + + if (icd_info && icd_info[storage_idx]) { + icd_term->dispatch.DestroyDebugReportCallbackEXT(icd_term->instance, icd_info[storage_idx], pAllocator); + } + storage_idx++; + } + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + if (NULL != pNewDbgFuncNode) { + pAllocator->pfnFree(pAllocator->pUserData, pNewDbgFuncNode); + } + if (NULL != icd_info) { + pAllocator->pfnFree(pAllocator->pUserData, icd_info); + } + } else { +#endif + if (NULL != pNewDbgFuncNode) { + free(pNewDbgFuncNode); + } + if (NULL != icd_info) { + free(icd_info); + } + } + } + + return res; +} + +// This is the instance chain terminator function for DestroyDebugReportCallback +VKAPI_ATTR void VKAPI_CALL terminator_DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks *pAllocator) { + uint32_t storage_idx; + VkDebugReportCallbackEXT *icd_info; + const struct loader_icd_term *icd_term; + + struct loader_instance *inst = (struct loader_instance *)instance; + icd_info = *(VkDebugReportCallbackEXT **)&callback; + storage_idx = 0; + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (NULL == icd_term->dispatch.DestroyDebugReportCallbackEXT) { + continue; + } + + if (icd_info[storage_idx]) { + icd_term->dispatch.DestroyDebugReportCallbackEXT(icd_term->instance, icd_info[storage_idx], pAllocator); + } + storage_idx++; + } + + util_DestroyDebugReportCallback(inst, callback, pAllocator); + +#if 
(DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator != NULL) { + pAllocator->pfnFree(pAllocator->pUserData, icd_info); + } else { +#endif + free(icd_info); + } +} + +// This is the instance chain terminator function for DebugReportMessage +VKAPI_ATTR void VKAPI_CALL terminator_DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location, + int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { + const struct loader_icd_term *icd_term; + + struct loader_instance *inst = (struct loader_instance *)instance; + + loader_platform_thread_lock_mutex(&loader_lock); + for (icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + if (icd_term->dispatch.DebugReportMessageEXT != NULL) { + icd_term->dispatch.DebugReportMessageEXT(icd_term->instance, flags, objType, object, location, msgCode, pLayerPrefix, + pMsg); + } + } + + // Now that all ICDs have seen the message, call the necessary callbacks. Ignoring "bail" return value + // as there is nothing to bail from at this point. + + util_DebugReportMessage(inst, flags, objType, object, location, msgCode, pLayerPrefix, pMsg); + + loader_platform_thread_unlock_mutex(&loader_lock); +} + +// General utilities + +static const VkExtensionProperties debug_utils_extension_info[] = { + {VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}, + {VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}, +}; + +void debug_utils_AddInstanceExtensions(const struct loader_instance *inst, struct loader_extension_list *ext_list) { + loader_add_to_ext_list(inst, ext_list, sizeof(debug_utils_extension_info) / sizeof(VkExtensionProperties), + debug_utils_extension_info); +} + +void debug_utils_CreateInstance(struct loader_instance *ptr_instance, const VkInstanceCreateInfo *pCreateInfo) { + ptr_instance->enabled_known_extensions.ext_debug_report = 0; + ptr_instance->enabled_known_extensions.ext_debug_utils = 0; + + for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME) == 0) { + ptr_instance->enabled_known_extensions.ext_debug_report = 1; + } else if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_UTILS_EXTENSION_NAME) == 0) { + ptr_instance->enabled_known_extensions.ext_debug_utils = 1; + } + } +} + +bool debug_utils_InstanceGpa(struct loader_instance *ptr_instance, const char *name, void **addr) { + bool ret_type = false; + + *addr = NULL; + + if (!strcmp("vkCreateDebugReportCallbackEXT", name)) { + *addr = ptr_instance->enabled_known_extensions.ext_debug_report == 1 ? (void *)debug_utils_CreateDebugReportCallbackEXT : NULL; + ret_type = true; + } else if (!strcmp("vkDestroyDebugReportCallbackEXT", name)) { + *addr = ptr_instance->enabled_known_extensions.ext_debug_report == 1 ? (void *)debug_utils_DestroyDebugReportCallbackEXT : NULL; + ret_type = true; + } else if (!strcmp("vkDebugReportMessageEXT", name)) { + *addr = ptr_instance->enabled_known_extensions.ext_debug_report == 1 ? (void *)debug_utils_DebugReportMessageEXT : NULL; + return true; + } + if (!strcmp("vkCreateDebugUtilsMessengerEXT", name)) { + *addr = ptr_instance->enabled_known_extensions.ext_debug_utils == 1 ? (void *)debug_utils_CreateDebugUtilsMessengerEXT : NULL; + ret_type = true; + } else if (!strcmp("vkDestroyDebugUtilsMessengerEXT", name)) { + *addr = ptr_instance->enabled_known_extensions.ext_debug_utils == 1 ? 
(void *)debug_utils_DestroyDebugUtilsMessengerEXT : NULL; + ret_type = true; + } else if (!strcmp("vkSubmitDebugUtilsMessageEXT", name)) { + *addr = ptr_instance->enabled_known_extensions.ext_debug_utils == 1 ? (void *)debug_utils_SubmitDebugUtilsMessageEXT : NULL; + ret_type = true; + } + + return ret_type; +} + +bool debug_utils_ReportFlagsToAnnotFlags(VkDebugReportFlagsEXT dr_flags, bool default_flag_is_spec, + VkDebugUtilsMessageSeverityFlagBitsEXT *da_severity, + VkDebugUtilsMessageTypeFlagsEXT *da_type) { + bool type_set = false; + if (NULL == da_severity || NULL == da_type) { + return false; + } + *da_type = 0; + *da_severity = 0; + + if ((dr_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT) != 0) { + *da_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT; + *da_type |= VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT; + type_set = true; + } else if ((dr_flags & (VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT)) != 0) { + *da_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT; + } else if ((dr_flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) != 0) { + *da_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + } else if ((dr_flags & VK_DEBUG_REPORT_DEBUG_BIT_EXT) != 0) { + *da_severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT; + *da_type |= VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT; + type_set = true; + } + + if ((dr_flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) != 0) { + *da_type |= VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + } else if (!type_set) { + if (default_flag_is_spec) { + *da_type |= VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT; + } else { + *da_type |= VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT; + } + } + + return true; +} + +bool debug_utils_AnnotFlagsToReportFlags(VkDebugUtilsMessageSeverityFlagBitsEXT da_severity, + VkDebugUtilsMessageTypeFlagsEXT da_type, VkDebugReportFlagsEXT *dr_flags) { + if (NULL == dr_flags) { + return false; + } + + *dr_flags = 0; + + if ((da_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) != 0) { + *dr_flags |= VK_DEBUG_REPORT_ERROR_BIT_EXT; + } else if ((da_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) != 0) { + if ((da_type & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) != 0) { + *dr_flags |= VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; + } else { + *dr_flags |= VK_DEBUG_REPORT_WARNING_BIT_EXT; + } + } else if ((da_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) != 0) { + *dr_flags |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT; + } else if ((da_severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) != 0) { + *dr_flags |= VK_DEBUG_REPORT_DEBUG_BIT_EXT; + } + + return true; +} + +bool debug_utils_ReportObjectToAnnotObject(VkDebugReportObjectTypeEXT dr_object_type, uint64_t object_handle, + VkDebugUtilsObjectNameInfoEXT *da_object_name_info) { + if (NULL == da_object_name_info) { + return false; + } + da_object_name_info->sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + da_object_name_info->pNext = NULL; + da_object_name_info->objectHandle = (uint64_t)(uintptr_t)object_handle; + da_object_name_info->pObjectName = NULL; + da_object_name_info->objectType = convertDebugReportObjectToCoreObject(dr_object_type); + return true; +} + +bool debug_utils_AnnotObjectToDebugReportObject(const VkDebugUtilsObjectNameInfoEXT *da_object_name_info, + VkDebugReportObjectTypeEXT *dr_object_type, uint64_t *dr_object_handle) { + if (NULL == da_object_name_info || NULL == dr_object_type || NULL == dr_object_handle) { + return false; + } + *dr_object_type = 
convertCoreObjectToDebugReportObject(da_object_name_info->objectType); + *dr_object_handle = da_object_name_info->objectHandle; + return true; +} diff --git a/src/third_party/vulkan/loader/debug_utils.h b/src/third_party/vulkan/loader/debug_utils.h new file mode 100644 index 0000000..c33a6fc --- /dev/null +++ b/src/third_party/vulkan/loader/debug_utils.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * Copyright (C) 2015-2016 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Courtney Goeltzenleuchter + * Author: Jon Ashburn + * Author: Mark Young + * + */ + +#include "vk_loader_platform.h" +#include "loader.h" + +// General utilities + +void debug_utils_AddInstanceExtensions(const struct loader_instance *inst, struct loader_extension_list *ext_list); +void debug_utils_CreateInstance(struct loader_instance *ptr_instance, const VkInstanceCreateInfo *pCreateInfo); +bool debug_utils_InstanceGpa(struct loader_instance *ptr_instance, const char *name, void **addr); +bool debug_utils_ReportFlagsToAnnotFlags(VkDebugReportFlagsEXT dr_flags, bool default_flag_is_spec, + VkDebugUtilsMessageSeverityFlagBitsEXT *da_severity, + VkDebugUtilsMessageTypeFlagsEXT *da_type); +bool debug_utils_AnnotFlagsToReportFlags(VkDebugUtilsMessageSeverityFlagBitsEXT da_severity, + VkDebugUtilsMessageTypeFlagsEXT da_type, VkDebugReportFlagsEXT *dr_flags); +bool debug_utils_ReportObjectToAnnotObject(VkDebugReportObjectTypeEXT dr_object_type, uint64_t object_handle, + VkDebugUtilsObjectNameInfoEXT *da_object_name_info); +bool debug_utils_AnnotObjectToDebugReportObject(const VkDebugUtilsObjectNameInfoEXT *da_object_name_info, + VkDebugReportObjectTypeEXT *dr_object_type, uint64_t *dr_object_handle); + +// VK_EXT_debug_utils related items + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDebugUtilsMessengerEXT(VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDebugUtilsMessengerEXT *pMessenger); +VKAPI_ATTR void VKAPI_CALL terminator_DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks *pAllocator); +VKAPI_ATTR void VKAPI_CALL terminator_SubmitDebugUtilsMessageEXT(VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData); +VkResult util_CreateDebugUtilsMessenger(struct loader_instance *inst, const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDebugUtilsMessengerEXT messenger); +VkResult util_CreateDebugUtilsMessengers(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, + uint32_t num_messengers, VkDebugUtilsMessengerCreateInfoEXT *infos, + VkDebugUtilsMessengerEXT *messengers); +VkBool32 util_SubmitDebugUtilsMessageEXT(const struct loader_instance 
*inst, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData); +VkResult util_CopyDebugUtilsMessengerCreateInfos(const void *pChain, const VkAllocationCallbacks *pAllocator, + uint32_t *num_messengers, VkDebugUtilsMessengerCreateInfoEXT **infos, + VkDebugUtilsMessengerEXT **messengers); +void util_DestroyDebugUtilsMessenger(struct loader_instance *inst, VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks *pAllocator); +void util_DestroyDebugUtilsMessengers(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, + uint32_t num_messengers, VkDebugUtilsMessengerEXT *messengers); +void util_FreeDebugUtilsMessengerCreateInfos(const VkAllocationCallbacks *pAllocator, VkDebugUtilsMessengerCreateInfoEXT *infos, + VkDebugUtilsMessengerEXT *messengers); + +// VK_EXT_debug_report related items + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDebugReportCallbackEXT(VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDebugReportCallbackEXT *pCallback); + +VKAPI_ATTR void VKAPI_CALL terminator_DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks *pAllocator); + +VKAPI_ATTR void VKAPI_CALL terminator_DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location, + int32_t msgCode, const char *pLayerPrefix, const char *pMsg); + +VkResult util_CreateDebugReportCallback(struct loader_instance *inst, VkDebugReportCallbackCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT callback); +VkResult util_CreateDebugReportCallbacks(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, + uint32_t num_callbacks, VkDebugReportCallbackCreateInfoEXT *infos, + VkDebugReportCallbackEXT *callbacks); +VkBool32 util_DebugReportMessage(const struct loader_instance *inst, VkFlags msgFlags, VkDebugReportObjectTypeEXT objectType, + uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg); +VkResult util_CopyDebugReportCreateInfos(const void *pChain, const VkAllocationCallbacks *pAllocator, uint32_t *num_callbacks, + VkDebugReportCallbackCreateInfoEXT **infos, VkDebugReportCallbackEXT **callbacks); +void util_DestroyDebugReportCallback(struct loader_instance *inst, VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks *pAllocator); +void util_DestroyDebugReportCallbacks(struct loader_instance *inst, const VkAllocationCallbacks *pAllocator, uint32_t num_callbacks, + VkDebugReportCallbackEXT *callbacks); +void util_FreeDebugReportCreateInfos(const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackCreateInfoEXT *infos, + VkDebugReportCallbackEXT *callbacks); diff --git a/src/third_party/vulkan/loader/dev_ext_trampoline.c b/src/third_party/vulkan/loader/dev_ext_trampoline.c new file mode 100644 index 0000000..55eee0c --- /dev/null +++ b/src/third_party/vulkan/loader/dev_ext_trampoline.c @@ -0,0 +1,538 @@ +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Jon Ashburn + * Author: Lenny Komow + */ + +#include "vk_loader_platform.h" +#include "loader.h" +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC optimize(3) // force gcc to use tail-calls +#endif + +// Clang-format does not understand macros. +// clang-format off + +VKAPI_ATTR void VKAPI_CALL vkdev_ext0(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext1(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext2(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext3(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext4(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext5(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext6(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext7(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext8(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext9(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext10(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext11(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext12(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext13(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext14(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext15(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext16(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext17(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext18(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext19(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext20(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext21(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext22(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext23(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext24(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext25(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext26(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext27(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext28(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext29(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext30(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext31(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext32(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext33(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext34(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext35(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext36(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext37(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext38(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext39(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext40(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext41(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext42(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext43(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext44(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext45(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext46(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext47(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext48(VkDevice device); 
+VKAPI_ATTR void VKAPI_CALL vkdev_ext49(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext50(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext51(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext52(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext53(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext54(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext55(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext56(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext57(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext58(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext59(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext60(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext61(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext62(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext63(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext64(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext65(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext66(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext67(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext68(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext69(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext70(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext71(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext72(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext73(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext74(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext75(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext76(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext77(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext78(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext79(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext80(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext81(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext82(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext83(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext84(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext85(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext86(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext87(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext88(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext89(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext90(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext91(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext92(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext93(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext94(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext95(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext96(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext97(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext98(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext99(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext100(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext101(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext102(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext103(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext104(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext105(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext106(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext107(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext108(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext109(VkDevice device); 
+VKAPI_ATTR void VKAPI_CALL vkdev_ext110(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext111(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext112(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext113(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext114(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext115(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext116(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext117(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext118(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext119(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext120(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext121(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext122(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext123(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext124(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext125(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext126(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext127(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext128(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext129(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext130(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext131(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext132(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext133(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext134(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext135(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext136(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext137(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext138(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext139(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext140(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext141(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext142(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext143(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext144(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext145(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext146(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext147(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext148(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext149(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext150(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext151(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext152(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext153(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext154(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext155(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext156(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext157(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext158(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext159(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext160(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext161(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext162(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext163(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext164(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext165(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext166(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext167(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext168(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext169(VkDevice device); +VKAPI_ATTR 
void VKAPI_CALL vkdev_ext170(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext171(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext172(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext173(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext174(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext175(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext176(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext177(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext178(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext179(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext180(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext181(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext182(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext183(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext184(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext185(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext186(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext187(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext188(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext189(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext190(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext191(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext192(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext193(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext194(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext195(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext196(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext197(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext198(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext199(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext200(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext201(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext202(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext203(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext204(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext205(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext206(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext207(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext208(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext209(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext210(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext211(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext212(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext213(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext214(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext215(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext216(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext217(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext218(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext219(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext220(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext221(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext222(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext223(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext224(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext225(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext226(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext227(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext228(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext229(VkDevice device); +VKAPI_ATTR void 
VKAPI_CALL vkdev_ext230(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext231(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext232(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext233(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext234(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext235(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext236(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext237(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext238(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext239(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext240(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext241(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext242(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext243(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext244(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext245(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext246(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext247(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext248(VkDevice device); +VKAPI_ATTR void VKAPI_CALL vkdev_ext249(VkDevice device); + +void *loader_get_dev_ext_trampoline(uint32_t index) { + switch (index) { +#define CASE_HANDLE(num) case num: return vkdev_ext##num + CASE_HANDLE(0); + CASE_HANDLE(1); + CASE_HANDLE(2); + CASE_HANDLE(3); + CASE_HANDLE(4); + CASE_HANDLE(5); + CASE_HANDLE(6); + CASE_HANDLE(7); + CASE_HANDLE(8); + CASE_HANDLE(9); + CASE_HANDLE(10); + CASE_HANDLE(11); + CASE_HANDLE(12); + CASE_HANDLE(13); + CASE_HANDLE(14); + CASE_HANDLE(15); + CASE_HANDLE(16); + CASE_HANDLE(17); + CASE_HANDLE(18); + CASE_HANDLE(19); + CASE_HANDLE(20); + CASE_HANDLE(21); + CASE_HANDLE(22); + CASE_HANDLE(23); + CASE_HANDLE(24); + CASE_HANDLE(25); + CASE_HANDLE(26); + CASE_HANDLE(27); + CASE_HANDLE(28); + CASE_HANDLE(29); + CASE_HANDLE(30); + CASE_HANDLE(31); + CASE_HANDLE(32); + CASE_HANDLE(33); + CASE_HANDLE(34); + CASE_HANDLE(35); + CASE_HANDLE(36); + CASE_HANDLE(37); + CASE_HANDLE(38); + CASE_HANDLE(39); + CASE_HANDLE(40); + CASE_HANDLE(41); + CASE_HANDLE(42); + CASE_HANDLE(43); + CASE_HANDLE(44); + CASE_HANDLE(45); + CASE_HANDLE(46); + CASE_HANDLE(47); + CASE_HANDLE(48); + CASE_HANDLE(49); + CASE_HANDLE(50); + CASE_HANDLE(51); + CASE_HANDLE(52); + CASE_HANDLE(53); + CASE_HANDLE(54); + CASE_HANDLE(55); + CASE_HANDLE(56); + CASE_HANDLE(57); + CASE_HANDLE(58); + CASE_HANDLE(59); + CASE_HANDLE(60); + CASE_HANDLE(61); + CASE_HANDLE(62); + CASE_HANDLE(63); + CASE_HANDLE(64); + CASE_HANDLE(65); + CASE_HANDLE(66); + CASE_HANDLE(67); + CASE_HANDLE(68); + CASE_HANDLE(69); + CASE_HANDLE(70); + CASE_HANDLE(71); + CASE_HANDLE(72); + CASE_HANDLE(73); + CASE_HANDLE(74); + CASE_HANDLE(75); + CASE_HANDLE(76); + CASE_HANDLE(77); + CASE_HANDLE(78); + CASE_HANDLE(79); + CASE_HANDLE(80); + CASE_HANDLE(81); + CASE_HANDLE(82); + CASE_HANDLE(83); + CASE_HANDLE(84); + CASE_HANDLE(85); + CASE_HANDLE(86); + CASE_HANDLE(87); + CASE_HANDLE(88); + CASE_HANDLE(89); + CASE_HANDLE(90); + CASE_HANDLE(91); + CASE_HANDLE(92); + CASE_HANDLE(93); + CASE_HANDLE(94); + CASE_HANDLE(95); + CASE_HANDLE(96); + CASE_HANDLE(97); + CASE_HANDLE(98); + CASE_HANDLE(99); + CASE_HANDLE(100); + CASE_HANDLE(101); + CASE_HANDLE(102); + CASE_HANDLE(103); + CASE_HANDLE(104); + CASE_HANDLE(105); + CASE_HANDLE(106); + CASE_HANDLE(107); + CASE_HANDLE(108); + CASE_HANDLE(109); + CASE_HANDLE(110); + CASE_HANDLE(111); + CASE_HANDLE(112); + CASE_HANDLE(113); + CASE_HANDLE(114); + CASE_HANDLE(115); + CASE_HANDLE(116); + CASE_HANDLE(117); + 
CASE_HANDLE(118); + CASE_HANDLE(119); + CASE_HANDLE(120); + CASE_HANDLE(121); + CASE_HANDLE(122); + CASE_HANDLE(123); + CASE_HANDLE(124); + CASE_HANDLE(125); + CASE_HANDLE(126); + CASE_HANDLE(127); + CASE_HANDLE(128); + CASE_HANDLE(129); + CASE_HANDLE(130); + CASE_HANDLE(131); + CASE_HANDLE(132); + CASE_HANDLE(133); + CASE_HANDLE(134); + CASE_HANDLE(135); + CASE_HANDLE(136); + CASE_HANDLE(137); + CASE_HANDLE(138); + CASE_HANDLE(139); + CASE_HANDLE(140); + CASE_HANDLE(141); + CASE_HANDLE(142); + CASE_HANDLE(143); + CASE_HANDLE(144); + CASE_HANDLE(145); + CASE_HANDLE(146); + CASE_HANDLE(147); + CASE_HANDLE(148); + CASE_HANDLE(149); + CASE_HANDLE(150); + CASE_HANDLE(151); + CASE_HANDLE(152); + CASE_HANDLE(153); + CASE_HANDLE(154); + CASE_HANDLE(155); + CASE_HANDLE(156); + CASE_HANDLE(157); + CASE_HANDLE(158); + CASE_HANDLE(159); + CASE_HANDLE(160); + CASE_HANDLE(161); + CASE_HANDLE(162); + CASE_HANDLE(163); + CASE_HANDLE(164); + CASE_HANDLE(165); + CASE_HANDLE(166); + CASE_HANDLE(167); + CASE_HANDLE(168); + CASE_HANDLE(169); + CASE_HANDLE(170); + CASE_HANDLE(171); + CASE_HANDLE(172); + CASE_HANDLE(173); + CASE_HANDLE(174); + CASE_HANDLE(175); + CASE_HANDLE(176); + CASE_HANDLE(177); + CASE_HANDLE(178); + CASE_HANDLE(179); + CASE_HANDLE(180); + CASE_HANDLE(181); + CASE_HANDLE(182); + CASE_HANDLE(183); + CASE_HANDLE(184); + CASE_HANDLE(185); + CASE_HANDLE(186); + CASE_HANDLE(187); + CASE_HANDLE(188); + CASE_HANDLE(189); + CASE_HANDLE(190); + CASE_HANDLE(191); + CASE_HANDLE(192); + CASE_HANDLE(193); + CASE_HANDLE(194); + CASE_HANDLE(195); + CASE_HANDLE(196); + CASE_HANDLE(197); + CASE_HANDLE(198); + CASE_HANDLE(199); + CASE_HANDLE(200); + CASE_HANDLE(201); + CASE_HANDLE(202); + CASE_HANDLE(203); + CASE_HANDLE(204); + CASE_HANDLE(205); + CASE_HANDLE(206); + CASE_HANDLE(207); + CASE_HANDLE(208); + CASE_HANDLE(209); + CASE_HANDLE(210); + CASE_HANDLE(211); + CASE_HANDLE(212); + CASE_HANDLE(213); + CASE_HANDLE(214); + CASE_HANDLE(215); + CASE_HANDLE(216); + CASE_HANDLE(217); + CASE_HANDLE(218); + CASE_HANDLE(219); + CASE_HANDLE(220); + CASE_HANDLE(221); + CASE_HANDLE(222); + CASE_HANDLE(223); + CASE_HANDLE(224); + CASE_HANDLE(225); + CASE_HANDLE(226); + CASE_HANDLE(227); + CASE_HANDLE(228); + CASE_HANDLE(229); + CASE_HANDLE(230); + CASE_HANDLE(231); + CASE_HANDLE(232); + CASE_HANDLE(233); + CASE_HANDLE(234); + CASE_HANDLE(235); + CASE_HANDLE(236); + CASE_HANDLE(237); + CASE_HANDLE(238); + CASE_HANDLE(239); + CASE_HANDLE(240); + CASE_HANDLE(241); + CASE_HANDLE(242); + CASE_HANDLE(243); + CASE_HANDLE(244); + CASE_HANDLE(245); + CASE_HANDLE(246); + CASE_HANDLE(247); + CASE_HANDLE(248); + CASE_HANDLE(249); + } + + return NULL; +} diff --git a/src/third_party/vulkan/loader/dirent_on_windows.c b/src/third_party/vulkan/loader/dirent_on_windows.c new file mode 100644 index 0000000..16318cc --- /dev/null +++ b/src/third_party/vulkan/loader/dirent_on_windows.c @@ -0,0 +1,128 @@ +/* + + Implementation of POSIX directory browsing functions and types for Win32. + + Author: Kevlin Henney (kevlin@acm.org, kevlin@curbralan.com) + History: Created March 1997. Updated June 2003 and July 2012. + Rights: See end of file. 
+ +*/ +#include "dirent_on_windows.h" +#include +#include /* _findfirst and _findnext set errno iff they return -1 */ +#include +#include +#include "vk_loader_platform.h" +#include "loader.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef ptrdiff_t handle_type; /* C99's intptr_t not sufficiently portable */ + +struct DIR { + handle_type handle; /* -1 for failed rewind */ + struct _finddata_t info; + struct dirent result; /* d_name null iff first time */ + char *name; /* null-terminated char string */ +}; + +DIR *opendir(const char *name) { + DIR *dir = 0; + + if (name && name[0]) { + size_t base_length = strlen(name); + const char *all = /* search pattern must end with suitable wildcard */ + strchr("/\\", name[base_length - 1]) ? "*" : "/*"; + + if ((dir = (DIR *)loader_instance_tls_heap_alloc(sizeof *dir)) != 0 && + (dir->name = (char *)loader_instance_tls_heap_alloc(base_length + strlen(all) + 1)) != 0) { + strcat(strcpy(dir->name, name), all); + + if ((dir->handle = (handle_type)_findfirst(dir->name, &dir->info)) != -1) { + dir->result.d_name = 0; + } else /* rollback */ + { + loader_instance_tls_heap_free(dir->name); + loader_instance_tls_heap_free(dir); + dir = 0; + } + } else /* rollback */ + { + loader_instance_tls_heap_free(dir); + dir = 0; + errno = ENOMEM; + } + } else { + errno = EINVAL; + } + + return dir; +} + +int closedir(DIR *dir) { + int result = -1; + + if (dir) { + if (dir->handle != -1) { + result = _findclose(dir->handle); + } + + loader_instance_tls_heap_free(dir->name); + loader_instance_tls_heap_free(dir); + } + + if (result == -1) /* map all errors to EBADF */ + { + errno = EBADF; + } + + return result; +} + +struct dirent *readdir(DIR *dir) { + struct dirent *result = 0; + + if (dir && dir->handle != -1) { + if (!dir->result.d_name || _findnext(dir->handle, &dir->info) != -1) { + result = &dir->result; + result->d_name = dir->info.name; + } + } else { + errno = EBADF; + } + + return result; +} + +void rewinddir(DIR *dir) { + if (dir && dir->handle != -1) { + _findclose(dir->handle); + dir->handle = (handle_type)_findfirst(dir->name, &dir->info); + dir->result.d_name = 0; + } else { + errno = EBADF; + } +} + +#ifdef __cplusplus +} +#endif + +/* + + Copyright Kevlin Henney, 1997, 2003, 2012. All rights reserved. + Copyright (c) 2015 The Khronos Group Inc. + Copyright (c) 2015 Valve Corporation + Copyright (c) 2015 LunarG, Inc. + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose is hereby granted without fee, provided + that this copyright and permissions notice appear in all copies and + derivatives. + + This software is supplied "as is" without express or implied warranty. + + But that said, if there are any problems please get in touch. + +*/ diff --git a/src/third_party/vulkan/loader/dirent_on_windows.h b/src/third_party/vulkan/loader/dirent_on_windows.h new file mode 100644 index 0000000..8600f8e --- /dev/null +++ b/src/third_party/vulkan/loader/dirent_on_windows.h @@ -0,0 +1,51 @@ +#ifndef DIRENT_INCLUDED +#define DIRENT_INCLUDED + +/* + + Declaration of POSIX directory browsing functions and types for Win32. + + Author: Kevlin Henney (kevlin@acm.org, kevlin@curbralan.com) + History: Created March 1997. Updated June 2003. + Rights: See end of file. 
+ +*/ + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct DIR DIR; + +struct dirent { + char *d_name; +}; + +DIR *opendir(const char *); +int closedir(DIR *); +struct dirent *readdir(DIR *); +void rewinddir(DIR *); + +/* + + Copyright Kevlin Henney, 1997, 2003. All rights reserved. + Copyright (c) 2015 The Khronos Group Inc. + Copyright (c) 2015 Valve Corporation + Copyright (c) 2015 LunarG, Inc. + + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose is hereby granted without fee, provided + that this copyright and permissions notice appear in all copies and + derivatives. + + This software is supplied "as is" without express or implied warranty. + + But that said, if there are any problems please get in touch. + +*/ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/loader/dxgi_loader.c b/src/third_party/vulkan/loader/dxgi_loader.c new file mode 100644 index 0000000..c2a3fa5 --- /dev/null +++ b/src/third_party/vulkan/loader/dxgi_loader.c @@ -0,0 +1,23 @@ +#include "dxgi_loader.h" + +#include <strsafe.h> + +static HMODULE load_dxgi_module() { + TCHAR systemPath[MAX_PATH] = ""; + GetSystemDirectory(systemPath, MAX_PATH); + StringCchCat(systemPath, MAX_PATH, TEXT("\\dxgi.dll")); + + return LoadLibrary(systemPath); +} + +typedef HRESULT (APIENTRY *PFN_CreateDXGIFactory1)(REFIID riid, void **ppFactory); + +HRESULT dyn_CreateDXGIFactory1(REFIID riid, void **ppFactory) { + PFN_CreateDXGIFactory1 fpCreateDXGIFactory1 = + (PFN_CreateDXGIFactory1)GetProcAddress(load_dxgi_module(), "CreateDXGIFactory1"); + + if (fpCreateDXGIFactory1 != NULL) + return fpCreateDXGIFactory1(riid, ppFactory); + + return DXGI_ERROR_NOT_FOUND; +} \ No newline at end of file diff --git a/src/third_party/vulkan/loader/dxgi_loader.h b/src/third_party/vulkan/loader/dxgi_loader.h new file mode 100644 index 0000000..00daf08 --- /dev/null +++ b/src/third_party/vulkan/loader/dxgi_loader.h @@ -0,0 +1,8 @@ +#ifndef DXGI_LOADER_H +#define DXGI_LOADER_H + +#include <dxgi1_6.h> + +HRESULT dyn_CreateDXGIFactory1(REFIID riid, void **ppFactory); + +#endif \ No newline at end of file diff --git a/src/third_party/vulkan/loader/extension_manual.c b/src/third_party/vulkan/loader/extension_manual.c new file mode 100644 index 0000000..1d4b2d0 --- /dev/null +++ b/src/third_party/vulkan/loader/extension_manual.c @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + * Author: Mark Young + * Author: Lenny Komow + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "vk_loader_platform.h" +#include "loader.h" +#include "vk_loader_extensions.h" +#include "../vk_icd.h" +#include "wsi.h" +#include "debug_utils.h" + +// ---- Manually added trampoline/terminator functions + +// These functions, for whatever reason, require more complex changes than +// can easily be automatically generated. + +// ---- VK_NV_external_memory_capabilities extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL +GetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, + VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV *pExternalImageFormatProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + + return disp->GetPhysicalDeviceExternalImageFormatPropertiesNV( + unwrapped_phys_dev, format, type, tiling, usage, flags, + externalHandleType, pExternalImageFormatProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL +terminator_GetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, + VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV *pExternalImageFormatProperties) { + struct loader_physical_device_term *phys_dev_term = + (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + if (!icd_term->dispatch.GetPhysicalDeviceExternalImageFormatPropertiesNV) { + if (externalHandleType) { + return VK_ERROR_FORMAT_NOT_SUPPORTED; + } + + if (!icd_term->dispatch.GetPhysicalDeviceImageFormatProperties) { + return VK_ERROR_INITIALIZATION_FAILED; + } + + pExternalImageFormatProperties->externalMemoryFeatures = 0; + pExternalImageFormatProperties->exportFromImportedHandleTypes = 0; + pExternalImageFormatProperties->compatibleHandleTypes = 0; + + return icd_term->dispatch.GetPhysicalDeviceImageFormatProperties( + phys_dev_term->phys_dev, format, type, tiling, usage, flags, + &pExternalImageFormatProperties->imageFormatProperties); + } + + return icd_term->dispatch.GetPhysicalDeviceExternalImageFormatPropertiesNV( + phys_dev_term->phys_dev, format, type, tiling, usage, flags, + externalHandleType, pExternalImageFormatProperties); +} + +// ---- VK_EXT_display_surface_counter extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT *pSurfaceCapabilities) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceSurfaceCapabilities2EXT(unwrapped_phys_dev, surface, pSurfaceCapabilities); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT *pSurfaceCapabilities) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; +
struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + VkIcdSurface *icd_surface = (VkIcdSurface *)(surface); + uint8_t icd_index = phys_dev_term->icd_index; + + // Unwrap the surface if needed + VkSurfaceKHR unwrapped_surface = surface; + if (icd_surface->real_icd_surfaces != NULL && (void *)icd_surface->real_icd_surfaces[icd_index] != NULL) { + unwrapped_surface = icd_surface->real_icd_surfaces[icd_index]; + } + + if (icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilities2EXT != NULL) { + // Pass the call to the driver + return icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilities2EXT(phys_dev_term->phys_dev, unwrapped_surface, + pSurfaceCapabilities); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceCapabilities2EXT: Emulating call in ICD \"%s\" using " + "vkGetPhysicalDeviceSurfaceCapabilitiesKHR", + icd_term->scanned_icd->lib_name); + + VkSurfaceCapabilitiesKHR surface_caps; + VkResult res = + icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev_term->phys_dev, unwrapped_surface, &surface_caps); + pSurfaceCapabilities->minImageCount = surface_caps.minImageCount; + pSurfaceCapabilities->maxImageCount = surface_caps.maxImageCount; + pSurfaceCapabilities->currentExtent = surface_caps.currentExtent; + pSurfaceCapabilities->minImageExtent = surface_caps.minImageExtent; + pSurfaceCapabilities->maxImageExtent = surface_caps.maxImageExtent; + pSurfaceCapabilities->maxImageArrayLayers = surface_caps.maxImageArrayLayers; + pSurfaceCapabilities->supportedTransforms = surface_caps.supportedTransforms; + pSurfaceCapabilities->currentTransform = surface_caps.currentTransform; + pSurfaceCapabilities->supportedCompositeAlpha = surface_caps.supportedCompositeAlpha; + pSurfaceCapabilities->supportedUsageFlags = surface_caps.supportedUsageFlags; + pSurfaceCapabilities->supportedSurfaceCounters = 0; + + if (pSurfaceCapabilities->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceCapabilities2EXT: Emulation found unrecognized structure type in " + "pSurfaceCapabilities->pNext - this struct will be ignored"); + } + + return res; + } +} + +// ---- VK_EXT_direct_mode_display extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL ReleaseDisplayEXT(VkPhysicalDevice physicalDevice, VkDisplayKHR display) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->ReleaseDisplayEXT(unwrapped_phys_dev, display); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_ReleaseDisplayEXT(VkPhysicalDevice physicalDevice, VkDisplayKHR display) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + if (icd_term->dispatch.ReleaseDisplayEXT == NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD \"%s\" associated with VkPhysicalDevice does not support vkReleaseDisplayEXT - Consequently, the call is " + "invalid because it should not be possible to acquire a display on this device", + icd_term->scanned_icd->lib_name); + } + return icd_term->dispatch.ReleaseDisplayEXT(phys_dev_term->phys_dev, display); +} + +// ---- VK_EXT_acquire_xlib_display extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +VKAPI_ATTR 
VkResult VKAPI_CALL AcquireXlibDisplayEXT(VkPhysicalDevice physicalDevice, Display *dpy, VkDisplayKHR display) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->AcquireXlibDisplayEXT(unwrapped_phys_dev, dpy, display); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_AcquireXlibDisplayEXT(VkPhysicalDevice physicalDevice, Display *dpy, + VkDisplayKHR display) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + if (icd_term->dispatch.AcquireXlibDisplayEXT != NULL) { + // Pass the call to the driver + return icd_term->dispatch.AcquireXlibDisplayEXT(phys_dev_term->phys_dev, dpy, display); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkAcquireXLibDisplayEXT: Emulating call in ICD \"%s\" by returning error", icd_term->scanned_icd->lib_name); + + // Fail for the unsupported command + return VK_ERROR_INITIALIZATION_FAILED; + } +} + +VKAPI_ATTR VkResult VKAPI_CALL GetRandROutputDisplayEXT(VkPhysicalDevice physicalDevice, Display *dpy, RROutput rrOutput, + VkDisplayKHR *pDisplay) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetRandROutputDisplayEXT(unwrapped_phys_dev, dpy, rrOutput, pDisplay); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetRandROutputDisplayEXT(VkPhysicalDevice physicalDevice, Display *dpy, RROutput rrOutput, + VkDisplayKHR *pDisplay) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + if (icd_term->dispatch.GetRandROutputDisplayEXT != NULL) { + // Pass the call to the driver + return icd_term->dispatch.GetRandROutputDisplayEXT(phys_dev_term->phys_dev, dpy, rrOutput, pDisplay); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetRandROutputDisplayEXT: Emulating call in ICD \"%s\" by returning null display", + icd_term->scanned_icd->lib_name); + + // Return a null handle to indicate this can't be done + *pDisplay = VK_NULL_HANDLE; + return VK_SUCCESS; + } +} + +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModes2EXT( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceSurfacePresentModes2EXT(unwrapped_phys_dev, pSurfaceInfo, pPresentModeCount, pPresentModes); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfacePresentModes2EXT( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = 
phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceSurfacePresentModes2EXT) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceSurfacePresentModes2EXT"); + } + VkIcdSurface *icd_surface = (VkIcdSurface *)(pSurfaceInfo->surface); + uint8_t icd_index = phys_dev_term->icd_index; + if (NULL != icd_surface->real_icd_surfaces && NULL != (void *)icd_surface->real_icd_surfaces[icd_index]) { + const VkPhysicalDeviceSurfaceInfo2KHR surface_info_copy = { + .sType = pSurfaceInfo->sType, + .pNext = pSurfaceInfo->pNext, + .surface = icd_surface->real_icd_surfaces[icd_index], + }; + return icd_term->dispatch.GetPhysicalDeviceSurfacePresentModes2EXT(phys_dev_term->phys_dev, &surface_info_copy, pPresentModeCount, pPresentModes); + } + return icd_term->dispatch.GetPhysicalDeviceSurfacePresentModes2EXT(phys_dev_term->phys_dev, pSurfaceInfo, pPresentModeCount, pPresentModes); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetDeviceGroupSurfacePresentModes2EXT( + VkDevice device, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkDeviceGroupPresentModeFlagsKHR* pModes) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeviceGroupSurfacePresentModes2EXT(device, pSurfaceInfo, pModes); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDeviceGroupSurfacePresentModes2EXT( + VkDevice device, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkDeviceGroupPresentModeFlagsKHR* pModes) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.GetDeviceGroupSurfacePresentModes2EXT) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pSurfaceInfo->surface; + if (NULL != icd_surface->real_icd_surfaces && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[icd_index]) { + const VkPhysicalDeviceSurfaceInfo2KHR surface_info_copy = { + .sType = pSurfaceInfo->sType, + .pNext = pSurfaceInfo->pNext, + .surface = icd_surface->real_icd_surfaces[icd_index], + }; + return icd_term->dispatch.GetDeviceGroupSurfacePresentModes2EXT(device, &surface_info_copy, pModes); + } + return icd_term->dispatch.GetDeviceGroupSurfacePresentModes2EXT(device, pSurfaceInfo, pModes); + } + return VK_SUCCESS; +} + +#endif // VK_USE_PLATFORM_WIN32_KHR + +// ---- VK_EXT_tooling_info extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceToolPropertiesEXT(unwrapped_phys_dev, pToolCount, pToolProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties) { + return VK_SUCCESS; +} diff --git a/src/third_party/vulkan/loader/extension_manual.h b/src/third_party/vulkan/loader/extension_manual.h new file mode 100644 index 0000000..b60a799 --- /dev/null +++ b/src/third_party/vulkan/loader/extension_manual.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. 
+ * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Mark Young + */ + +#pragma once + +// ---- Manually added trampoline/terminator functions + +// These functions, for whatever reason, require more complex changes than +// can easily be automatically generated. + +VKAPI_ATTR VkResult VKAPI_CALL +GetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, + VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV *pExternalImageFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL +terminator_GetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, + VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV *pExternalImageFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL ReleaseDisplayEXT(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_ReleaseDisplayEXT(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +VKAPI_ATTR VkResult VKAPI_CALL AcquireXlibDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_AcquireXlibDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, + VkDisplayKHR display); + +VKAPI_ATTR VkResult VKAPI_CALL GetRandROutputDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, + VkDisplayKHR* pDisplay); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetRandROutputDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, + VkDisplayKHR* pDisplay); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModes2EXT( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfacePresentModes2EXT( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif // VK_USE_PLATFORM_WIN32_KHR + +VKAPI_ATTR VkResult VKAPI_CALL GetDeviceGroupSurfacePresentModes2EXT( + VkDevice device, + const VkPhysicalDeviceSurfaceInfo2KHR* 
pSurfaceInfo, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDeviceGroupSurfacePresentModes2EXT( + VkDevice device, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +// ---- VK_EXT_tooling_info extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties); diff --git a/src/third_party/vulkan/loader/gpa_helper.h b/src/third_party/vulkan/loader/gpa_helper.h new file mode 100644 index 0000000..01dec6f --- /dev/null +++ b/src/third_party/vulkan/loader/gpa_helper.h @@ -0,0 +1,249 @@ +/* + * + * Copyright (c) 2015-18, 2020 The Khronos Group Inc. + * Copyright (c) 2015-18, 2020 Valve Corporation + * Copyright (c) 2015-18, 2020 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Jon Ashburn + */ + +#include <string.h> +#include "debug_utils.h" +#include "wsi.h" + +static inline void *trampolineGetProcAddr(struct loader_instance *inst, const char *funcName) { + // Don't include or check global functions + if (!strcmp(funcName, "vkGetInstanceProcAddr")) return vkGetInstanceProcAddr; + if (!strcmp(funcName, "vkDestroyInstance")) return vkDestroyInstance; + if (!strcmp(funcName, "vkEnumeratePhysicalDevices")) return vkEnumeratePhysicalDevices; + if (!strcmp(funcName, "vkGetPhysicalDeviceFeatures")) return vkGetPhysicalDeviceFeatures; + if (!strcmp(funcName, "vkGetPhysicalDeviceFormatProperties")) return vkGetPhysicalDeviceFormatProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceImageFormatProperties")) return vkGetPhysicalDeviceImageFormatProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceSparseImageFormatProperties")) return vkGetPhysicalDeviceSparseImageFormatProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceProperties")) return vkGetPhysicalDeviceProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceQueueFamilyProperties")) return vkGetPhysicalDeviceQueueFamilyProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties")) return vkGetPhysicalDeviceMemoryProperties; + if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties")) return vkEnumerateDeviceLayerProperties; + if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties")) return vkEnumerateDeviceExtensionProperties; + if (!strcmp(funcName, "vkCreateDevice")) return vkCreateDevice; + if (!strcmp(funcName, "vkGetDeviceProcAddr")) return vkGetDeviceProcAddr; + if (!strcmp(funcName, "vkDestroyDevice")) return vkDestroyDevice; + if (!strcmp(funcName, "vkGetDeviceQueue")) return vkGetDeviceQueue; + if (!strcmp(funcName, "vkQueueSubmit")) return vkQueueSubmit; + if (!strcmp(funcName, "vkQueueWaitIdle")) return vkQueueWaitIdle; + if (!strcmp(funcName,
"vkDeviceWaitIdle")) return vkDeviceWaitIdle; + if (!strcmp(funcName, "vkAllocateMemory")) return vkAllocateMemory; + if (!strcmp(funcName, "vkFreeMemory")) return vkFreeMemory; + if (!strcmp(funcName, "vkMapMemory")) return vkMapMemory; + if (!strcmp(funcName, "vkUnmapMemory")) return vkUnmapMemory; + if (!strcmp(funcName, "vkFlushMappedMemoryRanges")) return vkFlushMappedMemoryRanges; + if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges")) return vkInvalidateMappedMemoryRanges; + if (!strcmp(funcName, "vkGetDeviceMemoryCommitment")) return vkGetDeviceMemoryCommitment; + if (!strcmp(funcName, "vkGetImageSparseMemoryRequirements")) return vkGetImageSparseMemoryRequirements; + if (!strcmp(funcName, "vkGetImageMemoryRequirements")) return vkGetImageMemoryRequirements; + if (!strcmp(funcName, "vkGetBufferMemoryRequirements")) return vkGetBufferMemoryRequirements; + if (!strcmp(funcName, "vkBindImageMemory")) return vkBindImageMemory; + if (!strcmp(funcName, "vkBindBufferMemory")) return vkBindBufferMemory; + if (!strcmp(funcName, "vkQueueBindSparse")) return vkQueueBindSparse; + if (!strcmp(funcName, "vkCreateFence")) return vkCreateFence; + if (!strcmp(funcName, "vkDestroyFence")) return vkDestroyFence; + if (!strcmp(funcName, "vkGetFenceStatus")) return vkGetFenceStatus; + if (!strcmp(funcName, "vkResetFences")) return vkResetFences; + if (!strcmp(funcName, "vkWaitForFences")) return vkWaitForFences; + if (!strcmp(funcName, "vkCreateSemaphore")) return vkCreateSemaphore; + if (!strcmp(funcName, "vkDestroySemaphore")) return vkDestroySemaphore; + if (!strcmp(funcName, "vkCreateEvent")) return vkCreateEvent; + if (!strcmp(funcName, "vkDestroyEvent")) return vkDestroyEvent; + if (!strcmp(funcName, "vkGetEventStatus")) return vkGetEventStatus; + if (!strcmp(funcName, "vkSetEvent")) return vkSetEvent; + if (!strcmp(funcName, "vkResetEvent")) return vkResetEvent; + if (!strcmp(funcName, "vkCreateQueryPool")) return vkCreateQueryPool; + if (!strcmp(funcName, "vkDestroyQueryPool")) return vkDestroyQueryPool; + if (!strcmp(funcName, "vkGetQueryPoolResults")) return vkGetQueryPoolResults; + if (!strcmp(funcName, "vkCreateBuffer")) return vkCreateBuffer; + if (!strcmp(funcName, "vkDestroyBuffer")) return vkDestroyBuffer; + if (!strcmp(funcName, "vkCreateBufferView")) return vkCreateBufferView; + if (!strcmp(funcName, "vkDestroyBufferView")) return vkDestroyBufferView; + if (!strcmp(funcName, "vkCreateImage")) return vkCreateImage; + if (!strcmp(funcName, "vkDestroyImage")) return vkDestroyImage; + if (!strcmp(funcName, "vkGetImageSubresourceLayout")) return vkGetImageSubresourceLayout; + if (!strcmp(funcName, "vkCreateImageView")) return vkCreateImageView; + if (!strcmp(funcName, "vkDestroyImageView")) return vkDestroyImageView; + if (!strcmp(funcName, "vkCreateShaderModule")) return vkCreateShaderModule; + if (!strcmp(funcName, "vkDestroyShaderModule")) return vkDestroyShaderModule; + if (!strcmp(funcName, "vkCreatePipelineCache")) return vkCreatePipelineCache; + if (!strcmp(funcName, "vkDestroyPipelineCache")) return vkDestroyPipelineCache; + if (!strcmp(funcName, "vkGetPipelineCacheData")) return vkGetPipelineCacheData; + if (!strcmp(funcName, "vkMergePipelineCaches")) return vkMergePipelineCaches; + if (!strcmp(funcName, "vkCreateGraphicsPipelines")) return vkCreateGraphicsPipelines; + if (!strcmp(funcName, "vkCreateComputePipelines")) return vkCreateComputePipelines; + if (!strcmp(funcName, "vkDestroyPipeline")) return vkDestroyPipeline; + if (!strcmp(funcName, "vkCreatePipelineLayout")) 
return vkCreatePipelineLayout; + if (!strcmp(funcName, "vkDestroyPipelineLayout")) return vkDestroyPipelineLayout; + if (!strcmp(funcName, "vkCreateSampler")) return vkCreateSampler; + if (!strcmp(funcName, "vkDestroySampler")) return vkDestroySampler; + if (!strcmp(funcName, "vkCreateDescriptorSetLayout")) return vkCreateDescriptorSetLayout; + if (!strcmp(funcName, "vkDestroyDescriptorSetLayout")) return vkDestroyDescriptorSetLayout; + if (!strcmp(funcName, "vkCreateDescriptorPool")) return vkCreateDescriptorPool; + if (!strcmp(funcName, "vkDestroyDescriptorPool")) return vkDestroyDescriptorPool; + if (!strcmp(funcName, "vkResetDescriptorPool")) return vkResetDescriptorPool; + if (!strcmp(funcName, "vkAllocateDescriptorSets")) return vkAllocateDescriptorSets; + if (!strcmp(funcName, "vkFreeDescriptorSets")) return vkFreeDescriptorSets; + if (!strcmp(funcName, "vkUpdateDescriptorSets")) return vkUpdateDescriptorSets; + if (!strcmp(funcName, "vkCreateFramebuffer")) return vkCreateFramebuffer; + if (!strcmp(funcName, "vkDestroyFramebuffer")) return vkDestroyFramebuffer; + if (!strcmp(funcName, "vkCreateRenderPass")) return vkCreateRenderPass; + if (!strcmp(funcName, "vkDestroyRenderPass")) return vkDestroyRenderPass; + if (!strcmp(funcName, "vkGetRenderAreaGranularity")) return vkGetRenderAreaGranularity; + if (!strcmp(funcName, "vkCreateCommandPool")) return vkCreateCommandPool; + if (!strcmp(funcName, "vkDestroyCommandPool")) return vkDestroyCommandPool; + if (!strcmp(funcName, "vkResetCommandPool")) return vkResetCommandPool; + if (!strcmp(funcName, "vkAllocateCommandBuffers")) return vkAllocateCommandBuffers; + if (!strcmp(funcName, "vkFreeCommandBuffers")) return vkFreeCommandBuffers; + if (!strcmp(funcName, "vkBeginCommandBuffer")) return vkBeginCommandBuffer; + if (!strcmp(funcName, "vkEndCommandBuffer")) return vkEndCommandBuffer; + if (!strcmp(funcName, "vkResetCommandBuffer")) return vkResetCommandBuffer; + if (!strcmp(funcName, "vkCmdBindPipeline")) return vkCmdBindPipeline; + if (!strcmp(funcName, "vkCmdBindDescriptorSets")) return vkCmdBindDescriptorSets; + if (!strcmp(funcName, "vkCmdBindVertexBuffers")) return vkCmdBindVertexBuffers; + if (!strcmp(funcName, "vkCmdBindIndexBuffer")) return vkCmdBindIndexBuffer; + if (!strcmp(funcName, "vkCmdSetViewport")) return vkCmdSetViewport; + if (!strcmp(funcName, "vkCmdSetScissor")) return vkCmdSetScissor; + if (!strcmp(funcName, "vkCmdSetLineWidth")) return vkCmdSetLineWidth; + if (!strcmp(funcName, "vkCmdSetDepthBias")) return vkCmdSetDepthBias; + if (!strcmp(funcName, "vkCmdSetBlendConstants")) return vkCmdSetBlendConstants; + if (!strcmp(funcName, "vkCmdSetDepthBounds")) return vkCmdSetDepthBounds; + if (!strcmp(funcName, "vkCmdSetStencilCompareMask")) return vkCmdSetStencilCompareMask; + if (!strcmp(funcName, "vkCmdSetStencilWriteMask")) return vkCmdSetStencilWriteMask; + if (!strcmp(funcName, "vkCmdSetStencilReference")) return vkCmdSetStencilReference; + if (!strcmp(funcName, "vkCmdDraw")) return vkCmdDraw; + if (!strcmp(funcName, "vkCmdDrawIndexed")) return vkCmdDrawIndexed; + if (!strcmp(funcName, "vkCmdDrawIndirect")) return vkCmdDrawIndirect; + if (!strcmp(funcName, "vkCmdDrawIndexedIndirect")) return vkCmdDrawIndexedIndirect; + if (!strcmp(funcName, "vkCmdDispatch")) return vkCmdDispatch; + if (!strcmp(funcName, "vkCmdDispatchIndirect")) return vkCmdDispatchIndirect; + if (!strcmp(funcName, "vkCmdCopyBuffer")) return vkCmdCopyBuffer; + if (!strcmp(funcName, "vkCmdCopyImage")) return vkCmdCopyImage; + if (!strcmp(funcName, 
"vkCmdBlitImage")) return vkCmdBlitImage; + if (!strcmp(funcName, "vkCmdCopyBufferToImage")) return vkCmdCopyBufferToImage; + if (!strcmp(funcName, "vkCmdCopyImageToBuffer")) return vkCmdCopyImageToBuffer; + if (!strcmp(funcName, "vkCmdUpdateBuffer")) return vkCmdUpdateBuffer; + if (!strcmp(funcName, "vkCmdFillBuffer")) return vkCmdFillBuffer; + if (!strcmp(funcName, "vkCmdClearColorImage")) return vkCmdClearColorImage; + if (!strcmp(funcName, "vkCmdClearDepthStencilImage")) return vkCmdClearDepthStencilImage; + if (!strcmp(funcName, "vkCmdClearAttachments")) return vkCmdClearAttachments; + if (!strcmp(funcName, "vkCmdResolveImage")) return vkCmdResolveImage; + if (!strcmp(funcName, "vkCmdSetEvent")) return vkCmdSetEvent; + if (!strcmp(funcName, "vkCmdResetEvent")) return vkCmdResetEvent; + if (!strcmp(funcName, "vkCmdWaitEvents")) return vkCmdWaitEvents; + if (!strcmp(funcName, "vkCmdPipelineBarrier")) return vkCmdPipelineBarrier; + if (!strcmp(funcName, "vkCmdBeginQuery")) return vkCmdBeginQuery; + if (!strcmp(funcName, "vkCmdEndQuery")) return vkCmdEndQuery; + if (!strcmp(funcName, "vkCmdResetQueryPool")) return vkCmdResetQueryPool; + if (!strcmp(funcName, "vkCmdWriteTimestamp")) return vkCmdWriteTimestamp; + if (!strcmp(funcName, "vkCmdCopyQueryPoolResults")) return vkCmdCopyQueryPoolResults; + if (!strcmp(funcName, "vkCmdPushConstants")) return vkCmdPushConstants; + if (!strcmp(funcName, "vkCmdBeginRenderPass")) return vkCmdBeginRenderPass; + if (!strcmp(funcName, "vkCmdNextSubpass")) return vkCmdNextSubpass; + if (!strcmp(funcName, "vkCmdEndRenderPass")) return vkCmdEndRenderPass; + if (!strcmp(funcName, "vkCmdExecuteCommands")) return vkCmdExecuteCommands; + + // Core 1.1 functions + if (!strcmp(funcName, "vkEnumeratePhysicalDeviceGroups")) return vkEnumeratePhysicalDeviceGroups; + if (!strcmp(funcName, "vkGetPhysicalDeviceFeatures2")) return vkGetPhysicalDeviceFeatures2; + if (!strcmp(funcName, "vkGetPhysicalDeviceProperties2")) return vkGetPhysicalDeviceProperties2; + if (!strcmp(funcName, "vkGetPhysicalDeviceFormatProperties2")) return vkGetPhysicalDeviceFormatProperties2; + if (!strcmp(funcName, "vkGetPhysicalDeviceImageFormatProperties2")) return vkGetPhysicalDeviceImageFormatProperties2; + if (!strcmp(funcName, "vkGetPhysicalDeviceQueueFamilyProperties2")) return vkGetPhysicalDeviceQueueFamilyProperties2; + if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties2")) return vkGetPhysicalDeviceMemoryProperties2; + if (!strcmp(funcName, "vkGetPhysicalDeviceSparseImageFormatProperties2")) + return vkGetPhysicalDeviceSparseImageFormatProperties2; + if (!strcmp(funcName, "vkGetPhysicalDeviceExternalBufferProperties")) return vkGetPhysicalDeviceExternalBufferProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceExternalSemaphoreProperties")) return vkGetPhysicalDeviceExternalSemaphoreProperties; + if (!strcmp(funcName, "vkGetPhysicalDeviceExternalFenceProperties")) return vkGetPhysicalDeviceExternalFenceProperties; + if (!strcmp(funcName, "vkBindBufferMemory2")) return vkBindBufferMemory2; + if (!strcmp(funcName, "vkBindImageMemory2")) return vkBindImageMemory2; + if (!strcmp(funcName, "vkGetDeviceGroupPeerMemoryFeatures")) return vkGetDeviceGroupPeerMemoryFeatures; + if (!strcmp(funcName, "vkCmdSetDeviceMask")) return vkCmdSetDeviceMask; + if (!strcmp(funcName, "vkCmdDispatchBase")) return vkCmdDispatchBase; + if (!strcmp(funcName, "vkGetImageMemoryRequirements2")) return vkGetImageMemoryRequirements2; + if (!strcmp(funcName, "vkTrimCommandPool")) return vkTrimCommandPool; + if 
(!strcmp(funcName, "vkGetDeviceQueue2")) return vkGetDeviceQueue2; + if (!strcmp(funcName, "vkCreateSamplerYcbcrConversion")) return vkCreateSamplerYcbcrConversion; + if (!strcmp(funcName, "vkDestroySamplerYcbcrConversion")) return vkDestroySamplerYcbcrConversion; + if (!strcmp(funcName, "vkGetDescriptorSetLayoutSupport")) return vkGetDescriptorSetLayoutSupport; + if (!strcmp(funcName, "vkCreateDescriptorUpdateTemplate")) return vkCreateDescriptorUpdateTemplate; + if (!strcmp(funcName, "vkDestroyDescriptorUpdateTemplate")) return vkDestroyDescriptorUpdateTemplate; + if (!strcmp(funcName, "vkUpdateDescriptorSetWithTemplate")) return vkUpdateDescriptorSetWithTemplate; + if (!strcmp(funcName, "vkGetImageSparseMemoryRequirements2")) return vkGetImageSparseMemoryRequirements2; + if (!strcmp(funcName, "vkGetBufferMemoryRequirements2")) return vkGetBufferMemoryRequirements2; + + // Core 1.2 functions + if (!strcmp(funcName, "vkCreateRenderPass2")) return vkCreateRenderPass2; + if (!strcmp(funcName, "vkCmdBeginRenderPass2")) return vkCmdBeginRenderPass2; + if (!strcmp(funcName, "vkCmdNextSubpass2")) return vkCmdNextSubpass2; + if (!strcmp(funcName, "vkCmdEndRenderPass2")) return vkCmdEndRenderPass2; + if (!strcmp(funcName, "vkCmdDrawIndirectCount")) return vkCmdDrawIndirectCount; + if (!strcmp(funcName, "vkCmdDrawIndexedIndirectCount")) return vkCmdDrawIndexedIndirectCount; + if (!strcmp(funcName, "vkGetSemaphoreCounterValue")) return vkGetSemaphoreCounterValue; + if (!strcmp(funcName, "vkWaitSemaphores")) return vkWaitSemaphores; + if (!strcmp(funcName, "vkSignalSemaphore")) return vkSignalSemaphore; + if (!strcmp(funcName, "vkGetBufferDeviceAddress")) return vkGetBufferDeviceAddress; + if (!strcmp(funcName, "vkGetBufferOpaqueCaptureAddress")) return vkGetBufferOpaqueCaptureAddress; + if (!strcmp(funcName, "vkGetDeviceMemoryOpaqueCaptureAddress")) return vkGetDeviceMemoryOpaqueCaptureAddress; + if (!strcmp(funcName, "vkResetQueryPool")) return vkResetQueryPool; + + // Instance extensions + void *addr; + if (debug_utils_InstanceGpa(inst, funcName, &addr)) return addr; + + if (wsi_swapchain_instance_gpa(inst, funcName, &addr)) return addr; + + if (extension_instance_gpa(inst, funcName, &addr)) return addr; + + // Unknown physical device extensions + if (loader_phys_dev_ext_gpa(inst, funcName, true, &addr, NULL)) return addr; + + // Unknown device extensions + addr = loader_dev_ext_gpa(inst, funcName); + return addr; +} + +static inline void *globalGetProcAddr(const char *name) { + if (!name || name[0] != 'v' || name[1] != 'k') return NULL; + + name += 2; + if (!strcmp(name, "CreateInstance")) return vkCreateInstance; + if (!strcmp(name, "EnumerateInstanceExtensionProperties")) return vkEnumerateInstanceExtensionProperties; + if (!strcmp(name, "EnumerateInstanceLayerProperties")) return vkEnumerateInstanceLayerProperties; + if (!strcmp(name, "EnumerateInstanceVersion")) return vkEnumerateInstanceVersion; + if (!strcmp(name, "GetInstanceProcAddr")) return vkGetInstanceProcAddr; + + return NULL; +} + +static inline void *loader_non_passthrough_gdpa(const char *name) { + if (!name || name[0] != 'v' || name[1] != 'k') return NULL; + + name += 2; + + if (!strcmp(name, "GetDeviceProcAddr")) return vkGetDeviceProcAddr; + if (!strcmp(name, "DestroyDevice")) return vkDestroyDevice; + if (!strcmp(name, "GetDeviceQueue")) return vkGetDeviceQueue; + if (!strcmp(name, "GetDeviceQueue2")) return vkGetDeviceQueue2; + if (!strcmp(name, "AllocateCommandBuffers")) return vkAllocateCommandBuffers; + + return NULL; +} 
diff --git a/src/third_party/vulkan/loader/loader.c b/src/third_party/vulkan/loader/loader.c new file mode 100644 index 0000000..c817aa3 --- /dev/null +++ b/src/third_party/vulkan/loader/loader.c @@ -0,0 +1,8751 @@ +/* + * + * Copyright (c) 2014-2020 The Khronos Group Inc. + * Copyright (c) 2014-2020 Valve Corporation + * Copyright (c) 2014-2020 LunarG, Inc. + * Copyright (C) 2015 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + + * + * Author: Jon Ashburn + * Author: Courtney Goeltzenleuchter + * Author: Mark Young + * Author: Lenny Komow + * + */ + +// This needs to be defined first, or else we'll get redefinitions on NTSTATUS values +#ifdef _WIN32 +#define UMDF_USING_NTSTATUS +#include <ntstatus.h> +#endif + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <stdbool.h> +#include <string.h> +#include <stddef.h> + +#if defined(__APPLE__) +#include <CoreFoundation/CoreFoundation.h> +#include <sys/param.h> +#endif + +// Time related functions +#include <time.h> + +#include <sys/types.h> +#if defined(_WIN32) +#include "dirent_on_windows.h" +#else // _WIN32 +#include <dirent.h> +#endif // _WIN32 +#include "vk_loader_platform.h" +#include "loader.h" +#include "gpa_helper.h" +#include "debug_utils.h" +#include "wsi.h" +#include "../vk_icd.h" +#include "cJSON.h" +#include "murmurhash.h" + +#if defined(_WIN32) +#include <cfgmgr32.h> +#include <initguid.h> +#include <devpkey.h> +#include <winternl.h> +#include <strsafe.h> +#ifdef __MINGW32__ +#undef strcpy // fix error with redefined strcpy when building with MinGW-w64 +#endif +#include <dxgi1_6.h> +#include "adapters.h" + +typedef HRESULT (APIENTRY *PFN_CreateDXGIFactory1)(REFIID riid, void **ppFactory); +static PFN_CreateDXGIFactory1 fpCreateDXGIFactory1; +#endif + +// This is a CMake generated file with #defines for any functions/includes +// that it found present.
This is currently necessary to properly determine +// if secure_getenv or __secure_getenv are present +#if !defined(VULKAN_NON_CMAKE_BUILD) +#include "loader_cmake_config.h" +#endif // !defined(VULKAN_NON_CMAKE_BUILD) + +// Generated file containing all the extension data +#include "vk_loader_extensions.c" + +// Environment Variable information +#define VK_ICD_FILENAMES_ENV_VAR "VK_ICD_FILENAMES" +#define VK_LAYER_PATH_ENV_VAR "VK_LAYER_PATH" + +// Override layer information +#define VK_OVERRIDE_LAYER_NAME "VK_LAYER_LUNARG_override" + +struct loader_struct loader = {0}; +// TLS for instance for alloc/free callbacks +THREAD_LOCAL_DECL struct loader_instance *tls_instance; + +static size_t loader_platform_combine_path(char *dest, size_t len, ...); + +struct loader_phys_dev_per_icd { + uint32_t count; + VkPhysicalDevice *phys_devs; + struct loader_icd_term *this_icd_term; +}; + +enum loader_debug { + LOADER_INFO_BIT = 0x01, + LOADER_WARN_BIT = 0x02, + LOADER_PERF_BIT = 0x04, + LOADER_ERROR_BIT = 0x08, + LOADER_DEBUG_BIT = 0x10, +}; + +uint32_t g_loader_debug = 0; +uint32_t g_loader_log_msgs = 0; + +enum loader_data_files_type { + LOADER_DATA_FILE_MANIFEST_ICD = 0, + LOADER_DATA_FILE_MANIFEST_LAYER, + LOADER_DATA_FILE_NUM_TYPES // Not a real field, used for possible loop terminator +}; + +// thread safety lock for accessing global data structures such as "loader" +// all entrypoints on the instance chain need to be locked except GPA +// additionally CreateDevice and DestroyDevice needs to be locked +loader_platform_thread_mutex loader_lock; +loader_platform_thread_mutex loader_json_lock; +loader_platform_thread_mutex loader_preload_icd_lock; + +// A list of ICDs that gets initialized when the loader does its global initialization. This list should never be used by anything +// other than EnumerateInstanceExtensionProperties(), vkDestroyInstance, and loader_release(). This list does not change +// functionality, but the fact that the libraries already been loaded causes any call that needs to load ICD libraries to speed up +// significantly. This can have a huge impact when making repeated calls to vkEnumerateInstanceExtensionProperties and +// vkCreateInstance. +static struct loader_icd_tramp_list scanned_icds; + +LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_init); + +void *loader_instance_heap_alloc(const struct loader_instance *instance, size_t size, VkSystemAllocationScope alloc_scope) { + void *pMemory = NULL; +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (instance && instance->alloc_callbacks.pfnAllocation) { + // These are internal structures, so it's best to align everything to + // the largest unit size which is the size of a uint64_t. 
+ pMemory = instance->alloc_callbacks.pfnAllocation(instance->alloc_callbacks.pUserData, size, sizeof(uint64_t), alloc_scope); + } else { +#endif + pMemory = malloc(size); + } + + return pMemory; +} + +void loader_instance_heap_free(const struct loader_instance *instance, void *pMemory) { + if (pMemory != NULL) { +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (instance && instance->alloc_callbacks.pfnFree) { + instance->alloc_callbacks.pfnFree(instance->alloc_callbacks.pUserData, pMemory); + } else { +#endif + free(pMemory); + } + } +} + +void *loader_instance_heap_realloc(const struct loader_instance *instance, void *pMemory, size_t orig_size, size_t size, + VkSystemAllocationScope alloc_scope) { + void *pNewMem = NULL; + if (pMemory == NULL || orig_size == 0) { + pNewMem = loader_instance_heap_alloc(instance, size, alloc_scope); + } else if (size == 0) { + loader_instance_heap_free(instance, pMemory); +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) +#else + } else if (instance && instance->alloc_callbacks.pfnReallocation) { + // These are internal structures, so it's best to align everything to + // the largest unit size which is the size of a uint64_t. + pNewMem = instance->alloc_callbacks.pfnReallocation(instance->alloc_callbacks.pUserData, pMemory, size, sizeof(uint64_t), + alloc_scope); +#endif + } else { + pNewMem = realloc(pMemory, size); + } + return pNewMem; +} + +void *loader_instance_tls_heap_alloc(size_t size) { + return loader_instance_heap_alloc(tls_instance, size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); +} + +void loader_instance_tls_heap_free(void *pMemory) { loader_instance_heap_free(tls_instance, pMemory); } + +void *loader_device_heap_alloc(const struct loader_device *device, size_t size, VkSystemAllocationScope alloc_scope) { + void *pMemory = NULL; +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (device && device->alloc_callbacks.pfnAllocation) { + // These are internal structures, so it's best to align everything to + // the largest unit size which is the size of a uint64_t. + pMemory = device->alloc_callbacks.pfnAllocation(device->alloc_callbacks.pUserData, size, sizeof(uint64_t), alloc_scope); + } else { +#endif + pMemory = malloc(size); + } + return pMemory; +} + +void loader_device_heap_free(const struct loader_device *device, void *pMemory) { + if (pMemory != NULL) { +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (device && device->alloc_callbacks.pfnFree) { + device->alloc_callbacks.pfnFree(device->alloc_callbacks.pUserData, pMemory); + } else { +#endif + free(pMemory); + } + } +} + +void *loader_device_heap_realloc(const struct loader_device *device, void *pMemory, size_t orig_size, size_t size, + VkSystemAllocationScope alloc_scope) { + void *pNewMem = NULL; + if (pMemory == NULL || orig_size == 0) { + pNewMem = loader_device_heap_alloc(device, size, alloc_scope); + } else if (size == 0) { + loader_device_heap_free(device, pMemory); +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) +#else + } else if (device && device->alloc_callbacks.pfnReallocation) { + // These are internal structures, so it's best to align everything to + // the largest unit size which is the size of a uint64_t. 
+ pNewMem = device->alloc_callbacks.pfnReallocation(device->alloc_callbacks.pUserData, pMemory, size, sizeof(uint64_t), + alloc_scope); +#endif + } else { + pNewMem = realloc(pMemory, size); + } + return pNewMem; +} + +// Environment variables +#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) + +static inline bool IsHighIntegrity() { + return geteuid() != getuid() || getegid() != getgid(); +} + +static inline char *loader_getenv(const char *name, const struct loader_instance *inst) { + // No allocation of memory necessary for Linux, but we should at least touch + // the inst pointer to get rid of compiler warnings. + (void)inst; + return getenv(name); +} + +static inline char *loader_secure_getenv(const char *name, const struct loader_instance *inst) { + char *out; +#if defined(__APPLE__) + // Apple does not appear to have a secure getenv implementation. + // The main difference between secure getenv and getenv is that secure getenv + // returns NULL if the process is being run with elevated privileges by a normal user. + // The idea is to prevent the reading of malicious environment variables by a process + // that can do damage. + // This algorithm is derived from glibc code that sets an internal + // variable (__libc_enable_secure) if the process is running under setuid or setgid. + return IsHighIntegrity() ? NULL : loader_getenv(name, inst); +#elif defined(__Fuchsia__) + return loader_getenv(name, inst); +#else +// Linux +#if defined(HAVE_SECURE_GETENV) && !defined(USE_UNSAFE_FILE_SEARCH) + (void)inst; + out = secure_getenv(name); +#elif defined(HAVE___SECURE_GETENV) && !defined(USE_UNSAFE_FILE_SEARCH) + (void)inst; + out = __secure_getenv(name); +#else + out = loader_getenv(name, inst); +#if !defined(USE_UNSAFE_FILE_SEARCH) + loader_log(inst, LOADER_INFO_BIT, 0, "Loader is using non-secure environment variable lookup for %s", name); +#endif +#endif + return out; +#endif +} + +static inline void loader_free_getenv(char *val, const struct loader_instance *inst) { + // No freeing of memory necessary for Linux, but we should at least touch + // the val and inst pointers to get rid of compiler warnings. + (void)val; + (void)inst; +} + +#elif defined(WIN32) + +static inline bool IsHighIntegrity() { + HANDLE process_token; + if (OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY | TOKEN_QUERY_SOURCE, &process_token)) { + // Maximum possible size of SID_AND_ATTRIBUTES is maximum size of a SID + size of attributes DWORD. + uint8_t mandatory_label_buffer[SECURITY_MAX_SID_SIZE + sizeof(DWORD)]; + DWORD buffer_size; + if (GetTokenInformation(process_token, TokenIntegrityLevel, mandatory_label_buffer, sizeof(mandatory_label_buffer), + &buffer_size) != 0) { + const TOKEN_MANDATORY_LABEL *mandatory_label = (const TOKEN_MANDATORY_LABEL *)mandatory_label_buffer; + const DWORD sub_authority_count = *GetSidSubAuthorityCount(mandatory_label->Label.Sid); + const DWORD integrity_level = *GetSidSubAuthority(mandatory_label->Label.Sid, sub_authority_count - 1); + + CloseHandle(process_token); + return integrity_level > SECURITY_MANDATORY_MEDIUM_RID; + } + + CloseHandle(process_token); + } + + return false; +} + +static inline char *loader_getenv(const char *name, const struct loader_instance *inst) { + char *retVal; + DWORD valSize; + + valSize = GetEnvironmentVariableA(name, NULL, 0); + + // valSize DOES include the null terminator, so for any set variable + // will always be at least 1. If it's 0, the variable wasn't set. 
+ if (valSize == 0) return NULL; + + // Allocate the space necessary for the registry entry + if (NULL != inst && NULL != inst->alloc_callbacks.pfnAllocation) { + retVal = (char *)inst->alloc_callbacks.pfnAllocation(inst->alloc_callbacks.pUserData, valSize, sizeof(char *), + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + } else { + retVal = (char *)malloc(valSize); + } + + if (NULL != retVal) { + GetEnvironmentVariableA(name, retVal, valSize); + } + + return retVal; +} + +static inline char *loader_secure_getenv(const char *name, const struct loader_instance *inst) { +#if !defined(USE_UNSAFE_FILE_SEARCH) + if (IsHighIntegrity()) { + loader_log(inst, LOADER_INFO_BIT, 0, "Loader is running with elevated permissions. Environment variable %s will be ignored", + name); + return NULL; + } +#endif + + return loader_getenv(name, inst); +} + +static inline void loader_free_getenv(char *val, const struct loader_instance *inst) { + if (NULL != inst && NULL != inst->alloc_callbacks.pfnFree) { + inst->alloc_callbacks.pfnFree(inst->alloc_callbacks.pUserData, val); + } else { + free((void *)val); + } +} + +#else + +static inline char *loader_getenv(const char *name, const struct loader_instance *inst) { + // stub func + (void)inst; + (void)name; + return NULL; +} +static inline void loader_free_getenv(char *val, const struct loader_instance *inst) { + // stub func + (void)val; + (void)inst; +} + +#endif + +void loader_log(const struct loader_instance *inst, VkFlags msg_type, int32_t msg_code, const char *format, ...) { + char msg[512]; + char cmd_line_msg[512]; + size_t cmd_line_size = sizeof(cmd_line_msg); + va_list ap; + int ret; + + va_start(ap, format); + ret = vsnprintf(msg, sizeof(msg), format, ap); + if ((ret >= (int)sizeof(msg)) || ret < 0) { + msg[sizeof(msg) - 1] = '\0'; + } + va_end(ap); + + if (inst) { + VkDebugUtilsMessageSeverityFlagBitsEXT severity = 0; + VkDebugUtilsMessageTypeFlagsEXT type; + VkDebugUtilsMessengerCallbackDataEXT callback_data; + VkDebugUtilsObjectNameInfoEXT object_name; + + if ((msg_type & LOADER_INFO_BIT) != 0) { + severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT; + } else if ((msg_type & LOADER_WARN_BIT) != 0) { + severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT; + } else if ((msg_type & LOADER_ERROR_BIT) != 0) { + severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + } else if ((msg_type & LOADER_DEBUG_BIT) != 0) { + severity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT; + } + + if ((msg_type & LOADER_PERF_BIT) != 0) { + type = VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + } else { + type = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT; + } + + callback_data.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT; + callback_data.pNext = NULL; + callback_data.flags = 0; + callback_data.pMessageIdName = "Loader Message"; + callback_data.messageIdNumber = 0; + callback_data.pMessage = msg; + callback_data.queueLabelCount = 0; + callback_data.pQueueLabels = NULL; + callback_data.cmdBufLabelCount = 0; + callback_data.pCmdBufLabels = NULL; + callback_data.objectCount = 1; + callback_data.pObjects = &object_name; + object_name.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; + object_name.pNext = NULL; + object_name.objectType = VK_OBJECT_TYPE_INSTANCE; + object_name.objectHandle = (uint64_t)(uintptr_t)inst; + object_name.pObjectName = NULL; + + util_SubmitDebugUtilsMessageEXT(inst, severity, type, &callback_data); + } + + if (!(msg_type & g_loader_log_msgs)) { + return; + } + + cmd_line_msg[0] = '\0'; + cmd_line_size -= 1; + 
size_t original_size = cmd_line_size; + + if ((msg_type & LOADER_INFO_BIT) != 0) { + strncat(cmd_line_msg, "INFO", cmd_line_size); + cmd_line_size -= 4; + } + if ((msg_type & LOADER_WARN_BIT) != 0) { + if (cmd_line_size != original_size) { + strncat(cmd_line_msg, " | ", cmd_line_size); + cmd_line_size -= 3; + } + strncat(cmd_line_msg, "WARNING", cmd_line_size); + cmd_line_size -= 7; + } + if ((msg_type & LOADER_PERF_BIT) != 0) { + if (cmd_line_size != original_size) { + strncat(cmd_line_msg, " | ", cmd_line_size); + cmd_line_size -= 3; + } + strncat(cmd_line_msg, "PERF", cmd_line_size); + cmd_line_size -= 4; + } + if ((msg_type & LOADER_ERROR_BIT) != 0) { + if (cmd_line_size != original_size) { + strncat(cmd_line_msg, " | ", cmd_line_size); + cmd_line_size -= 3; + } + strncat(cmd_line_msg, "ERROR", cmd_line_size); + cmd_line_size -= 5; + } + if ((msg_type & LOADER_DEBUG_BIT) != 0) { + if (cmd_line_size != original_size) { + strncat(cmd_line_msg, " | ", cmd_line_size); + cmd_line_size -= 3; + } + strncat(cmd_line_msg, "DEBUG", cmd_line_size); + cmd_line_size -= 5; + } + if (cmd_line_size != original_size) { + strncat(cmd_line_msg, ": ", cmd_line_size); + cmd_line_size -= 2; + } + + if (0 < cmd_line_size) { + // If the message is too long, trim it down + if (strlen(msg) > cmd_line_size) { + msg[cmd_line_size - 1] = '\0'; + } + strncat(cmd_line_msg, msg, cmd_line_size); + } else { + // Shouldn't get here, but check to make sure if we've already overrun + // the string boundary + assert(false); + } + +#if defined(WIN32) + OutputDebugString(cmd_line_msg); + OutputDebugString("\n"); +#endif + + fputs(cmd_line_msg, stderr); + fputc('\n', stderr); +} + +VKAPI_ATTR VkResult VKAPI_CALL vkSetInstanceDispatch(VkInstance instance, void *object) { + struct loader_instance *inst = loader_get_instance(instance); + if (!inst) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkSetInstanceDispatch: Can not retrieve Instance " + "dispatch table."); + return VK_ERROR_INITIALIZATION_FAILED; + } + loader_set_dispatch(object, inst->disp); + return VK_SUCCESS; +} + +VKAPI_ATTR VkResult VKAPI_CALL vkSetDeviceDispatch(VkDevice device, void *object) { + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL); + + if (NULL == icd_term) { + return VK_ERROR_INITIALIZATION_FAILED; + } + loader_set_dispatch(object, &dev->loader_dispatch); + return VK_SUCCESS; +} + +#if defined(_WIN32) + +// Append the JSON path data to the list and allocate/grow the list if it's not large enough. +// Function returns true if filename was appended to reg_data list. +// Caller should free reg_data. +static bool loaderAddJsonEntry(const struct loader_instance *inst, + char **reg_data, // list of JSON files + PDWORD total_size, // size of reg_data + LPCSTR key_name, // key name - used for debug prints - i.e. VulkanDriverName + DWORD key_type, // key data type + LPSTR json_path, // JSON string to add to the list reg_data + DWORD json_size, // size in bytes of json_path + VkResult *result) { + // Check for and ignore duplicates. + if (*reg_data && strstr(*reg_data, json_path)) { + // Success. The json_path is already in the list. 
+ return true; + } + + if (NULL == *reg_data) { + *reg_data = loader_instance_heap_alloc(inst, *total_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == *reg_data) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderAddJsonEntry: Failed to allocate space for registry data for key %s", json_path); + *result = VK_ERROR_OUT_OF_HOST_MEMORY; + return false; + } + *reg_data[0] = '\0'; + } else if (strlen(*reg_data) + json_size + 1 > *total_size) { + void *new_ptr = + loader_instance_heap_realloc(inst, *reg_data, *total_size, *total_size * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderAddJsonEntry: Failed to reallocate space for registry value of size %d for key %s", *total_size * 2, + json_path); + *result = VK_ERROR_OUT_OF_HOST_MEMORY; + return false; + } + *reg_data = new_ptr; + *total_size *= 2; + } + + for (char *curr_filename = json_path; curr_filename[0] != '\0'; curr_filename += strlen(curr_filename) + 1) { + if (strlen(*reg_data) == 0) { + (void)snprintf(*reg_data, json_size + 1, "%s", curr_filename); + } else { + (void)snprintf(*reg_data + strlen(*reg_data), json_size + 2, "%c%s", PATH_SEPARATOR, curr_filename); + } + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "%s: Located json file \"%s\" from PnP registry: %s", __FUNCTION__, + curr_filename, key_name); + + if (key_type == REG_SZ) { + break; + } + } + return true; +} + +// Find the list of registry files (names VulkanDriverName/VulkanDriverNameWow) in hkr. +// +// This function looks for filename in given device handle, filename is then added to return list +// function return true if filename was appended to reg_data list +// If error occurs result is updated with failure reason +bool loaderGetDeviceRegistryEntry(const struct loader_instance *inst, char **reg_data, PDWORD total_size, DEVINST dev_id, + LPCSTR value_name, VkResult *result) { + HKEY hkrKey = INVALID_HANDLE_VALUE; + DWORD requiredSize, data_type; + char *manifest_path = NULL; + bool found = false; + + if (NULL == total_size || NULL == reg_data) { + *result = VK_ERROR_INITIALIZATION_FAILED; + return false; + } + + CONFIGRET status = CM_Open_DevNode_Key(dev_id, KEY_QUERY_VALUE, 0, RegDisposition_OpenExisting, &hkrKey, CM_REGISTRY_SOFTWARE); + if (status != CR_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderGetDeviceRegistryEntry: Failed to open registry key for DeviceID(%d)", dev_id); + *result = VK_ERROR_INITIALIZATION_FAILED; + return false; + } + + // query value + LSTATUS ret = RegQueryValueEx( + hkrKey, + value_name, + NULL, + NULL, + NULL, + &requiredSize); + + if (ret != ERROR_SUCCESS) { + if (ret == ERROR_FILE_NOT_FOUND) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "loaderGetDeviceRegistryEntry: Device ID(%d) Does not contain a value for \"%s\"", dev_id, value_name); + } else { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "loaderGetDeviceRegistryEntry: DeviceID(%d) Failed to obtain %s size", dev_id, value_name); + } + goto out; + } + + manifest_path = loader_instance_heap_alloc(inst, requiredSize, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (manifest_path == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetDeviceRegistryEntry: Failed to allocate space for DriverName."); + *result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + ret = RegQueryValueEx( + hkrKey, + value_name, + NULL, + &data_type, + (BYTE *)manifest_path, + &requiredSize + ); + + if (ret != 
ERROR_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetDeviceRegistryEntry: DeviceID(%d) Failed to obtain %s", dev_id, value_name); + + *result = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + if (data_type != REG_SZ && data_type != REG_MULTI_SZ) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetDeviceRegistryEntry: Invalid %s data type. Expected REG_SZ or REG_MULTI_SZ.", value_name); + *result = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + found = loaderAddJsonEntry(inst, reg_data, total_size, value_name, data_type, manifest_path, requiredSize, result); + +out: + if (manifest_path != NULL) { + loader_instance_heap_free(inst, manifest_path); + } + RegCloseKey(hkrKey); + return found; +} + +// Find the list of registry files (names VulkanDriverName/VulkanDriverNameWow) in hkr. +// +// This function looks for display devices and child software components +// for a list of files which are added to a returned list (function return +// value). +// Function return is a string with a ';' separated list of filenames. +// Function return is NULL if no valid name/value pairs are found in the key, +// or the key is not found. +// +// *reg_data contains a string list of filenames as pointer. +// When done using the returned string list, the caller should free the pointer. +VkResult loaderGetDeviceRegistryFiles(const struct loader_instance *inst, char **reg_data, PDWORD reg_data_size, + LPCSTR value_name) { + static const wchar_t *softwareComponentGUID = L"{5c4c3332-344d-483c-8739-259e934c9cc8}"; + static const wchar_t *displayGUID = L"{4d36e968-e325-11ce-bfc1-08002be10318}"; +#ifdef CM_GETIDLIST_FILTER_PRESENT + const ULONG flags = CM_GETIDLIST_FILTER_CLASS | CM_GETIDLIST_FILTER_PRESENT; +#else + const ULONG flags = 0x300; +#endif + + wchar_t childGuid[MAX_GUID_STRING_LEN + 2]; // +2 for brackets {} + ULONG childGuidSize = sizeof(childGuid); + + DEVINST devID = 0, childID = 0; + wchar_t *pDeviceNames = NULL; + ULONG deviceNamesSize = 0; + VkResult result = VK_SUCCESS; + bool found = false; + + if (NULL == reg_data) { + result = VK_ERROR_INITIALIZATION_FAILED; + return result; + } + + // if after obtaining the DeviceNameSize, new device is added start over + do { + CM_Get_Device_ID_List_SizeW(&deviceNamesSize, displayGUID, flags); + + if (pDeviceNames != NULL) { + loader_instance_heap_free(inst, pDeviceNames); + } + + pDeviceNames = loader_instance_heap_alloc(inst, deviceNamesSize * sizeof(wchar_t), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (pDeviceNames == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetDeviceRegistryFiles: Failed to allocate space for display device names."); + result = VK_ERROR_OUT_OF_HOST_MEMORY; + return result; + } + } while (CM_Get_Device_ID_ListW(displayGUID, pDeviceNames, deviceNamesSize, flags) == CR_BUFFER_SMALL); + + if (pDeviceNames) { + for (wchar_t *deviceName = pDeviceNames; *deviceName; deviceName += wcslen(deviceName) + 1) { + CONFIGRET status = CM_Locate_DevNodeW(&devID, deviceName, CM_LOCATE_DEVNODE_NORMAL); + if (CR_SUCCESS != status) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderGetDeviceRegistryFiles: failed to open DevNode %ls", + deviceName); + continue; + } + ULONG ulStatus, ulProblem; + status = CM_Get_DevNode_Status(&ulStatus, &ulProblem, devID, 0); + + if (CR_SUCCESS != status) + { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderGetDeviceRegistryFiles: failed to probe device status %ls", + deviceName); + continue; + } + if ((ulStatus & DN_HAS_PROBLEM)
&& (ulProblem == CM_PROB_NEED_RESTART || ulProblem == DN_NEED_RESTART)) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "loaderGetDeviceRegistryFiles: device %ls is pending reboot, skipping ...", deviceName); + continue; + } + + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "loaderGetDeviceRegistryFiles: opening device %ls", deviceName); + + if (loaderGetDeviceRegistryEntry(inst, reg_data, reg_data_size, devID, value_name, &result)) { + found = true; + continue; + } + else if (result == VK_ERROR_OUT_OF_HOST_MEMORY) { + break; + } + + status = CM_Get_Child(&childID, devID, 0); + if (status != CR_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "loaderGetDeviceRegistryFiles: unable to open child-device error:%d", status); + continue; + } + + do { + wchar_t buffer[MAX_DEVICE_ID_LEN]; + CM_Get_Device_IDW(childID, buffer, MAX_DEVICE_ID_LEN, 0); + + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "loaderGetDeviceRegistryFiles: Opening child device %d - %ls", childID, buffer); + + status = CM_Get_DevNode_Registry_PropertyW(childID, CM_DRP_CLASSGUID, NULL, &childGuid, &childGuidSize, 0); + if (status != CR_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetDeviceRegistryFiles: unable to obtain GUID for:%d error:%d", childID, status); + + result = VK_ERROR_INITIALIZATION_FAILED; + continue; + } + + if (wcscmp(childGuid, softwareComponentGUID) != 0) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "loaderGetDeviceRegistryFiles: GUID for %d is not SoftwareComponent skipping", childID); + continue; + } + + if (loaderGetDeviceRegistryEntry(inst, reg_data, reg_data_size, childID, value_name, &result)) { + found = true; + break; // check next-display-device + } + + } while (CM_Get_Sibling(&childID, childID, 0) == CR_SUCCESS); + } + + loader_instance_heap_free(inst, pDeviceNames); + } + + if (!found && result != VK_ERROR_OUT_OF_HOST_MEMORY) { + result = VK_ERROR_INITIALIZATION_FAILED; + } + + return result; +} + +static char *loader_get_next_path(char *path); + +// Find the list of registry files (names within a key) in key "location". +// +// This function looks in the registry (hive = DEFAULT_VK_REGISTRY_HIVE) key as +// given in "location" +// for a list or name/values which are added to a returned list (function return +// value). +// The DWORD values within the key must be 0 or they are skipped. +// Function return is a string with a ';' separated list of filenames. +// Function return is NULL if no valid name/value pairs are found in the key, +// or the key is not found. +// +// *reg_data contains a string list of filenames as pointer. +// When done using the returned string list, the caller should free the pointer. +VkResult loaderGetRegistryFiles(const struct loader_instance *inst, char *location, bool use_secondary_hive, char **reg_data, + PDWORD reg_data_size) { + // This list contains all of the allowed ICDs. This allows us to verify that a device is actually present from the vendor + // specified. This does disallow other vendors, but any new driver should use the device-specific registries anyway. 
+ static const struct { + const char *filename; + int vendor_id; + } known_drivers[] = { +#if defined(_WIN64) + { + .filename = "igvk64.json", + .vendor_id = 0x8086, + }, + { + .filename = "nv-vk64.json", + .vendor_id = 0x10de, + }, + { + .filename = "amd-vulkan64.json", + .vendor_id = 0x1002, + }, + { + .filename = "amdvlk64.json", + .vendor_id = 0x1002, + }, +#else + { + .filename = "igvk32.json", + .vendor_id = 0x8086, + }, + { + .filename = "nv-vk32.json", + .vendor_id = 0x10de, + }, + { + .filename = "amd-vulkan32.json", + .vendor_id = 0x1002, + }, + { + .filename = "amdvlk32.json", + .vendor_id = 0x1002, + }, +#endif + }; + + LONG rtn_value; + HKEY hive = DEFAULT_VK_REGISTRY_HIVE, key; + DWORD access_flags; + char name[2048]; + char *loc = location; + char *next; + DWORD name_size = sizeof(name); + DWORD value; + DWORD value_size = sizeof(value); + VkResult result = VK_SUCCESS; + bool found = false; + IDXGIFactory1 *dxgi_factory = NULL; + bool is_driver = !strcmp(location, VK_DRIVERS_INFO_REGISTRY_LOC); + + if (NULL == reg_data) { + result = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + if (is_driver) { + HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory1, (void **)&dxgi_factory); + if (hres != S_OK) { + loader_log( + inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderGetRegistryFiles: Failed to create dxgi factory for ICD registry verification. No ICDs will be added from " + "legacy registry locations"); + goto out; + } + } + + while (*loc) { + next = loader_get_next_path(loc); + access_flags = KEY_QUERY_VALUE; + rtn_value = RegOpenKeyEx(hive, loc, 0, access_flags, &key); + if (ERROR_SUCCESS == rtn_value) { + for (DWORD idx = 0; + (rtn_value = RegEnumValue(key, idx++, name, &name_size, NULL, NULL, (LPBYTE)&value, &value_size)) == ERROR_SUCCESS; + name_size = sizeof(name), value_size = sizeof(value)) { + if (value_size == sizeof(value) && value == 0) { + if (NULL == *reg_data) { + *reg_data = loader_instance_heap_alloc(inst, *reg_data_size, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == *reg_data) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetRegistryFiles: Failed to allocate space for registry data for key %s", name); + RegCloseKey(key); + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + *reg_data[0] = '\0'; + } else if (strlen(*reg_data) + name_size + 1 > *reg_data_size) { + void *new_ptr = loader_instance_heap_realloc(inst, *reg_data, *reg_data_size, *reg_data_size * 2, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + loader_log( + inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetRegistryFiles: Failed to reallocate space for registry value of size %d for key %s", + *reg_data_size * 2, name); + RegCloseKey(key); + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + *reg_data = new_ptr; + *reg_data_size *= 2; + } + + // We've now found a json file. If this is an ICD, we still need to check if there is actually a device + // that matches this ICD + loader_log( + inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Located json file \"%s\" from registry \"%s\\%s\"", name, + hive == DEFAULT_VK_REGISTRY_HIVE ? 
DEFAULT_VK_REGISTRY_HIVE_STR : SECONDARY_VK_REGISTRY_HIVE_STR, location); + if (is_driver) { + int i; + for (i = 0; i < sizeof(known_drivers) / sizeof(known_drivers[0]); ++i) { + if (!strcmp(name + strlen(name) - strlen(known_drivers[i].filename), known_drivers[i].filename)) { + break; + } + } + if (i == sizeof(known_drivers) / sizeof(known_drivers[0])) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "Driver %s is not recognized as a known driver. It will be assumed to be active", name); + } else { + bool found_gpu = false; + for (int j = 0;; ++j) { + IDXGIAdapter1 *adapter; + HRESULT hres = dxgi_factory->lpVtbl->EnumAdapters1(dxgi_factory, j, &adapter); + if (hres == DXGI_ERROR_NOT_FOUND) { + break; + } else if (hres != S_OK) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Failed to enumerate DXGI adapters at index %d. As a result, drivers may be skipped", j); + continue; + } + + DXGI_ADAPTER_DESC1 description; + hres = adapter->lpVtbl->GetDesc1(adapter, &description); + if (hres != S_OK) { + loader_log( + inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "Failed to get DXGI adapter information at index %d. As a result, drivers may be skipped", j); + continue; + } + + if (description.VendorId == known_drivers[i].vendor_id) { + found_gpu = true; + break; + } + } + + if (!found_gpu) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "Dropping driver %s as no corresponding DXGI adapter was found", name); + continue; + } + } + } + + if (strlen(*reg_data) == 0) { + // The list is emtpy. Add the first entry. + (void)snprintf(*reg_data, name_size + 1, "%s", name); + found = true; + } else { + // At this point the reg_data variable contains other JSON paths, likely from the PNP/device section + // of the registry that we want to have precedence over this non-device specific section of the registry. + // To make sure we avoid enumerating old JSON files/drivers that might be present in the non-device specific + // area of the registry when a newer device specific JSON file is present, do a check before adding. + // Find the file name, without path, of the JSON file found in the non-device specific registry location. + // If the same JSON file name is already found in the list, don't add it again. + bool foundDuplicate = false; + char *pLastSlashName = strrchr(name, '\\'); + if (pLastSlashName != NULL) { + char *foundMatch = strstr(*reg_data, pLastSlashName + 1); + if (foundMatch != NULL) { + foundDuplicate = true; + } + } + + if (foundDuplicate == false) { + // Add the new entry to the list. + (void)snprintf(*reg_data + strlen(*reg_data), name_size + 2, "%c%s", PATH_SEPARATOR, name); + found = true; + } else { + loader_log( + inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "Skipping adding of json file \"%s\" from registry \"%s\\%s\" to the list due to duplication", name, + hive == DEFAULT_VK_REGISTRY_HIVE ? 
DEFAULT_VK_REGISTRY_HIVE_STR : SECONDARY_VK_REGISTRY_HIVE_STR, + location); + } + } + } + } + RegCloseKey(key); + } + + // Advance the location - if the next location is in the secondary hive, then reset the locations and advance the hive + if (use_secondary_hive && (hive == DEFAULT_VK_REGISTRY_HIVE) && (*next == '\0')) { + loc = location; + hive = SECONDARY_VK_REGISTRY_HIVE; + } else { + loc = next; + } + } + + if (!found && result != VK_ERROR_OUT_OF_HOST_MEMORY) { + result = VK_ERROR_INITIALIZATION_FAILED; + } + +out: + if (is_driver && dxgi_factory != NULL) { + dxgi_factory->lpVtbl->Release(dxgi_factory); + } + + return result; +} + +#endif // WIN32 + +// Combine path elements, separating each element with the platform-specific +// directory separator, and save the combined string to a destination buffer, +// not exceeding the given length. Path elements are given as variable args, +// with a NULL element terminating the list. +// +// \returns the total length of the combined string, not including an ASCII +// NUL termination character. This length may exceed the available storage: +// in this case, the written string will be truncated to avoid a buffer +// overrun, and the return value will be greater than or equal to the storage +// size. A NULL argument may be provided as the destination buffer in order +// to determine the required string length without actually writing a string. +static size_t loader_platform_combine_path(char *dest, size_t len, ...) { + size_t required_len = 0; + va_list ap; + const char *component; + + va_start(ap, len); + + while ((component = va_arg(ap, const char *))) { + if (required_len > 0) { + // This path element is not the first non-empty element; prepend + // a directory separator if space allows + if (dest && required_len + 1 < len) { + (void)snprintf(dest + required_len, len - required_len, "%c", DIRECTORY_SYMBOL); + } + required_len++; + } + + if (dest && required_len < len) { + strncpy(dest + required_len, component, len - required_len); + } + required_len += strlen(component); + } + + va_end(ap); + + // strncpy(3) won't add a NUL terminating byte in the event of truncation. + if (dest && required_len >= len) { + dest[len - 1] = '\0'; + } + + return required_len; +} + +// Given a string of the three-part form "maj.min.pat", convert it to a Vulkan version number. +static uint32_t loader_make_version(char *vers_str) { + uint32_t major = 0, minor = 0, patch = 0; + char *vers_tok; + + if (!vers_str) { + return 0; + } + + vers_tok = strtok(vers_str, ".\"\n\r"); + if (NULL != vers_tok) { + major = (uint16_t)atoi(vers_tok); + vers_tok = strtok(NULL, ".\"\n\r"); + if (NULL != vers_tok) { + minor = (uint16_t)atoi(vers_tok); + vers_tok = strtok(NULL, ".\"\n\r"); + if (NULL != vers_tok) { + patch = (uint16_t)atoi(vers_tok); + } + } + } + + return VK_MAKE_VERSION(major, minor, patch); +} + +bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2) { + return strcmp(op1->extensionName, op2->extensionName) == 0 ?
true : false; +} + +// Search the given ext_array for an extension matching the given vk_ext_prop +bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count, + const VkExtensionProperties *ext_array) { + for (uint32_t i = 0; i < count; i++) { + if (compare_vk_extension_properties(vk_ext_prop, &ext_array[i])) return true; + } + return false; +} + +// Search the given ext_list for an extension matching the given vk_ext_prop +bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list) { + for (uint32_t i = 0; i < ext_list->count; i++) { + if (compare_vk_extension_properties(&ext_list->list[i], vk_ext_prop)) return true; + } + return false; +} + +// Search the given ext_list for a device extension matching the given ext_prop +bool has_vk_dev_ext_property(const VkExtensionProperties *ext_prop, const struct loader_device_extension_list *ext_list) { + for (uint32_t i = 0; i < ext_list->count; i++) { + if (compare_vk_extension_properties(&ext_list->list[i].props, ext_prop)) return true; + } + return false; +} + +// Get the next unused layer property in the list. Init the property to zero. +static struct loader_layer_properties *loaderGetNextLayerPropertySlot(const struct loader_instance *inst, + struct loader_layer_list *layer_list) { + if (layer_list->capacity == 0) { + layer_list->list = + loader_instance_heap_alloc(inst, sizeof(struct loader_layer_properties) * 64, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (layer_list->list == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderGetNextLayerPropertySlot: Out of memory can " + "not add any layer properties to list"); + return NULL; + } + memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64); + layer_list->capacity = sizeof(struct loader_layer_properties) * 64; + } + + // Ensure enough room to add an entry + if ((layer_list->count + 1) * sizeof(struct loader_layer_properties) > layer_list->capacity) { + void *new_ptr = loader_instance_heap_realloc(inst, layer_list->list, layer_list->capacity, layer_list->capacity * 2, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderGetNextLayerPropertySlot: realloc failed for layer list"); + return NULL; + } + layer_list->list = new_ptr; + memset((uint8_t *)layer_list->list + layer_list->capacity, 0, layer_list->capacity); + layer_list->capacity *= 2; + } + + layer_list->count++; + return &(layer_list->list[layer_list->count - 1]); +} + +// Search the given layer list for a layer property matching the given layer name +static struct loader_layer_properties *loaderFindLayerProperty(const char *name, const struct loader_layer_list *layer_list) { + for (uint32_t i = 0; i < layer_list->count; i++) { + const VkLayerProperties *item = &layer_list->list[i].info; + if (strcmp(name, item->layerName) == 0) return &layer_list->list[i]; + } + return NULL; +} + +// Search the given layer list for a layer matching the given layer name +static bool loaderFindLayerNameInList(const char *name, const struct loader_layer_list *layer_list) { + if (NULL == layer_list) { + return false; + } + if (NULL != loaderFindLayerProperty(name, layer_list)) { + return true; + } + return false; +} + +// Search the given meta-layer's component list for a layer matching the given layer name +static bool loaderFindLayerNameInMetaLayer(const struct loader_instance *inst, const char *layer_name, + struct loader_layer_list *layer_list, 
struct loader_layer_properties *meta_layer_props) { + for (uint32_t comp_layer = 0; comp_layer < meta_layer_props->num_component_layers; comp_layer++) { + if (!strcmp(meta_layer_props->component_layer_names[comp_layer], layer_name)) { + return true; + } + struct loader_layer_properties *comp_layer_props = + loaderFindLayerProperty(meta_layer_props->component_layer_names[comp_layer], layer_list); + if (comp_layer_props->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + return loaderFindLayerNameInMetaLayer(inst, layer_name, layer_list, comp_layer_props); + } + } + return false; +} + +// Search the override layer's blacklist for a layer matching the given layer name +static bool loaderFindLayerNameInBlacklist(const struct loader_instance *inst, const char *layer_name, + struct loader_layer_list *layer_list, struct loader_layer_properties *meta_layer_props) { + for (uint32_t black_layer = 0; black_layer < meta_layer_props->num_blacklist_layers; ++black_layer) { + if (!strcmp(meta_layer_props->blacklist_layer_names[black_layer], layer_name)) { + return true; + } + } + return false; +} + +// Remove all layer properties entries from the list +void loaderDeleteLayerListAndProperties(const struct loader_instance *inst, struct loader_layer_list *layer_list) { + uint32_t i, j, k; + struct loader_device_extension_list *dev_ext_list; + struct loader_dev_ext_props *ext_props; + if (!layer_list) return; + + for (i = 0; i < layer_list->count; i++) { + if (NULL != layer_list->list[i].blacklist_layer_names) { + loader_instance_heap_free(inst, layer_list->list[i].blacklist_layer_names); + layer_list->list[i].blacklist_layer_names = NULL; + } + if (NULL != layer_list->list[i].component_layer_names) { + loader_instance_heap_free(inst, layer_list->list[i].component_layer_names); + layer_list->list[i].component_layer_names = NULL; + } + if (NULL != layer_list->list[i].override_paths) { + loader_instance_heap_free(inst, layer_list->list[i].override_paths); + layer_list->list[i].override_paths = NULL; + } + if (NULL != layer_list->list[i].app_key_paths) { + loader_instance_heap_free(inst, layer_list->list[i].app_key_paths); + layer_list->list[i].app_key_paths = NULL; + } + loader_destroy_generic_list(inst, (struct loader_generic_list *)&layer_list->list[i].instance_extension_list); + dev_ext_list = &layer_list->list[i].device_extension_list; + if (dev_ext_list->capacity > 0 && NULL != dev_ext_list->list) { + for (j = 0; j < dev_ext_list->count; j++) { + ext_props = &dev_ext_list->list[j]; + if (ext_props->entrypoint_count > 0) { + for (k = 0; k < ext_props->entrypoint_count; k++) { + loader_instance_heap_free(inst, ext_props->entrypoints[k]); + } + loader_instance_heap_free(inst, ext_props->entrypoints); + } + } + } + loader_destroy_generic_list(inst, (struct loader_generic_list *)dev_ext_list); + } + layer_list->count = 0; + + if (layer_list->capacity > 0) { + layer_list->capacity = 0; + loader_instance_heap_free(inst, layer_list->list); + } +} + +void loaderRemoveLayerInList(const struct loader_instance *inst, struct loader_layer_list *layer_list, uint32_t layer_to_remove) { + if (layer_list == NULL || layer_to_remove >= layer_list->count) { + return; + } + if (layer_list->list[layer_to_remove].type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + // Delete the component layers + loader_instance_heap_free(inst, layer_list->list[layer_to_remove].component_layer_names); + loader_instance_heap_free(inst, layer_list->list[layer_to_remove].override_paths); + loader_instance_heap_free(inst, 
layer_list->list[layer_to_remove].blacklist_layer_names); + loader_instance_heap_free(inst, layer_list->list[layer_to_remove].app_key_paths); + } + + // Remove the current invalid meta-layer from the layer list. Use memmove since we are + // overlapping the source and destination addresses. + memmove(&layer_list->list[layer_to_remove], &layer_list->list[layer_to_remove + 1], + sizeof(struct loader_layer_properties) * (layer_list->count - 1 - layer_to_remove)); + + // Make sure to clear out the removed layer, in case new layers are added in the previous location + memset(&layer_list->list[layer_list->count - 1], 0, sizeof(struct loader_layer_properties)); + + // Decrement the count (because we now have one less) and decrement the loop index since we need to + // re-check this index. + layer_list->count--; +} + +// Remove all layers in the layer list that are blacklisted by the override layer. +// NOTE: This should only be called if an override layer is found and not expired. +void loaderRemoveLayersInBlacklist(const struct loader_instance *inst, struct loader_layer_list *layer_list) { + struct loader_layer_properties *override_prop = loaderFindLayerProperty(VK_OVERRIDE_LAYER_NAME, layer_list); + if (NULL == override_prop) { + return; + } + + for (int32_t j = 0; j < (int32_t)(layer_list->count); j++) { + struct loader_layer_properties cur_layer_prop = layer_list->list[j]; + const char *cur_layer_name = &cur_layer_prop.info.layerName[0]; + + // Skip the override layer itself. + if (!strcmp(VK_OVERRIDE_LAYER_NAME, cur_layer_name)) { + continue; + } + + // If found in the override layer's blacklist, remove it + if (loaderFindLayerNameInBlacklist(inst, cur_layer_name, layer_list, override_prop)) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "loaderRemoveLayersInBlacklist: Override layer is active and layer %s is in the blacklist" + " inside of it. Removing that layer from current layer list.", + cur_layer_name); + + if (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + // Delete the component layers + loader_instance_heap_free(inst, cur_layer_prop.component_layer_names); + loader_instance_heap_free(inst, cur_layer_prop.override_paths); + // Never need to free the blacklist, since it can only exist in the override layer + } + + // Remove the current invalid meta-layer from the layer list. Use memmove since we are + // overlapping the source and destination addresses. + memmove(&layer_list->list[j], &layer_list->list[j + 1], + sizeof(struct loader_layer_properties) * (layer_list->count - 1 - j)); + + // Decrement the count (because we now have one less) and decrement the loop index since we need to + // re-check this index. + layer_list->count--; + j--; + + // Re-do the query for the override layer + override_prop = loaderFindLayerProperty(VK_OVERRIDE_LAYER_NAME, layer_list); + } + } +} + +// Remove all layers in the layer list that are not found inside any implicit meta-layers. 
+void loaderRemoveLayersNotInImplicitMetaLayers(const struct loader_instance *inst, struct loader_layer_list *layer_list) { + int32_t i; + int32_t j; + int32_t layer_count = (int32_t)(layer_list->count); + + for (i = 0; i < layer_count; i++) { + layer_list->list[i].keep = false; + } + + for (i = 0; i < layer_count; i++) { + struct loader_layer_properties cur_layer_prop = layer_list->list[i]; + + if (0 == (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { + cur_layer_prop.keep = true; + } else { + continue; + } + + if (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + for (j = 0; j < layer_count; j++) { + struct loader_layer_properties layer_to_check = layer_list->list[j]; + + if (i == j) { + continue; + } + + // For all layers found in this meta layer, we want to keep them as well. + if (loaderFindLayerNameInMetaLayer(inst, layer_to_check.info.layerName, layer_list, &cur_layer_prop)) { + cur_layer_prop.keep = true; + } + } + } + } + + // Remove any layers we don't want to keep (Don't use layer_count here as we need it to be + // dynamically updated if we delete a layer property in the list). + for (i = 0; i < (int32_t)(layer_list->count); i++) { + struct loader_layer_properties cur_layer_prop = layer_list->list[i]; + if (!cur_layer_prop.keep) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "loaderRemoveLayersNotInImplicitMetaLayers : Implicit meta-layers are active, and layer %s is not list" + " inside of any. So removing layer from current layer list.", + cur_layer_prop.info.layerName); + + if (cur_layer_prop.type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + // Delete the component layers + loader_instance_heap_free(inst, cur_layer_prop.component_layer_names); + loader_instance_heap_free(inst, cur_layer_prop.override_paths); + } + + // Remove the current invalid meta-layer from the layer list. Use memmove since we are + // overlapping the source and destination addresses. + memmove(&layer_list->list[i], &layer_list->list[i + 1], + sizeof(struct loader_layer_properties) * (layer_list->count - 1 - i)); + + // Decrement the count (because we now have one less) and decrement the loop index since we need to + // re-check this index. 
+ layer_list->count--; + i--; + } + } +} + +static VkResult loader_add_instance_extensions(const struct loader_instance *inst, + const PFN_vkEnumerateInstanceExtensionProperties fp_get_props, const char *lib_name, + struct loader_extension_list *ext_list) { + uint32_t i, count = 0; + VkExtensionProperties *ext_props; + VkResult res = VK_SUCCESS; + + if (!fp_get_props) { + // No EnumerateInstanceExtensionProperties defined + goto out; + } + + res = fp_get_props(NULL, &count, NULL); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_instance_extensions: Error getting Instance " + "extension count from %s", + lib_name); + goto out; + } + + if (count == 0) { + // No ExtensionProperties to report + goto out; + } + + ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); + if (NULL == ext_props) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + res = fp_get_props(NULL, &count, ext_props); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_instance_extensions: Error getting Instance " + "extensions from %s", + lib_name); + goto out; + } + + for (i = 0; i < count; i++) { + char spec_version[64]; + + bool ext_unsupported = wsi_unsupported_instance_extension(&ext_props[i]); + if (!ext_unsupported) { + (void)snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", VK_VERSION_MAJOR(ext_props[i].specVersion), + VK_VERSION_MINOR(ext_props[i].specVersion), VK_VERSION_PATCH(ext_props[i].specVersion)); + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Instance Extension: %s (%s) version %s", ext_props[i].extensionName, + lib_name, spec_version); + + res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_instance_extensions: Failed to add %s " + "to Instance extension list", + lib_name); + goto out; + } + } + } + +out: + return res; +} + +// Initialize ext_list with the physical device extensions. +// The extension properties are passed as inputs in count and ext_props. 
+static VkResult loader_init_device_extensions(const struct loader_instance *inst, struct loader_physical_device_term *phys_dev_term, + uint32_t count, VkExtensionProperties *ext_props, + struct loader_extension_list *ext_list) { + VkResult res; + uint32_t i; + + res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties)); + if (VK_SUCCESS != res) { + return res; + } + + for (i = 0; i < count; i++) { + char spec_version[64]; + (void)snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", VK_VERSION_MAJOR(ext_props[i].specVersion), + VK_VERSION_MINOR(ext_props[i].specVersion), VK_VERSION_PATCH(ext_props[i].specVersion)); + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Device Extension: %s (%s) version %s", ext_props[i].extensionName, + phys_dev_term->this_icd_term->scanned_icd->lib_name, spec_version); + res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); + if (res != VK_SUCCESS) return res; + } + + return VK_SUCCESS; +} + +VkResult loader_add_device_extensions(const struct loader_instance *inst, + PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties, + VkPhysicalDevice physical_device, const char *lib_name, + struct loader_extension_list *ext_list) { + uint32_t i, count; + VkResult res; + VkExtensionProperties *ext_props; + + res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, NULL); + if (res == VK_SUCCESS && count > 0) { + ext_props = loader_stack_alloc(count * sizeof(VkExtensionProperties)); + if (!ext_props) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_device_extensions: Failed to allocate space" + " for device extension properties."); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + res = fpEnumerateDeviceExtensionProperties(physical_device, NULL, &count, ext_props); + if (res != VK_SUCCESS) { + return res; + } + for (i = 0; i < count; i++) { + char spec_version[64]; + (void)snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", VK_VERSION_MAJOR(ext_props[i].specVersion), + VK_VERSION_MINOR(ext_props[i].specVersion), VK_VERSION_PATCH(ext_props[i].specVersion)); + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Device Extension: %s (%s) version %s", ext_props[i].extensionName, + lib_name, spec_version); + res = loader_add_to_ext_list(inst, ext_list, 1, &ext_props[i]); + if (res != VK_SUCCESS) { + return res; + } + } + } else { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_device_extensions: Error getting physical " + "device extension info count from library %s", + lib_name); + return res; + } + + return VK_SUCCESS; +} + +VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size) { + size_t capacity = 32 * element_size; + list_info->count = 0; + list_info->capacity = 0; + list_info->list = loader_instance_heap_alloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (list_info->list == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_init_generic_list: Failed to allocate space " + "for generic list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + memset(list_info->list, 0, capacity); + list_info->capacity = capacity; + return VK_SUCCESS; +} + +void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list) { + loader_instance_heap_free(inst, list->list); + list->count = 0; + list->capacity = 0; +} + +// Append non-duplicate extension properties defined in props to the given ext_list. 
+// Return - Vk_SUCCESS on success +VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list, + uint32_t prop_list_count, const VkExtensionProperties *props) { + uint32_t i; + const VkExtensionProperties *cur_ext; + + if (ext_list->list == NULL || ext_list->capacity == 0) { + VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(VkExtensionProperties)); + if (VK_SUCCESS != res) { + return res; + } + } + + for (i = 0; i < prop_list_count; i++) { + cur_ext = &props[i]; + + // look for duplicates + if (has_vk_extension_property(cur_ext, ext_list)) { + continue; + } + + // add to list at end + // check for enough capacity + if (ext_list->count * sizeof(VkExtensionProperties) >= ext_list->capacity) { + void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (new_ptr == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_to_ext_list: Failed to reallocate " + "space for extension list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + ext_list->list = new_ptr; + + // double capacity + ext_list->capacity *= 2; + } + + memcpy(&ext_list->list[ext_list->count], cur_ext, sizeof(VkExtensionProperties)); + ext_list->count++; + } + return VK_SUCCESS; +} + +// Append one extension property defined in props with entrypoints defined in entries to the given +// ext_list. Do not append if a duplicate. +// Return - Vk_SUCCESS on success +VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list, + const VkExtensionProperties *props, uint32_t entry_count, char **entrys) { + uint32_t idx; + if (ext_list->list == NULL || ext_list->capacity == 0) { + VkResult res = loader_init_generic_list(inst, (struct loader_generic_list *)ext_list, sizeof(struct loader_dev_ext_props)); + if (VK_SUCCESS != res) { + return res; + } + } + + // look for duplicates + if (has_vk_dev_ext_property(props, ext_list)) { + return VK_SUCCESS; + } + + idx = ext_list->count; + // add to list at end + // check for enough capacity + if (idx * sizeof(struct loader_dev_ext_props) >= ext_list->capacity) { + void *new_ptr = loader_instance_heap_realloc(inst, ext_list->list, ext_list->capacity, ext_list->capacity * 2, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_to_dev_ext_list: Failed to reallocate space for device extension list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + ext_list->list = new_ptr; + + // double capacity + ext_list->capacity *= 2; + } + + memcpy(&ext_list->list[idx].props, props, sizeof(*props)); + ext_list->list[idx].entrypoint_count = entry_count; + if (entry_count == 0) { + ext_list->list[idx].entrypoints = NULL; + } else { + ext_list->list[idx].entrypoints = + loader_instance_heap_alloc(inst, sizeof(char *) * entry_count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (ext_list->list[idx].entrypoints == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_to_dev_ext_list: Failed to allocate space " + "for device extension entrypoint list in list %d", + idx); + ext_list->list[idx].entrypoint_count = 0; + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + for (uint32_t i = 0; i < entry_count; i++) { + ext_list->list[idx].entrypoints[i] = + loader_instance_heap_alloc(inst, strlen(entrys[i]) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if 
(ext_list->list[idx].entrypoints[i] == NULL) { + for (uint32_t j = 0; j < i; j++) { + loader_instance_heap_free(inst, ext_list->list[idx].entrypoints[j]); + } + loader_instance_heap_free(inst, ext_list->list[idx].entrypoints); + ext_list->list[idx].entrypoint_count = 0; + ext_list->list[idx].entrypoints = NULL; + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_to_dev_ext_list: Failed to allocate space " + "for device extension entrypoint %d name", + i); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + strcpy(ext_list->list[idx].entrypoints[i], entrys[i]); + } + } + ext_list->count++; + + return VK_SUCCESS; +} + +// Prototypes needed. +bool loaderAddMetaLayer(const struct loader_instance *inst, const struct loader_layer_properties *prop, + struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list, + const struct loader_layer_list *source_list); + +// Manage lists of VkLayerProperties +static bool loaderInitLayerList(const struct loader_instance *inst, struct loader_layer_list *list) { + list->capacity = 32 * sizeof(struct loader_layer_properties); + list->list = loader_instance_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (list->list == NULL) { + return false; + } + memset(list->list, 0, list->capacity); + list->count = 0; + return true; +} + +// Search the given layer list for a list matching the given VkLayerProperties +bool loaderListHasLayerProperty(const VkLayerProperties *vk_layer_prop, const struct loader_layer_list *list) { + for (uint32_t i = 0; i < list->count; i++) { + if (strcmp(vk_layer_prop->layerName, list->list[i].info.layerName) == 0) return true; + } + return false; +} + +void loaderDestroyLayerList(const struct loader_instance *inst, struct loader_device *device, + struct loader_layer_list *layer_list) { + if (device) { + loader_device_heap_free(device, layer_list->list); + } else { + loader_instance_heap_free(inst, layer_list->list); + } + layer_list->count = 0; + layer_list->capacity = 0; +} + +// Append non-duplicate layer properties defined in prop_list to the given layer_info list +VkResult loaderAddLayerPropertiesToList(const struct loader_instance *inst, struct loader_layer_list *list, + uint32_t prop_list_count, const struct loader_layer_properties *props) { + uint32_t i; + struct loader_layer_properties *layer; + + if (list->list == NULL || list->capacity == 0) { + if (!loaderInitLayerList(inst, list)) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } + + if (list->list == NULL) return VK_SUCCESS; + + for (i = 0; i < prop_list_count; i++) { + layer = (struct loader_layer_properties *)&props[i]; + + // Look for duplicates, and skip + if (loaderListHasLayerProperty(&layer->info, list)) { + continue; + } + + // Check for enough capacity + if (((list->count + 1) * sizeof(struct loader_layer_properties)) >= list->capacity) { + size_t new_capacity = list->capacity * 2; + void *new_ptr = + loader_instance_heap_realloc(inst, list->list, list->capacity, new_capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderAddLayerPropertiesToList: Realloc failed for when attempting to add new layer"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + list->list = new_ptr; + list->capacity = new_capacity; + } + + memcpy(&list->list[list->count], layer, sizeof(struct loader_layer_properties)); + list->count++; + } + + return VK_SUCCESS; +} + +// Search the given search_list for any layers in the props list. 
Add these to the +// output layer_list. Don't add duplicates to the output layer_list. +static VkResult loaderAddLayerNamesToList(const struct loader_instance *inst, struct loader_layer_list *output_list, + struct loader_layer_list *expanded_output_list, uint32_t name_count, + const char *const *names, const struct loader_layer_list *source_list) { + struct loader_layer_properties *layer_prop; + VkResult err = VK_SUCCESS; + + for (uint32_t i = 0; i < name_count; i++) { + const char *source_name = names[i]; + layer_prop = loaderFindLayerProperty(source_name, source_list); + if (NULL == layer_prop) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loaderAddLayerNamesToList: Unable to find layer %s", source_name); + err = VK_ERROR_LAYER_NOT_PRESENT; + continue; + } + + // If not a meta-layer, simply add it. + if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { + if (!loaderListHasLayerProperty(&layer_prop->info, output_list)) { + loaderAddLayerPropertiesToList(inst, output_list, 1, layer_prop); + } + if (!loaderListHasLayerProperty(&layer_prop->info, expanded_output_list)) { + loaderAddLayerPropertiesToList(inst, expanded_output_list, 1, layer_prop); + } + } else { + if (!loaderListHasLayerProperty(&layer_prop->info, output_list) || + !loaderListHasLayerProperty(&layer_prop->info, expanded_output_list)) { + loaderAddMetaLayer(inst, layer_prop, output_list, expanded_output_list, source_list); + } + } + } + + return err; +} + +static bool checkExpiration(const struct loader_instance *inst, const struct loader_layer_properties *prop) { + time_t current = time(NULL); + struct tm tm_current = *localtime(&current); + + struct tm tm_expiration = { + .tm_sec = 0, + .tm_min = prop->expiration.minute, + .tm_hour = prop->expiration.hour, + .tm_mday = prop->expiration.day, + .tm_mon = prop->expiration.month - 1, + .tm_year = prop->expiration.year - 1900, + .tm_isdst = tm_current.tm_isdst, + // wday and yday are ignored by mktime + }; + time_t expiration = mktime(&tm_expiration); + + return current < expiration; +} + +// Determine if the provided implicit layer should be enabled by querying the appropriate environment variables. +// For an implicit layer, at least a disable environment variable is required. +bool loaderImplicitLayerIsEnabled(const struct loader_instance *inst, const struct loader_layer_properties *prop) { + bool enable = false; + char *env_value = NULL; + + // If no enable_environment variable is specified, this implicit layer is always enabled by default. + if (prop->enable_env_var.name[0] == 0) { + enable = true; + } else { + // Otherwise, only enable this layer if the enable environment variable is defined + env_value = loader_getenv(prop->enable_env_var.name, inst); + if (env_value && !strcmp(prop->enable_env_var.value, env_value)) { + enable = true; + } + loader_free_getenv(env_value, inst); + } + + // The disable_environment has priority over everything else. If it is defined, the layer is always + // disabled. + env_value = loader_getenv(prop->disable_env_var.name, inst); + if (env_value) { + enable = false; + } + loader_free_getenv(env_value, inst); + + // If this layer has an expiration, check it to determine if this layer has expired.
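The expiration check in checkExpiration above boils down to filling a broken-down local time and letting mktime() normalize and compare it. A minimal standalone sketch of the same comparison, with a hard-coded date in place of the loader's expiration fields (local time and the DST handling are assumed to match the code above):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    // Returns true while the given local-time date is still in the future.
    static bool still_valid(int year, int month, int day, int hour, int minute) {
        time_t now = time(NULL);
        struct tm tm_now = *localtime(&now);
        struct tm tm_exp = {0};
        tm_exp.tm_year = year - 1900;      // tm_year counts from 1900
        tm_exp.tm_mon = month - 1;         // tm_mon is 0-based
        tm_exp.tm_mday = day;
        tm_exp.tm_hour = hour;
        tm_exp.tm_min = minute;
        tm_exp.tm_isdst = tm_now.tm_isdst; // same DST assumption as checkExpiration
        return now < mktime(&tm_exp);
    }

    int main(void) {
        printf("still valid: %s\n", still_valid(2025, 1, 1, 0, 0) ? "yes" : "no");
        return 0;
    }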
+ if (prop->has_expiration) { + enable = checkExpiration(inst, prop); + } + + // Enable this layer if it is included in the override layer + if (inst != NULL && inst->override_layer_present) { + struct loader_layer_properties *override = NULL; + for (uint32_t i = 0; i < inst->instance_layer_list.count; ++i) { + if (strcmp(inst->instance_layer_list.list[i].info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) { + override = &inst->instance_layer_list.list[i]; + break; + } + } + if (override != NULL) { + for (uint32_t i = 0; i < override->num_component_layers; ++i) { + if (strcmp(override->component_layer_names[i], prop->info.layerName) == 0) { + enable = true; + break; + } + } + } + } + + return enable; +} + +// Check the individual implicit layer for the enable/disable environment variable settings. Only add it after +// every check has passed indicating it should be used. +static void loaderAddImplicitLayer(const struct loader_instance *inst, const struct loader_layer_properties *prop, + struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list, + const struct loader_layer_list *source_list) { + bool enable = loaderImplicitLayerIsEnabled(inst, prop); + + // If the implicit layer is supposed to be enable, make sure the layer supports at least the same API version + // that the application is asking (i.e. layer's API >= app's API). If it's not, disable this layer. + if (enable) { + uint16_t layer_api_major_version = VK_VERSION_MAJOR(prop->info.specVersion); + uint16_t layer_api_minor_version = VK_VERSION_MINOR(prop->info.specVersion); + if (inst->app_api_major_version > layer_api_major_version || + (inst->app_api_major_version == layer_api_major_version && inst->app_api_minor_version > layer_api_minor_version)) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "loader_add_implicit_layer: Disabling implicit layer %s for using an old API version %d.%d versus " + "application requested %d.%d", + prop->info.layerName, layer_api_major_version, layer_api_minor_version, inst->app_api_major_version, + inst->app_api_minor_version); + enable = false; + } + } + + if (enable) { + if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { + if (!loaderListHasLayerProperty(&prop->info, target_list)) { + loaderAddLayerPropertiesToList(inst, target_list, 1, prop); + } + if (NULL != expanded_target_list && !loaderListHasLayerProperty(&prop->info, expanded_target_list)) { + loaderAddLayerPropertiesToList(inst, expanded_target_list, 1, prop); + } + } else { + if (!loaderListHasLayerProperty(&prop->info, target_list) || + (NULL != expanded_target_list && !loaderListHasLayerProperty(&prop->info, expanded_target_list))) { + loaderAddMetaLayer(inst, prop, target_list, expanded_target_list, source_list); + } + } + } +} + +// Add the component layers of a meta-layer to the active list of layers +bool loaderAddMetaLayer(const struct loader_instance *inst, const struct loader_layer_properties *prop, + struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list, + const struct loader_layer_list *source_list) { + bool found = true; + + // If the meta-layer isn't present in the unexpanded list, add it. 
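The implicit-layer version gate above compares packed Vulkan version numbers; the arithmetic can be checked in isolation. A small sketch using the same bit layout as VK_MAKE_VERSION / VK_VERSION_MAJOR / VK_VERSION_MINOR (macros redefined locally so the snippet stands alone; the sample values are made up):

    #include <stdint.h>
    #include <stdio.h>

    // Same bit layout as Vulkan's VK_MAKE_VERSION / VK_VERSION_MAJOR / VK_VERSION_MINOR.
    #define MAKE_VERSION(major, minor, patch) \
        ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch)))
    #define VERSION_MAJOR(v) ((uint32_t)(v) >> 22)
    #define VERSION_MINOR(v) (((uint32_t)(v) >> 12) & 0x3ffU)

    int main(void) {
        uint32_t layer_api = MAKE_VERSION(1, 1, 0); // layer manifest api_version
        uint32_t app_major = 1, app_minor = 2;      // what the application asked for

        // Mirrors the "layer API older than app API" condition used to disable implicit layers.
        int too_old = app_major > VERSION_MAJOR(layer_api) ||
                      (app_major == VERSION_MAJOR(layer_api) && app_minor > VERSION_MINOR(layer_api));
        printf("layer %u.%u vs app %u.%u -> %s\n", VERSION_MAJOR(layer_api), VERSION_MINOR(layer_api),
               app_major, app_minor, too_old ? "disabled" : "kept");
        return 0;
    }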
+ if (!loaderListHasLayerProperty(&prop->info, target_list)) { + loaderAddLayerPropertiesToList(inst, target_list, 1, prop); + } + + // We need to add all the individual component layers + for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) { + bool found_comp = false; + const struct loader_layer_properties *search_prop = + loaderFindLayerProperty(prop->component_layer_names[comp_layer], source_list); + if (search_prop != NULL) { + found_comp = true; + + // If the component layer is itself an implicit layer, we need to do the implicit layer enable + // checks + if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { + loaderAddImplicitLayer(inst, search_prop, target_list, expanded_target_list, source_list); + } else { + if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { + found = loaderAddMetaLayer(inst, search_prop, target_list, expanded_target_list, source_list); + } else { + // Otherwise, just make sure it hasn't already been added to either list before we add it + if (!loaderListHasLayerProperty(&search_prop->info, target_list)) { + loaderAddLayerPropertiesToList(inst, target_list, 1, search_prop); + } + if (NULL != expanded_target_list && !loaderListHasLayerProperty(&search_prop->info, expanded_target_list)) { + loaderAddLayerPropertiesToList(inst, expanded_target_list, 1, search_prop); + } + } + } + } + if (!found_comp) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderAddMetaLayer: Failed to find layer name %s component layer " + "%s to activate", + search_prop->info.layerName, prop->component_layer_names[comp_layer]); + found = false; + } + } + + // Add this layer to the overall target list (not the expanded one) + if (found && !loaderListHasLayerProperty(&prop->info, target_list)) { + loaderAddLayerPropertiesToList(inst, target_list, 1, prop); + } + + return found; +} + +// Search the source_list for any layer with a name that matches the given name and a type +// that matches the given type. Add all matching layers to the target_list. +// Do not add if found loader_layer_properties is already on the target_list. +VkResult loaderAddLayerNameToList(const struct loader_instance *inst, const char *name, const enum layer_type_flags type_flags, + const struct loader_layer_list *source_list, struct loader_layer_list *target_list, + struct loader_layer_list *expanded_target_list) { + VkResult res = VK_SUCCESS; + bool found = false; + for (uint32_t i = 0; i < source_list->count; i++) { + struct loader_layer_properties *source_prop = &source_list->list[i]; + if (0 == strcmp(source_prop->info.layerName, name) && (source_prop->type_flags & type_flags) == type_flags) { + // If not a meta-layer, simply add it. 
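Stripped of the loader's bookkeeping, loaderAddMetaLayer above is a recursive expansion with de-duplication: each named component is resolved first (recursing into nested meta-layers), and an entry is appended only if it is not already present. A toy sketch of that shape, using a hypothetical in-memory registry instead of scanned manifests:

    #include <stdio.h>
    #include <string.h>

    // Toy registry: a meta entry lists component names, a plain entry lists none.
    struct entry { const char *name; const char *components[4]; };
    static const struct entry registry[] = {
        {"meta",  {"compA", "compB", NULL}},
        {"compA", {NULL}},
        {"compB", {NULL}},
    };

    static const struct entry *find(const char *name) {
        for (size_t i = 0; i < sizeof(registry) / sizeof(registry[0]); i++)
            if (strcmp(registry[i].name, name) == 0) return &registry[i];
        return NULL;
    }

    // Append name to out if not already present, expanding meta entries first.
    static void add_expanded(const char *name, const char *out[], size_t *count) {
        const struct entry *e = find(name);
        if (e == NULL) return;
        for (size_t i = 0; e->components[i] != NULL; i++)
            add_expanded(e->components[i], out, count);
        for (size_t i = 0; i < *count; i++)
            if (strcmp(out[i], name) == 0) return; // already added
        out[(*count)++] = name;
    }

    int main(void) {
        const char *expanded[8];
        size_t count = 0;
        add_expanded("meta", expanded, &count);
        for (size_t i = 0; i < count; i++) printf("%s\n", expanded[i]); // compA, compB, meta
        return 0;
    }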
+ if (0 == (source_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) { + if (!loaderListHasLayerProperty(&source_prop->info, target_list) && + VK_SUCCESS == loaderAddLayerPropertiesToList(inst, target_list, 1, source_prop)) { + found = true; + } + if (!loaderListHasLayerProperty(&source_prop->info, expanded_target_list) && + VK_SUCCESS == loaderAddLayerPropertiesToList(inst, expanded_target_list, 1, source_prop)) { + found = true; + } + } else { + found = loaderAddMetaLayer(inst, source_prop, target_list, expanded_target_list, source_list); + } + } + } + if (!found) { + if (strcmp(name, "VK_LAYER_LUNARG_standard_validation")) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderAddLayerNameToList: Failed to find layer name %s to activate", name); + } else { + res = VK_ERROR_LAYER_NOT_PRESENT; + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "Layer VK_LAYER_LUNARG_standard_validation has been changed to VK_LAYER_KHRONOS_validation. Please use the " + "new version of the layer."); + } + } + return res; +} + +static VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) { + for (uint32_t i = 0; i < list->count; i++) { + if (strcmp(name, list->list[i].extensionName) == 0) return &list->list[i]; + } + return NULL; +} + +static VkExtensionProperties *get_dev_extension_property(const char *name, const struct loader_device_extension_list *list) { + for (uint32_t i = 0; i < list->count; i++) { + if (strcmp(name, list->list[i].props.extensionName) == 0) return &list->list[i].props; + } + return NULL; +} + +// For Instance extensions implemented within the loader (i.e. DEBUG_REPORT +// the extension must provide two entry points for the loader to use: +// - "trampoline" entry point - this is the address returned by GetProcAddr +// and will always do what's necessary to support a +// global call. +// - "terminator" function - this function will be put at the end of the +// instance chain and will contain the necessary logic +// to call / process the extension for the appropriate +// ICDs that are available. +// There is no generic mechanism for including these functions, the references +// must be placed into the appropriate loader entry points. +// GetInstanceProcAddr: call extension GetInstanceProcAddr to check for GetProcAddr +// requests +// loader_coalesce_extensions(void) - add extension records to the list of global +// extension available to the app. +// instance_disp - add function pointer for terminator function +// to this array. +// The extension itself should be in a separate file that will be linked directly +// with the loader. 
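The aggregation performed by loader_get_icd_loader_instance_extensions below (ICD-reported extensions filtered against the loader's known list, plus extensions implemented in the loader itself) is what an application ultimately observes through the public API. A minimal consumer-side sketch, assuming a Vulkan SDK/runtime is available to compile and link against:

    #include <stdio.h>
    #include <stdlib.h>
    #include <vulkan/vulkan.h>

    int main(void) {
        uint32_t count = 0;
        if (vkEnumerateInstanceExtensionProperties(NULL, &count, NULL) != VK_SUCCESS) return 1;

        VkExtensionProperties *props = malloc(count * sizeof(*props));
        if (props == NULL) return 1;
        if (vkEnumerateInstanceExtensionProperties(NULL, &count, props) != VK_SUCCESS) {
            free(props);
            return 1;
        }
        for (uint32_t i = 0; i < count; i++)
            printf("%s (spec %u)\n", props[i].extensionName, props[i].specVersion);
        free(props);
        return 0;
    }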
+VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, + struct loader_extension_list *inst_exts) { + struct loader_extension_list icd_exts; + VkResult res = VK_SUCCESS; + char *env_value; + bool filter_extensions = true; + + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Build ICD instance extension list"); + + // Check if a user wants to disable the instance extension filtering behavior + env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst); + if (NULL != env_value && atoi(env_value) != 0) { + filter_extensions = false; + } + loader_free_getenv(env_value, inst); + + // traverse scanned icd list adding non-duplicate extensions to the list + for (uint32_t i = 0; i < icd_tramp_list->count; i++) { + res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); + if (VK_SUCCESS != res) { + goto out; + } + res = loader_add_instance_extensions(inst, icd_tramp_list->scanned_list[i].EnumerateInstanceExtensionProperties, + icd_tramp_list->scanned_list[i].lib_name, &icd_exts); + if (VK_SUCCESS == res) { + if (filter_extensions) { + // Remove any extensions not recognized by the loader + for (int32_t j = 0; j < (int32_t)icd_exts.count; j++) { + // See if the extension is in the list of supported extensions + bool found = false; + for (uint32_t k = 0; LOADER_INSTANCE_EXTENSIONS[k] != NULL; k++) { + if (strcmp(icd_exts.list[j].extensionName, LOADER_INSTANCE_EXTENSIONS[k]) == 0) { + found = true; + break; + } + } + + // If it isn't in the list, remove it + if (!found) { + for (uint32_t k = j + 1; k < icd_exts.count; k++) { + icd_exts.list[k - 1] = icd_exts.list[k]; + } + --icd_exts.count; + --j; + } + } + } + + res = loader_add_to_ext_list(inst, inst_exts, icd_exts.count, icd_exts.list); + } + loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts); + if (VK_SUCCESS != res) { + goto out; + } + }; + + // Traverse loader's extensions, adding non-duplicate extensions to the list + debug_utils_AddInstanceExtensions(inst, inst_exts); + +out: + return res; +} + +struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index) { + *found_dev = NULL; + for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { + uint32_t index = 0; + for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term; icd_term = icd_term->next) { + for (struct loader_device *dev = icd_term->logical_device_list; dev; dev = dev->next) + // Value comparison of device prevents object wrapping by layers + if (loader_get_dispatch(dev->icd_device) == loader_get_dispatch(device) || + (dev->chain_device != VK_NULL_HANDLE && + loader_get_dispatch(dev->chain_device) == loader_get_dispatch(device))) { + *found_dev = dev; + if (NULL != icd_index) { + *icd_index = index; + } + return icd_term; + } + index++; + } + } + return NULL; +} + +void loader_destroy_logical_device(const struct loader_instance *inst, struct loader_device *dev, + const VkAllocationCallbacks *pAllocator) { + if (pAllocator) { + dev->alloc_callbacks = *pAllocator; + } + if (NULL != dev->expanded_activated_layer_list.list) { + loaderDeactivateLayers(inst, dev, &dev->expanded_activated_layer_list); + } + if (NULL != dev->app_activated_layer_list.list) { + loaderDestroyLayerList(inst, dev, &dev->app_activated_layer_list); + } + loader_device_heap_free(dev, dev); +} + +struct loader_device *loader_create_logical_device(const struct 
loader_instance *inst, const VkAllocationCallbacks *pAllocator) { + struct loader_device *new_dev; +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator) { + new_dev = (struct loader_device *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(struct loader_device), + sizeof(int *), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); + } else { +#endif + new_dev = (struct loader_device *)malloc(sizeof(struct loader_device)); + } + + if (!new_dev) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_create_logical_device: Failed to alloc struct " + "loader_device"); + return NULL; + } + + memset(new_dev, 0, sizeof(struct loader_device)); + if (pAllocator) { + new_dev->alloc_callbacks = *pAllocator; + } + + return new_dev; +} + +void loader_add_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term, struct loader_device *dev) { + dev->next = icd_term->logical_device_list; + icd_term->logical_device_list = dev; +} + +void loader_remove_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term, + struct loader_device *found_dev, const VkAllocationCallbacks *pAllocator) { + struct loader_device *dev, *prev_dev; + + if (!icd_term || !found_dev) return; + + prev_dev = NULL; + dev = icd_term->logical_device_list; + while (dev && dev != found_dev) { + prev_dev = dev; + dev = dev->next; + } + + if (prev_dev) + prev_dev->next = found_dev->next; + else + icd_term->logical_device_list = found_dev->next; + loader_destroy_logical_device(inst, found_dev, pAllocator); +} + +static void loader_icd_destroy(struct loader_instance *ptr_inst, struct loader_icd_term *icd_term, + const VkAllocationCallbacks *pAllocator) { + ptr_inst->total_icd_count--; + for (struct loader_device *dev = icd_term->logical_device_list; dev;) { + struct loader_device *next_dev = dev->next; + loader_destroy_logical_device(ptr_inst, dev, pAllocator); + dev = next_dev; + } + + loader_instance_heap_free(ptr_inst, icd_term); +} + +static struct loader_icd_term *loader_icd_create(const struct loader_instance *inst) { + struct loader_icd_term *icd_term; + + icd_term = loader_instance_heap_alloc(inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (!icd_term) { + return NULL; + } + + memset(icd_term, 0, sizeof(struct loader_icd_term)); + + return icd_term; +} + +static struct loader_icd_term *loader_icd_add(struct loader_instance *ptr_inst, const struct loader_scanned_icd *scanned_icd) { + struct loader_icd_term *icd_term; + + icd_term = loader_icd_create(ptr_inst); + if (!icd_term) { + return NULL; + } + + icd_term->scanned_icd = scanned_icd; + icd_term->this_instance = ptr_inst; + + // Prepend to the list + icd_term->next = ptr_inst->icd_terms; + ptr_inst->icd_terms = icd_term; + ptr_inst->total_icd_count++; + + return icd_term; +} + +// Determine the ICD interface version to use. 
+// @param icd +// @param pVersion Output parameter indicating which version to use or 0 if +// the negotiation API is not supported by the ICD +// @return bool indicating true if the selected interface version is supported +// by the loader, false indicates the version is not supported +bool loader_get_icd_interface_version(PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version, uint32_t *pVersion) { + if (fp_negotiate_icd_version == NULL) { + // ICD does not support the negotiation API, it supports version 0 or 1 + // calling code must determine if it is version 0 or 1 + *pVersion = 0; + } else { + // ICD supports the negotiation API, so call it with the loader's + // latest version supported + *pVersion = CURRENT_LOADER_ICD_INTERFACE_VERSION; + VkResult result = fp_negotiate_icd_version(pVersion); + + if (result == VK_ERROR_INCOMPATIBLE_DRIVER) { + // ICD no longer supports the loader's latest interface version so + // fail loading the ICD + return false; + } + } + +#if MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION > 0 + if (*pVersion < MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION) { + // Loader no longer supports the ICD's latest interface version so fail + // loading the ICD + return false; + } +#endif + return true; +} + +void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { + if (0 != icd_tramp_list->capacity) { + for (uint32_t i = 0; i < icd_tramp_list->count; i++) { + loader_platform_close_library(icd_tramp_list->scanned_list[i].handle); + loader_instance_heap_free(inst, icd_tramp_list->scanned_list[i].lib_name); + } + loader_instance_heap_free(inst, icd_tramp_list->scanned_list); + icd_tramp_list->capacity = 0; + icd_tramp_list->count = 0; + icd_tramp_list->scanned_list = NULL; + } +} + +static VkResult loader_scanned_icd_init(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { + VkResult err = VK_SUCCESS; + loader_scanned_icd_clear(inst, icd_tramp_list); + icd_tramp_list->capacity = 8 * sizeof(struct loader_scanned_icd); + icd_tramp_list->scanned_list = loader_instance_heap_alloc(inst, icd_tramp_list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == icd_tramp_list->scanned_list) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_init: Realloc failed for layer list when " + "attempting to add new layer"); + err = VK_ERROR_OUT_OF_HOST_MEMORY; + } + return err; +} + +static VkResult loader_scanned_icd_add(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, + const char *filename, uint32_t api_version) { + loader_platform_dl_handle handle; + PFN_vkCreateInstance fp_create_inst; + PFN_vkEnumerateInstanceExtensionProperties fp_get_inst_ext_props; + PFN_vkGetInstanceProcAddr fp_get_proc_addr; + PFN_GetPhysicalDeviceProcAddr fp_get_phys_dev_proc_addr = NULL; + PFN_vkNegotiateLoaderICDInterfaceVersion fp_negotiate_icd_version; +#if defined(VK_USE_PLATFORM_WIN32_KHR) + PFN_vk_icdEnumerateAdapterPhysicalDevices fp_enum_dxgi_adapter_phys_devs = NULL; +#endif + struct loader_scanned_icd *new_scanned_icd; + uint32_t interface_vers; + VkResult res = VK_SUCCESS; + + // TODO implement smarter opening/closing of libraries. 
For now this + // function leaves libraries open and the scanned_icd_clear closes them +#if defined(__Fuchsia__) + handle = loader_platform_open_driver(filename); +#else + handle = loader_platform_open_library(filename); +#endif + if (NULL == handle) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, loader_platform_open_library_error(filename)); + goto out; + } + + // Get and settle on an ICD interface version + fp_negotiate_icd_version = loader_platform_get_proc_address(handle, "vk_icdNegotiateLoaderICDInterfaceVersion"); + + if (!loader_get_icd_interface_version(fp_negotiate_icd_version, &interface_vers)) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: ICD %s doesn't support interface" + " version compatible with loader, skip this ICD.", + filename); + goto out; + } + + fp_get_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetInstanceProcAddr"); + if (NULL == fp_get_proc_addr) { + assert(interface_vers == 0); + // Use deprecated interface from version 0 + fp_get_proc_addr = loader_platform_get_proc_address(handle, "vkGetInstanceProcAddr"); + if (NULL == fp_get_proc_addr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: Attempt to retrieve either " + "\'vkGetInstanceProcAddr\' or " + "\'vk_icdGetInstanceProcAddr\' from ICD %s failed.", + filename); + goto out; + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_scanned_icd_add: Using deprecated ICD " + "interface of \'vkGetInstanceProcAddr\' instead of " + "\'vk_icdGetInstanceProcAddr\' for ICD %s", + filename); + } + fp_create_inst = loader_platform_get_proc_address(handle, "vkCreateInstance"); + if (NULL == fp_create_inst) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: Failed querying " + "\'vkCreateInstance\' via dlsym/loadlibrary for " + "ICD %s", + filename); + goto out; + } + fp_get_inst_ext_props = loader_platform_get_proc_address(handle, "vkEnumerateInstanceExtensionProperties"); + if (NULL == fp_get_inst_ext_props) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: Could not get \'vkEnumerate" + "InstanceExtensionProperties\' via dlsym/loadlibrary " + "for ICD %s", + filename); + goto out; + } + } else { + // Use newer interface version 1 or later + if (interface_vers == 0) { + interface_vers = 1; + } + + fp_create_inst = (PFN_vkCreateInstance)fp_get_proc_addr(NULL, "vkCreateInstance"); + if (NULL == fp_create_inst) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: Could not get " + "\'vkCreateInstance\' via \'vk_icdGetInstanceProcAddr\'" + " for ICD %s", + filename); + goto out; + } + fp_get_inst_ext_props = + (PFN_vkEnumerateInstanceExtensionProperties)fp_get_proc_addr(NULL, "vkEnumerateInstanceExtensionProperties"); + if (NULL == fp_get_inst_ext_props) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: Could not get \'vkEnumerate" + "InstanceExtensionProperties\' via " + "\'vk_icdGetInstanceProcAddr\' for ICD %s", + filename); + goto out; + } + fp_get_phys_dev_proc_addr = loader_platform_get_proc_address(handle, "vk_icdGetPhysicalDeviceProcAddr"); +#if defined(VK_USE_PLATFORM_WIN32_KHR) + if (interface_vers >= 6) { + fp_enum_dxgi_adapter_phys_devs = loader_platform_get_proc_address(handle, "vk_icdEnumerateAdapterPhysicalDevices"); + } +#endif + } + + // check for enough capacity + if ((icd_tramp_list->count * sizeof(struct loader_scanned_icd)) >= icd_tramp_list->capacity) { + void *new_ptr = 
loader_instance_heap_realloc(inst, icd_tramp_list->scanned_list, icd_tramp_list->capacity, + icd_tramp_list->capacity * 2, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_scanned_icd_add: Realloc failed on icd library list for ICD %s", filename); + goto out; + } + icd_tramp_list->scanned_list = new_ptr; + + // double capacity + icd_tramp_list->capacity *= 2; + } + + new_scanned_icd = &(icd_tramp_list->scanned_list[icd_tramp_list->count]); + new_scanned_icd->handle = handle; + new_scanned_icd->api_version = api_version; + new_scanned_icd->GetInstanceProcAddr = fp_get_proc_addr; + new_scanned_icd->GetPhysicalDeviceProcAddr = fp_get_phys_dev_proc_addr; + new_scanned_icd->EnumerateInstanceExtensionProperties = fp_get_inst_ext_props; + new_scanned_icd->CreateInstance = fp_create_inst; +#if defined(VK_USE_PLATFORM_WIN32_KHR) + new_scanned_icd->EnumerateAdapterPhysicalDevices = fp_enum_dxgi_adapter_phys_devs; +#endif + new_scanned_icd->interface_version = interface_vers; + + new_scanned_icd->lib_name = (char *)loader_instance_heap_alloc(inst, strlen(filename) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_scanned_icd->lib_name) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_scanned_icd_add: Out of memory can't add ICD %s", filename); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + strcpy(new_scanned_icd->lib_name, filename); + icd_tramp_list->count++; + +out: + + return res; +} + +static void loader_debug_init(void) { + char *env, *orig; + + if (g_loader_debug > 0) return; + + g_loader_debug = 0; + + // Parse comma-separated debug options + orig = env = loader_getenv("VK_LOADER_DEBUG", NULL); + while (env) { + char *p = strchr(env, ','); + size_t len; + + if (p) + len = p - env; + else + len = strlen(env); + + if (len > 0) { + if (strncmp(env, "all", len) == 0) { + g_loader_debug = ~0u; + g_loader_log_msgs = ~0u; + } else if (strncmp(env, "warn", len) == 0) { + g_loader_debug |= LOADER_WARN_BIT; + g_loader_log_msgs |= VK_DEBUG_REPORT_WARNING_BIT_EXT; + } else if (strncmp(env, "info", len) == 0) { + g_loader_debug |= LOADER_INFO_BIT; + g_loader_log_msgs |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT; + } else if (strncmp(env, "perf", len) == 0) { + g_loader_debug |= LOADER_PERF_BIT; + g_loader_log_msgs |= VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; + } else if (strncmp(env, "error", len) == 0) { + g_loader_debug |= LOADER_ERROR_BIT; + g_loader_log_msgs |= VK_DEBUG_REPORT_ERROR_BIT_EXT; + } else if (strncmp(env, "debug", len) == 0) { + g_loader_debug |= LOADER_DEBUG_BIT; + g_loader_log_msgs |= VK_DEBUG_REPORT_DEBUG_BIT_EXT; + } + } + + if (!p) break; + + env = p + 1; + } + + loader_free_getenv(orig, NULL); +} + +void loader_initialize(void) { + // initialize mutexes + loader_platform_thread_create_mutex(&loader_lock); + loader_platform_thread_create_mutex(&loader_json_lock); + loader_platform_thread_create_mutex(&loader_preload_icd_lock); + // initialize logging + loader_debug_init(); + + // initial cJSON to use alloc callbacks + cJSON_Hooks alloc_fns = { + .malloc_fn = loader_instance_tls_heap_alloc, .free_fn = loader_instance_tls_heap_free, + }; + cJSON_InitHooks(&alloc_fns); + +#if defined(_WIN32) + // This is needed to ensure that newer APIs are available right away + // and not after the first call that has been statically linked + LoadLibrary("gdi32.dll"); + + TCHAR systemPath[MAX_PATH] = ""; + GetSystemDirectory(systemPath, MAX_PATH); + 
StringCchCat(systemPath, MAX_PATH, TEXT("\\dxgi.dll")); + HMODULE dxgi_module = LoadLibrary(systemPath); + fpCreateDXGIFactory1 = dxgi_module == NULL ? NULL : + (PFN_CreateDXGIFactory1)GetProcAddress(dxgi_module, "CreateDXGIFactory1"); +#endif +} + +struct loader_data_files { + uint32_t count; + uint32_t alloc_count; + char **filename_list; +}; + +void loader_release() { + // Guarantee release of the preloaded ICD libraries. This may have already been called in vkDestroyInstance. + loader_unload_preloaded_icds(); + + // release mutexes + loader_platform_thread_delete_mutex(&loader_lock); + loader_platform_thread_delete_mutex(&loader_json_lock); + loader_platform_thread_delete_mutex(&loader_preload_icd_lock); +} + +// Preload the ICD libraries that are likely to be needed so we don't repeatedly load/unload them later +void loader_preload_icds(void) { + loader_platform_thread_lock_mutex(&loader_preload_icd_lock); + + // Already preloaded, skip loading again. + if (scanned_icds.scanned_list != NULL) { + loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); + return; + } + + memset(&scanned_icds, 0, sizeof(scanned_icds)); + VkResult result = loader_icd_scan(NULL, &scanned_icds); + if (result != VK_SUCCESS) { + loader_scanned_icd_clear(NULL, &scanned_icds); + } + loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); +} + +// Release the ICD libraries that were preloaded +void loader_unload_preloaded_icds(void) { + loader_platform_thread_lock_mutex(&loader_preload_icd_lock); + loader_scanned_icd_clear(NULL, &scanned_icds); + loader_platform_thread_unlock_mutex(&loader_preload_icd_lock); +} + +// Get next file or dirname given a string list or registry key path +// +// \returns +// A pointer to first char in the next path. +// The next path (or NULL) in the list is returned in next_path. +// Note: input string is modified in some cases. PASS IN A COPY! +static char *loader_get_next_path(char *path) { + uint32_t len; + char *next; + + if (path == NULL) return NULL; + next = strchr(path, PATH_SEPARATOR); + if (next == NULL) { + len = (uint32_t)strlen(path); + next = path + len; + } else { + *next = '\0'; + next++; + } + + return next; +} + +// Given a path which is absolute or relative, expand the path if relative or +// leave the path unmodified if absolute. The base path to prepend to relative +// paths is given in rel_base. +// +// @return - A string in out_fullpath of the full absolute path +static void loader_expand_path(const char *path, const char *rel_base, size_t out_size, char *out_fullpath) { + if (loader_platform_is_path_absolute(path)) { + // do not prepend a base to an absolute path + rel_base = ""; + } + + loader_platform_combine_path(out_fullpath, out_size, rel_base, path, NULL); +} + +// Given a filename (file) and a list of paths (dir), try to find an existing +// file in the paths. If filename already is a path then no searching in the given paths. +// +// @return - A string in out_fullpath of either the full path or file. 
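loader_get_next_path above tokenizes a separator-delimited search path in place (which is why callers are told to pass a copy), and loader_get_fullpath below walks those entries looking for an existing file. The same idea in standalone form, assuming ':' as the separator (as on Linux) and made-up directory names:

    #include <stdio.h>
    #include <string.h>

    #define PATH_SEPARATOR ':' // ';' on Windows

    // Cuts the current entry at the separator and returns a pointer to the next one.
    static char *next_path(char *path) {
        if (path == NULL) return NULL;
        char *next = strchr(path, PATH_SEPARATOR);
        if (next == NULL) return path + strlen(path);
        *next = '\0';
        return next + 1;
    }

    int main(void) {
        char dirs[] = "/usr/local/share/vulkan/icd.d:/usr/share/vulkan/icd.d"; // modified in place
        for (char *dir = dirs, *nxt; *dir; dir = nxt) {
            nxt = next_path(dir);
            printf("search dir: %s\n", dir);
        }
        return 0;
    }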
+static void loader_get_fullpath(const char *file, const char *dirs, size_t out_size, char *out_fullpath) { + if (!loader_platform_is_path(file) && *dirs) { + char *dirs_copy, *dir, *next_dir; + + dirs_copy = loader_stack_alloc(strlen(dirs) + 1); + strcpy(dirs_copy, dirs); + + // find if file exists after prepending paths in given list + for (dir = dirs_copy; *dir && (next_dir = loader_get_next_path(dir)); dir = next_dir) { + loader_platform_combine_path(out_fullpath, out_size, dir, file, NULL); + if (loader_platform_file_exists(out_fullpath)) { + return; + } + } + } + + (void)snprintf(out_fullpath, out_size, "%s", file); +} + +// Read a JSON file into a buffer. +// +// @return - A pointer to a cJSON object representing the JSON parse tree. +// This returned buffer should be freed by caller. +static VkResult loader_get_json(const struct loader_instance *inst, const char *filename, cJSON **json) { + FILE *file = NULL; + char *json_buf; + size_t len; + VkResult res = VK_SUCCESS; + + if (NULL == json) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_get_json: Received invalid JSON file"); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + *json = NULL; + + file = fopen(filename, "rb"); + if (!file) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_get_json: Failed to open JSON file %s", filename); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + // NOTE: We can't just use fseek(file, 0, SEEK_END) because that isn't guaranteed to be supported on all systems + do { + // We're just seeking the end of the file, so this buffer is never used + char buffer[256]; + (void)!fread(buffer, 1, sizeof(buffer), file); + } while (!feof(file)); + len = ftell(file); + fseek(file, 0, SEEK_SET); + json_buf = (char *)loader_stack_alloc(len + 1); + if (json_buf == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_get_json: Failed to allocate space for " + "JSON file %s buffer of length %d", + filename, len); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + if (fread(json_buf, sizeof(char), len, file) != len) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_get_json: Failed to read JSON file %s.", filename); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + json_buf[len] = '\0'; + + // Can't be a valid json if the string is of length zero + if (len == 0) { + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + // Parse text from file + *json = cJSON_Parse(json_buf); + if (*json == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_get_json: Failed to parse JSON file %s, " + "this is usually because something ran out of " + "memory.", + filename); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + +out: + if (NULL != file) { + fclose(file); + } + + return res; +} + +// Verify that all component layers in a meta-layer are valid. 
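One detail worth keeping in mind for the manifest parsing further below: cJSON_Print returns the JSON-encoded text of an item, so for a string value it includes the surrounding double quotes, which is why the loader repeatedly chops the last character and copies starting at index 1. A small standalone illustration, assuming the standard cJSON API (the loader sources carry their own copy of it):

    #include <stdio.h>
    #include <string.h>
    #include <cjson/cJSON.h> // or the copy shipped alongside the loader

    int main(void) {
        const char *manifest = "{ \"layer\": { \"name\": \"VK_LAYER_demo\", \"api_version\": \"1.2.141\" } }";
        cJSON *root = cJSON_Parse(manifest);
        if (root == NULL) return 1;

        cJSON *layer = cJSON_GetObjectItem(root, "layer");
        cJSON *name = cJSON_GetObjectItem(layer, "name");
        if (name == NULL) { cJSON_Delete(root); return 1; }

        char *printed = cJSON_Print(name);               // JSON-encoded: quotes included
        printf("cJSON_Print: %s\n", printed);            // "VK_LAYER_demo"
        printf("valuestring: %s\n", name->valuestring);  // VK_LAYER_demo

        // The loader's trimming idiom: drop the trailing quote, then copy from index 1.
        printed[strlen(printed) - 1] = '\0';
        printf("trimmed    : %s\n", &printed[1]);

        cJSON_Free(printed);
        cJSON_Delete(root);
        return 0;
    }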
+static bool verifyMetaLayerComponentLayers(const struct loader_instance *inst, struct loader_layer_properties *prop, + struct loader_layer_list *instance_layers) { + bool success = true; + const uint32_t expected_major = VK_VERSION_MAJOR(prop->info.specVersion); + const uint32_t expected_minor = VK_VERSION_MINOR(prop->info.specVersion); + + for (uint32_t comp_layer = 0; comp_layer < prop->num_component_layers; comp_layer++) { + if (!loaderFindLayerNameInList(prop->component_layer_names[comp_layer], instance_layers)) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "verifyMetaLayerComponentLayers: Meta-layer %s can't find component layer %s at index %d." + " Skipping this layer.", + prop->info.layerName, prop->component_layer_names[comp_layer], comp_layer); + } + success = false; + break; + } else { + struct loader_layer_properties *comp_prop = + loaderFindLayerProperty(prop->component_layer_names[comp_layer], instance_layers); + if (comp_prop == NULL) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "verifyMetaLayerComponentLayers: Meta-layer %s can't find property for component layer " + "%s at index %d. Skipping this layer.", + prop->info.layerName, prop->component_layer_names[comp_layer], comp_layer); + } + success = false; + break; + } + + // Check the version of each layer, they need to at least match MAJOR and MINOR + uint32_t cur_major = VK_VERSION_MAJOR(comp_prop->info.specVersion); + uint32_t cur_minor = VK_VERSION_MINOR(comp_prop->info.specVersion); + if (cur_major != expected_major || cur_minor != expected_minor) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "verifyMetaLayerComponentLayers: Meta-layer uses API version %d.%d, but component " + "layer %d uses API version %d.%d. Skipping this layer.", + expected_major, expected_minor, comp_layer, cur_major, cur_minor); + } + success = false; + break; + } + + // Make sure the layer isn't using it's own name + if (!strcmp(prop->info.layerName, prop->component_layer_names[comp_layer])) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "verifyMetaLayerComponentLayers: Meta-layer %s lists itself in its component layer " + "list at index %d. Skipping this layer.", + prop->info.layerName, comp_layer); + } + success = false; + break; + } + if (comp_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "verifyMetaLayerComponentLayers: Adding meta-layer %s which also contains meta-layer %s", + prop->info.layerName, comp_prop->info.layerName); + } + + // Make sure if the layer is using a meta-layer in its component list that we also verify that. + if (!verifyMetaLayerComponentLayers(inst, comp_prop, instance_layers)) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Meta-layer %s component layer %s can not find all component layers." 
+ " Skipping this layer.", + prop->info.layerName, prop->component_layer_names[comp_layer]); + } + success = false; + break; + } + } + + // Add any instance and device extensions from component layers to this layer + // list, so that anyone querying extensions will only need to look at the meta-layer + for (uint32_t ext = 0; ext < comp_prop->instance_extension_list.count; ext++) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "Meta-layer %s component layer %s adding instance extension %s", prop->info.layerName, + prop->component_layer_names[comp_layer], comp_prop->instance_extension_list.list[ext].extensionName); + } + if (!has_vk_extension_property(&comp_prop->instance_extension_list.list[ext], &prop->instance_extension_list)) { + loader_add_to_ext_list(inst, &prop->instance_extension_list, 1, &comp_prop->instance_extension_list.list[ext]); + } + } + + for (uint32_t ext = 0; ext < comp_prop->device_extension_list.count; ext++) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "Meta-layer %s component layer %s adding device extension %s", prop->info.layerName, + prop->component_layer_names[comp_layer], + comp_prop->device_extension_list.list[ext].props.extensionName); + } + if (!has_vk_dev_ext_property(&comp_prop->device_extension_list.list[ext].props, &prop->device_extension_list)) { + loader_add_to_dev_ext_list(inst, &prop->device_extension_list, + &comp_prop->device_extension_list.list[ext].props, 0, NULL); + } + } + } + } + if (success) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Meta-layer %s all %d component layers appear to be valid.", + prop->info.layerName, prop->num_component_layers); + } + return success; +} + +// Verify that all meta-layers in a layer list are valid. +static void VerifyAllMetaLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers, + bool *override_layer_present) { + *override_layer_present = false; + for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) { + struct loader_layer_properties *prop = &instance_layers->list[i]; + + // If this is a meta-layer, make sure it is valid + if ((prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) && !verifyMetaLayerComponentLayers(inst, prop, instance_layers)) { + if (NULL != inst) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "Removing meta-layer %s from instance layer list since it appears invalid.", prop->info.layerName); + } + + // Delete the component layers + loader_instance_heap_free(inst, prop->component_layer_names); + if (prop->blacklist_layer_names != NULL) { + loader_instance_heap_free(inst, prop->blacklist_layer_names); + } + if (prop->override_paths != NULL) { + loader_instance_heap_free(inst, prop->override_paths); + } + + // Remove the current invalid meta-layer from the layer list. Use memmove since we are + // overlapping the source and destination addresses. + memmove(&instance_layers->list[i], &instance_layers->list[i + 1], + sizeof(struct loader_layer_properties) * (instance_layers->count - 1 - i)); + + // Decrement the count (because we now have one less) and decrement the loop index since we need to + // re-check this index. + instance_layers->count--; + i--; + } else if (prop->is_override && loaderImplicitLayerIsEnabled(inst, prop)) { + *override_layer_present = true; + } + } +} + +// If the current working directory matches any app_key_path of the layers, remove all other override layers. 
+// Otherwise if no matching app_key was found, remove all but the global override layer, which has no app_key_path. +static void RemoveAllNonValidOverrideLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers) { + if (instance_layers == NULL) { + return; + } + + char cur_path[MAX_STRING_SIZE]; + char *ret = loader_platform_executable_path(cur_path, sizeof(cur_path)); + if (ret == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "RemoveAllNonValidOverrideLayers: Failed to get executable path and name"); + return; + } + + // Find out if there is an override layer with same the app_key_path as the path to the current executable. + // If more than one is found, remove it and use the first layer + // Remove any layers which aren't global and do not have the same app_key_path as the path to the current executable. + bool found_active_override_layer = false; + int global_layer_index = -1; + for (uint32_t i = 0; i < instance_layers->count; i++) { + struct loader_layer_properties *props = &instance_layers->list[i]; + if (strcmp(props->info.layerName, VK_OVERRIDE_LAYER_NAME) == 0) { + if (props->num_app_key_paths > 0) { // not the global layer + for (uint32_t j = 0; j < props->num_app_key_paths; j++) { + if (strcmp(props->app_key_paths[j], cur_path) == 0) { + if (!found_active_override_layer) { + found_active_override_layer = true; + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "RemoveAllNonValidOverrideLayers: Multiple override layers where the same" + "path in app_keys was found. Using the first layer found"); + + // Remove duplicate active override layers that have the same app_key_path + loaderRemoveLayerInList(inst, instance_layers, i); + i--; + } + } + } + if (!found_active_override_layer) { + // Remove non-global override layers that don't have an app_key that matches cur_path + loaderRemoveLayerInList(inst, instance_layers, i); + i--; + } + } else { + if (global_layer_index == -1) { + global_layer_index = i; + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "RemoveAllNonValidOverrideLayers: Multiple global override layers " + "found. Using the first global layer found"); + loaderRemoveLayerInList(inst, instance_layers, i); + i--; + } + } + } + } + // Remove global layer if layer with same the app_key_path as the path to the current executable is found + if (found_active_override_layer && global_layer_index >= 0) { + loaderRemoveLayerInList(inst, instance_layers, global_layer_index); + } + // Should be at most 1 override layer in the list now. + if (found_active_override_layer) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Using the override layer for app key %s", cur_path); + } else if (global_layer_index >= 0) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Using the global override layer"); + } +} + +// This structure is used to store the json file version +// in a more manageable way. 
+typedef struct { + uint16_t major; + uint16_t minor; + uint16_t patch; +} layer_json_version; + +static inline bool layer_json_supports_pre_instance_tag(const layer_json_version *layer_json) { + // Supported versions started in 1.1.2, so anything newer + return layer_json->major > 1 || layer_json->minor > 1 || (layer_json->minor == 1 && layer_json->patch > 1); +} + +static VkResult loaderReadLayerJson(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, + cJSON *layer_node, layer_json_version version, cJSON *item, cJSON *disable_environment, + bool is_implicit, char *filename) { + char *temp; + char *name, *type, *library_path_str, *api_version; + char *implementation_version, *description; + cJSON *ext_item; + cJSON *library_path; + cJSON *component_layers; + cJSON *override_paths; + cJSON *blacklisted_layers; + VkExtensionProperties ext_prop; + VkResult result = VK_ERROR_INITIALIZATION_FAILED; + struct loader_layer_properties *props = NULL; + int i, j; + +// The following are required in the "layer" object: +// (required) "name" +// (required) "type" +// (required) "library_path" +// (required) "api_version" +// (required) "implementation_version" +// (required) "description" +// (required for implicit layers) "disable_environment" +#define GET_JSON_OBJECT(node, var) \ + { \ + var = cJSON_GetObjectItem(node, #var); \ + if (var == NULL) { \ + layer_node = layer_node->next; \ + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ + "Didn't find required layer object %s in manifest " \ + "JSON file, skipping this layer", \ + #var); \ + goto out; \ + } \ + } +#define GET_JSON_ITEM(node, var) \ + { \ + item = cJSON_GetObjectItem(node, #var); \ + if (item == NULL) { \ + layer_node = layer_node->next; \ + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ + "Didn't find required layer value %s in manifest JSON " \ + "file, skipping this layer", \ + #var); \ + goto out; \ + } \ + temp = cJSON_Print(item); \ + if (temp == NULL) { \ + layer_node = layer_node->next; \ + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ + "Problem accessing layer value %s in manifest JSON " \ + "file, skipping this layer", \ + #var); \ + result = VK_ERROR_OUT_OF_HOST_MEMORY; \ + goto out; \ + } \ + temp[strlen(temp) - 1] = '\0'; \ + var = loader_stack_alloc(strlen(temp) + 1); \ + strcpy(var, &temp[1]); \ + cJSON_Free(temp); \ + } + GET_JSON_ITEM(layer_node, name) + GET_JSON_ITEM(layer_node, type) + GET_JSON_ITEM(layer_node, api_version) + GET_JSON_ITEM(layer_node, implementation_version) + GET_JSON_ITEM(layer_node, description) + + // Add list entry + if (!strcmp(type, "DEVICE")) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Device layers are deprecated skipping this layer"); + layer_node = layer_node->next; + goto out; + } + + // Allow either GLOBAL or INSTANCE type interchangeably to handle + // layers that must work with older loaders + if (!strcmp(type, "INSTANCE") || !strcmp(type, "GLOBAL")) { + if (layer_instance_list == NULL) { + layer_node = layer_node->next; + goto out; + } + props = loaderGetNextLayerPropertySlot(inst, layer_instance_list); + if (NULL == props) { + // Error already triggered in loaderGetNextLayerPropertySlot. + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + props->type_flags = VK_LAYER_TYPE_FLAG_INSTANCE_LAYER; + if (!is_implicit) { + props->type_flags |= VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER; + } + } else { + layer_node = layer_node->next; + goto out; + } + + // Expiration date for override layer. 
Field starte with JSON file 1.1.2 and + // is completely optional. So, no check put in place. + if (!strcmp(name, VK_OVERRIDE_LAYER_NAME)) { + cJSON *expiration; + + if (version.major < 1 && version.minor < 1 && version.patch < 2) { + loader_log( + inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Override layer expiration date not added until version 1.1.2. Please update JSON file version appropriately."); + } + + props->is_override = true; + expiration = cJSON_GetObjectItem(layer_node, "expiration_date"); + if (NULL != expiration) { + char date_copy[32]; + uint8_t cur_item = 0; + + // Get the string for the current item + temp = cJSON_Print(expiration); + if (temp == NULL) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Problem accessing layer value 'expiration_date' in manifest JSON file, skipping this layer"); + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strcpy(date_copy, &temp[1]); + cJSON_Free(temp); + + if (strlen(date_copy) == 16) { + char *cur_start = &date_copy[0]; + char *next_dash = strchr(date_copy, '-'); + if (NULL != next_dash) { + while (cur_item < 5 && strlen(cur_start)) { + if (next_dash != NULL) { + *next_dash = '\0'; + } + switch (cur_item) { + case 0: // Year + props->expiration.year = atoi(cur_start); + break; + case 1: // Month + props->expiration.month = atoi(cur_start); + break; + case 2: // Day + props->expiration.day = atoi(cur_start); + break; + case 3: // Hour + props->expiration.hour = atoi(cur_start); + break; + case 4: // Minute + props->expiration.minute = atoi(cur_start); + props->has_expiration = true; + break; + default: // Ignore + break; + } + if (next_dash != NULL) { + cur_start = next_dash + 1; + next_dash = strchr(cur_start, '-'); + } + cur_item++; + } + } + } + } + } + + // Library path no longer required unless component_layers is also not defined + library_path = cJSON_GetObjectItem(layer_node, "library_path"); + component_layers = cJSON_GetObjectItem(layer_node, "component_layers"); + if (NULL != library_path) { + if (NULL != component_layers) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Indicating meta-layer-specific component_layers, but also " + "defining layer library path. 
Both are not compatible, so " + "skipping this layer"); + goto out; + } + props->num_component_layers = 0; + props->component_layer_names = NULL; + + temp = cJSON_Print(library_path); + if (NULL == temp) { + layer_node = layer_node->next; + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Problem accessing layer value library_path in manifest JSON " + "file, skipping this layer"); + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + library_path_str = loader_stack_alloc(strlen(temp) + 1); + strcpy(library_path_str, &temp[1]); + cJSON_Free(temp); + + char *fullpath = props->lib_name; + char *rel_base; + if (NULL != library_path_str) { + if (loader_platform_is_path(library_path_str)) { + // A relative or absolute path + char *name_copy = loader_stack_alloc(strlen(filename) + 1); + strcpy(name_copy, filename); + rel_base = loader_platform_dirname(name_copy); + loader_expand_path(library_path_str, rel_base, MAX_STRING_SIZE, fullpath); + } else { +// A filename which is assumed in a system directory +#if defined(DEFAULT_VK_LAYERS_PATH) + loader_get_fullpath(library_path_str, DEFAULT_VK_LAYERS_PATH, MAX_STRING_SIZE, fullpath); +#else + loader_get_fullpath(library_path_str, "", MAX_STRING_SIZE, fullpath); +#endif + } + } + } else if (NULL != component_layers) { + if (version.major == 1 && (version.minor < 1 || version.patch < 1)) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Indicating meta-layer-specific component_layers, but using older " + "JSON file version."); + } + int count = cJSON_GetArraySize(component_layers); + props->num_component_layers = count; + + // Allocate buffer for layer names + props->component_layer_names = + loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == props->component_layer_names) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Copy the component layers into the array + for (i = 0; i < count; i++) { + cJSON *comp_layer = cJSON_GetArrayItem(component_layers, i); + if (NULL != comp_layer) { + temp = cJSON_Print(comp_layer); + if (NULL == temp) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strncpy(props->component_layer_names[i], temp + 1, MAX_STRING_SIZE - 1); + props->component_layer_names[i][MAX_STRING_SIZE - 1] = '\0'; + cJSON_Free(temp); + } + } + + // This is now, officially, a meta-layer + props->type_flags |= VK_LAYER_TYPE_FLAG_META_LAYER; + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Encountered meta-layer %s", name); + + // Make sure we set up other things so we head down the correct branches below + library_path_str = NULL; + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Layer missing both library_path and component_layers fields. One or the " + "other MUST be defined. Skipping this layer"); + goto out; + } + + props->num_blacklist_layers = 0; + props->blacklist_layer_names = NULL; + blacklisted_layers = cJSON_GetObjectItem(layer_node, "blacklisted_layers"); + if (blacklisted_layers != NULL) { + if (strcmp(name, VK_OVERRIDE_LAYER_NAME)) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Layer %s contains a blacklist, but a blacklist can only be provided by the override metalayer. 
" + "This blacklist will be ignored.", + name); + } else { + props->num_blacklist_layers = cJSON_GetArraySize(blacklisted_layers); + if (props->num_blacklist_layers > 0) { + // Allocate the blacklist array + props->blacklist_layer_names = loader_instance_heap_alloc( + inst, sizeof(char[MAX_STRING_SIZE]) * props->num_blacklist_layers, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (props->blacklist_layer_names == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Copy the blacklisted layers into the array + for (i = 0; i < (int)props->num_blacklist_layers; ++i) { + cJSON *black_layer = cJSON_GetArrayItem(blacklisted_layers, i); + if (black_layer == NULL) { + continue; + } + temp = cJSON_Print(black_layer); + if (temp == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strncpy(props->blacklist_layer_names[i], temp + 1, MAX_STRING_SIZE - 1); + props->blacklist_layer_names[i][MAX_STRING_SIZE - 1] = '\0'; + cJSON_Free(temp); + } + } + } + } + + override_paths = cJSON_GetObjectItem(layer_node, "override_paths"); + if (NULL != override_paths) { + if (version.major == 1 && (version.minor < 1 || version.patch < 1)) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Indicating meta-layer-specific override paths, but using older " + "JSON file version."); + } + int count = cJSON_GetArraySize(override_paths); + props->num_override_paths = count; + if (count > 0) { + // Allocate buffer for override paths + props->override_paths = + loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * count, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == props->override_paths) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Copy the override paths into the array + for (i = 0; i < count; i++) { + cJSON *override_path = cJSON_GetArrayItem(override_paths, i); + if (NULL != override_path) { + temp = cJSON_Print(override_path); + if (NULL == temp) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strncpy(props->override_paths[i], temp + 1, MAX_STRING_SIZE - 1); + props->override_paths[i][MAX_STRING_SIZE - 1] = '\0'; + cJSON_Free(temp); + } + } + } + } + + if (is_implicit) { + GET_JSON_OBJECT(layer_node, disable_environment) + } +#undef GET_JSON_ITEM +#undef GET_JSON_OBJECT + + strncpy(props->info.layerName, name, sizeof(props->info.layerName)); + props->info.layerName[sizeof(props->info.layerName) - 1] = '\0'; + props->info.specVersion = loader_make_version(api_version); + props->info.implementationVersion = atoi(implementation_version); + strncpy((char *)props->info.description, description, sizeof(props->info.description)); + props->info.description[sizeof(props->info.description) - 1] = '\0'; + if (is_implicit) { + if (!disable_environment || !disable_environment->child) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Didn't find required layer child value disable_environment" + "in manifest JSON file, skipping this layer"); + layer_node = layer_node->next; + goto out; + } + strncpy(props->disable_env_var.name, disable_environment->child->string, sizeof(props->disable_env_var.name)); + props->disable_env_var.name[sizeof(props->disable_env_var.name) - 1] = '\0'; + strncpy(props->disable_env_var.value, disable_environment->child->valuestring, sizeof(props->disable_env_var.value)); + props->disable_env_var.value[sizeof(props->disable_env_var.value) - 1] = '\0'; + } + +// Now get all optional items and objects and put in list: +// functions +// instance_extensions 
+// device_extensions +// enable_environment (implicit layers only) +#define GET_JSON_OBJECT(node, var) \ + { var = cJSON_GetObjectItem(node, #var); } +#define GET_JSON_ITEM(node, var) \ + { \ + item = cJSON_GetObjectItem(node, #var); \ + if (item != NULL) { \ + temp = cJSON_Print(item); \ + if (temp != NULL) { \ + temp[strlen(temp) - 1] = '\0'; \ + var = loader_stack_alloc(strlen(temp) + 1); \ + strcpy(var, &temp[1]); \ + cJSON_Free(temp); \ + } else { \ + result = VK_ERROR_OUT_OF_HOST_MEMORY; \ + goto out; \ + } \ + } \ + } + + cJSON *instance_extensions, *device_extensions, *functions, *enable_environment; + cJSON *entrypoints = NULL; + char *vkGetInstanceProcAddr = NULL; + char *vkGetDeviceProcAddr = NULL; + char *vkNegotiateLoaderLayerInterfaceVersion = NULL; + char *spec_version = NULL; + char **entry_array = NULL; + cJSON *app_keys = NULL; + + // Layer interface functions + // vkGetInstanceProcAddr + // vkGetDeviceProcAddr + // vkNegotiateLoaderLayerInterfaceVersion (starting with JSON file 1.1.0) + GET_JSON_OBJECT(layer_node, functions) + if (functions != NULL) { + if (version.major > 1 || version.minor >= 1) { + GET_JSON_ITEM(functions, vkNegotiateLoaderLayerInterfaceVersion) + if (vkNegotiateLoaderLayerInterfaceVersion != NULL) + strncpy(props->functions.str_negotiate_interface, vkNegotiateLoaderLayerInterfaceVersion, + sizeof(props->functions.str_negotiate_interface)); + props->functions.str_negotiate_interface[sizeof(props->functions.str_negotiate_interface) - 1] = '\0'; + } else { + props->functions.str_negotiate_interface[0] = '\0'; + } + GET_JSON_ITEM(functions, vkGetInstanceProcAddr) + GET_JSON_ITEM(functions, vkGetDeviceProcAddr) + if (vkGetInstanceProcAddr != NULL) { + strncpy(props->functions.str_gipa, vkGetInstanceProcAddr, sizeof(props->functions.str_gipa)); + if (version.major > 1 || version.minor >= 1) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "Layer \"%s\" using deprecated \'vkGetInstanceProcAddr\' tag which was deprecated starting with JSON " + "file version 1.1.0. The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for " + "compatibility reasons it may be desirable to continue using the deprecated tag.", + name); + } + } + props->functions.str_gipa[sizeof(props->functions.str_gipa) - 1] = '\0'; + if (vkGetDeviceProcAddr != NULL) { + strncpy(props->functions.str_gdpa, vkGetDeviceProcAddr, sizeof(props->functions.str_gdpa)); + if (version.major > 1 || version.minor >= 1) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "Layer \"%s\" using deprecated \'vkGetDeviceProcAddr\' tag which was deprecated starting with JSON " + "file version 1.1.0. 
The new vkNegotiateLoaderLayerInterfaceVersion function is preferred, though for " + "compatibility reasons it may be desirable to continue using the deprecated tag.", + name); + } + } + props->functions.str_gdpa[sizeof(props->functions.str_gdpa) - 1] = '\0'; + } + + // instance_extensions + // array of { + // name + // spec_version + // } + GET_JSON_OBJECT(layer_node, instance_extensions) + if (instance_extensions != NULL) { + int count = cJSON_GetArraySize(instance_extensions); + for (i = 0; i < count; i++) { + ext_item = cJSON_GetArrayItem(instance_extensions, i); + GET_JSON_ITEM(ext_item, name) + if (name != NULL) { + strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName)); + ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0'; + } + GET_JSON_ITEM(ext_item, spec_version) + if (NULL != spec_version) { + ext_prop.specVersion = atoi(spec_version); + } else { + ext_prop.specVersion = 0; + } + bool ext_unsupported = wsi_unsupported_instance_extension(&ext_prop); + if (!ext_unsupported) { + loader_add_to_ext_list(inst, &props->instance_extension_list, 1, &ext_prop); + } + } + } + + // device_extensions + // array of { + // name + // spec_version + // entrypoints + // } + GET_JSON_OBJECT(layer_node, device_extensions) + if (device_extensions != NULL) { + int count = cJSON_GetArraySize(device_extensions); + for (i = 0; i < count; i++) { + ext_item = cJSON_GetArrayItem(device_extensions, i); + GET_JSON_ITEM(ext_item, name) + GET_JSON_ITEM(ext_item, spec_version) + if (name != NULL) { + strncpy(ext_prop.extensionName, name, sizeof(ext_prop.extensionName)); + ext_prop.extensionName[sizeof(ext_prop.extensionName) - 1] = '\0'; + } + if (NULL != spec_version) { + ext_prop.specVersion = atoi(spec_version); + } else { + ext_prop.specVersion = 0; + } + // entrypoints = cJSON_GetObjectItem(ext_item, "entrypoints"); + GET_JSON_OBJECT(ext_item, entrypoints) + int entry_count; + if (entrypoints == NULL) { + loader_add_to_dev_ext_list(inst, &props->device_extension_list, &ext_prop, 0, NULL); + continue; + } + entry_count = cJSON_GetArraySize(entrypoints); + if (entry_count) { + entry_array = (char **)loader_stack_alloc(sizeof(char *) * entry_count); + } + for (j = 0; j < entry_count; j++) { + ext_item = cJSON_GetArrayItem(entrypoints, j); + if (ext_item != NULL) { + temp = cJSON_Print(ext_item); + if (NULL == temp) { + entry_array[j] = NULL; + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + entry_array[j] = loader_stack_alloc(strlen(temp) + 1); + strcpy(entry_array[j], &temp[1]); + cJSON_Free(temp); + } + } + loader_add_to_dev_ext_list(inst, &props->device_extension_list, &ext_prop, entry_count, entry_array); + } + } + if (is_implicit) { + GET_JSON_OBJECT(layer_node, enable_environment) + + // enable_environment is optional + if (enable_environment) { + strncpy(props->enable_env_var.name, enable_environment->child->string, sizeof(props->enable_env_var.name)); + props->enable_env_var.name[sizeof(props->enable_env_var.name) - 1] = '\0'; + strncpy(props->enable_env_var.value, enable_environment->child->valuestring, sizeof(props->enable_env_var.value)); + props->enable_env_var.value[sizeof(props->enable_env_var.value) - 1] = '\0'; + } + } + + // Read in the pre-instance stuff + cJSON *pre_instance = cJSON_GetObjectItem(layer_node, "pre_instance_functions"); + if (pre_instance) { + if (!layer_json_supports_pre_instance_tag(&version)) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "Found pre_instance_functions section in layer from 
\"%s\". " + "This section is only valid in manifest version 1.1.2 or later. The section will be ignored", + filename); + } else if (!is_implicit) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Found pre_instance_functions section in explicit layer from " + "\"%s\". This section is only valid in implicit layers. The section will be ignored", + filename); + } else { + cJSON *inst_ext_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceExtensionProperties"); + if (inst_ext_json) { + char *inst_ext_name = cJSON_Print(inst_ext_json); + if (inst_ext_name == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + size_t len = strlen(inst_ext_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_ext_name) - 2; + strncpy(props->pre_instance_functions.enumerate_instance_extension_properties, inst_ext_name + 1, len); + props->pre_instance_functions.enumerate_instance_extension_properties[len] = '\0'; + cJSON_Free(inst_ext_name); + } + + cJSON *inst_layer_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceLayerProperties"); + if (inst_layer_json) { + char *inst_layer_name = cJSON_Print(inst_layer_json); + if (inst_layer_name == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + size_t len = strlen(inst_layer_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_layer_name) - 2; + strncpy(props->pre_instance_functions.enumerate_instance_layer_properties, inst_layer_name + 1, len); + props->pre_instance_functions.enumerate_instance_layer_properties[len] = '\0'; + cJSON_Free(inst_layer_name); + } + + cJSON *inst_version_json = cJSON_GetObjectItem(pre_instance, "vkEnumerateInstanceVersion"); + if (inst_version_json) { + char *inst_version_name = cJSON_Print(inst_version_json); + if (inst_version_json) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + size_t len = strlen(inst_version_name) >= MAX_STRING_SIZE ? MAX_STRING_SIZE - 3 : strlen(inst_version_name) - 2; + strncpy(props->pre_instance_functions.enumerate_instance_version, inst_version_name + 1, len); + props->pre_instance_functions.enumerate_instance_version[len] = '\0'; + cJSON_Free(inst_version_name); + } + } + } + + props->num_app_key_paths = 0; + props->app_key_paths = NULL; + app_keys = cJSON_GetObjectItem(layer_node, "app_keys"); + if (app_keys != NULL) { + if (strcmp(name, VK_OVERRIDE_LAYER_NAME)) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Layer %s contains app_keys, but any app_keys can only be provided by the override metalayer. 
" + "These will be ignored.", + name); + } else { + props->num_app_key_paths = cJSON_GetArraySize(app_keys); + + // Allocate the blacklist array + props->app_key_paths = loader_instance_heap_alloc(inst, sizeof(char[MAX_STRING_SIZE]) * props->num_app_key_paths, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (props->app_key_paths == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Copy the app_key_paths into the array + for (i = 0; i < (int)props->num_app_key_paths; ++i) { + cJSON *app_key_path = cJSON_GetArrayItem(app_keys, i); + if (app_key_path == NULL) { + continue; + } + temp = cJSON_Print(app_key_path); + if (temp == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + temp[strlen(temp) - 1] = '\0'; + strncpy(props->app_key_paths[i], temp + 1, MAX_STRING_SIZE - 1); + props->app_key_paths[i][MAX_STRING_SIZE - 1] = '\0'; + cJSON_Free(temp); + } + } + } + + result = VK_SUCCESS; + +out: +#undef GET_JSON_ITEM +#undef GET_JSON_OBJECT + + if (VK_SUCCESS != result && NULL != props) { + if (NULL != props->blacklist_layer_names) { + loader_instance_heap_free(inst, props->blacklist_layer_names); + } + if (NULL != props->component_layer_names) { + loader_instance_heap_free(inst, props->component_layer_names); + } + if (NULL != props->override_paths) { + loader_instance_heap_free(inst, props->override_paths); + } + if (NULL != props->app_key_paths) { + loader_instance_heap_free(inst, props->app_key_paths); + } + props->num_blacklist_layers = 0; + props->blacklist_layer_names = NULL; + props->num_component_layers = 0; + props->component_layer_names = NULL; + props->num_override_paths = 0; + props->override_paths = NULL; + props->num_app_key_paths = 0; + props->app_key_paths = NULL; + } + + return result; +} + +static inline bool isValidLayerJsonVersion(const layer_json_version *layer_json) { + // Supported versions are: 1.0.0, 1.0.1, and 1.1.0 - 1.1.2. + if ((layer_json->major == 1 && layer_json->minor == 1 && layer_json->patch < 3) || + (layer_json->major == 1 && layer_json->minor == 0 && layer_json->patch < 2)) { + return true; + } + return false; +} + +static inline bool layerJsonSupportsMultipleLayers(const layer_json_version *layer_json) { + // Supported versions started in 1.0.1, so anything newer + if ((layer_json->major > 1 || layer_json->minor > 0 || layer_json->patch > 1)) { + return true; + } + return false; +} + +// Given a cJSON struct (json) of the top level JSON object from layer manifest +// file, add entry to the layer_list. Fill out the layer_properties in this list +// entry from the input cJSON object. +// +// \returns +// void +// layer_list has a new entry and initialized accordingly. +// If the json input object does not have all the required fields no entry +// is added to the list. 
+static VkResult loaderAddLayerProperties(const struct loader_instance *inst, struct loader_layer_list *layer_instance_list, + cJSON *json, bool is_implicit, char *filename) { + // The following fields in the layer manifest file are required: + // - "file_format_version" + // - If more than one "layer" object is used, then the "layers" array is + // required + VkResult result = VK_ERROR_INITIALIZATION_FAILED; + cJSON *item, *layers_node, *layer_node; + layer_json_version json_version = {0, 0, 0}; + char *vers_tok; + cJSON *disable_environment = NULL; + // Make sure the top level json value is an object (cJSON type 6) + if (!json || json->type != 6) { + goto out; + } + item = cJSON_GetObjectItem(json, "file_format_version"); + if (item == NULL) { + goto out; + } + char *file_vers = cJSON_PrintUnformatted(item); + if (NULL == file_vers) { + goto out; + } + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Found manifest file %s, version %s", filename, file_vers); + // Get the major/minor/patch as integers for easier comparison + vers_tok = strtok(file_vers, ".\"\n\r"); + if (NULL != vers_tok) { + json_version.major = (uint16_t)atoi(vers_tok); + vers_tok = strtok(NULL, ".\"\n\r"); + if (NULL != vers_tok) { + json_version.minor = (uint16_t)atoi(vers_tok); + vers_tok = strtok(NULL, ".\"\n\r"); + if (NULL != vers_tok) { + json_version.patch = (uint16_t)atoi(vers_tok); + } + } + } + + if (!isValidLayerJsonVersion(&json_version)) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderAddLayerProperties: %s invalid layer manifest file version %d.%d.%d. May cause errors.", filename, + json_version.major, json_version.minor, json_version.patch); + } + cJSON_Free(file_vers); + + // If "layers" is present, read in the array of layer objects + layers_node = cJSON_GetObjectItem(json, "layers"); + if (layers_node != NULL) { + int numItems = cJSON_GetArraySize(layers_node); + if (!layerJsonSupportsMultipleLayers(&json_version)) { + loader_log( + inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderAddLayerProperties: \'layers\' tag not supported until file version 1.0.1, but %s is reporting version %d.%d.%d", + filename, json_version.major, json_version.minor, json_version.patch); + } + for (int curLayer = 0; curLayer < numItems; curLayer++) { + layer_node = cJSON_GetArrayItem(layers_node, curLayer); + if (layer_node == NULL) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderAddLayerProperties: Can not find 'layers' array element %d object in manifest JSON file %s. " + "Skipping this file", + curLayer, filename); + goto out; + } + result = loaderReadLayerJson(inst, layer_instance_list, layer_node, json_version, item, disable_environment, + is_implicit, filename); + } + } else { + // Otherwise, try to read in individual layers + layer_node = cJSON_GetObjectItem(json, "layer"); + if (layer_node == NULL) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderAddLayerProperties: Can not find 'layer' object in manifest JSON file %s. Skipping this file.", + filename); + goto out; + } + // Loop through all "layer" objects in the file to get a count of them + // first. + uint16_t layer_count = 0; + cJSON *tempNode = layer_node; + do { + tempNode = tempNode->next; + layer_count++; + } while (tempNode != NULL); + + // Throw a warning if we encounter multiple "layer" objects in file + // versions newer than 1.0.0. Having multiple objects with the same + // name at the same level is actually a JSON standard violation. 
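        // For example (illustrative sketch, not a real manifest): a 1.0.0-era file could hold
        //     "layer": { ...first layer... }, "layer": { ...second layer... }
        // at the top level, and the sibling walk above counts those duplicate entries.
        // File versions 1.0.1 and newer should express the same thing as a single
        //     "layers": [ { ...first layer... }, { ...second layer... } ]
        // array, which is why the branch below only warns when the file claims a newer version.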
+ if (layer_count > 1 && layerJsonSupportsMultipleLayers(&json_version)) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderAddLayerProperties: Multiple 'layer' nodes are deprecated starting in file version \"1.0.1\". " + "Please use 'layers' : [] array instead in %s.", + filename); + } else { + do { + result = loaderReadLayerJson(inst, layer_instance_list, layer_node, json_version, item, disable_environment, + is_implicit, filename); + layer_node = layer_node->next; + } while (layer_node != NULL); + } + } + +out: + + return result; +} + +static inline size_t DetermineDataFilePathSize(const char *cur_path, size_t relative_path_size) { + size_t path_size = 0; + + if (NULL != cur_path) { + // For each folder in cur_path, (detected by finding additional + // path separators in the string) we need to add the relative path on + // the end. Plus, leave an additional two slots on the end to add an + // additional directory slash and path separator if needed + path_size += strlen(cur_path) + relative_path_size + 2; + for (const char *x = cur_path; *x; ++x) { + if (*x == PATH_SEPARATOR) { + path_size += relative_path_size + 2; + } + } + } + + return path_size; +} + +static inline void CopyDataFilePath(const char *cur_path, const char *relative_path, size_t relative_path_size, + char **output_path) { + if (NULL != cur_path) { + uint32_t start = 0; + uint32_t stop = 0; + char *cur_write = *output_path; + + while (cur_path[start] != '\0') { + while (cur_path[start] == PATH_SEPARATOR) { + start++; + } + stop = start; + while (cur_path[stop] != PATH_SEPARATOR && cur_path[stop] != '\0') { + stop++; + } + const size_t s = stop - start; + if (s) { + memcpy(cur_write, &cur_path[start], s); + cur_write += s; + + // If last symbol written was not a directory symbol, add it. + if (*(cur_write - 1) != DIRECTORY_SYMBOL) { + *cur_write++ = DIRECTORY_SYMBOL; + } + + if (relative_path_size > 0) { + memcpy(cur_write, relative_path, relative_path_size); + cur_write += relative_path_size; + } + *cur_write++ = PATH_SEPARATOR; + start = stop; + } + } + *output_path = cur_write; + } +} + +// Check to see if there's enough space in the data file list. If not, add some. +static inline VkResult CheckAndAdjustDataFileList(const struct loader_instance *inst, struct loader_data_files *out_files) { + if (out_files->count == 0) { + out_files->filename_list = loader_instance_heap_alloc(inst, 64 * sizeof(char *), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (NULL == out_files->filename_list) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "CheckAndAdjustDataFileList: Failed to allocate space for manifest file name list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + out_files->alloc_count = 64; + } else if (out_files->count == out_files->alloc_count) { + size_t new_size = out_files->alloc_count * sizeof(char *) * 2; + void *new_ptr = loader_instance_heap_realloc(inst, out_files->filename_list, out_files->alloc_count * sizeof(char *), + new_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "CheckAndAdjustDataFileList: Failed to reallocate space for manifest file name list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + out_files->filename_list = new_ptr; + out_files->alloc_count *= 2; + } + + return VK_SUCCESS; +} + +// If the file found is a manifest file name, add it to the out_files manifest list. 
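The filter AddIfManifestFile applies below is just a suffix check on the candidate file name; anything that does not end in ".json" is reported as VK_INCOMPLETE so the caller's directory scan keeps going. A standalone equivalent of that check (helper name invented for this sketch, <string.h> and <stdbool.h> assumed) would be:

// Sketch only: mirrors the ".json" suffix test used by AddIfManifestFile below.
static bool name_has_json_suffix(const char *file_name) {
    size_t name_len = strlen(file_name);
    if (name_len < 5) {
        return false;  // Too short to end in ".json"
    }
    return strncmp(file_name + name_len - 5, ".json", 5) == 0;
}
// e.g. name_has_json_suffix("intel_icd.x86_64.json") -> true
//      name_has_json_suffix("README.md")             -> false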
+static VkResult AddIfManifestFile(const struct loader_instance *inst, const char *file_name, struct loader_data_files *out_files) { + VkResult vk_result = VK_SUCCESS; + + if (NULL == file_name || NULL == out_files) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "AddIfManifestFile: Received NULL pointer"); + vk_result = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + // Look for files ending with ".json" suffix + size_t name_len = strlen(file_name); + const char *name_suffix = file_name + name_len - 5; + if ((name_len < 5) || 0 != strncmp(name_suffix, ".json", 5)) { + // Use incomplete to indicate invalid name, but to keep going. + vk_result = VK_INCOMPLETE; + goto out; + } + + // Check and allocate space in the manifest list if necessary + vk_result = CheckAndAdjustDataFileList(inst, out_files); + if (VK_SUCCESS != vk_result) { + goto out; + } + + out_files->filename_list[out_files->count] = + loader_instance_heap_alloc(inst, strlen(file_name) + 1, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (out_files->filename_list[out_files->count] == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "AddIfManifestFile: Failed to allocate space for manifest file %d list", + out_files->count); + vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + strcpy(out_files->filename_list[out_files->count++], file_name); + +out: + + return vk_result; +} + +static VkResult AddDataFilesInPath(const struct loader_instance *inst, char *search_path, bool is_directory_list, + struct loader_data_files *out_files, bool use_first_found_manifest) { + VkResult vk_result = VK_SUCCESS; + DIR *dir_stream = NULL; + struct dirent *dir_entry; + char *cur_file; + char *next_file; + char *name; + char full_path[2048]; +#ifndef _WIN32 + char temp_path[2048]; +#endif + + // Now, parse the paths + next_file = search_path; + while (NULL != next_file && *next_file != '\0') { + name = NULL; + cur_file = next_file; + next_file = loader_get_next_path(cur_file); + + // Get the next name in the list and verify it's valid + if (is_directory_list) { + dir_stream = opendir(cur_file); + if (NULL == dir_stream) { + continue; + } + while (1) { + dir_entry = readdir(dir_stream); + if (NULL == dir_entry) { + break; + } + + name = &(dir_entry->d_name[0]); + loader_get_fullpath(name, cur_file, sizeof(full_path), full_path); + name = full_path; + + VkResult local_res; + local_res = AddIfManifestFile(inst, name, out_files); + + // Incomplete means this was not a valid data file. + if (local_res == VK_INCOMPLETE) { + continue; + } else if (local_res != VK_SUCCESS) { + vk_result = local_res; + break; + } + } + closedir(dir_stream); + if (vk_result != VK_SUCCESS) { + goto out; + } + } else { +#ifdef _WIN32 + name = cur_file; +#else + // Only Linux has relative paths, make a copy of location so it isn't modified + size_t str_len; + if (NULL != next_file) { + str_len = next_file - cur_file + 1; + } else { + str_len = strlen(cur_file) + 1; + } + if (str_len > sizeof(temp_path)) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "AddDataFilesInPath: Path to %s too long\n", cur_file); + continue; + } + strcpy(temp_path, cur_file); + name = temp_path; +#endif + loader_get_fullpath(cur_file, name, sizeof(full_path), full_path); + name = full_path; + + VkResult local_res; + local_res = AddIfManifestFile(inst, name, out_files); + + // Incomplete means this was not a valid data file. 
+ if (local_res == VK_INCOMPLETE) { + continue; + } else if (local_res != VK_SUCCESS) { + vk_result = local_res; + break; + } + } + if (use_first_found_manifest && out_files->count > 0) { + break; + } + } + +out: + + return vk_result; +} + +// Look for data files in the provided paths, but first check the environment override to determine if we should use that +// instead. +static VkResult ReadDataFilesInSearchPaths(const struct loader_instance *inst, enum loader_data_files_type data_file_type, + const char *env_override, const char *path_override, const char *relative_location, + bool *override_active, struct loader_data_files *out_files) { + VkResult vk_result = VK_SUCCESS; + bool is_directory_list = true; + bool is_icd = (data_file_type == LOADER_DATA_FILE_MANIFEST_ICD); + char *override_env = NULL; + const char *override_path = NULL; + size_t search_path_size = 0; + char *search_path = NULL; + char *cur_path_ptr = NULL; + size_t rel_size = 0; + bool use_first_found_manifest = false; +#ifndef _WIN32 + bool xdgconfig_alloc = true; + bool xdgdata_alloc = true; +#endif + +#ifndef _WIN32 + // Determine how much space is needed to generate the full search path + // for the current manifest files. + char *xdgconfdirs = loader_secure_getenv("XDG_CONFIG_DIRS", inst); + char *xdgdatadirs = loader_secure_getenv("XDG_DATA_DIRS", inst); + char *xdgdatahome = loader_secure_getenv("XDG_DATA_HOME", inst); + char *home = NULL; + char* home_root = NULL; + + if (xdgconfdirs == NULL) { + xdgconfig_alloc = false; + } + if (xdgdatadirs == NULL) { + xdgdata_alloc = false; + } +#if !defined(__Fuchsia__) + if (xdgconfdirs == NULL || xdgconfdirs[0] == '\0') { + xdgconfdirs = FALLBACK_CONFIG_DIRS; + } + if (xdgdatadirs == NULL || xdgdatadirs[0] == '\0') { + xdgdatadirs = FALLBACK_DATA_DIRS; + } +#endif + + // Only use HOME if XDG_DATA_HOME is not present on the system + if (NULL == xdgdatahome) { + home = loader_secure_getenv("HOME", inst); + if (home != NULL) { + home_root = loader_instance_heap_alloc(inst, strlen(home) + 14, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (home_root == NULL) { + vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + strcpy(home_root, home); + strcat(home_root, "/.local/share"); + } + } +#endif + + if (path_override != NULL) { + override_path = path_override; + } else if (env_override != NULL) { +#ifndef _WIN32 + if (geteuid() != getuid() || getegid() != getgid()) { + // Don't allow setuid apps to use the env var: + env_override = NULL; + } else +#endif + { + override_env = loader_secure_getenv(env_override, inst); + + // The ICD override is actually a specific list of filenames, not directories + if (is_icd && NULL != override_env) { + is_directory_list = false; + } + override_path = override_env; + } + } + + // Add two by default for NULL terminator and one path separator on end (just in case) + search_path_size = 2; + + // If there's an override, use that (and the local folder if required) and nothing else + if (NULL != override_path) { + // Local folder and null terminator + search_path_size += strlen(override_path) + 1; + } else if (NULL == relative_location) { + // If there's no override, and no relative location, bail out. This is usually + // the case when we're on Windows and the default path is to use the registry. 
+ goto out; + } else { + // Add the general search folders (with the appropriate relative folder added) + rel_size = strlen(relative_location); + if (rel_size == 0) { + goto out; + } else { +#if defined(__APPLE__) + search_path_size += MAXPATHLEN; +#endif +#ifndef _WIN32 + search_path_size += DetermineDataFilePathSize(xdgconfdirs, rel_size); + search_path_size += DetermineDataFilePathSize(xdgdatadirs, rel_size); + search_path_size += DetermineDataFilePathSize(SYSCONFDIR, rel_size); +#if defined(EXTRASYSCONFDIR) + search_path_size += DetermineDataFilePathSize(EXTRASYSCONFDIR, rel_size); +#endif + if (is_directory_list) { + if (!IsHighIntegrity()) { + search_path_size += DetermineDataFilePathSize(xdgdatahome, rel_size); + search_path_size += DetermineDataFilePathSize(home_root, rel_size); + } + } +#endif + } + } + + // Allocate the required space + search_path = loader_instance_heap_alloc(inst, search_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (NULL == search_path) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ReadDataFilesInSearchPaths: Failed to allocate space for search path of length %d", (uint32_t)search_path_size); + vk_result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + cur_path_ptr = search_path; + + // Add the remaining paths to the list + if (NULL != override_path) { + strcpy(cur_path_ptr, override_path); + } else { +#ifndef _WIN32 + if (rel_size > 0) { +#if defined(__APPLE__) + // Add the bundle's Resources dir to the beginning of the search path. + // Looks for manifests in the bundle first, before any system directories. + CFBundleRef main_bundle = CFBundleGetMainBundle(); + if (NULL != main_bundle) { + CFURLRef ref = CFBundleCopyResourcesDirectoryURL(main_bundle); + if (NULL != ref) { + if (CFURLGetFileSystemRepresentation(ref, TRUE, (UInt8 *)cur_path_ptr, search_path_size)) { + cur_path_ptr += strlen(cur_path_ptr); + *cur_path_ptr++ = DIRECTORY_SYMBOL; + memcpy(cur_path_ptr, relative_location, rel_size); + cur_path_ptr += rel_size; + *cur_path_ptr++ = PATH_SEPARATOR; + // only for ICD manifests + if (env_override != NULL && strcmp(VK_ICD_FILENAMES_ENV_VAR, env_override) == 0) { + use_first_found_manifest = true; + } + } + CFRelease(ref); + } + } +#endif + CopyDataFilePath(xdgconfdirs, relative_location, rel_size, &cur_path_ptr); + CopyDataFilePath(SYSCONFDIR, relative_location, rel_size, &cur_path_ptr); +#if defined(EXTRASYSCONFDIR) + CopyDataFilePath(EXTRASYSCONFDIR, relative_location, rel_size, &cur_path_ptr); +#endif + CopyDataFilePath(xdgdatadirs, relative_location, rel_size, &cur_path_ptr); + if (is_directory_list) { + CopyDataFilePath(xdgdatahome, relative_location, rel_size, &cur_path_ptr); + CopyDataFilePath(home_root, relative_location, rel_size, &cur_path_ptr); + } + } + + // Remove the last path separator + --cur_path_ptr; + + assert(cur_path_ptr - search_path < (ptrdiff_t)search_path_size); + *cur_path_ptr = '\0'; +#endif + } + + // Remove duplicate paths, or it would result in duplicate extensions, duplicate devices, etc. + // This uses minimal memory, but is O(N^2) on the number of paths. Expect only a few paths. 
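    // For example (illustrative Linux paths only): an assembled search path of
    //     /etc/vulkan/icd.d:/usr/share/vulkan/icd.d:/etc/vulkan/icd.d
    // is reduced by the loop below to
    //     /etc/vulkan/icd.d:/usr/share/vulkan/icd.d
    // with the repeated /etc entry dropped; empty entries produced by consecutive
    // separators are erased as well.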
+ char path_sep_str[2] = {PATH_SEPARATOR, '\0'}; + size_t search_path_updated_size = strlen(search_path); + for (size_t first = 0; first < search_path_updated_size;) { + // If this is an empty path, erase it + if (search_path[first] == PATH_SEPARATOR) { + memmove(&search_path[first], &search_path[first + 1], search_path_updated_size - first + 1); + search_path_updated_size -= 1; + continue; + } + + size_t first_end = first + 1; + first_end += strcspn(&search_path[first_end], path_sep_str); + for (size_t second = first_end + 1; second < search_path_updated_size;) { + size_t second_end = second + 1; + second_end += strcspn(&search_path[second_end], path_sep_str); + if (first_end - first == second_end - second && + !strncmp(&search_path[first], &search_path[second], second_end - second)) { + // Found duplicate. Include PATH_SEPARATOR in second_end, then erase it from search_path. + if (search_path[second_end] == PATH_SEPARATOR) { + second_end++; + } + memmove(&search_path[second], &search_path[second_end], search_path_updated_size - second_end + 1); + search_path_updated_size -= second_end - second; + } else { + second = second_end + 1; + } + } + first = first_end + 1; + } + search_path_size = search_path_updated_size; + + // Print out the paths being searched if debugging is enabled + if (search_path_size > 0) { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "ReadDataFilesInSearchPaths: Searching the following paths for manifest files: %s\n", search_path); + } + + // Now, parse the paths and add any manifest files found in them. + vk_result = AddDataFilesInPath(inst, search_path, is_directory_list, out_files, use_first_found_manifest); + + if (NULL != override_path) { + *override_active = true; + } else { + *override_active = false; + } + +out: + + if (NULL != override_env) { + loader_free_getenv(override_env, inst); + } +#ifndef _WIN32 + if (xdgconfig_alloc) { + loader_free_getenv(xdgconfdirs, inst); + } + if (xdgdata_alloc) { + loader_free_getenv(xdgdatadirs, inst); + } + if (NULL != xdgdatahome) { + loader_free_getenv(xdgdatahome, inst); + } + if (NULL != home) { + loader_free_getenv(home, inst); + } + if (NULL != home_root) { + loader_instance_heap_free(inst, home_root); + } +#endif + + if (NULL != search_path) { + loader_instance_heap_free(inst, search_path); + } + + return vk_result; +} + +#ifdef _WIN32 +// Read manifest JSON files using the Windows driver interface +static VkResult ReadManifestsFromD3DAdapters(const struct loader_instance *inst, char **reg_data, PDWORD reg_data_size, + const wchar_t *value_name) { + VkResult result = VK_INCOMPLETE; + LoaderEnumAdapters2 adapters = {.adapter_count = 0, .adapters = NULL}; + LoaderQueryRegistryInfo *full_info = NULL; + size_t full_info_size = 0; + char *json_path = NULL; + size_t json_path_size = 0; + + PFN_LoaderEnumAdapters2 fpLoaderEnumAdapters2 = + (PFN_LoaderEnumAdapters2)GetProcAddress(GetModuleHandle("gdi32.dll"), "D3DKMTEnumAdapters2"); + PFN_LoaderQueryAdapterInfo fpLoaderQueryAdapterInfo = + (PFN_LoaderQueryAdapterInfo)GetProcAddress(GetModuleHandle("gdi32.dll"), "D3DKMTQueryAdapterInfo"); + if (fpLoaderEnumAdapters2 == NULL || fpLoaderQueryAdapterInfo == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Get all of the adapters + NTSTATUS status = fpLoaderEnumAdapters2(&adapters); + if (status == STATUS_SUCCESS && adapters.adapter_count > 0) { + adapters.adapters = loader_instance_heap_alloc(inst, sizeof(*adapters.adapters) * adapters.adapter_count, + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if 
(adapters.adapters == NULL) { + goto out; + } + status = fpLoaderEnumAdapters2(&adapters); + } + if (status != STATUS_SUCCESS) { + goto out; + } + + // If that worked, we need to get the manifest file(s) for each adapter + for (ULONG i = 0; i < adapters.adapter_count; ++i) { + // The first query should just check if the field exists and how big it is + LoaderQueryRegistryInfo filename_info = { + .query_type = LOADER_QUERY_REGISTRY_ADAPTER_KEY, + .query_flags = + { + .translate_path = true, + }, + .value_type = REG_MULTI_SZ, + .physical_adapter_index = 0, + }; + wcsncpy(filename_info.value_name, value_name, sizeof(filename_info.value_name) / sizeof(WCHAR)); + LoaderQueryAdapterInfo query_info = { + .handle = adapters.adapters[i].handle, + .type = LOADER_QUERY_TYPE_REGISTRY, + .private_data = &filename_info, + .private_data_size = sizeof(filename_info), + }; + status = fpLoaderQueryAdapterInfo(&query_info); + + // This error indicates that the type didn't match, so we'll try a REG_SZ + if (status != STATUS_SUCCESS) { + filename_info.value_type = REG_SZ; + status = fpLoaderQueryAdapterInfo(&query_info); + } + + if (status != STATUS_SUCCESS || filename_info.status != LOADER_QUERY_REGISTRY_STATUS_BUFFER_OVERFLOW) { + continue; + } + + while (status == STATUS_SUCCESS && + ((LoaderQueryRegistryInfo *)query_info.private_data)->status == LOADER_QUERY_REGISTRY_STATUS_BUFFER_OVERFLOW) { + bool needs_copy = (full_info == NULL); + size_t full_size = sizeof(LoaderQueryRegistryInfo) + filename_info.output_value_size; + void *buffer = + loader_instance_heap_realloc(inst, full_info, full_info_size, full_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (buffer == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + full_info = buffer; + full_info_size = full_size; + + if (needs_copy) { + memcpy(full_info, &filename_info, sizeof(LoaderQueryRegistryInfo)); + } + query_info.private_data = full_info; + query_info.private_data_size = (UINT)full_info_size; + status = fpLoaderQueryAdapterInfo(&query_info); + } + + if (status != STATUS_SUCCESS || full_info->status != LOADER_QUERY_REGISTRY_STATUS_SUCCESS) { + goto out; + } + + // Convert the wide string to a narrow string + void *buffer = loader_instance_heap_realloc(inst, json_path, json_path_size, full_info->output_value_size, + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (buffer == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + json_path = buffer; + json_path_size = full_info->output_value_size; + + // Iterate over each component string + for (const wchar_t *curr_path = full_info->output_string; curr_path[0] != '\0'; curr_path += wcslen(curr_path) + 1) { + WideCharToMultiByte(CP_UTF8, 0, curr_path, -1, json_path, (int)json_path_size, NULL, NULL); + + // Add the string to the output list + result = VK_SUCCESS; + loaderAddJsonEntry(inst, reg_data, reg_data_size, (LPCTSTR)L"EnumAdapters", REG_SZ, json_path, + (DWORD)strlen(json_path) + 1, &result); + if (result != VK_SUCCESS) { + goto out; + } + + // If this is a string and not a multi-string, we don't want to go throught the loop more than once + if (full_info->value_type == REG_SZ) { + break; + } + } + } + +out: + if (json_path != NULL) { + loader_instance_heap_free(inst, json_path); + } + if (full_info != NULL) { + loader_instance_heap_free(inst, full_info); + } + if (adapters.adapters != NULL) { + loader_instance_heap_free(inst, adapters.adapters); + } + + return result; +} + +// Look for data files in the registry. 
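// (Illustrative layout, not exhaustive.) The non-device-specific lookup below reads
// values under registry keys of the form
//     HKEY_LOCAL_MACHINE\SOFTWARE\Khronos\Vulkan\Drivers
//     HKEY_LOCAL_MACHINE\SOFTWARE\Khronos\Vulkan\ExplicitLayers
//     HKEY_LOCAL_MACHINE\SOFTWARE\Khronos\Vulkan\ImplicitLayers
// where each value name is the full path to a manifest JSON file and a DWORD data
// value of 0 marks it enabled, e.g.
//     C:\SomeSDK\Bin\VkLayer_example.json  (REG_DWORD) 0    <- path is a placeholder
// The D3D adapter query handled above instead covers drivers and layers that the
// graphics driver registers per-GPU rather than through these fixed keys.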
+static VkResult ReadDataFilesInRegistry(const struct loader_instance *inst, enum loader_data_files_type data_file_type, + bool warn_if_not_present, char *registry_location, struct loader_data_files *out_files) { + VkResult vk_result = VK_SUCCESS; + char *search_path = NULL; + + // These calls look at the PNP/Device section of the registry. + VkResult regHKR_result = VK_SUCCESS; + DWORD reg_size = 4096; + if (!strncmp(registry_location, VK_DRIVERS_INFO_REGISTRY_LOC, sizeof(VK_DRIVERS_INFO_REGISTRY_LOC))) { + // If we're looking for drivers we need to try enumerating adapters + regHKR_result = ReadManifestsFromD3DAdapters(inst, &search_path, ®_size, LoaderPnpDriverRegistryWide()); + if (regHKR_result == VK_INCOMPLETE) { + regHKR_result = loaderGetDeviceRegistryFiles(inst, &search_path, ®_size, LoaderPnpDriverRegistry()); + } + } else if (!strncmp(registry_location, VK_ELAYERS_INFO_REGISTRY_LOC, sizeof(VK_ELAYERS_INFO_REGISTRY_LOC))) { + regHKR_result = ReadManifestsFromD3DAdapters(inst, &search_path, ®_size, LoaderPnpELayerRegistryWide()); + if (regHKR_result == VK_INCOMPLETE) { + regHKR_result = loaderGetDeviceRegistryFiles(inst, &search_path, ®_size, LoaderPnpELayerRegistry()); + } + } else if (!strncmp(registry_location, VK_ILAYERS_INFO_REGISTRY_LOC, sizeof(VK_ILAYERS_INFO_REGISTRY_LOC))) { + regHKR_result = ReadManifestsFromD3DAdapters(inst, &search_path, ®_size, LoaderPnpILayerRegistryWide()); + if (regHKR_result == VK_INCOMPLETE) { + regHKR_result = loaderGetDeviceRegistryFiles(inst, &search_path, ®_size, LoaderPnpILayerRegistry()); + } + } + + // This call looks into the Khronos non-device specific section of the registry. + bool use_secondary_hive = (data_file_type == LOADER_DATA_FILE_MANIFEST_LAYER) && (!IsHighIntegrity()); + VkResult reg_result = loaderGetRegistryFiles(inst, registry_location, use_secondary_hive, &search_path, ®_size); + + if ((VK_SUCCESS != reg_result && VK_SUCCESS != regHKR_result) || NULL == search_path) { + if (data_file_type == LOADER_DATA_FILE_MANIFEST_ICD) { + loader_log( + inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ReadDataFilesInRegistry: Registry lookup failed to get ICD manifest files. Possibly missing Vulkan driver?"); + if (VK_SUCCESS == reg_result || VK_ERROR_OUT_OF_HOST_MEMORY == reg_result) { + vk_result = reg_result; + } else { + vk_result = regHKR_result; + } + } else { + if (warn_if_not_present) { + if (data_file_type == LOADER_DATA_FILE_MANIFEST_LAYER) { + // This is only a warning for layers + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "ReadDataFilesInRegistry: Registry lookup failed to get layer manifest files."); + } else { + // This is only a warning for general data files + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "ReadDataFilesInRegistry: Registry lookup failed to get data files."); + } + } + if (reg_result == VK_ERROR_OUT_OF_HOST_MEMORY) { + vk_result = reg_result; + } else { + // Return success for now since it's not critical for layers + vk_result = VK_SUCCESS; + } + } + goto out; + } + + // Now, parse the paths and add any manifest files found in them. + vk_result = AddDataFilesInPath(inst, search_path, false, out_files, false); + +out: + + if (NULL != search_path) { + loader_instance_heap_free(inst, search_path); + } + + return vk_result; +} +#endif // _WIN32 + +// Find the Vulkan library manifest files. +// +// This function scans the "location" or "env_override" directories/files +// for a list of JSON manifest files. If env_override is non-NULL +// and has a valid value. Then the location is ignored. 
Otherwise +// location is used to look for manifest files. The location +// is interpreted as Registry path on Windows and a directory path(s) +// on Linux. "home_location" is an additional directory in the users home +// directory to look at. It is expanded into the dir path +// $XDG_DATA_HOME/home_location or $HOME/.local/share/home_location depending +// on environment variables. This "home_location" is only used on Linux. +// +// \returns +// VKResult +// A string list of manifest files to be opened in out_files param. +// List has a pointer to string for each manifest filename. +// When done using the list in out_files, pointers should be freed. +// Location or override string lists can be either files or directories as +// follows: +// | location | override +// -------------------------------- +// Win ICD | files | files +// Win Layer | files | dirs +// Linux ICD | dirs | files +// Linux Layer| dirs | dirs +static VkResult loaderGetDataFiles(const struct loader_instance *inst, enum loader_data_files_type data_file_type, + bool warn_if_not_present, const char *env_override, const char *path_override, + char *registry_location, const char *relative_location, struct loader_data_files *out_files) { + VkResult res = VK_SUCCESS; + bool override_active = false; + + // Free and init the out_files information so there's no false data left from uninitialized variables. + if (out_files->filename_list != NULL) { + for (uint32_t i = 0; i < out_files->count; i++) { + if (NULL != out_files->filename_list[i]) { + loader_instance_heap_free(inst, out_files->filename_list[i]); + out_files->filename_list[i] = NULL; + } + } + loader_instance_heap_free(inst, out_files->filename_list); + } + out_files->count = 0; + out_files->alloc_count = 0; + out_files->filename_list = NULL; + + res = ReadDataFilesInSearchPaths(inst, data_file_type, env_override, path_override, relative_location, &override_active, + out_files); + if (VK_SUCCESS != res) { + goto out; + } + +#ifdef _WIN32 + // Read the registry if the override wasn't active. + if (!override_active) { + res = ReadDataFilesInRegistry(inst, data_file_type, warn_if_not_present, registry_location, out_files); + if (VK_SUCCESS != res) { + goto out; + } + } +#endif + +out: + + if (VK_SUCCESS != res && NULL != out_files->filename_list) { + for (uint32_t remove = 0; remove < out_files->count; remove++) { + loader_instance_heap_free(inst, out_files->filename_list[remove]); + } + loader_instance_heap_free(inst, out_files->filename_list); + out_files->count = 0; + out_files->alloc_count = 0; + out_files->filename_list = NULL; + } + + return res; +} + +void loader_init_icd_lib_list() {} + +void loader_destroy_icd_lib_list() {} + +// Try to find the Vulkan ICD driver(s). +// +// This function scans the default system loader path(s) or path +// specified by the \c VK_ICD_FILENAMES environment variable in +// order to find loadable VK ICDs manifest files. From these +// manifest files it finds the ICD libraries. 
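// A minimal ICD manifest, for illustration (the library path and version values
// here are placeholders, not from a real driver):
//     {
//         "file_format_version": "1.0.1",
//         "ICD": {
//             "library_path": "./libvulkan_placeholder_icd.so",
//             "api_version": "1.2.141"
//         }
//     }
// "file_format_version", "ICD.library_path" and "ICD.api_version" are the fields
// the parsing loop below reads.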
+// +// \returns +// Vulkan result +// (on result == VK_SUCCESS) a list of icds that were discovered +VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list) { + char *file_str; + uint16_t file_major_vers = 0; + uint16_t file_minor_vers = 0; + uint16_t file_patch_vers = 0; + char *vers_tok; + struct loader_data_files manifest_files; + VkResult res = VK_SUCCESS; + bool lockedMutex = false; + cJSON *json = NULL; + uint32_t num_good_icds = 0; + + memset(&manifest_files, 0, sizeof(struct loader_data_files)); + + res = loader_scanned_icd_init(inst, icd_tramp_list); + if (VK_SUCCESS != res) { + goto out; + } + // Get a list of manifest files for ICDs + res = loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_ICD, true, VK_ICD_FILENAMES_ENV_VAR, NULL, + VK_DRIVERS_INFO_REGISTRY_LOC, VK_DRIVERS_INFO_RELATIVE_DIR, &manifest_files); + if (VK_SUCCESS != res || manifest_files.count == 0) { + goto out; + } + loader_platform_thread_lock_mutex(&loader_json_lock); + lockedMutex = true; + for (uint32_t i = 0; i < manifest_files.count; i++) { + file_str = manifest_files.filename_list[i]; + if (file_str == NULL) { + continue; + } + + VkResult temp_res = loader_get_json(inst, file_str, &json); + if (NULL == json || temp_res != VK_SUCCESS) { + if (NULL != json) { + cJSON_Delete(json); + json = NULL; + } + // If we haven't already found an ICD, copy this result to + // the returned result. + if (num_good_icds == 0) { + res = temp_res; + } + if (temp_res == VK_ERROR_OUT_OF_HOST_MEMORY) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + break; + } else { + continue; + } + } + res = temp_res; + + cJSON *item, *itemICD; + item = cJSON_GetObjectItem(json, "file_format_version"); + if (item == NULL) { + if (num_good_icds == 0) { + res = VK_ERROR_INITIALIZATION_FAILED; + } + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: ICD JSON %s does not have a" + " \'file_format_version\' field. Skipping ICD JSON.", + file_str); + cJSON_Delete(json); + json = NULL; + continue; + } + + char *file_vers = cJSON_Print(item); + if (NULL == file_vers) { + // Only reason the print can fail is if there was an allocation issue + if (num_good_icds == 0) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + } + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: Failed retrieving ICD JSON %s" + " \'file_format_version\' field. 
Skipping ICD JSON", + file_str); + cJSON_Delete(json); + json = NULL; + continue; + } + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Found ICD manifest file %s, version %s", file_str, file_vers); + + // Get the major/minor/and patch as integers for easier comparison + vers_tok = strtok(file_vers, ".\"\n\r"); + if (NULL != vers_tok) { + file_major_vers = (uint16_t)atoi(vers_tok); + vers_tok = strtok(NULL, ".\"\n\r"); + if (NULL != vers_tok) { + file_minor_vers = (uint16_t)atoi(vers_tok); + vers_tok = strtok(NULL, ".\"\n\r"); + if (NULL != vers_tok) { + file_patch_vers = (uint16_t)atoi(vers_tok); + } + } + } + + if (file_major_vers != 1 || file_minor_vers != 0 || file_patch_vers > 1) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: Unexpected manifest file version " + "(expected 1.0.0 or 1.0.1), may cause errors"); + } + cJSON_Free(file_vers); + + itemICD = cJSON_GetObjectItem(json, "ICD"); + if (itemICD != NULL) { + item = cJSON_GetObjectItem(itemICD, "library_path"); + if (item != NULL) { + char *temp = cJSON_Print(item); + if (!temp || strlen(temp) == 0) { + if (num_good_icds == 0) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + } + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: Failed retrieving ICD JSON %s" + " \'library_path\' field. Skipping ICD JSON.", + file_str); + cJSON_Free(temp); + cJSON_Delete(json); + json = NULL; + continue; + } + // strip out extra quotes + temp[strlen(temp) - 1] = '\0'; + char *library_path = loader_stack_alloc(strlen(temp) + 1); + if (NULL == library_path) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_icd_scan: Failed to allocate space for " + "ICD JSON %s \'library_path\' value. Skipping " + "ICD JSON.", + file_str); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + cJSON_Free(temp); + cJSON_Delete(json); + json = NULL; + goto out; + } + strcpy(library_path, &temp[1]); + cJSON_Free(temp); + if (strlen(library_path) == 0) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: ICD JSON %s \'library_path\'" + " field is empty. Skipping ICD JSON.", + file_str); + cJSON_Delete(json); + json = NULL; + continue; + } + char fullpath[MAX_STRING_SIZE]; + // Print out the paths being searched if debugging is enabled + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Searching for ICD drivers named %s", library_path); + if (loader_platform_is_path(library_path)) { + // a relative or absolute path + char *name_copy = loader_stack_alloc(strlen(file_str) + 1); + char *rel_base; + strcpy(name_copy, file_str); + rel_base = loader_platform_dirname(name_copy); + loader_expand_path(library_path, rel_base, sizeof(fullpath), fullpath); + } else { +// a filename which is assumed in a system directory +#if defined(DEFAULT_VK_DRIVERS_PATH) + loader_get_fullpath(library_path, DEFAULT_VK_DRIVERS_PATH, sizeof(fullpath), fullpath); +#else + loader_get_fullpath(library_path, "", sizeof(fullpath), fullpath); +#endif + } + + uint32_t vers = 0; + item = cJSON_GetObjectItem(itemICD, "api_version"); + if (item != NULL) { + temp = cJSON_Print(item); + if (NULL == temp) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: Failed retrieving ICD JSON %s" + " \'api_version\' field. 
Skipping ICD JSON.", + file_str); + + // Only reason the print can fail is if there was an + // allocation issue + if (num_good_icds == 0) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + } + + cJSON_Free(temp); + cJSON_Delete(json); + json = NULL; + continue; + } + vers = loader_make_version(temp); + cJSON_Free(temp); + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: ICD JSON %s does not have an" + " \'api_version\' field.", + file_str); + } + + res = loader_scanned_icd_add(inst, icd_tramp_list, fullpath, vers); + if (VK_SUCCESS != res) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_icd_scan: Failed to add ICD JSON %s. " + " Skipping ICD JSON.", + fullpath); + cJSON_Delete(json); + json = NULL; + continue; + } + num_good_icds++; + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: Failed to find \'library_path\' " + "object in ICD JSON file %s. Skipping ICD JSON.", + file_str); + } + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_icd_scan: Can not find \'ICD\' object in ICD JSON " + "file %s. Skipping ICD JSON", + file_str); + } + + cJSON_Delete(json); + json = NULL; + } + +out: + + if (NULL != json) { + cJSON_Delete(json); + } + + if (NULL != manifest_files.filename_list) { + for (uint32_t i = 0; i < manifest_files.count; i++) { + if (NULL != manifest_files.filename_list[i]) { + loader_instance_heap_free(inst, manifest_files.filename_list[i]); + } + } + loader_instance_heap_free(inst, manifest_files.filename_list); + } + if (lockedMutex) { + loader_platform_thread_unlock_mutex(&loader_json_lock); + } + + return res; +} + +void loaderScanForLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers) { + char *file_str; + struct loader_data_files manifest_files; + cJSON *json; + bool override_layer_valid = false; + char *override_paths = NULL; + uint32_t total_count = 0; + + memset(&manifest_files, 0, sizeof(struct loader_data_files)); + + // Cleanup any previously scanned libraries + loaderDeleteLayerListAndProperties(inst, instance_layers); + + loader_platform_thread_lock_mutex(&loader_json_lock); + + // Get a list of manifest files for any implicit layers + // Pass NULL for environment variable override - implicit layers are not overridden by LAYERS_PATH_ENV + if (VK_SUCCESS != loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, false, NULL, NULL, VK_ILAYERS_INFO_REGISTRY_LOC, + VK_ILAYERS_INFO_RELATIVE_DIR, &manifest_files)) { + goto out; + } + + if (manifest_files.count != 0) { + total_count += manifest_files.count; + for (uint32_t i = 0; i < manifest_files.count; i++) { + file_str = manifest_files.filename_list[i]; + if (file_str == NULL) { + continue; + } + + // Parse file into JSON struct + VkResult res = loader_get_json(inst, file_str, &json); + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + goto out; + } else if (VK_SUCCESS != res || NULL == json) { + continue; + } + + VkResult local_res = loaderAddLayerProperties(inst, instance_layers, json, true, file_str); + cJSON_Delete(json); + + // If the error is anything other than out of memory we still want to try to load the other layers + if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) { + goto out; + } + } + } + + // Remove any extraneous override layers. + RemoveAllNonValidOverrideLayers(inst, instance_layers); + + // Check to see if the override layer is present, and use it's override paths. 
+ for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) { + struct loader_layer_properties *prop = &instance_layers->list[i]; + if (prop->is_override && loaderImplicitLayerIsEnabled(inst, prop) && prop->num_override_paths > 0) { + char *cur_write_ptr = NULL; + size_t override_path_size = 0; + for (uint32_t j = 0; j < prop->num_override_paths; j++) { + override_path_size += DetermineDataFilePathSize(prop->override_paths[j], 0); + } + override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (override_paths == NULL) { + goto out; + } + cur_write_ptr = &override_paths[0]; + for (uint32_t j = 0; j < prop->num_override_paths; j++) { + CopyDataFilePath(prop->override_paths[j], NULL, 0, &cur_write_ptr); + } + // Remove the last path separator + --cur_write_ptr; + assert(cur_write_ptr - override_paths < (ptrdiff_t)override_path_size); + *cur_write_ptr = '\0'; + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loaderScanForLayers: Override layer has override paths set to %s", + override_paths); + } + } + + // Get a list of manifest files for explicit layers + if (VK_SUCCESS != loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, true, VK_LAYER_PATH_ENV_VAR, override_paths, + VK_ELAYERS_INFO_REGISTRY_LOC, VK_ELAYERS_INFO_RELATIVE_DIR, &manifest_files)) { + goto out; + } + + // Make sure we have at least one layer, if not, go ahead and return + if (manifest_files.count == 0 && total_count == 0) { + goto out; + } else { + total_count += manifest_files.count; + for (uint32_t i = 0; i < manifest_files.count; i++) { + file_str = manifest_files.filename_list[i]; + if (file_str == NULL) { + continue; + } + + // Parse file into JSON struct + VkResult res = loader_get_json(inst, file_str, &json); + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + goto out; + } else if (VK_SUCCESS != res || NULL == json) { + continue; + } + + VkResult local_res = loaderAddLayerProperties(inst, instance_layers, json, false, file_str); + cJSON_Delete(json); + + // If the error is anything other than out of memory we still want to try to load the other layers + if (VK_ERROR_OUT_OF_HOST_MEMORY == local_res) { + goto out; + } + } + } + + // Verify any meta-layers in the list are valid and all the component layers are + // actually present in the available layer list + VerifyAllMetaLayers(inst, instance_layers, &override_layer_valid); + + if (override_layer_valid) { + loaderRemoveLayersInBlacklist(inst, instance_layers); + if (NULL != inst) { + inst->override_layer_present = true; + } + } + +out: + + if (NULL != override_paths) { + loader_instance_heap_free(inst, override_paths); + } + if (NULL != manifest_files.filename_list) { + for (uint32_t i = 0; i < manifest_files.count; i++) { + if (NULL != manifest_files.filename_list[i]) { + loader_instance_heap_free(inst, manifest_files.filename_list[i]); + } + } + loader_instance_heap_free(inst, manifest_files.filename_list); + } + loader_platform_thread_unlock_mutex(&loader_json_lock); +} + +void loaderScanForImplicitLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers) { + char *file_str; + struct loader_data_files manifest_files; + cJSON *json; + bool override_layer_valid = false; + char *override_paths = NULL; + bool implicit_metalayer_present = false; + bool have_json_lock = false; + + // Before we begin anything, init manifest_files to avoid a delete of garbage memory if + // a failure occurs before allocating the manifest filename_list. 
+ memset(&manifest_files, 0, sizeof(struct loader_data_files)); + + // Pass NULL for environment variable override - implicit layers are not overridden by LAYERS_PATH_ENV + VkResult res = loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, false, NULL, NULL, VK_ILAYERS_INFO_REGISTRY_LOC, + VK_ILAYERS_INFO_RELATIVE_DIR, &manifest_files); + if (VK_SUCCESS != res || manifest_files.count == 0) { + goto out; + } + + // Cleanup any previously scanned libraries + loaderDeleteLayerListAndProperties(inst, instance_layers); + + loader_platform_thread_lock_mutex(&loader_json_lock); + have_json_lock = true; + + for (uint32_t i = 0; i < manifest_files.count; i++) { + file_str = manifest_files.filename_list[i]; + if (file_str == NULL) { + continue; + } + + // parse file into JSON struct + res = loader_get_json(inst, file_str, &json); + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + goto out; + } else if (VK_SUCCESS != res || NULL == json) { + continue; + } + + res = loaderAddLayerProperties(inst, instance_layers, json, true, file_str); + + loader_instance_heap_free(inst, file_str); + manifest_files.filename_list[i] = NULL; + cJSON_Delete(json); + + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + goto out; + } + } + + // Remove any extraneous override layers. + RemoveAllNonValidOverrideLayers(inst, instance_layers); + + // Check to see if either the override layer is present, or another implicit meta-layer. + // Each of these may require explicit layers to be enabled at this time. + for (int32_t i = 0; i < (int32_t)instance_layers->count; i++) { + struct loader_layer_properties *prop = &instance_layers->list[i]; + if (prop->is_override && loaderImplicitLayerIsEnabled(inst, prop)) { + override_layer_valid = true; + if (prop->num_override_paths > 0) { + char *cur_write_ptr = NULL; + size_t override_path_size = 0; + for (uint32_t j = 0; j < prop->num_override_paths; j++) { + override_path_size += DetermineDataFilePathSize(prop->override_paths[j], 0); + } + override_paths = loader_instance_heap_alloc(inst, override_path_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (override_paths == NULL) { + goto out; + } + cur_write_ptr = &override_paths[0]; + for (uint32_t j = 0; j < prop->num_override_paths; j++) { + CopyDataFilePath(prop->override_paths[j], NULL, 0, &cur_write_ptr); + } + // Remove the last path separator + --cur_write_ptr; + assert(cur_write_ptr - override_paths < (ptrdiff_t)override_path_size); + *cur_write_ptr = '\0'; + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loaderScanForImplicitLayers: Override layer has override paths set to %s", override_paths); + } + } else if (!prop->is_override && prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER) { + implicit_metalayer_present = true; + } + } + + // If either the override layer or an implicit meta-layer are present, we need to add + // explicit layer info as well. Not to worry, though, all explicit layers not included + // in the override layer will be removed below in loaderRemoveLayersInBlacklist(). 
+ if (override_layer_valid || implicit_metalayer_present) { + if (VK_SUCCESS != loaderGetDataFiles(inst, LOADER_DATA_FILE_MANIFEST_LAYER, true, VK_LAYER_PATH_ENV_VAR, override_paths, + VK_ELAYERS_INFO_REGISTRY_LOC, VK_ELAYERS_INFO_RELATIVE_DIR, &manifest_files)) { + goto out; + } + + for (uint32_t i = 0; i < manifest_files.count; i++) { + file_str = manifest_files.filename_list[i]; + if (file_str == NULL) { + continue; + } + + // parse file into JSON struct + res = loader_get_json(inst, file_str, &json); + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + goto out; + } else if (VK_SUCCESS != res || NULL == json) { + continue; + } + + res = loaderAddLayerProperties(inst, instance_layers, json, true, file_str); + + loader_instance_heap_free(inst, file_str); + manifest_files.filename_list[i] = NULL; + cJSON_Delete(json); + + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + goto out; + } + } + } + + // Verify any meta-layers in the list are valid and all the component layers are + // actually present in the available layer list + VerifyAllMetaLayers(inst, instance_layers, &override_layer_valid); + + if (override_layer_valid || implicit_metalayer_present) { + loaderRemoveLayersNotInImplicitMetaLayers(inst, instance_layers); + if (override_layer_valid && inst != NULL) { + inst->override_layer_present = true; + } + } + +out: + + if (NULL != override_paths) { + loader_instance_heap_free(inst, override_paths); + } + for (uint32_t i = 0; i < manifest_files.count; i++) { + if (NULL != manifest_files.filename_list[i]) { + loader_instance_heap_free(inst, manifest_files.filename_list[i]); + } + } + if (NULL != manifest_files.filename_list) { + loader_instance_heap_free(inst, manifest_files.filename_list); + } + + if (have_json_lock) { + loader_platform_thread_unlock_mutex(&loader_json_lock); + } +} + +static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_internal(VkInstance inst, const char *pName) { + // inst is not wrapped + if (inst == VK_NULL_HANDLE) { + return NULL; + } + VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst; + void *addr; + + if (disp_table == NULL) return NULL; + + bool found_name; + addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name); + if (found_name) { + return addr; + } + + if (loader_phys_dev_ext_gpa(loader_get_instance(inst), pName, true, NULL, &addr)) return addr; + + // Don't call down the chain, this would be an infinite loop + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loader_gpdpa_instance_internal() unrecognized name %s", pName); + return NULL; +} + +static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator(VkInstance inst, const char *pName) { + // inst is not wrapped + if (inst == VK_NULL_HANDLE) { + return NULL; + } + VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst; + void *addr; + + if (disp_table == NULL) return NULL; + + bool found_name; + addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name); + if (found_name) { + return addr; + } + + // Get the terminator, but don't perform checking since it should already + // have been setup if we get here. 
+ if (loader_phys_dev_ext_gpa(loader_get_instance(inst), pName, false, NULL, &addr)) { + return addr; + } + + // Don't call down the chain, this would be an infinite loop + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName); + return NULL; +} + +static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_instance_internal(VkInstance inst, const char *pName) { + if (!strcmp(pName, "vkGetInstanceProcAddr")) { + return (PFN_vkVoidFunction)loader_gpa_instance_internal; + } + if (!strcmp(pName, "vk_layerGetPhysicalDeviceProcAddr")) { + return (PFN_vkVoidFunction)loader_gpdpa_instance_terminator; + } + if (!strcmp(pName, "vkCreateInstance")) { + return (PFN_vkVoidFunction)terminator_CreateInstance; + } + if (!strcmp(pName, "vkCreateDevice")) { + return (PFN_vkVoidFunction)terminator_CreateDevice; + } + + // The VK_EXT_debug_utils functions need a special case here so the terminators can still be found from vkGetInstanceProcAddr + if (!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) { + return (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT; + } + if (!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) { + return (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT; + } + if (!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) { + return (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT; + } + if (!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) { + return (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT; + } + if (!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) { + return (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT; + } + if (!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) { + return (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT; + } + if (!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) { + return (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT; + } + if (!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) { + return (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT; + } + + // inst is not wrapped + if (inst == VK_NULL_HANDLE) { + return NULL; + } + VkLayerInstanceDispatchTable *disp_table = *(VkLayerInstanceDispatchTable **)inst; + void *addr; + + if (disp_table == NULL) return NULL; + + bool found_name; + addr = loader_lookup_instance_dispatch_table(disp_table, pName, &found_name); + if (found_name) { + return addr; + } + + // Don't call down the chain, this would be an infinite loop + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "loader_gpa_instance_internal() unrecognized name %s", pName); + return NULL; +} + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpa_device_internal(VkDevice device, const char *pName) { + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL); + + // Return this function if a layer above here is asking for the vkGetDeviceProcAddr. + // This is so we can properly intercept any device commands needing a terminator. + if (!strcmp(pName, "vkGetDeviceProcAddr")) { + return (PFN_vkVoidFunction)loader_gpa_device_internal; + } + + // NOTE: Device Funcs needing Trampoline/Terminator. + // Overrides for device functions needing a trampoline and + // a terminator because certain device entry-points still need to go + // through a terminator before hitting the ICD. This could be for + // several reasons, but the main one is currently unwrapping an + // object before passing the appropriate info along to the ICD. 
+ // This is why we also have to override the direct ICD call to + // vkGetDeviceProcAddr to intercept those calls. + PFN_vkVoidFunction addr = get_extension_device_proc_terminator(dev, pName); + if (NULL != addr) { + return addr; + } + + return icd_term->dispatch.GetDeviceProcAddr(device, pName); +} + +// Initialize device_ext dispatch table entry as follows: +// If dev == NULL find all logical devices created within this instance and +// init the entry (given by idx) in the ext dispatch table. +// If dev != NULL only initialize the entry in the given dev's dispatch table. +// The initialization value is gotten by calling down the device chain with +// GDPA. +// If GDPA returns NULL then don't initialize the dispatch table entry. +static void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, struct loader_device *dev, uint32_t idx, + const char *funcName) + +{ + void *gdpa_value; + if (dev != NULL) { + gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(dev->chain_device, funcName); + if (gdpa_value != NULL) dev->loader_dispatch.ext_dispatch.dev_ext[idx] = (PFN_vkDevExt)gdpa_value; + } else { + for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next) { + struct loader_device *ldev = icd_term->logical_device_list; + while (ldev) { + gdpa_value = ldev->loader_dispatch.core_dispatch.GetDeviceProcAddr(ldev->chain_device, funcName); + if (gdpa_value != NULL) ldev->loader_dispatch.ext_dispatch.dev_ext[idx] = (PFN_vkDevExt)gdpa_value; + ldev = ldev->next; + } + } + } +} + +// Find all dev extension in the hash table and initialize the dispatch table +// for dev for each of those extension entrypoints found in hash table. +void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev) { + for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) { + if (inst->dev_ext_disp_hash[i].func_name != NULL) + loader_init_dispatch_dev_ext_entry(inst, dev, i, inst->dev_ext_disp_hash[i].func_name); + } +} + +static bool loader_check_icds_for_dev_ext_address(struct loader_instance *inst, const char *funcName) { + struct loader_icd_term *icd_term; + icd_term = inst->icd_terms; + while (NULL != icd_term) { + if (icd_term->scanned_icd->GetInstanceProcAddr(icd_term->instance, funcName)) + // this icd supports funcName + return true; + icd_term = icd_term->next; + } + + return false; +} + +static bool loader_check_layer_list_for_dev_ext_address(const struct loader_layer_list *const layers, const char *funcName) { + // Iterate over the layers. + for (uint32_t layer = 0; layer < layers->count; ++layer) { + // Iterate over the extensions. + const struct loader_device_extension_list *const extensions = &(layers->list[layer].device_extension_list); + for (uint32_t extension = 0; extension < extensions->count; ++extension) { + // Iterate over the entry points. 
+ const struct loader_dev_ext_props *const property = &(extensions->list[extension]); + for (uint32_t entry = 0; entry < property->entrypoint_count; ++entry) { + if (strcmp(property->entrypoints[entry], funcName) == 0) { + return true; + } + } + } + } + + return false; +} + +static void loader_free_dev_ext_table(struct loader_instance *inst) { + for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) { + loader_instance_heap_free(inst, inst->dev_ext_disp_hash[i].func_name); + loader_instance_heap_free(inst, inst->dev_ext_disp_hash[i].list.index); + } + memset(inst->dev_ext_disp_hash, 0, sizeof(inst->dev_ext_disp_hash)); +} + +static bool loader_add_dev_ext_table(struct loader_instance *inst, uint32_t *ptr_idx, const char *funcName) { + uint32_t i; + uint32_t idx = *ptr_idx; + struct loader_dispatch_hash_list *list = &inst->dev_ext_disp_hash[idx].list; + + if (!inst->dev_ext_disp_hash[idx].func_name) { + // no entry here at this idx, so use it + assert(list->capacity == 0); + inst->dev_ext_disp_hash[idx].func_name = + (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (inst->dev_ext_disp_hash[idx].func_name == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_dev_ext_table: Failed to allocate memory " + "for func_name %s", + funcName); + return false; + } + strncpy(inst->dev_ext_disp_hash[idx].func_name, funcName, strlen(funcName) + 1); + return true; + } + + // check for enough capacity + if (list->capacity == 0) { + list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (list->index == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_dev_ext_table: Failed to allocate memory for list index of function %s", funcName); + return false; + } + list->capacity = 8 * sizeof(*(list->index)); + } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) { + void *new_ptr = loader_instance_heap_realloc(inst, list->index, list->capacity, list->capacity * 2, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_dev_ext_table: Failed to reallocate memory for list index of function %s", funcName); + return false; + } + list->index = new_ptr; + list->capacity *= 2; + } + + // find an unused index in the hash table and use it + i = (idx + 1) % MAX_NUM_UNKNOWN_EXTS; + do { + if (!inst->dev_ext_disp_hash[i].func_name) { + assert(inst->dev_ext_disp_hash[i].list.capacity == 0); + inst->dev_ext_disp_hash[i].func_name = + (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (inst->dev_ext_disp_hash[i].func_name == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_dev_ext_table: Failed to allocate memory " + "for func_name %s", + funcName); + return false; + } + strncpy(inst->dev_ext_disp_hash[i].func_name, funcName, strlen(funcName) + 1); + list->index[list->count] = i; + list->count++; + *ptr_idx = i; + return true; + } + i = (i + 1) % MAX_NUM_UNKNOWN_EXTS; + } while (i != idx); + + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_dev_ext_table: Could not insert into hash table; is " + "it full?"); + + return false; +} + +static bool loader_name_in_dev_ext_table(struct loader_instance *inst, uint32_t *idx, const char *funcName) { + uint32_t alt_idx; + if (inst->dev_ext_disp_hash[*idx].func_name && !strcmp(inst->dev_ext_disp_hash[*idx].func_name, funcName)) return true; 
+
+    // funcName wasn't at the primary spot in the hash table
+    // search the list of secondary locations (shallow search, not deep search)
+    for (uint32_t i = 0; i < inst->dev_ext_disp_hash[*idx].list.count; i++) {
+        alt_idx = inst->dev_ext_disp_hash[*idx].list.index[i];
+        // Compare against the name stored at the secondary index
+        if (!strcmp(inst->dev_ext_disp_hash[alt_idx].func_name, funcName)) {
+            *idx = alt_idx;
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// This function returns a generic trampoline code address for unknown entry
+// points. Presumably, these unknown entry points (as given by funcName) are
+// device extension entrypoints. A hash table is used to keep a list of
+// unknown entry points and their mapping to the device extension dispatch
+// table (struct loader_dev_ext_dispatch_table).
+// \returns
+// For a given entry point string (funcName), if an existing mapping is found,
+// the trampoline address for that mapping is returned. Otherwise, this
+// unknown entry point has not been seen yet. Next, check if a layer or ICD
+// supports it. If so, a new entry in the hash table is initialized and the
+// trampoline address for that new entry is returned. NULL is returned if the
+// hash table is full or if no discovered layer or ICD returns a non-NULL
+// GetProcAddr for it.
+void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName) {
+    uint32_t idx;
+    uint32_t seed = 0;
+
+    idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_UNKNOWN_EXTS;
+
+    if (loader_name_in_dev_ext_table(inst, &idx, funcName))
+        // found funcName already in hash
+        return loader_get_dev_ext_trampoline(idx);
+
+    // Check if funcName is supported in either ICDs or a layer library
+    if (!loader_check_icds_for_dev_ext_address(inst, funcName) &&
+        !loader_check_layer_list_for_dev_ext_address(&inst->app_activated_layer_list, funcName)) {
+        // No ICD or enabled layer provides this entry point, so there is nothing to dispatch to
+        return NULL;
+    }
+
+    if (loader_add_dev_ext_table(inst, &idx, funcName)) {
+        // successfully added new table entry
+        // init any dev dispatch table entries as needed
+        loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName);
+        return loader_get_dev_ext_trampoline(idx);
+    }
+
+    return NULL;
+}
+
+static bool loader_check_icds_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) {
+    struct loader_icd_term *icd_term;
+    icd_term = inst->icd_terms;
+    while (NULL != icd_term) {
+        if (icd_term->scanned_icd->interface_version >= MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION &&
+            icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName))
+            // this icd supports funcName
+            return true;
+        icd_term = icd_term->next;
+    }
+
+    return false;
+}
+
+static bool loader_check_layer_list_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) {
+    struct loader_layer_properties *layer_prop_list = inst->expanded_activated_layer_list.list;
+    for (uint32_t layer = 0; layer < inst->expanded_activated_layer_list.count; ++layer) {
+        // If this layer supports the vk_layerGetPhysicalDeviceProcAddr, then call
+        // it and see if it returns a valid pointer for this function name.
+ if (layer_prop_list[layer].interface_version > 1) { + const struct loader_layer_functions *const functions = &(layer_prop_list[layer].functions); + if (NULL != functions->get_physical_device_proc_addr && + NULL != functions->get_physical_device_proc_addr((VkInstance)inst->instance, funcName)) { + return true; + } + } + } + + return false; +} + +static void loader_free_phys_dev_ext_table(struct loader_instance *inst) { + for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) { + loader_instance_heap_free(inst, inst->phys_dev_ext_disp_hash[i].func_name); + loader_instance_heap_free(inst, inst->phys_dev_ext_disp_hash[i].list.index); + } + memset(inst->phys_dev_ext_disp_hash, 0, sizeof(inst->phys_dev_ext_disp_hash)); +} + +static bool loader_add_phys_dev_ext_table(struct loader_instance *inst, uint32_t *ptr_idx, const char *funcName) { + uint32_t i; + uint32_t idx = *ptr_idx; + struct loader_dispatch_hash_list *list = &inst->phys_dev_ext_disp_hash[idx].list; + + if (!inst->phys_dev_ext_disp_hash[idx].func_name) { + // no entry here at this idx, so use it + assert(list->capacity == 0); + inst->phys_dev_ext_disp_hash[idx].func_name = + (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (inst->phys_dev_ext_disp_hash[idx].func_name == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_phys_dev_ext_table() can't allocate memory for " + "func_name"); + return false; + } + strncpy(inst->phys_dev_ext_disp_hash[idx].func_name, funcName, strlen(funcName) + 1); + return true; + } + + // check for enough capacity + if (list->capacity == 0) { + list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (list->index == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_add_phys_dev_ext_table() can't allocate list memory"); + return false; + } + list->capacity = 8 * sizeof(*(list->index)); + } else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) { + void *new_ptr = loader_instance_heap_realloc(inst, list->index, list->capacity, list->capacity * 2, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_ptr) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "loader_add_phys_dev_ext_table() can't reallocate list memory"); + return false; + } + list->index = new_ptr; + list->capacity *= 2; + } + + // find an unused index in the hash table and use it + i = (idx + 1) % MAX_NUM_UNKNOWN_EXTS; + do { + if (!inst->phys_dev_ext_disp_hash[i].func_name) { + assert(inst->phys_dev_ext_disp_hash[i].list.capacity == 0); + inst->phys_dev_ext_disp_hash[i].func_name = + (char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (inst->phys_dev_ext_disp_hash[i].func_name == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_dev_ext_table() can't reallocate " + "func_name memory"); + return false; + } + strncpy(inst->phys_dev_ext_disp_hash[i].func_name, funcName, strlen(funcName) + 1); + list->index[list->count] = i; + list->count++; + *ptr_idx = i; + return true; + } + i = (i + 1) % MAX_NUM_UNKNOWN_EXTS; + } while (i != idx); + + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_add_phys_dev_ext_table() couldn't insert into hash table; is " + "it full?"); + return false; +} + +static bool loader_name_in_phys_dev_ext_table(struct loader_instance *inst, uint32_t *idx, const char *funcName) { + uint32_t alt_idx; + if (inst->phys_dev_ext_disp_hash[*idx].func_name && 
!strcmp(inst->phys_dev_ext_disp_hash[*idx].func_name, funcName))
+        return true;
+
+    // funcName wasn't at the primary spot in the hash table
+    // search the list of secondary locations (shallow search, not deep search)
+    for (uint32_t i = 0; i < inst->phys_dev_ext_disp_hash[*idx].list.count; i++) {
+        alt_idx = inst->phys_dev_ext_disp_hash[*idx].list.index[i];
+        // Compare against the name stored at the secondary index
+        if (!strcmp(inst->phys_dev_ext_disp_hash[alt_idx].func_name, funcName)) {
+            *idx = alt_idx;
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// This function returns a generic trampoline and/or terminator function
+// address for any unknown physical device extension commands. A hash
+// table is used to keep a list of unknown entry points and their
+// mapping to the physical device extension dispatch table (struct
+// loader_phys_dev_ext_dispatch_table).
+// For a given entry point string (funcName), if an existing mapping is
+// found, then the trampoline address for that mapping is returned in
+// tramp_addr (if it is not NULL) and the terminator address for that
+// mapping is returned in term_addr (if it is not NULL). Otherwise,
+// this unknown entry point has not been seen yet.
+// If it has not been seen before, and perform_checking is 'true',
+// check if a layer or an ICD supports it. If so, then a new entry in
+// the hash table is initialized and the trampoline and/or terminator
+// addresses are returned.
+// NULL is returned if the hash table is full or if no discovered layer or
+// ICD returns a non-NULL GetProcAddr for it.
+bool loader_phys_dev_ext_gpa(struct loader_instance *inst, const char *funcName, bool perform_checking, void **tramp_addr,
+                             void **term_addr) {
+    uint32_t idx;
+    uint32_t seed = 0;
+    bool success = false;
+
+    if (inst == NULL) {
+        goto out;
+    }
+
+    if (NULL != tramp_addr) {
+        *tramp_addr = NULL;
+    }
+    if (NULL != term_addr) {
+        *term_addr = NULL;
+    }
+
+    // We should always check to see if any ICD supports it.
+    if (!loader_check_icds_for_phys_dev_ext_address(inst, funcName)) {
+        // If we're not checking layers, or we are and it's not in a layer, just
+        // return
+        if (!perform_checking || !loader_check_layer_list_for_phys_dev_ext_address(inst, funcName)) {
+            goto out;
+        }
+    }
+
+    idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_UNKNOWN_EXTS;
+    if (perform_checking && !loader_name_in_phys_dev_ext_table(inst, &idx, funcName)) {
+        uint32_t i;
+        bool added = false;
+
+        // Only need to add first one to get index in Instance. Others will use
+        // the same index.
+        if (!added && loader_add_phys_dev_ext_table(inst, &idx, funcName)) {
+            added = true;
+        }
+
+        // Setup the ICD function pointers
+        struct loader_icd_term *icd_term = inst->icd_terms;
+        while (NULL != icd_term) {
+            if (MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION <= icd_term->scanned_icd->interface_version &&
+                NULL != icd_term->scanned_icd->GetPhysicalDeviceProcAddr) {
+                icd_term->phys_dev_ext[idx] =
+                    (PFN_PhysDevExt)icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName);
+
+                // Make sure we set the instance dispatch to point to the
+                // loader's terminator now since we can at least handle it
+                // in one ICD.
+                inst->disp->phys_dev_ext[idx] = loader_get_phys_dev_ext_termin(idx);
+            } else {
+                icd_term->phys_dev_ext[idx] = NULL;
+            }
+
+            icd_term = icd_term->next;
+        }
+
+        // Now, search for the first layer attached and query using it to get
+        // the first entry point.
+ for (i = 0; i < inst->expanded_activated_layer_list.count; i++) { + struct loader_layer_properties *layer_prop = &inst->expanded_activated_layer_list.list[i]; + if (layer_prop->interface_version > 1 && NULL != layer_prop->functions.get_physical_device_proc_addr) { + inst->disp->phys_dev_ext[idx] = + (PFN_PhysDevExt)layer_prop->functions.get_physical_device_proc_addr((VkInstance)inst->instance, funcName); + if (NULL != inst->disp->phys_dev_ext[idx]) { + break; + } + } + } + } + + if (NULL != tramp_addr) { + *tramp_addr = loader_get_phys_dev_ext_tramp(idx); + } + + if (NULL != term_addr) { + *term_addr = loader_get_phys_dev_ext_termin(idx); + } + + success = true; + +out: + return success; +} + +struct loader_instance *loader_get_instance(const VkInstance instance) { + // look up the loader_instance in our list by comparing dispatch tables, as + // there is no guarantee the instance is still a loader_instance* after any + // layers which wrap the instance object. + const VkLayerInstanceDispatchTable *disp; + struct loader_instance *ptr_instance = NULL; + disp = loader_get_instance_layer_dispatch(instance); + for (struct loader_instance *inst = loader.instances; inst; inst = inst->next) { + if (&inst->disp->layer_inst_disp == disp) { + ptr_instance = inst; + break; + } + } + return ptr_instance; +} + +static loader_platform_dl_handle loaderOpenLayerFile(const struct loader_instance *inst, const char *chain_type, + struct loader_layer_properties *prop) { + if ((prop->lib_handle = loader_platform_open_library(prop->lib_name)) == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, loader_platform_open_library_error(prop->lib_name)); + } else { + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Loading layer library %s", prop->lib_name); + } + + return prop->lib_handle; +} + +static void loaderCloseLayerFile(const struct loader_instance *inst, struct loader_layer_properties *prop) { + if (prop->lib_handle) { + loader_platform_close_library(prop->lib_handle); + loader_log(inst, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Unloading layer library %s", prop->lib_name); + prop->lib_handle = NULL; + } +} + +void loaderDeactivateLayers(const struct loader_instance *instance, struct loader_device *device, struct loader_layer_list *list) { + // Delete instance list of enabled layers and close any layer libraries + for (uint32_t i = 0; i < list->count; i++) { + struct loader_layer_properties *layer_prop = &list->list[i]; + + loaderCloseLayerFile(instance, layer_prop); + } + loaderDestroyLayerList(instance, device, list); +} + +// Go through the search_list and find any layers which match type. If layer +// type match is found in then add it to ext_list. +static void loaderAddImplicitLayers(const struct loader_instance *inst, struct loader_layer_list *target_list, + struct loader_layer_list *expanded_target_list, const struct loader_layer_list *source_list) { + for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) { + const struct loader_layer_properties *prop = &source_list->list[src_layer]; + if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) { + loaderAddImplicitLayer(inst, prop, target_list, expanded_target_list, source_list); + } + } +} + +// Get the layer name(s) from the env_name environment variable. If layer is found in +// search_list then add it to layer_list. But only add it to layer_list if type_flags matches. 
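+// The next function, loaderAddEnvironmentLayers(), walks a list of layer names read
+// from an environment variable such as VK_INSTANCE_LAYERS and enables each one it
+// finds in the scanned-layer list. As a rough, non-authoritative sketch of the kind
+// of splitting loader_get_next_path() performs (the ':' separator shown here is the
+// Linux path separator, Windows uses ';', and the layer names are only examples),
+// the parsing loop boils down to something like the block below, which is kept
+// under '#if 0' so it is illustrative only and never compiled:
+#if 0
+#include <stdio.h>
+#include <string.h>
+
+static void example_parse_layer_env(void) {
+    // Example value a user might export before launching the application.
+    char value[] = "VK_LAYER_KHRONOS_validation:VK_LAYER_LUNARG_api_dump";
+    for (char *name = value, *next = NULL; name != NULL && *name != '\0'; name = next) {
+        next = strchr(name, ':');          // find the next separator, if any
+        if (next != NULL) *next++ = '\0';  // terminate the current name in place
+        printf("would enable layer: %s\n", name);  // real code adds it to target_list
+    }
+}
+#endif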
+static VkResult loaderAddEnvironmentLayers(struct loader_instance *inst, const enum layer_type_flags type_flags, + const char *env_name, struct loader_layer_list *target_list, + struct loader_layer_list *expanded_target_list, + const struct loader_layer_list *source_list) { + VkResult res = VK_SUCCESS; + char *next, *name; + char *layer_env = loader_getenv(env_name, inst); + if (layer_env == NULL) { + goto out; + } + name = loader_stack_alloc(strlen(layer_env) + 1); + if (name == NULL) { + goto out; + } + strcpy(name, layer_env); + + while (name && *name) { + next = loader_get_next_path(name); + res = loaderAddLayerNameToList(inst, name, type_flags, source_list, target_list, expanded_target_list); + if (res != VK_SUCCESS) { + goto out; + } + name = next; + } + +out: + + if (layer_env != NULL) { + loader_free_getenv(layer_env, inst); + } + + return res; +} + +VkResult loaderEnableInstanceLayers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo, + const struct loader_layer_list *instance_layers) { + VkResult err = VK_SUCCESS; + uint16_t layer_api_major_version; + uint16_t layer_api_minor_version; + uint32_t i; + struct loader_layer_properties *prop; + + assert(inst && "Cannot have null instance"); + + if (!loaderInitLayerList(inst, &inst->app_activated_layer_list)) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderEnableInstanceLayers: Failed to initialize application version of the layer list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + if (!loaderInitLayerList(inst, &inst->expanded_activated_layer_list)) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderEnableInstanceLayers: Failed to initialize expanded version of the layer list"); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + // Add any implicit layers first + loaderAddImplicitLayers(inst, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, instance_layers); + + // Add any layers specified via environment variable next + err = loaderAddEnvironmentLayers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, "VK_INSTANCE_LAYERS", &inst->app_activated_layer_list, + &inst->expanded_activated_layer_list, instance_layers); + if (err != VK_SUCCESS) { + goto out; + } + + // Add layers specified by the application + err = loaderAddLayerNamesToList(inst, &inst->app_activated_layer_list, &inst->expanded_activated_layer_list, + pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, instance_layers); + + for (i = 0; i < inst->expanded_activated_layer_list.count; i++) { + // Verify that the layer api version is at least that of the application's request, if not, throw a warning since + // undefined behavior could occur. + prop = inst->expanded_activated_layer_list.list + i; + layer_api_major_version = VK_VERSION_MAJOR(prop->info.specVersion); + layer_api_minor_version = VK_VERSION_MINOR(prop->info.specVersion); + if (inst->app_api_major_version > layer_api_major_version || + (inst->app_api_major_version == layer_api_major_version && inst->app_api_minor_version > layer_api_minor_version)) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "loader_add_to_layer_list: Explicit layer %s is using an old API version %" PRIu16 ".%" PRIu16 + " versus application requested %" PRIu16 ".%" PRIu16, + prop->info.layerName, layer_api_major_version, layer_api_minor_version, inst->app_api_major_version, + inst->app_api_minor_version); + } + } + +out: + return err; +} + +// Determine the layer interface version to use. 
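+// loaderGetLayerInterfaceVersion() below drives the version negotiation from the
+// loader's side. For context, a layer's side of that handshake typically looks
+// roughly like the sketch under '#if 0' here; it is illustrative only (the
+// my_layer_* names are placeholders, not functions from this patch) and is never
+// compiled:
+#if 0
+VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
+    if (pVersionStruct == NULL || pVersionStruct->sType != LAYER_NEGOTIATE_INTERFACE_STRUCT) {
+        return VK_ERROR_INITIALIZATION_FAILED;
+    }
+    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
+        // Agree on version 2 and hand the loader this layer's dispatch entry points.
+        pVersionStruct->loaderLayerInterfaceVersion = 2;
+        pVersionStruct->pfnGetInstanceProcAddr = my_layer_GetInstanceProcAddr;
+        pVersionStruct->pfnGetDeviceProcAddr = my_layer_GetDeviceProcAddr;
+        pVersionStruct->pfnGetPhysicalDeviceProcAddr = my_layer_GetPhysicalDeviceProcAddr;
+    } else {
+        // Older loader: report the highest version this layer can speak and let the
+        // loader fall back to looking the entry points up by name.
+        pVersionStruct->loaderLayerInterfaceVersion = 1;
+    }
+    return VK_SUCCESS;
+}
+#endif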
+bool loaderGetLayerInterfaceVersion(PFN_vkNegotiateLoaderLayerInterfaceVersion fp_negotiate_layer_version, + VkNegotiateLayerInterface *interface_struct) { + memset(interface_struct, 0, sizeof(VkNegotiateLayerInterface)); + interface_struct->sType = LAYER_NEGOTIATE_INTERFACE_STRUCT; + interface_struct->loaderLayerInterfaceVersion = 1; + interface_struct->pNext = NULL; + + if (fp_negotiate_layer_version != NULL) { + // Layer supports the negotiation API, so call it with the loader's + // latest version supported + interface_struct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION; + VkResult result = fp_negotiate_layer_version(interface_struct); + + if (result != VK_SUCCESS) { + // Layer no longer supports the loader's latest interface version so + // fail loading the Layer + return false; + } + } + + if (interface_struct->loaderLayerInterfaceVersion < MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION) { + // Loader no longer supports the layer's latest interface version so + // fail loading the layer + return false; + } + + return true; +} + +VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, + PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA) { + VkResult res; + VkPhysicalDevice internal_device = VK_NULL_HANDLE; + struct loader_device *dev = NULL; + struct loader_instance *inst = NULL; + + if (instance != NULL) { + inst = loader_get_instance(instance); + internal_device = physicalDevice; + } else { + struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice; + internal_device = phys_dev->phys_dev; + inst = (struct loader_instance *)phys_dev->this_instance; + } + + // Get the physical device (ICD) extensions + struct loader_extension_list icd_exts; + icd_exts.list = NULL; + res = loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); + if (VK_SUCCESS != res) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to create ICD extension list"); + goto out; + } + + PFN_vkEnumerateDeviceExtensionProperties enumDeviceExtensionProperties = NULL; + if (layerGIPA != NULL) { + enumDeviceExtensionProperties = + (PFN_vkEnumerateDeviceExtensionProperties)layerGIPA(instance, "vkEnumerateDeviceExtensionProperties"); + } else { + enumDeviceExtensionProperties = inst->disp->layer_inst_disp.EnumerateDeviceExtensionProperties; + } + res = loader_add_device_extensions(inst, enumDeviceExtensionProperties, internal_device, "Unknown", &icd_exts); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to add extensions to list"); + goto out; + } + + // Make sure requested extensions to be enabled are supported + res = loader_validate_device_extensions(inst, &inst->expanded_activated_layer_list, &icd_exts, pCreateInfo); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to validate extensions in list"); + goto out; + } + + dev = loader_create_logical_device(inst, pAllocator); + if (dev == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Copy the application enabled instance layer list into the device + if (NULL != inst->app_activated_layer_list.list) { + dev->app_activated_layer_list.capacity = inst->app_activated_layer_list.capacity; + dev->app_activated_layer_list.count = 
inst->app_activated_layer_list.count; + dev->app_activated_layer_list.list = + loader_device_heap_alloc(dev, inst->app_activated_layer_list.capacity, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); + if (dev->app_activated_layer_list.list == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkCreateDevice: Failed to allocate application activated layer list of size %d.", + inst->app_activated_layer_list.capacity); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memcpy(dev->app_activated_layer_list.list, inst->app_activated_layer_list.list, + sizeof(*dev->app_activated_layer_list.list) * dev->app_activated_layer_list.count); + } else { + dev->app_activated_layer_list.capacity = 0; + dev->app_activated_layer_list.count = 0; + dev->app_activated_layer_list.list = NULL; + } + + // Copy the expanded enabled instance layer list into the device + if (NULL != inst->expanded_activated_layer_list.list) { + dev->expanded_activated_layer_list.capacity = inst->expanded_activated_layer_list.capacity; + dev->expanded_activated_layer_list.count = inst->expanded_activated_layer_list.count; + dev->expanded_activated_layer_list.list = + loader_device_heap_alloc(dev, inst->expanded_activated_layer_list.capacity, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); + if (dev->expanded_activated_layer_list.list == NULL) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkCreateDevice: Failed to allocate expanded activated layer list of size %d.", + inst->expanded_activated_layer_list.capacity); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memcpy(dev->expanded_activated_layer_list.list, inst->expanded_activated_layer_list.list, + sizeof(*dev->expanded_activated_layer_list.list) * dev->expanded_activated_layer_list.count); + } else { + dev->expanded_activated_layer_list.capacity = 0; + dev->expanded_activated_layer_list.count = 0; + dev->expanded_activated_layer_list.list = NULL; + } + + res = loader_create_device_chain(internal_device, pCreateInfo, pAllocator, inst, dev, layerGIPA, nextGDPA); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "vkCreateDevice: Failed to create device chain."); + goto out; + } + + *pDevice = dev->chain_device; + + // Initialize any device extension dispatch entry's from the instance list + loader_init_dispatch_dev_ext(inst, dev); + + // Initialize WSI device extensions as part of core dispatch since loader + // has dedicated trampoline code for these + loader_init_device_extension_dispatch_table(&dev->loader_dispatch, inst->disp->layer_inst_disp.GetInstanceProcAddr, + dev->loader_dispatch.core_dispatch.GetDeviceProcAddr, inst->instance, *pDevice); + +out: + + // Failure cleanup + if (VK_SUCCESS != res) { + if (NULL != dev) { + loader_destroy_logical_device(inst, dev, pAllocator); + } + } + + if (NULL != icd_exts.list) { + loader_destroy_generic_list(inst, (struct loader_generic_list *)&icd_exts); + } + return res; +} + +VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator, + PFN_vkDestroyDevice destroyFunction) { + struct loader_device *dev; + + if (device == VK_NULL_HANDLE) { + return; + } + + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, NULL); + const struct loader_instance *inst = icd_term->this_instance; + + destroyFunction(device, pAllocator); + dev->chain_device = NULL; + dev->icd_device = NULL; + loader_remove_logical_device(inst, icd_term, dev, pAllocator); +} + +// Given the list of layers to activate in the loader_instance +// structure. 
This function will add a VkLayerInstanceCreateInfo
+// structure to the VkInstanceCreateInfo.pNext pointer.
+// Each activated layer will have its own VkLayerInstanceLink
+// structure that tells the layer what Get*ProcAddr to call to
+// get function pointers to the next layer down.
+// Once the chain info has been created, this function will
+// execute the CreateInstance call chain. Each layer will
+// then have an opportunity in its CreateInstance function
+// to set up its dispatch table when the lower layer returns
+// successfully.
+// Each layer can wrap or not wrap the returned VkInstance object
+// as it sees fit.
+// The instance chain is terminated by a loader function
+// that will call CreateInstance on all available ICDs and
+// cache those VkInstance objects for future use.
+VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+                                      struct loader_instance *inst, VkInstance *created_instance) {
+    uint32_t activated_layers = 0;
+    VkLayerInstanceCreateInfo chain_info;
+    VkLayerInstanceLink *layer_instance_link_info = NULL;
+    VkInstanceCreateInfo loader_create_info;
+    VkResult res;
+
+    PFN_vkGetInstanceProcAddr next_gipa = loader_gpa_instance_internal;
+    PFN_vkGetInstanceProcAddr cur_gipa = loader_gpa_instance_internal;
+    PFN_vkGetDeviceProcAddr cur_gdpa = loader_gpa_device_internal;
+    PFN_GetPhysicalDeviceProcAddr next_gpdpa = loader_gpdpa_instance_internal;
+    PFN_GetPhysicalDeviceProcAddr cur_gpdpa = loader_gpdpa_instance_internal;
+
+    memcpy(&loader_create_info, pCreateInfo, sizeof(VkInstanceCreateInfo));
+
+    if (inst->expanded_activated_layer_list.count > 0) {
+        chain_info.u.pLayerInfo = NULL;
+        chain_info.pNext = pCreateInfo->pNext;
+        chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO;
+        chain_info.function = VK_LAYER_LINK_INFO;
+        loader_create_info.pNext = &chain_info;
+
+        layer_instance_link_info = loader_stack_alloc(sizeof(VkLayerInstanceLink) * inst->expanded_activated_layer_list.count);
+        if (!layer_instance_link_info) {
+            loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
+                       "loader_create_instance_chain: Failed to alloc Instance"
+                       " objects for layer");
+            return VK_ERROR_OUT_OF_HOST_MEMORY;
+        }
+
+        // Create instance chain of enabled layers
+        for (int32_t i = inst->expanded_activated_layer_list.count - 1; i >= 0; i--) {
+            struct loader_layer_properties *layer_prop = &inst->expanded_activated_layer_list.list[i];
+            loader_platform_dl_handle lib_handle;
+
+            lib_handle = loaderOpenLayerFile(inst, "instance", layer_prop);
+            if (!lib_handle) {
+                continue;
+            }
+
+            if (NULL == layer_prop->functions.negotiate_layer_interface) {
+                PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_interface = NULL;
+                bool functions_in_interface = false;
+                if (strlen(layer_prop->functions.str_negotiate_interface) == 0) {
+                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
+                        lib_handle, "vkNegotiateLoaderLayerInterfaceVersion");
+                } else {
+                    negotiate_interface = (PFN_vkNegotiateLoaderLayerInterfaceVersion)loader_platform_get_proc_address(
+                        lib_handle, layer_prop->functions.str_negotiate_interface);
+                }
+
+                // If we can negotiate an interface version, then we can also
+                // get everything we need from the one function call, so try
+                // that first, and see if we can get all the function pointers
+                // necessary from that one call.
+ if (NULL != negotiate_interface) { + layer_prop->functions.negotiate_layer_interface = negotiate_interface; + + VkNegotiateLayerInterface interface_struct; + + if (loaderGetLayerInterfaceVersion(negotiate_interface, &interface_struct)) { + // Go ahead and set the properties version to the + // correct value. + layer_prop->interface_version = interface_struct.loaderLayerInterfaceVersion; + + // If the interface is 2 or newer, we have access to the + // new GetPhysicalDeviceProcAddr function, so grab it, + // and the other necessary functions, from the + // structure. + if (interface_struct.loaderLayerInterfaceVersion > 1) { + cur_gipa = interface_struct.pfnGetInstanceProcAddr; + cur_gdpa = interface_struct.pfnGetDeviceProcAddr; + cur_gpdpa = interface_struct.pfnGetPhysicalDeviceProcAddr; + if (cur_gipa != NULL) { + // We've set the functions, so make sure we + // don't do the unnecessary calls later. + functions_in_interface = true; + } + } + } + } + + if (!functions_in_interface) { + if ((cur_gipa = layer_prop->functions.get_instance_proc_addr) == NULL) { + if (strlen(layer_prop->functions.str_gipa) == 0) { + cur_gipa = + (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr"); + layer_prop->functions.get_instance_proc_addr = cur_gipa; + } else { + cur_gipa = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, + layer_prop->functions.str_gipa); + } + + if (NULL == cur_gipa) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_create_instance_chain: Failed to" + " find \'vkGetInstanceProcAddr\' in " + "layer %s", + layer_prop->lib_name); + continue; + } + } + } + } + + layer_instance_link_info[activated_layers].pNext = chain_info.u.pLayerInfo; + layer_instance_link_info[activated_layers].pfnNextGetInstanceProcAddr = next_gipa; + layer_instance_link_info[activated_layers].pfnNextGetPhysicalDeviceProcAddr = next_gpdpa; + next_gipa = cur_gipa; + if (layer_prop->interface_version > 1 && cur_gpdpa != NULL) { + layer_prop->functions.get_physical_device_proc_addr = cur_gpdpa; + next_gpdpa = cur_gpdpa; + } + if (layer_prop->interface_version > 1 && cur_gipa != NULL) { + layer_prop->functions.get_instance_proc_addr = cur_gipa; + } + if (layer_prop->interface_version > 1 && cur_gdpa != NULL) { + layer_prop->functions.get_device_proc_addr = cur_gdpa; + } + + chain_info.u.pLayerInfo = &layer_instance_link_info[activated_layers]; + + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Insert instance layer %s (%s)", layer_prop->info.layerName, + layer_prop->lib_name); + + activated_layers++; + } + } + + VkLoaderFeatureFlags feature_flags = 0; +#if defined(_WIN32) + IDXGIFactory6* dxgi_factory = NULL; + HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory); + if (hres == S_OK) { + feature_flags |= VK_LOADER_FEATURE_PHYSICAL_DEVICE_SORTING; + dxgi_factory->lpVtbl->Release(dxgi_factory); + } +#endif + + PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)next_gipa(*created_instance, "vkCreateInstance"); + if (fpCreateInstance) { + const VkLayerInstanceCreateInfo instance_dispatch = { + .sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO, + .pNext = loader_create_info.pNext, + .function = VK_LOADER_DATA_CALLBACK, + .u = { + .pfnSetInstanceLoaderData = vkSetInstanceDispatch, + }, + }; + const VkLayerInstanceCreateInfo device_callback = { + .sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO, + .pNext = &instance_dispatch, + .function = VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK, + .u = { + 
.layerDevice = { + .pfnLayerCreateDevice = loader_layer_create_device, + .pfnLayerDestroyDevice = loader_layer_destroy_device, + }, + }, + }; + const VkLayerInstanceCreateInfo loader_features = { + .sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO, + .pNext = &device_callback, + .function = VK_LOADER_FEATURES, + .u = { + .loaderFeatures = feature_flags, + }, + }; + loader_create_info.pNext = &loader_features; + + res = fpCreateInstance(&loader_create_info, pAllocator, created_instance); + } else { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_create_instance_chain: Failed to find " + "\'vkCreateInstance\'"); + // Couldn't find CreateInstance function! + res = VK_ERROR_INITIALIZATION_FAILED; + } + + if (res == VK_SUCCESS) { + loader_init_instance_core_dispatch_table(&inst->disp->layer_inst_disp, next_gipa, *created_instance); + inst->instance = *created_instance; + } + + return res; +} + +void loaderActivateInstanceLayerExtensions(struct loader_instance *inst, VkInstance created_inst) { + loader_init_instance_extension_dispatch_table(&inst->disp->layer_inst_disp, inst->disp->layer_inst_disp.GetInstanceProcAddr, + created_inst); +} + +VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst, + struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer, + PFN_vkGetDeviceProcAddr *layerNextGDPA) { + uint32_t activated_layers = 0; + VkLayerDeviceLink *layer_device_link_info; + VkLayerDeviceCreateInfo chain_info; + VkDeviceCreateInfo loader_create_info; + VkResult res; + + PFN_vkGetDeviceProcAddr fpGDPA = NULL, nextGDPA = loader_gpa_device_internal; + PFN_vkGetInstanceProcAddr fpGIPA = NULL, nextGIPA = loader_gpa_instance_internal; + + memcpy(&loader_create_info, pCreateInfo, sizeof(VkDeviceCreateInfo)); + + // Before we continue, we need to find out if the KHR_device_group extension is in the enabled list. If it is, we then + // need to look for the corresponding VkDeviceGroupDeviceCreateInfoKHR struct in the device list. This is because we + // need to replace all the incoming physical device values (which are really loader trampoline physical device values) + // with the layer/ICD version. + { + VkBaseOutStructure *pNext = (VkBaseOutStructure *)loader_create_info.pNext; + VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&loader_create_info; + while (NULL != pNext) { + if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) { + VkDeviceGroupDeviceCreateInfoKHR *cur_struct = (VkDeviceGroupDeviceCreateInfoKHR *)pNext; + if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) { + VkDeviceGroupDeviceCreateInfoKHR *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfoKHR)); + VkPhysicalDevice *phys_dev_array = NULL; + if (NULL == temp_struct) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfoKHR)); + phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount); + if (NULL == phys_dev_array) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + // Before calling down, replace the incoming physical device values (which are really loader trampoline + // physical devices) with the next layer (or possibly even the terminator) physical device values. 
+ struct loader_physical_device_tramp *cur_tramp; + for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) { + cur_tramp = (struct loader_physical_device_tramp *)cur_struct->pPhysicalDevices[phys_dev]; + phys_dev_array[phys_dev] = cur_tramp->phys_dev; + } + temp_struct->pPhysicalDevices = phys_dev_array; + + // Replace the old struct in the pNext chain with this one. + pPrev->pNext = (VkBaseOutStructure *)temp_struct; + pNext = (VkBaseOutStructure *)temp_struct; + } + break; + } + + pPrev = pNext; + pNext = pNext->pNext; + } + } + + layer_device_link_info = loader_stack_alloc(sizeof(VkLayerDeviceLink) * dev->expanded_activated_layer_list.count); + if (!layer_device_link_info) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_create_device_chain: Failed to alloc Device objects" + " for layer. Skipping Layer."); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + if (dev->expanded_activated_layer_list.count > 0) { + chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; + chain_info.function = VK_LAYER_LINK_INFO; + chain_info.u.pLayerInfo = NULL; + chain_info.pNext = loader_create_info.pNext; + loader_create_info.pNext = &chain_info; + + bool done = false; + + // Create instance chain of enabled layers + for (int32_t i = dev->expanded_activated_layer_list.count - 1; i >= 0; i--) { + struct loader_layer_properties *layer_prop = &dev->expanded_activated_layer_list.list[i]; + loader_platform_dl_handle lib_handle; + + lib_handle = loaderOpenLayerFile(inst, "device", layer_prop); + if (!lib_handle || done) { + continue; + } + + // The Get*ProcAddr pointers will already be filled in if they were received from either the json file or the + // version negotiation + if ((fpGIPA = layer_prop->functions.get_instance_proc_addr) == NULL) { + if (strlen(layer_prop->functions.str_gipa) == 0) { + fpGIPA = (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetInstanceProcAddr"); + layer_prop->functions.get_instance_proc_addr = fpGIPA; + } else + fpGIPA = + (PFN_vkGetInstanceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gipa); + if (!fpGIPA) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_create_device_chain: Failed to find " + "\'vkGetInstanceProcAddr\' in layer %s. 
Skipping" + " layer.", + layer_prop->lib_name); + continue; + } + } + + if (fpGIPA == callingLayer) { + if (layerNextGDPA != NULL) { + *layerNextGDPA = nextGDPA; + } + done = true; + continue; + } + + if ((fpGDPA = layer_prop->functions.get_device_proc_addr) == NULL) { + if (strlen(layer_prop->functions.str_gdpa) == 0) { + fpGDPA = (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, "vkGetDeviceProcAddr"); + layer_prop->functions.get_device_proc_addr = fpGDPA; + } else + fpGDPA = + (PFN_vkGetDeviceProcAddr)loader_platform_get_proc_address(lib_handle, layer_prop->functions.str_gdpa); + if (!fpGDPA) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Failed to find vkGetDeviceProcAddr in layer %s", + layer_prop->lib_name); + continue; + } + } + + layer_device_link_info[activated_layers].pNext = chain_info.u.pLayerInfo; + layer_device_link_info[activated_layers].pfnNextGetInstanceProcAddr = nextGIPA; + layer_device_link_info[activated_layers].pfnNextGetDeviceProcAddr = nextGDPA; + chain_info.u.pLayerInfo = &layer_device_link_info[activated_layers]; + nextGIPA = fpGIPA; + nextGDPA = fpGDPA; + + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Inserted device layer %s (%s)", layer_prop->info.layerName, + layer_prop->lib_name); + + activated_layers++; + } + } + + VkDevice created_device = (VkDevice)dev; + PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)nextGIPA(inst->instance, "vkCreateDevice"); + if (fpCreateDevice) { + VkLayerDeviceCreateInfo create_info_disp; + + create_info_disp.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; + create_info_disp.function = VK_LOADER_DATA_CALLBACK; + + create_info_disp.u.pfnSetDeviceLoaderData = vkSetDeviceDispatch; + + create_info_disp.pNext = loader_create_info.pNext; + loader_create_info.pNext = &create_info_disp; + res = fpCreateDevice(pd, &loader_create_info, pAllocator, &created_device); + if (res != VK_SUCCESS) { + return res; + } + dev->chain_device = created_device; + } else { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_create_device_chain: Failed to find \'vkCreateDevice\' " + "in layers or ICD"); + // Couldn't find CreateDevice function! 
+ return VK_ERROR_INITIALIZATION_FAILED; + } + + // Initialize device dispatch table + loader_init_device_dispatch_table(&dev->loader_dispatch, nextGDPA, dev->chain_device); + + return res; +} + +VkResult loaderValidateLayers(const struct loader_instance *inst, const uint32_t layer_count, + const char *const *ppEnabledLayerNames, const struct loader_layer_list *list) { + struct loader_layer_properties *prop; + + for (uint32_t i = 0; i < layer_count; i++) { + VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, ppEnabledLayerNames[i]); + if (result != VK_STRING_ERROR_NONE) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderValidateLayers: Device ppEnabledLayerNames " + "contains string that is too long or is badly formed"); + return VK_ERROR_LAYER_NOT_PRESENT; + } + + prop = loaderFindLayerProperty(ppEnabledLayerNames[i], list); + if (NULL == prop) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loaderValidateLayers: Layer %d does not exist in the list of available layers", i); + return VK_ERROR_LAYER_NOT_PRESENT; + } + } + return VK_SUCCESS; +} + +VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts, + const struct loader_layer_list *instance_layers, + const VkInstanceCreateInfo *pCreateInfo) { + VkExtensionProperties *extension_prop; + char *env_value; + bool check_if_known = true; + VkResult res = VK_SUCCESS; + + struct loader_layer_list active_layers; + struct loader_layer_list expanded_layers; + memset(&active_layers, 0, sizeof(active_layers)); + memset(&expanded_layers, 0, sizeof(expanded_layers)); + if (!loaderInitLayerList(inst, &active_layers)) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + if (!loaderInitLayerList(inst, &expanded_layers)) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Build the lists of active layers (including metalayers) and expanded layers (with metalayers resolved to their components) + loaderAddImplicitLayers(inst, &active_layers, &expanded_layers, instance_layers); + res = loaderAddEnvironmentLayers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, ENABLED_LAYERS_ENV, &active_layers, &expanded_layers, + instance_layers); + if (res != VK_SUCCESS) { + goto out; + } + res = loaderAddLayerNamesToList(inst, &active_layers, &expanded_layers, pCreateInfo->enabledLayerCount, + pCreateInfo->ppEnabledLayerNames, instance_layers); + if (VK_SUCCESS != res) { + goto out; + } + + for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { + VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); + if (result != VK_STRING_ERROR_NONE) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_validate_instance_extensions: Instance ppEnabledExtensionNames contains " + "string that is too long or is badly formed"); + res = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Check if a user wants to disable the instance extension filtering behavior + env_value = loader_getenv("VK_LOADER_DISABLE_INST_EXT_FILTER", inst); + if (NULL != env_value && atoi(env_value) != 0) { + check_if_known = false; + } + loader_free_getenv(env_value, inst); + + if (check_if_known) { + // See if the extension is in the list of supported extensions + bool found = false; + for (uint32_t j = 0; LOADER_INSTANCE_EXTENSIONS[j] != NULL; j++) { + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], LOADER_INSTANCE_EXTENSIONS[j]) == 0) { + found = true; + break; + } + } + + // If it isn't in the list, return an 
error + if (!found) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_validate_instance_extensions: Extension %s not found in list of known instance extensions.", + pCreateInfo->ppEnabledExtensionNames[i]); + res = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + } + + extension_prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], icd_exts); + + if (extension_prop) { + continue; + } + + extension_prop = NULL; + + // Not in global list, search layer extension lists + struct loader_layer_properties *layer_prop = NULL; + for (uint32_t j = 0; NULL == extension_prop && j < expanded_layers.count; ++j) { + extension_prop = + get_extension_property(pCreateInfo->ppEnabledExtensionNames[i], &expanded_layers.list[j].instance_extension_list); + if (extension_prop) { + // Found the extension in one of the layers enabled by the app. + break; + } + + layer_prop = loaderFindLayerProperty(expanded_layers.list[j].info.layerName, instance_layers); + if (NULL == layer_prop) { + // Should NOT get here, loaderValidateLayers should have already filtered this case out. + continue; + } + } + + if (!extension_prop) { + // Didn't find extension name in any of the global layers, error out + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_validate_instance_extensions: Instance extension %s not supported by available ICDs or enabled " + "layers.", + pCreateInfo->ppEnabledExtensionNames[i]); + res = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + } + +out: + loaderDestroyLayerList(inst, NULL, &active_layers); + loaderDestroyLayerList(inst, NULL, &expanded_layers); + return res; +} + +VkResult loader_validate_device_extensions(struct loader_instance *this_instance, + const struct loader_layer_list *activated_device_layers, + const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo) { + VkExtensionProperties *extension_prop; + struct loader_layer_properties *layer_prop; + + for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { + VkStringErrorFlags result = vk_string_validate(MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]); + if (result != VK_STRING_ERROR_NONE) { + loader_log(this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_validate_device_extensions: Device ppEnabledExtensionNames contains " + "string that is too long or is badly formed"); + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + + const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; + extension_prop = get_extension_property(extension_name, icd_exts); + + if (extension_prop) { + continue; + } + + // Not in global list, search activated layer extension lists + for (uint32_t j = 0; j < activated_device_layers->count; j++) { + layer_prop = &activated_device_layers->list[j]; + + extension_prop = get_dev_extension_property(extension_name, &layer_prop->device_extension_list); + if (extension_prop) { + // Found the extension in one of the layers enabled by the app. 
+ break; + } + } + + if (!extension_prop) { + // Didn't find extension name in any of the device layers, error out + loader_log(this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "loader_validate_device_extensions: Device extension %s not supported by selected physical device " + "or enabled layers.", + pCreateInfo->ppEnabledExtensionNames[i]); + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + return VK_SUCCESS; +} + +// Terminator functions for the Instance chain +// All named terminator_ +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { + struct loader_icd_term *icd_term; + VkExtensionProperties *prop; + char **filtered_extension_names = NULL; + VkInstanceCreateInfo icd_create_info; + VkResult res = VK_SUCCESS; + bool one_icd_successful = false; + + struct loader_instance *ptr_instance = (struct loader_instance *)*pInstance; + memcpy(&icd_create_info, pCreateInfo, sizeof(icd_create_info)); + + icd_create_info.enabledLayerCount = 0; + icd_create_info.ppEnabledLayerNames = NULL; + + // NOTE: Need to filter the extensions to only those supported by the ICD. + // No ICD will advertise support for layers. An ICD library could + // support a layer, but it would be independent of the actual ICD, + // just in the same library. + filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *)); + if (!filtered_extension_names) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "terminator_CreateInstance: Failed create extension name array for %d extensions", + pCreateInfo->enabledExtensionCount); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + icd_create_info.ppEnabledExtensionNames = (const char *const *)filtered_extension_names; + + for (uint32_t i = 0; i < ptr_instance->icd_tramp_list.count; i++) { + icd_term = loader_icd_add(ptr_instance, &ptr_instance->icd_tramp_list.scanned_list[i]); + if (NULL == icd_term) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "terminator_CreateInstance: Failed to add ICD %d to ICD trampoline list.", i); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // If any error happens after here, we need to remove the ICD from the list, + // because we've already added it, but haven't validated it + + // Make sure that we reset the pApplicationInfo so we don't get an old pointer + icd_create_info.pApplicationInfo = pCreateInfo->pApplicationInfo; + icd_create_info.enabledExtensionCount = 0; + struct loader_extension_list icd_exts; + + loader_log(ptr_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "Build ICD instance extension list"); + // traverse scanned icd list adding non-duplicate extensions to the list + res = loader_init_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + // If out of memory, bail immediately. + goto out; + } else if (VK_SUCCESS != res) { + // Something bad happened with this ICD, so free it and try the + // next. 
+ ptr_instance->icd_terms = icd_term->next; + icd_term->next = NULL; + loader_icd_destroy(ptr_instance, icd_term, pAllocator); + continue; + } + + res = loader_add_instance_extensions(ptr_instance, icd_term->scanned_icd->EnumerateInstanceExtensionProperties, + icd_term->scanned_icd->lib_name, &icd_exts); + if (VK_SUCCESS != res) { + loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts); + if (VK_ERROR_OUT_OF_HOST_MEMORY == res) { + // If out of memory, bail immediately. + goto out; + } else { + // Something bad happened with this ICD, so free it and try the next. + ptr_instance->icd_terms = icd_term->next; + icd_term->next = NULL; + loader_icd_destroy(ptr_instance, icd_term, pAllocator); + continue; + } + } + + for (uint32_t j = 0; j < pCreateInfo->enabledExtensionCount; j++) { + prop = get_extension_property(pCreateInfo->ppEnabledExtensionNames[j], &icd_exts); + if (prop) { + filtered_extension_names[icd_create_info.enabledExtensionCount] = (char *)pCreateInfo->ppEnabledExtensionNames[j]; + icd_create_info.enabledExtensionCount++; + } + } + + loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&icd_exts); + + // Get the driver version from vkEnumerateInstanceVersion + uint32_t icd_version = VK_API_VERSION_1_0; + VkResult icd_result = VK_SUCCESS; + if (icd_term->scanned_icd->api_version >= VK_API_VERSION_1_1) { + PFN_vkEnumerateInstanceVersion icd_enumerate_instance_version = (PFN_vkEnumerateInstanceVersion) + icd_term->scanned_icd->GetInstanceProcAddr(NULL, "vkEnumerateInstanceVersion"); + if (icd_enumerate_instance_version != NULL) { + icd_result = icd_enumerate_instance_version(&icd_version); + if (icd_result != VK_SUCCESS) { + icd_version = VK_API_VERSION_1_0; + loader_log(ptr_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, "terminator_CreateInstance: ICD \"%s\" " + "vkEnumerateInstanceVersion returned error. The ICD will be treated as a 1.0 ICD", + icd_term->scanned_icd->lib_name); + } + } + } + + // Create an instance, substituting the version to 1.0 if necessary + VkApplicationInfo icd_app_info; + uint32_t icd_version_nopatch = VK_MAKE_VERSION(VK_VERSION_MAJOR(icd_version), VK_VERSION_MINOR(icd_version), 0); + uint32_t requested_version = pCreateInfo == NULL || pCreateInfo->pApplicationInfo == NULL ? VK_API_VERSION_1_0 : pCreateInfo->pApplicationInfo->apiVersion; + if ((requested_version != 0) && (icd_version_nopatch == VK_API_VERSION_1_0)) { + if (icd_create_info.pApplicationInfo == NULL) { + memset(&icd_app_info, 0, sizeof(icd_app_info)); + } else { + memcpy(&icd_app_info, icd_create_info.pApplicationInfo, sizeof(icd_app_info)); + } + icd_app_info.apiVersion = icd_version; + icd_create_info.pApplicationInfo = &icd_app_info; + } + icd_result = ptr_instance->icd_tramp_list.scanned_list[i].CreateInstance(&icd_create_info, pAllocator, &(icd_term->instance)); + if (VK_ERROR_OUT_OF_HOST_MEMORY == icd_result) { + // If out of memory, bail immediately. + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } else if (VK_SUCCESS != icd_result) { + loader_log(ptr_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "terminator_CreateInstance: Failed to CreateInstance in " + "ICD %d. 
Skipping ICD.", + i); + ptr_instance->icd_terms = icd_term->next; + icd_term->next = NULL; + loader_icd_destroy(ptr_instance, icd_term, pAllocator); + continue; + } + + if (!loader_icd_init_entries(icd_term, icd_term->instance, + ptr_instance->icd_tramp_list.scanned_list[i].GetInstanceProcAddr)) { + loader_log(ptr_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "terminator_CreateInstance: Failed to CreateInstance and find " + "entrypoints with ICD. Skipping ICD."); + ptr_instance->icd_terms = icd_term->next; + icd_term->next = NULL; + loader_icd_destroy(ptr_instance, icd_term, pAllocator); + continue; + } + + // If we made it this far, at least one ICD was successful + one_icd_successful = true; + } + + // If no ICDs were added to instance list and res is unchanged from it's initial value, the loader was unable to + // find a suitable ICD. + if (VK_SUCCESS == res && (ptr_instance->icd_terms == NULL || !one_icd_successful)) { + res = VK_ERROR_INCOMPATIBLE_DRIVER; + } + +out: + + if (VK_SUCCESS != res) { + while (NULL != ptr_instance->icd_terms) { + icd_term = ptr_instance->icd_terms; + ptr_instance->icd_terms = icd_term->next; + if (NULL != icd_term->instance) { + icd_term->dispatch.DestroyInstance(icd_term->instance, pAllocator); + } + loader_icd_destroy(ptr_instance, icd_term, pAllocator); + } + } + + return res; +} + +VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { + struct loader_instance *ptr_instance = loader_instance(instance); + if (NULL == ptr_instance) { + return; + } + struct loader_icd_term *icd_terms = ptr_instance->icd_terms; + struct loader_icd_term *next_icd_term; + + // Remove this instance from the list of instances: + struct loader_instance *prev = NULL; + struct loader_instance *next = loader.instances; + while (next != NULL) { + if (next == ptr_instance) { + // Remove this instance from the list: + if (prev) + prev->next = next->next; + else + loader.instances = next->next; + break; + } + prev = next; + next = next->next; + } + + while (NULL != icd_terms) { + if (icd_terms->instance) { + icd_terms->dispatch.DestroyInstance(icd_terms->instance, pAllocator); + } + next_icd_term = icd_terms->next; + icd_terms->instance = VK_NULL_HANDLE; + loader_icd_destroy(ptr_instance, icd_terms, pAllocator); + + icd_terms = next_icd_term; + } + + loaderDeleteLayerListAndProperties(ptr_instance, &ptr_instance->instance_layer_list); + loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list); + loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list); + if (NULL != ptr_instance->phys_devs_term) { + for (uint32_t i = 0; i < ptr_instance->phys_dev_count_term; i++) { + loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term[i]); + } + loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_term); + } + if (NULL != ptr_instance->phys_dev_groups_term) { + for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_term; i++) { + loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term[i]); + } + loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_term); + } + loader_free_dev_ext_table(ptr_instance); + loader_free_phys_dev_ext_table(ptr_instance); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { + VkResult res = VK_SUCCESS; + struct loader_physical_device_term 
*phys_dev_term; + phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + struct loader_device *dev = (struct loader_device *)*pDevice; + PFN_vkCreateDevice fpCreateDevice = icd_term->dispatch.CreateDevice; + struct loader_extension_list icd_exts; + + VkBaseOutStructure *caller_dgci_container = NULL; + VkDeviceGroupDeviceCreateInfoKHR *caller_dgci = NULL; + + dev->phys_dev_term = phys_dev_term; + + icd_exts.list = NULL; + + if (fpCreateDevice == NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "terminator_CreateDevice: No vkCreateDevice command exposed " + "by ICD %s", + icd_term->scanned_icd->lib_name); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + VkDeviceCreateInfo localCreateInfo; + memcpy(&localCreateInfo, pCreateInfo, sizeof(localCreateInfo)); + + // NOTE: Need to filter the extensions to only those supported by the ICD. + // No ICD will advertise support for layers. An ICD library could support a layer, + // but it would be independent of the actual ICD, just in the same library. + char **filtered_extension_names = NULL; + if (0 < pCreateInfo->enabledExtensionCount) { + filtered_extension_names = loader_stack_alloc(pCreateInfo->enabledExtensionCount * sizeof(char *)); + if (NULL == filtered_extension_names) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "terminator_CreateDevice: Failed to create extension name " + "storage for %d extensions", + pCreateInfo->enabledExtensionCount); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } + + localCreateInfo.enabledLayerCount = 0; + localCreateInfo.ppEnabledLayerNames = NULL; + + localCreateInfo.enabledExtensionCount = 0; + localCreateInfo.ppEnabledExtensionNames = (const char *const *)filtered_extension_names; + + // Get the physical device (ICD) extensions + res = loader_init_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts, sizeof(VkExtensionProperties)); + if (VK_SUCCESS != res) { + goto out; + } + + res = loader_add_device_extensions(icd_term->this_instance, icd_term->dispatch.EnumerateDeviceExtensionProperties, + phys_dev_term->phys_dev, icd_term->scanned_icd->lib_name, &icd_exts); + if (res != VK_SUCCESS) { + goto out; + } + + for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { + const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i]; + VkExtensionProperties *prop = get_extension_property(extension_name, &icd_exts); + if (prop) { + filtered_extension_names[localCreateInfo.enabledExtensionCount] = (char *)extension_name; + localCreateInfo.enabledExtensionCount++; + } else { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_DEBUG_BIT_EXT, 0, + "vkCreateDevice extension %s not available for " + "devices associated with ICD %s", + extension_name, icd_term->scanned_icd->lib_name); + } + } + + // Before we continue, If KHX_device_group is the list of enabled and viable extensions, then we then need to look for the + // corresponding VkDeviceGroupDeviceCreateInfo struct in the device list and replace all the physical device values (which + // are really loader physical device terminator values) with the ICD versions. 
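+    // Editorial aside (illustrative only, not part of the loader sources; names such as dgci,
+    // group_props and create_info are placeholders): from the application's side, the structure
+    // rewritten below is chained into VkDeviceCreateInfo roughly like so:
+    //     VkDeviceGroupDeviceCreateInfo dgci = {0};
+    //     dgci.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO;
+    //     dgci.physicalDeviceCount = group_props.physicalDeviceCount;  // from vkEnumeratePhysicalDeviceGroups
+    //     dgci.pPhysicalDevices = group_props.physicalDevices;         // loader terminator handles
+    //     create_info.pNext = &dgci;
+    // The block below swaps each of those loader handles for the ICD's own VkPhysicalDevice before
+    // the call is passed down to the driver.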
+    //if (icd_term->this_instance->enabled_known_extensions.khr_device_group_creation == 1) {
+    {
+        VkBaseOutStructure *pNext = (VkBaseOutStructure *)localCreateInfo.pNext;
+        VkBaseOutStructure *pPrev = (VkBaseOutStructure *)&localCreateInfo;
+        while (NULL != pNext) {
+            if (VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO == pNext->sType) {
+                VkDeviceGroupDeviceCreateInfo *cur_struct = (VkDeviceGroupDeviceCreateInfo *)pNext;
+                if (0 < cur_struct->physicalDeviceCount && NULL != cur_struct->pPhysicalDevices) {
+                    VkDeviceGroupDeviceCreateInfo *temp_struct = loader_stack_alloc(sizeof(VkDeviceGroupDeviceCreateInfo));
+                    VkPhysicalDevice *phys_dev_array = NULL;
+                    if (NULL == temp_struct) {
+                        return VK_ERROR_OUT_OF_HOST_MEMORY;
+                    }
+                    memcpy(temp_struct, cur_struct, sizeof(VkDeviceGroupDeviceCreateInfo));
+                    phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * cur_struct->physicalDeviceCount);
+                    if (NULL == phys_dev_array) {
+                        return VK_ERROR_OUT_OF_HOST_MEMORY;
+                    }
+
+                    // Before calling down, replace the incoming physical device values (which are really loader terminator
+                    // physical devices) with the ICD's physical device values.
+                    struct loader_physical_device_term *cur_term;
+                    for (uint32_t phys_dev = 0; phys_dev < cur_struct->physicalDeviceCount; phys_dev++) {
+                        cur_term = (struct loader_physical_device_term *)cur_struct->pPhysicalDevices[phys_dev];
+                        phys_dev_array[phys_dev] = cur_term->phys_dev;
+                    }
+                    temp_struct->pPhysicalDevices = phys_dev_array;
+
+                    // Keep track of pointers to restore pNext chain before returning
+                    caller_dgci_container = pPrev;
+                    caller_dgci = cur_struct;
+
+                    // Replace the old struct in the pNext chain with this one.
+                    pPrev->pNext = (VkBaseOutStructure *)temp_struct;
+                    pNext = (VkBaseOutStructure *)temp_struct;
+                }
+                break;
+            }
+
+            pPrev = pNext;
+            pNext = pNext->pNext;
+        }
+    }
+
+    // Handle loader emulation for structs that are not supported by the ICD:
+    // Presently, the emulation leaves the pNext chain alone. This means that the ICD will receive items in the chain which
+    // are not recognized by the ICD. If this causes the ICD to fail, then the items would have to be removed here. The current
+    // implementation does not remove them because copying the pNext chain would be impossible if the loader does not recognize
+    // any of the struct types, as the loader would not know the size to allocate and copy.
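+    // Editorial aside (illustrative only, not from the loader sources; features2 is a placeholder
+    // for the chained struct): for a chained VkPhysicalDeviceFeatures2, the emulation below is
+    // roughly equivalent to the application having passed the embedded core features directly:
+    //     localCreateInfo.pEnabledFeatures = &features2->features;
+    // and is only applied when the ICD exposes neither vkGetPhysicalDeviceFeatures2 nor its KHR
+    // alias (and the corresponding instance extension was enabled).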
+ //if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) { + { + const void *pNext = localCreateInfo.pNext; + while (pNext != NULL) { + switch (*(VkStructureType *)pNext) { + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: { + const VkPhysicalDeviceFeatures2KHR *features = pNext; + + if (icd_term->dispatch.GetPhysicalDeviceFeatures2 == NULL && icd_term->dispatch.GetPhysicalDeviceFeatures2KHR == NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkCreateDevice: Emulating handling of VkPhysicalDeviceFeatures2 in pNext chain for ICD \"%s\"", + icd_term->scanned_icd->lib_name); + + // Verify that VK_KHR_get_physical_device_properties2 is enabled + if (icd_term->this_instance->enabled_known_extensions.khr_get_physical_device_properties2) { + localCreateInfo.pEnabledFeatures = &features->features; + } + } + + // Leave this item in the pNext chain for now + + pNext = features->pNext; + break; + } + + case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: { + const VkDeviceGroupDeviceCreateInfoKHR *group_info = pNext; + + if (icd_term->dispatch.EnumeratePhysicalDeviceGroups == NULL && icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR == NULL) { + loader_log( + icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkCreateDevice: Emulating handling of VkPhysicalDeviceGroupProperties in pNext chain for ICD \"%s\"", + icd_term->scanned_icd->lib_name); + + // The group must contain only this one device, since physical device groups aren't actually supported + if (group_info->physicalDeviceCount != 1) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkCreateDevice: Emulation failed to create device from device group info"); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + } + + // Nothing needs to be done here because we're leaving the item in the pNext chain and because the spec states + // that the physicalDevice argument must be included in the device group, and we've already checked that it is + + pNext = group_info->pNext; + break; + } + + // Multiview properties are also allowed, but since VK_KHX_multiview is a device extension, we'll just let the ICD + // handle that error when the user enables the extension here + default: { + const VkBaseInStructure *header = pNext; + pNext = header->pNext; + break; + } + } + } + } + + // Every extension that has a loader-defined terminator needs to be marked as enabled or disabled so that we know whether or + // not to return that terminator when vkGetDeviceProcAddr is called + for (uint32_t i = 0; i < localCreateInfo.enabledExtensionCount; ++i) { + if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { + dev->extensions.khr_swapchain_enabled = true; + } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME)) { + dev->extensions.khr_display_swapchain_enabled = true; + } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { + dev->extensions.khr_device_group_enabled = true; + } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) { + dev->extensions.ext_debug_marker_enabled = true; + } else if (!strcmp(localCreateInfo.ppEnabledExtensionNames[i], "VK_EXT_full_screen_exclusive")) { + dev->extensions.ext_full_screen_exclusive_enabled = true; + } + } + dev->extensions.ext_debug_utils_enabled = 
icd_term->this_instance->enabled_known_extensions.ext_debug_utils; + + if (!dev->extensions.khr_device_group_enabled) { + VkPhysicalDeviceProperties properties; + icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &properties); + if (properties.apiVersion >= VK_API_VERSION_1_1) { + dev->extensions.khr_device_group_enabled = true; + } + } + + res = fpCreateDevice(phys_dev_term->phys_dev, &localCreateInfo, pAllocator, &dev->icd_device); + if (res != VK_SUCCESS) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "terminator_CreateDevice: Failed in ICD %s vkCreateDevice" + "call", + icd_term->scanned_icd->lib_name); + goto out; + } + + *pDevice = dev->icd_device; + loader_add_logical_device(icd_term->this_instance, icd_term, dev); + + // Init dispatch pointer in new device object + loader_init_dispatch(*pDevice, &dev->loader_dispatch); + +out: + if (NULL != icd_exts.list) { + loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts); + } + + // Restore pNext pointer to old VkDeviceGroupDeviceCreateInfoKHX + // in the chain to maintain consistency for the caller. + if (caller_dgci_container != NULL) { + caller_dgci_container->pNext = (VkBaseOutStructure *)caller_dgci; + } + + return res; +} + +VkResult setupLoaderTrampPhysDevs(VkInstance instance) { + VkResult res = VK_SUCCESS; + VkPhysicalDevice *local_phys_devs = NULL; + struct loader_instance *inst; + uint32_t total_count = 0; + struct loader_physical_device_tramp **new_phys_devs = NULL; + + inst = loader_get_instance(instance); + if (NULL == inst) { + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + // Query how many GPUs there + res = inst->disp->layer_inst_disp.EnumeratePhysicalDevices(instance, &total_count, NULL); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevs: Failed during dispatch call " + "of \'vkEnumeratePhysicalDevices\' to lower layers or " + "loader to get count."); + goto out; + } + + // Really use what the total GPU count is since Optimus and other layers may mess + // the count up. + total_count = inst->total_gpu_count; + + // Create an array for the new physical devices, which will be stored + // in the instance for the trampoline code. + new_phys_devs = (struct loader_physical_device_tramp **)loader_instance_heap_alloc( + inst, total_count * sizeof(struct loader_physical_device_tramp *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_devs) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevs: Failed to allocate new physical device" + " array of size %d", + total_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(new_phys_devs, 0, total_count * sizeof(struct loader_physical_device_tramp *)); + + // Create a temporary array (on the stack) to keep track of the + // returned VkPhysicalDevice values. 
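+    // Editorial note (illustrative only, not part of the loader sources): this is the standard
+    // Vulkan two-call enumeration idiom, seen from the caller's side as:
+    //     uint32_t count = 0;
+    //     vkEnumeratePhysicalDevices(instance, &count, NULL);    // first call: query the count
+    //     vkEnumeratePhysicalDevices(instance, &count, devices); // second call: fill 'devices'
+    // The temporary array allocated below holds the handles returned by the second call.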
+ local_phys_devs = loader_stack_alloc(sizeof(VkPhysicalDevice) * total_count); + if (NULL == local_phys_devs) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevs: Failed to allocate local " + "physical device array of size %d", + total_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(local_phys_devs, 0, sizeof(VkPhysicalDevice) * total_count); + + res = inst->disp->layer_inst_disp.EnumeratePhysicalDevices(instance, &total_count, local_phys_devs); + if (VK_SUCCESS != res) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevs: Failed during dispatch call " + "of \'vkEnumeratePhysicalDevices\' to lower layers or " + "loader to get content."); + goto out; + } + + // Copy or create everything to fill the new array of physical devices + for (uint32_t new_idx = 0; new_idx < total_count; new_idx++) { + // Check if this physical device is already in the old buffer + for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_tramp; old_idx++) { + if (local_phys_devs[new_idx] == inst->phys_devs_tramp[old_idx]->phys_dev) { + new_phys_devs[new_idx] = inst->phys_devs_tramp[old_idx]; + break; + } + } + + // If this physical device isn't in the old buffer, create it + if (NULL == new_phys_devs[new_idx]) { + new_phys_devs[new_idx] = (struct loader_physical_device_tramp *)loader_instance_heap_alloc( + inst, sizeof(struct loader_physical_device_tramp), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_devs[new_idx]) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevs: Failed to allocate " + "physical device trampoline object %d", + new_idx); + total_count = new_idx; + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + // Initialize the new physicalDevice object + loader_set_dispatch((void *)new_phys_devs[new_idx], inst->disp); + new_phys_devs[new_idx]->this_instance = inst; + new_phys_devs[new_idx]->phys_dev = local_phys_devs[new_idx]; + } + } + +out: + + if (VK_SUCCESS != res) { + if (NULL != new_phys_devs) { + for (uint32_t i = 0; i < total_count; i++) { + loader_instance_heap_free(inst, new_phys_devs[i]); + } + loader_instance_heap_free(inst, new_phys_devs); + } + total_count = 0; + } else { + // Free everything that didn't carry over to the new array of + // physical devices + if (NULL != inst->phys_devs_tramp) { + for (uint32_t i = 0; i < inst->phys_dev_count_tramp; i++) { + bool found = false; + for (uint32_t j = 0; j < total_count; j++) { + if (inst->phys_devs_tramp[i] == new_phys_devs[j]) { + found = true; + break; + } + } + if (!found) { + loader_instance_heap_free(inst, inst->phys_devs_tramp[i]); + } + } + loader_instance_heap_free(inst, inst->phys_devs_tramp); + } + + // Swap in the new physical device list + inst->phys_dev_count_tramp = total_count; + inst->phys_devs_tramp = new_phys_devs; + } + + return res; +} + +struct LoaderSortedPhysicalDevice { + uint32_t device_count; + VkPhysicalDevice* physical_devices; + uint32_t icd_index; + struct loader_icd_term* icd_term; +}; + +// This function allocates an array in sorted_devices which must be freed by the caller if not null +VkResult ReadSortedPhysicalDevices(struct loader_instance *inst, struct LoaderSortedPhysicalDevice **sorted_devices, uint32_t* sorted_count) +{ + VkResult res = VK_SUCCESS; + +#if defined(_WIN32) + uint32_t sorted_alloc = 0; + struct loader_icd_term *icd_term = NULL; + IDXGIFactory6* dxgi_factory = NULL; + HRESULT hres = fpCreateDXGIFactory1(&IID_IDXGIFactory6, (void **)&dxgi_factory); + if (hres != 
S_OK) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, "Failed to create DXGI factory 6. Physical devices will not be sorted"); + } + else { + sorted_alloc = 16; + *sorted_devices = loader_instance_heap_alloc(inst, sorted_alloc * sizeof(struct LoaderSortedPhysicalDevice), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (*sorted_devices == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + memset(*sorted_devices, 0, sorted_alloc * sizeof(struct LoaderSortedPhysicalDevice)); + + *sorted_count = 0; + for (uint32_t i = 0; ; ++i) { + IDXGIAdapter1* adapter; + hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED, &IID_IDXGIAdapter1, (void **)&adapter); + if (hres == DXGI_ERROR_NOT_FOUND) { + break; // No more adapters + } + else if (hres != S_OK) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Failed to enumerate adapters by GPU preference at index %u. This adapter will not be sorted", i); + break; + } + + DXGI_ADAPTER_DESC1 description; + hres = adapter->lpVtbl->GetDesc1(adapter, &description); + if (hres != S_OK) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Failed to get adapter LUID index %u. This adapter will not be sorted", i); + continue; + } + + if (sorted_alloc <= i) { + uint32_t old_size = sorted_alloc * sizeof(struct LoaderSortedPhysicalDevice); + *sorted_devices = loader_instance_heap_realloc(inst, *sorted_devices, old_size, 2 * old_size, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (*sorted_devices == NULL) { + adapter->lpVtbl->Release(adapter); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + sorted_alloc *= 2; + } + struct LoaderSortedPhysicalDevice *sorted_array = *sorted_devices; + sorted_array[*sorted_count].device_count = 0; + sorted_array[*sorted_count].physical_devices = NULL; + //*sorted_count = i; + + icd_term = inst->icd_terms; + for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) { + // This is the new behavior, which cannot be run unless the ICD provides EnumerateAdapterPhysicalDevices + if (icd_term->scanned_icd->EnumerateAdapterPhysicalDevices == NULL) { + continue; + } + + uint32_t count; + VkResult vkres = icd_term->scanned_icd->EnumerateAdapterPhysicalDevices(icd_term->instance, description.AdapterLuid, &count, NULL); + if (vkres == VK_ERROR_INCOMPATIBLE_DRIVER) { + continue; // This driver doesn't support the adapter + } else if (vkres == VK_ERROR_OUT_OF_HOST_MEMORY) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } else if (vkres != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "Failed to convert DXGI adapter into Vulkan physical device with unexpected error code"); + continue; + } + + // Get the actual physical devices + if (0 != count) + { + do { + sorted_array[*sorted_count].physical_devices = loader_instance_heap_realloc(inst, sorted_array[*sorted_count].physical_devices, sorted_array[*sorted_count].device_count * sizeof(VkPhysicalDevice), count * sizeof(VkPhysicalDevice), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (sorted_array[*sorted_count].physical_devices == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + break; + } + sorted_array[*sorted_count].device_count = count; + } while ((vkres = icd_term->scanned_icd->EnumerateAdapterPhysicalDevices(icd_term->instance, description.AdapterLuid, &count, sorted_array[*sorted_count].physical_devices)) == VK_INCOMPLETE); + } + + if (vkres != VK_SUCCESS) { + loader_instance_heap_free(inst, sorted_array[*sorted_count].physical_devices); + 
sorted_array[*sorted_count].physical_devices = NULL; + if (vkres == VK_ERROR_OUT_OF_HOST_MEMORY) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } else { + loader_log(inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "Failed to convert DXGI adapter into Vulkan physical device"); + continue; + } + } + inst->total_gpu_count += (sorted_array[*sorted_count].device_count = count); + sorted_array[*sorted_count].icd_index = icd_idx; + sorted_array[*sorted_count].icd_term = icd_term; + ++(*sorted_count); + } + + adapter->lpVtbl->Release(adapter); + } + + dxgi_factory->lpVtbl->Release(dxgi_factory); + } + +out: +#endif + + if (*sorted_count == 0 && *sorted_devices != NULL) { + loader_instance_heap_free(inst, *sorted_devices); + *sorted_devices = NULL; + } + return res; +} + +VkResult setupLoaderTermPhysDevs(struct loader_instance *inst) { + VkResult res = VK_SUCCESS; + struct loader_icd_term *icd_term; + struct loader_phys_dev_per_icd *icd_phys_dev_array = NULL; + struct loader_physical_device_term **new_phys_devs = NULL; + struct LoaderSortedPhysicalDevice *sorted_phys_dev_array = NULL; + uint32_t sorted_count = 0; + + inst->total_gpu_count = 0; + + // Allocate something to store the physical device characteristics + // that we read from each ICD. + icd_phys_dev_array = + (struct loader_phys_dev_per_icd *)loader_stack_alloc(sizeof(struct loader_phys_dev_per_icd) * inst->total_icd_count); + if (NULL == icd_phys_dev_array) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Failed to allocate temporary " + "ICD Physical device info array of size %d", + inst->total_gpu_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(icd_phys_dev_array, 0, sizeof(struct loader_phys_dev_per_icd) * inst->total_icd_count); + + // Get the physical devices supported by platform sorting mechanism into a separate list + res = ReadSortedPhysicalDevices(inst, &sorted_phys_dev_array, &sorted_count); + if (VK_SUCCESS != res) { + goto out; + } + + // For each ICD, query the number of physical devices, and then get an + // internal value for those physical devices. + icd_term = inst->icd_terms; + for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) { + icd_phys_dev_array[icd_idx].count = 0; + icd_phys_dev_array[icd_idx].phys_devs = NULL; + icd_phys_dev_array[icd_idx].this_icd_term = NULL; + + // This is the legacy behavior which should be skipped if EnumerateAdapterPhysicalDevices is available + // and we successfully enumerated sorted adapters using ReadSortedPhysicalDevices. 
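+        // Editorial note: skipping the legacy enumeration here prevents double-counting; the
+        // sorted (DXGI adapter) path above has already reported this ICD's physical devices.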
+#if defined(VK_USE_PLATFORM_WIN32_KHR) + if (sorted_count && icd_term->scanned_icd->EnumerateAdapterPhysicalDevices != NULL) { + continue; + } +#endif + + res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].count, NULL); + if (VK_SUCCESS != res) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Call to " + "ICD %d's \'vkEnumeratePhysicalDevices\' failed with" + " error 0x%08x", + icd_idx, res); + goto out; + } + + icd_phys_dev_array[icd_idx].phys_devs = + (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].count * sizeof(VkPhysicalDevice)); + if (NULL == icd_phys_dev_array[icd_idx].phys_devs) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Failed to allocate temporary " + "ICD Physical device array for ICD %d of size %d", + icd_idx, inst->total_gpu_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].count), + icd_phys_dev_array[icd_idx].phys_devs); + if (VK_SUCCESS != res) { + goto out; + } + inst->total_gpu_count += icd_phys_dev_array[icd_idx].count; + icd_phys_dev_array[icd_idx].this_icd_term = icd_term; + } + + if (0 == inst->total_gpu_count) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Failed to detect any valid" + " GPUs in the current config"); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + new_phys_devs = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term *) * inst->total_gpu_count, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_devs) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Failed to allocate new physical" + " device array of size %d", + inst->total_gpu_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(new_phys_devs, 0, sizeof(struct loader_physical_device_term *) * inst->total_gpu_count); + + // Copy or create everything to fill the new array of physical devices + uint32_t idx = 0; + +#if defined(_WIN32) + // Copy over everything found through sorted enumeration + for (uint32_t i = 0; i < sorted_count; ++i) { + for (uint32_t j = 0; j < sorted_phys_dev_array[i].device_count; ++j) { + // Check if this physical device is already in the old buffer + if (NULL != inst->phys_devs_term) { + for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) { + if (sorted_phys_dev_array[i].physical_devices[j] == inst->phys_devs_term[old_idx]->phys_dev) { + new_phys_devs[idx] = inst->phys_devs_term[old_idx]; + break; + } + } + } + + // If this physical device isn't in the old buffer, then we need to create it. 
+ if (NULL == new_phys_devs[idx]) { + new_phys_devs[idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_devs[idx]) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Failed to allocate " + "physical device terminator object %d", + idx); + inst->total_gpu_count = idx; + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + loader_set_dispatch((void *)new_phys_devs[idx], inst->disp); + new_phys_devs[idx]->this_icd_term = sorted_phys_dev_array[i].icd_term; + new_phys_devs[idx]->icd_index = (uint8_t)(sorted_phys_dev_array[i].icd_index); + new_phys_devs[idx]->phys_dev = sorted_phys_dev_array[i].physical_devices[j]; + } + + // Increment the count of new physical devices + idx++; + } + } +#endif + + // Copy over everything found through EnumeratePhysicalDevices + for (uint32_t icd_idx = 0; icd_idx < inst->total_icd_count; icd_idx++) { + for (uint32_t pd_idx = 0; pd_idx < icd_phys_dev_array[icd_idx].count; pd_idx++) { + // Check if this physical device is already in the old buffer + if (NULL != inst->phys_devs_term) { + for (uint32_t old_idx = 0; old_idx < inst->phys_dev_count_term; old_idx++) { + if (icd_phys_dev_array[icd_idx].phys_devs[pd_idx] == inst->phys_devs_term[old_idx]->phys_dev) { + new_phys_devs[idx] = inst->phys_devs_term[old_idx]; + break; + } + } + } + // If this physical device isn't in the old buffer, then we + // need to create it. + if (NULL == new_phys_devs[idx]) { + new_phys_devs[idx] = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term), + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_devs[idx]) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevs: Failed to allocate " + "physical device terminator object %d", + idx); + inst->total_gpu_count = idx; + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + loader_set_dispatch((void *)new_phys_devs[idx], inst->disp); + new_phys_devs[idx]->this_icd_term = icd_phys_dev_array[icd_idx].this_icd_term; + new_phys_devs[idx]->icd_index = (uint8_t)(icd_idx); + new_phys_devs[idx]->phys_dev = icd_phys_dev_array[icd_idx].phys_devs[pd_idx]; + } + idx++; + } + } + +out: + + if (VK_SUCCESS != res) { + if (NULL != new_phys_devs) { + // We've encountered an error, so we should free the new buffers. + for (uint32_t i = 0; i < inst->total_gpu_count; i++) { + loader_instance_heap_free(inst, new_phys_devs[i]); + } + loader_instance_heap_free(inst, new_phys_devs); + } + inst->total_gpu_count = 0; + } else { + // Free everything that didn't carry over to the new array of + // physical devices. Everything else will have been copied over + // to the new array. 
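+        // Editorial note (rationale inferred from the code above): terminator objects that did
+        // carry over are reused, so VkPhysicalDevice handles the application already holds stay
+        // valid; only the wrappers for devices absent from the new enumeration are released.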
+ if (NULL != inst->phys_devs_term) { + for (uint32_t cur_pd = 0; cur_pd < inst->phys_dev_count_term; cur_pd++) { + bool found = false; + for (uint32_t new_pd_idx = 0; new_pd_idx < inst->total_gpu_count; new_pd_idx++) { + if (inst->phys_devs_term[cur_pd] == new_phys_devs[new_pd_idx]) { + found = true; + break; + } + } + if (!found) { + loader_instance_heap_free(inst, inst->phys_devs_term[cur_pd]); + } + } + loader_instance_heap_free(inst, inst->phys_devs_term); + } + + // Swap out old and new devices list + inst->phys_dev_count_term = inst->total_gpu_count; + inst->phys_devs_term = new_phys_devs; + } + + if (sorted_phys_dev_array != NULL) { + for (uint32_t i = 0; i < sorted_count; ++i) { + if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) { + loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices); + } + } + loader_instance_heap_free(inst, sorted_phys_dev_array); + } + + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, + VkPhysicalDevice *pPhysicalDevices) { + struct loader_instance *inst = (struct loader_instance *)instance; + VkResult res = VK_SUCCESS; + + // Always call the setup loader terminator physical devices because they may + // have changed at any point. + res = setupLoaderTermPhysDevs(inst); + if (VK_SUCCESS != res) { + goto out; + } + + uint32_t copy_count = inst->total_gpu_count; + if (NULL != pPhysicalDevices) { + if (copy_count > *pPhysicalDeviceCount) { + copy_count = *pPhysicalDeviceCount; + res = VK_INCOMPLETE; + } + + for (uint32_t i = 0; i < copy_count; i++) { + pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_term[i]; + } + } + + *pPhysicalDeviceCount = copy_count; + +out: + + return res; +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL != icd_term->dispatch.GetPhysicalDeviceProperties) { + icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, pProperties); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, + uint32_t *pQueueFamilyPropertyCount, + VkQueueFamilyProperties *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL != icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties) { + icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, pProperties); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL != icd_term->dispatch.GetPhysicalDeviceMemoryProperties) { + icd_term->dispatch.GetPhysicalDeviceMemoryProperties(phys_dev_term->phys_dev, pProperties); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures *pFeatures) { + struct loader_physical_device_term 
*phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL != icd_term->dispatch.GetPhysicalDeviceFeatures) { + icd_term->dispatch.GetPhysicalDeviceFeatures(phys_dev_term->phys_dev, pFeatures); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, + VkFormatProperties *pFormatInfo) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL != icd_term->dispatch.GetPhysicalDeviceFormatProperties) { + icd_term->dispatch.GetPhysicalDeviceFormatProperties(phys_dev_term->phys_dev, format, pFormatInfo); + } +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, + VkImageType type, VkImageTiling tiling, + VkImageUsageFlags usage, VkImageCreateFlags flags, + VkImageFormatProperties *pImageFormatProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceImageFormatProperties) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "Encountered the vkEnumerateDeviceLayerProperties " + "terminator. This means a layer improperly continued."); + return VK_ERROR_INITIALIZATION_FAILED; + } + return icd_term->dispatch.GetPhysicalDeviceImageFormatProperties(phys_dev_term->phys_dev, format, type, tiling, usage, flags, + pImageFormatProperties); +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, + VkImageType type, VkSampleCountFlagBits samples, + VkImageUsageFlags usage, VkImageTiling tiling, + uint32_t *pNumProperties, + VkSparseImageFormatProperties *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL != icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties) { + icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties(phys_dev_term->phys_dev, format, type, samples, usage, + tiling, pNumProperties, pProperties); + } +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, + const char *pLayerName, uint32_t *pPropertyCount, + VkExtensionProperties *pProperties) { + struct loader_physical_device_term *phys_dev_term; + + struct loader_layer_list implicit_layer_list = {0}; + struct loader_extension_list all_exts = {0}; + struct loader_extension_list icd_exts = {0}; + + // Any layer or trampoline wrapping should be removed at this point in time can just cast to the expected + // type for VkPhysicalDevice. 
+ phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + + // if we got here with a non-empty pLayerName, look up the extensions + // from the json + if (pLayerName != NULL && strlen(pLayerName) > 0) { + uint32_t count; + uint32_t copy_size; + const struct loader_instance *inst = phys_dev_term->this_icd_term->this_instance; + struct loader_device_extension_list *dev_ext_list = NULL; + struct loader_device_extension_list local_ext_list; + memset(&local_ext_list, 0, sizeof(local_ext_list)); + if (vk_string_validate(MaxLoaderStringLength, pLayerName) == VK_STRING_ERROR_NONE) { + for (uint32_t i = 0; i < inst->instance_layer_list.count; i++) { + struct loader_layer_properties *props = &inst->instance_layer_list.list[i]; + if (strcmp(props->info.layerName, pLayerName) == 0) { + dev_ext_list = &props->device_extension_list; + } + } + + count = (dev_ext_list == NULL) ? 0 : dev_ext_list->count; + if (pProperties == NULL) { + *pPropertyCount = count; + loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list); + return VK_SUCCESS; + } + + copy_size = *pPropertyCount < count ? *pPropertyCount : count; + for (uint32_t i = 0; i < copy_size; i++) { + memcpy(&pProperties[i], &dev_ext_list->list[i].props, sizeof(VkExtensionProperties)); + } + *pPropertyCount = copy_size; + + loader_destroy_generic_list(inst, (struct loader_generic_list *)&local_ext_list); + if (copy_size < count) { + return VK_INCOMPLETE; + } + } else { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkEnumerateDeviceExtensionProperties: pLayerName " + "is too long or is badly formed"); + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + + return VK_SUCCESS; + } + + // This case is during the call down the instance chain with pLayerName == NULL + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + uint32_t icd_ext_count = *pPropertyCount; + VkExtensionProperties *icd_props_list = pProperties; + VkResult res; + + if (NULL == icd_props_list) { + // We need to find the count without duplicates. This requires querying the driver for the names of the extensions. + // A small amount of storage is then needed to facilitate the de-duplication. + res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &icd_ext_count, NULL); + if (res != VK_SUCCESS) { + goto out; + } + icd_props_list = loader_instance_heap_alloc(icd_term->this_instance, sizeof(VkExtensionProperties) * icd_ext_count, + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); + if (NULL == icd_props_list) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + } + + // Get the available device extension count, and if pProperties is not NULL, the extensions as well + res = icd_term->dispatch.EnumerateDeviceExtensionProperties(phys_dev_term->phys_dev, NULL, &icd_ext_count, icd_props_list); + if (res != VK_SUCCESS) { + goto out; + } + + if (!loaderInitLayerList(icd_term->this_instance, &implicit_layer_list)) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + loaderAddImplicitLayers(icd_term->this_instance, &implicit_layer_list, NULL, &icd_term->this_instance->instance_layer_list); + + // Initialize dev_extension list within the physicalDevice object + res = loader_init_device_extensions(icd_term->this_instance, phys_dev_term, icd_ext_count, icd_props_list, &icd_exts); + if (res != VK_SUCCESS) { + goto out; + } + + // We need to determine which implicit layers are active, and then add their extensions. This can't be cached as + // it depends on results of environment variables (which can change). 
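+    // Editorial note: an implicit layer's activation is driven by the enable_environment /
+    // disable_environment variables named in its JSON manifest, so the set of extra device
+    // extensions it contributes can change between calls and must be recomputed here each time.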
+ res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, icd_exts.count, icd_exts.list); + if (res != VK_SUCCESS) { + goto out; + } + + loaderAddImplicitLayers(icd_term->this_instance, &implicit_layer_list, NULL, &icd_term->this_instance->instance_layer_list); + + for (uint32_t i = 0; i < implicit_layer_list.count; i++) { + for (uint32_t j = 0; j < implicit_layer_list.list[i].device_extension_list.count; j++) { + res = loader_add_to_ext_list(icd_term->this_instance, &all_exts, 1, + &implicit_layer_list.list[i].device_extension_list.list[j].props); + if (res != VK_SUCCESS) { + goto out; + } + } + } + uint32_t capacity = *pPropertyCount; + VkExtensionProperties *props = pProperties; + + res = VK_SUCCESS; + if (NULL != pProperties) { + for (uint32_t i = 0; i < all_exts.count && i < capacity; i++) { + props[i] = all_exts.list[i]; + } + + // Wasn't enough space for the extensions, we did partial copy now return VK_INCOMPLETE + if (capacity < all_exts.count) { + res = VK_INCOMPLETE; + } else { + *pPropertyCount = all_exts.count; + } + } else { + *pPropertyCount = all_exts.count; + } + +out: + + if (NULL != implicit_layer_list.list) { + loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&implicit_layer_list); + } + if (NULL != all_exts.list) { + loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&all_exts); + } + if (NULL != icd_exts.list) { + loader_destroy_generic_list(icd_term->this_instance, (struct loader_generic_list *)&icd_exts); + } + if (NULL == pProperties && NULL != icd_props_list) { + loader_instance_heap_free(icd_term->this_instance, icd_props_list); + } + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, + VkLayerProperties *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "Encountered the vkEnumerateDeviceLayerProperties " + "terminator. 
This means a layer improperly continued."); + // Should never get here this call isn't dispatched down the chain + return VK_ERROR_INITIALIZATION_FAILED; +} + +VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) { + VkStringErrorFlags result = VK_STRING_ERROR_NONE; + int num_char_bytes = 0; + int i, j; + + if (utf8 == NULL) { + return VK_STRING_ERROR_NULL_PTR; + } + + for (i = 0; i <= max_length; i++) { + if (utf8[i] == 0) { + break; + } else if (i == max_length) { + result |= VK_STRING_ERROR_LENGTH; + break; + } else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) { + num_char_bytes = 0; + } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) { + num_char_bytes = 1; + } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) { + num_char_bytes = 2; + } else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) { + num_char_bytes = 3; + } else { + result = VK_STRING_ERROR_BAD_DATA; + } + + // Validate the following num_char_bytes of data + for (j = 0; (j < num_char_bytes) && (i < max_length); j++) { + if (++i == max_length) { + result |= VK_STRING_ERROR_LENGTH; + break; + } + if ((utf8[i] & UTF8_DATA_BYTE_MASK) != UTF8_DATA_BYTE_CODE) { + result |= VK_STRING_ERROR_BAD_DATA; + } + } + } + return result; +} + +VKAPI_ATTR VkResult VKAPI_CALL +terminator_EnumerateInstanceVersion(const VkEnumerateInstanceVersionChain *chain, uint32_t* pApiVersion) { + // NOTE: The Vulkan WG doesn't want us checking pApiVersion for NULL, but instead + // prefers us crashing. + *pApiVersion = VK_HEADER_VERSION_COMPLETE; + return VK_SUCCESS; +} + +VKAPI_ATTR VkResult VKAPI_CALL +terminator_EnumerateInstanceExtensionProperties(const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName, + uint32_t *pPropertyCount, VkExtensionProperties *pProperties) { + struct loader_extension_list *global_ext_list = NULL; + struct loader_layer_list instance_layers; + struct loader_extension_list local_ext_list; + struct loader_icd_tramp_list icd_tramp_list; + uint32_t copy_size; + VkResult res = VK_SUCCESS; + + // tls_instance = NULL; + memset(&local_ext_list, 0, sizeof(local_ext_list)); + memset(&instance_layers, 0, sizeof(instance_layers)); + + // Get layer libraries if needed + if (pLayerName && strlen(pLayerName) != 0) { + if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) { + assert(VK_FALSE && + "vkEnumerateInstanceExtensionProperties: " + "pLayerName is too long or is badly formed"); + res = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + loaderScanForLayers(NULL, &instance_layers); + for (uint32_t i = 0; i < instance_layers.count; i++) { + struct loader_layer_properties *props = &instance_layers.list[i]; + if (strcmp(props->info.layerName, pLayerName) == 0) { + global_ext_list = &props->instance_extension_list; + break; + } + } + } else { + // Preload ICD libraries so subsequent calls to EnumerateInstanceExtensionProperties don't have to load them + loader_preload_icds(); + + // Scan/discover all ICD libraries + memset(&icd_tramp_list, 0, sizeof(icd_tramp_list)); + res = loader_icd_scan(NULL, &icd_tramp_list); + // EnumerateInstanceExtensionProperties can't return anything other than OOM or VK_ERROR_LAYER_NOT_PRESENT + if ((VK_SUCCESS != res && icd_tramp_list.count > 0) || res == VK_ERROR_OUT_OF_HOST_MEMORY) { + goto out; + } + // Get extensions from all ICD's, merge so no duplicates + res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list); + if (VK_SUCCESS != res) { + goto out; + } + 
loader_scanned_icd_clear(NULL, &icd_tramp_list); + + // Append enabled implicit layers. + loaderScanForImplicitLayers(NULL, &instance_layers); + for (uint32_t i = 0; i < instance_layers.count; i++) { + if (!loaderImplicitLayerIsEnabled(NULL, &instance_layers.list[i])) { + continue; + } + struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list; + loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list); + } + + global_ext_list = &local_ext_list; + } + + if (global_ext_list == NULL) { + res = VK_ERROR_LAYER_NOT_PRESENT; + goto out; + } + + if (pProperties == NULL) { + *pPropertyCount = global_ext_list->count; + goto out; + } + + copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count; + for (uint32_t i = 0; i < copy_size; i++) { + memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties)); + } + *pPropertyCount = copy_size; + + if (copy_size < global_ext_list->count) { + res = VK_INCOMPLETE; + goto out; + } + +out: + + loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list); + loaderDeleteLayerListAndProperties(NULL, &instance_layers); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(const VkEnumerateInstanceLayerPropertiesChain *chain, + uint32_t *pPropertyCount, + VkLayerProperties *pProperties) { + VkResult result = VK_SUCCESS; + struct loader_layer_list instance_layer_list; + tls_instance = NULL; + + LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); + + uint32_t copy_size; + + // Get layer libraries + memset(&instance_layer_list, 0, sizeof(instance_layer_list)); + loaderScanForLayers(NULL, &instance_layer_list); + + if (pProperties == NULL) { + *pPropertyCount = instance_layer_list.count; + goto out; + } + + copy_size = (*pPropertyCount < instance_layer_list.count) ? 
*pPropertyCount : instance_layer_list.count; + for (uint32_t i = 0; i < copy_size; i++) { + memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties)); + } + + *pPropertyCount = copy_size; + + if (copy_size < instance_layer_list.count) { + result = VK_INCOMPLETE; + goto out; + } + +out: + + loaderDeleteLayerListAndProperties(NULL, &instance_layer_list); + return result; +} + +#if defined(_WIN32) +BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { + switch (reason) { + case DLL_PROCESS_ATTACH: + loader_initialize(); + break; + case DLL_PROCESS_DETACH: + if (NULL == reserved) { + loader_release(); + } + break; + default: + // Do nothing + break; + } + return TRUE; +} +#elif !defined(_WIN32) +__attribute__((constructor)) void loader_init_library() { loader_initialize(); } + +__attribute__((destructor)) void loader_free_library() { loader_release(); } +#endif + +// ---- Vulkan Core 1.1 terminators + +VkResult setupLoaderTermPhysDevGroups(struct loader_instance *inst) { + VkResult res = VK_SUCCESS; + struct loader_icd_term *icd_term; + uint32_t total_count = 0; + uint32_t cur_icd_group_count = 0; + VkPhysicalDeviceGroupPropertiesKHR **new_phys_dev_groups = NULL; + VkPhysicalDeviceGroupPropertiesKHR *local_phys_dev_groups = NULL; + bool *local_phys_dev_group_sorted = NULL; + PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL; + struct LoaderSortedPhysicalDevice* sorted_phys_dev_array = NULL; + uint32_t sorted_count = 0; + + if (0 == inst->phys_dev_count_term) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Loader failed to setup physical " + "device terminator info before calling \'EnumeratePhysicalDeviceGroups\'."); + assert(false); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + // For each ICD, query the number of physical device groups, and then get an + // internal value for those physical devices. + icd_term = inst->icd_terms; + for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) { + // Get the function pointer to use to call into the ICD. This could be the core or KHR version + if (inst->enabled_known_extensions.khr_device_group_creation) { + fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR; + } else { + fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups; + } + + cur_icd_group_count = 0; + if (NULL == fpEnumeratePhysicalDeviceGroups) { + // Treat each ICD's GPU as it's own group if the extension isn't supported + res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &cur_icd_group_count, NULL); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed during dispatch call of " + "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.", + icd_idx); + goto out; + } + } else { + // Query the actual group info + res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &cur_icd_group_count, NULL); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed during dispatch call of " + "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get count.", + icd_idx); + goto out; + } + } + total_count += cur_icd_group_count; + } + + // Create an array for the new physical device groups, which will be stored + // in the instance for the Terminator code. 
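+    // Editorial aside (illustrative only, not part of the loader sources): applications retrieve
+    // these groups with the usual two-call idiom, remembering to set sType before the second call:
+    //     uint32_t count = 0;
+    //     vkEnumeratePhysicalDeviceGroups(instance, &count, NULL);
+    //     VkPhysicalDeviceGroupProperties *groups = calloc(count, sizeof(*groups));
+    //     for (uint32_t i = 0; i < count; ++i) groups[i].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES;
+    //     vkEnumeratePhysicalDeviceGroups(instance, &count, groups);
+    // The array built below is what that call ultimately reports back to the application.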
+ new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_alloc( + inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_dev_groups) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed to allocate new physical device" + " group array of size %d", + total_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(new_phys_dev_groups, 0, total_count * sizeof(VkPhysicalDeviceGroupProperties *)); + + // Create a temporary array (on the stack) to keep track of the + // returned VkPhysicalDevice values. + local_phys_dev_groups = loader_stack_alloc(sizeof(VkPhysicalDeviceGroupProperties) * total_count); + local_phys_dev_group_sorted = loader_stack_alloc(sizeof(bool) * total_count); + if (NULL == local_phys_dev_groups || NULL == local_phys_dev_group_sorted) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed to allocate local " + "physical device group array of size %d", + total_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + // Initialize the memory to something valid + memset(local_phys_dev_groups, 0, sizeof(VkPhysicalDeviceGroupProperties) * total_count); + memset(local_phys_dev_group_sorted, 0, sizeof(bool) * total_count); + for (uint32_t group = 0; group < total_count; group++) { + local_phys_dev_groups[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR; + local_phys_dev_groups[group].pNext = NULL; + local_phys_dev_groups[group].subsetAllocation = false; + } + + // Get the physical devices supported by platform sorting mechanism into a separate list + res = ReadSortedPhysicalDevices(inst, &sorted_phys_dev_array, &sorted_count); + if (VK_SUCCESS != res) { + goto out; + } + + cur_icd_group_count = 0; + icd_term = inst->icd_terms; + for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) { + uint32_t count_this_time = total_count - cur_icd_group_count; + + // Check if this group can be sorted +#if defined(VK_USE_PLATFORM_WIN32_KHR) + bool icd_sorted = sorted_count && (icd_term->scanned_icd->EnumerateAdapterPhysicalDevices != NULL); +#else + bool icd_sorted = false; +#endif + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + if (inst->enabled_known_extensions.khr_device_group_creation) { + fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroupsKHR; + } else { + fpEnumeratePhysicalDeviceGroups = icd_term->dispatch.EnumeratePhysicalDeviceGroups; + } + + if (NULL == fpEnumeratePhysicalDeviceGroups) { + VkPhysicalDevice* phys_dev_array = loader_stack_alloc(sizeof(VkPhysicalDevice) * count_this_time); + if (NULL == phys_dev_array) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed to allocate local " + "physical device array of size %d", + count_this_time); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + res = icd_term->dispatch.EnumeratePhysicalDevices(icd_term->instance, &count_this_time, phys_dev_array); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed during dispatch call of " + "\'EnumeratePhysicalDevices\' to ICD %d to get plain phys dev count.", + icd_idx); + goto out; + } + + // Add each GPU as it's own group + for (uint32_t indiv_gpu = 0; indiv_gpu < count_this_time; indiv_gpu++) { + local_phys_dev_groups[indiv_gpu + cur_icd_group_count].physicalDeviceCount = 1; + local_phys_dev_groups[indiv_gpu + cur_icd_group_count].physicalDevices[0] = phys_dev_array[indiv_gpu]; + local_phys_dev_group_sorted[indiv_gpu + cur_icd_group_count] = icd_sorted; + } + + } else { + res = fpEnumeratePhysicalDeviceGroups(icd_term->instance, &count_this_time, &local_phys_dev_groups[cur_icd_group_count]); + for (uint32_t group = 0; group < count_this_time; ++group) { + local_phys_dev_group_sorted[group + cur_icd_group_count] = icd_sorted; + } + if (VK_SUCCESS != res) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed during dispatch call of " + "\'EnumeratePhysicalDeviceGroups\' to ICD %d to get content.", + icd_idx); + goto out; + } + } + + cur_icd_group_count += count_this_time; + } + + // Replace all the physical device IDs with the proper loader values + for (uint32_t group = 0; group < total_count; group++) { + for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].physicalDeviceCount; group_gpu++) { + bool found = false; + for (uint32_t term_gpu = 0; term_gpu < inst->phys_dev_count_term; term_gpu++) { + if (local_phys_dev_groups[group].physicalDevices[group_gpu] == inst->phys_devs_term[term_gpu]->phys_dev) { + local_phys_dev_groups[group].physicalDevices[group_gpu] = (VkPhysicalDevice)inst->phys_devs_term[term_gpu]; + found = true; + break; + } + } + if (!found) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed to find GPU %d in group %d" + " returned by \'EnumeratePhysicalDeviceGroups\' in list returned" + " by \'EnumeratePhysicalDevices\'", group_gpu, group); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + } + } + + uint32_t idx = 0; + +#if defined(_WIN32) + // Copy over everything found through sorted enumeration + for (uint32_t i = 0; i < sorted_count; ++i) { + + // Find the VkPhysicalDeviceGroupProperties object in local_phys_dev_groups + VkPhysicalDeviceGroupProperties *group_properties = NULL; + for (uint32_t group = 0; group < total_count; group++) { + if (sorted_phys_dev_array[i].device_count != local_phys_dev_groups[group].physicalDeviceCount) { + continue; + } + + bool match = true; + for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].physicalDeviceCount; group_gpu++) { + if 
(sorted_phys_dev_array[i].physical_devices[group_gpu] != ((struct loader_physical_device_term*) local_phys_dev_groups[group].physicalDevices[group_gpu])->phys_dev) { + match = false; + break; + } + } + + if (match) { + group_properties = &local_phys_dev_groups[group]; + } + } + + // Check if this physical device group with the same contents is already in the old buffer + for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) { + if (NULL != group_properties && group_properties->physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) { + bool found_all_gpus = true; + for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) { + bool found_gpu = false; + for (uint32_t new_gpu = 0; new_gpu < group_properties->physicalDeviceCount; new_gpu++) { + if (group_properties->physicalDevices[new_gpu] == inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) { + found_gpu = true; + break; + } + } + + if (!found_gpu) { + found_all_gpus = false; + break; + } + } + if (!found_all_gpus) { + continue; + } + else { + new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx]; + break; + } + } + } + + // If this physical device group isn't in the old buffer, create it + if (group_properties != NULL && NULL == new_phys_dev_groups[idx]) { + new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupPropertiesKHR*)loader_instance_heap_alloc( + inst, sizeof(VkPhysicalDeviceGroupPropertiesKHR), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_dev_groups[idx]) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed to allocate " + "physical device group Terminator object %d", + idx); + total_count = idx; + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memcpy(new_phys_dev_groups[idx], group_properties, sizeof(VkPhysicalDeviceGroupPropertiesKHR)); + } + + ++idx; + } +#endif + + // Copy or create everything to fill the new array of physical device groups + for (uint32_t new_idx = 0; new_idx < total_count; new_idx++) { + // Skip groups which have been included through sorting + if (local_phys_dev_group_sorted[new_idx] || local_phys_dev_groups[new_idx].physicalDeviceCount == 0) { + continue; + } + + // Check if this physical device group with the same contents is already in the old buffer + for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_term; old_idx++) { + if (local_phys_dev_groups[new_idx].physicalDeviceCount == inst->phys_dev_groups_term[old_idx]->physicalDeviceCount) { + bool found_all_gpus = true; + for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_term[old_idx]->physicalDeviceCount; old_gpu++) { + bool found_gpu = false; + for (uint32_t new_gpu = 0; new_gpu < local_phys_dev_groups[new_idx].physicalDeviceCount; new_gpu++) { + if (local_phys_dev_groups[new_idx].physicalDevices[new_gpu] == inst->phys_dev_groups_term[old_idx]->physicalDevices[old_gpu]) { + found_gpu = true; + break; + } + } + + if (!found_gpu) { + found_all_gpus = false; + break; + } + } + if (!found_all_gpus) { + continue; + } else { + new_phys_dev_groups[idx] = inst->phys_dev_groups_term[old_idx]; + break; + } + } + } + + // If this physical device group isn't in the old buffer, create it + if (NULL == new_phys_dev_groups[idx]) { + new_phys_dev_groups[idx] = (VkPhysicalDeviceGroupPropertiesKHR *)loader_instance_heap_alloc( + inst, sizeof(VkPhysicalDeviceGroupPropertiesKHR), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_dev_groups[idx]) { + 
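+                // Allocation failed: clamp total_count to the number of groups created so
+                // far, so the cleanup code at 'out' only frees valid entries.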
loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTermPhysDevGroups: Failed to allocate " + "physical device group Terminator object %d", + idx); + total_count = idx; + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memcpy(new_phys_dev_groups[idx], &local_phys_dev_groups[new_idx], + sizeof(VkPhysicalDeviceGroupPropertiesKHR)); + } + + ++idx; + } + +out: + + if (VK_SUCCESS != res) { + if (NULL != new_phys_dev_groups) { + for (uint32_t i = 0; i < total_count; i++) { + loader_instance_heap_free(inst, new_phys_dev_groups[i]); + } + loader_instance_heap_free(inst, new_phys_dev_groups); + } + total_count = 0; + } else { + // Free everything that didn't carry over to the new array of + // physical device groups + if (NULL != inst->phys_dev_groups_term) { + for (uint32_t i = 0; i < inst->phys_dev_group_count_term; i++) { + bool found = false; + for (uint32_t j = 0; j < total_count; j++) { + if (inst->phys_dev_groups_term[i] == new_phys_dev_groups[j]) { + found = true; + break; + } + } + if (!found) { + loader_instance_heap_free(inst, inst->phys_dev_groups_term[i]); + } + } + loader_instance_heap_free(inst, inst->phys_dev_groups_term); + } + + // Swap in the new physical device group list + inst->phys_dev_group_count_term = total_count; + inst->phys_dev_groups_term = new_phys_dev_groups; + } + + if (sorted_phys_dev_array != NULL) { + for (uint32_t i = 0; i < sorted_count; ++i) { + if (sorted_phys_dev_array[i].device_count > 0 && sorted_phys_dev_array[i].physical_devices != NULL) { + loader_instance_heap_free(inst, sorted_phys_dev_array[i].physical_devices); + } + } + loader_instance_heap_free(inst, sorted_phys_dev_array); + } + + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups( + VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) { + struct loader_instance *inst = (struct loader_instance *)instance; + VkResult res = VK_SUCCESS; + + // Always call the setup loader terminator physical device groups because they may + // have changed at any point. + res = setupLoaderTermPhysDevGroups(inst); + if (VK_SUCCESS != res) { + goto out; + } + + uint32_t copy_count = inst->phys_dev_group_count_term; + if (NULL != pPhysicalDeviceGroupProperties) { + if (copy_count > *pPhysicalDeviceGroupCount) { + copy_count = *pPhysicalDeviceGroupCount; + res = VK_INCOMPLETE; + } + + for (uint32_t i = 0; i < copy_count; i++) { + memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_term[i], + sizeof(VkPhysicalDeviceGroupPropertiesKHR)); + } + } + + *pPhysicalDeviceGroupCount = copy_count; + +out: + + return res; +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2 *pFeatures) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceFeatures2 fpGetPhysicalDeviceFeatures2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceFeatures2 = icd_term->dispatch.GetPhysicalDeviceFeatures2KHR; + } else { + fpGetPhysicalDeviceFeatures2 = icd_term->dispatch.GetPhysicalDeviceFeatures2; + } + + if (fpGetPhysicalDeviceFeatures2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + fpGetPhysicalDeviceFeatures2(phys_dev_term->phys_dev, pFeatures); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceFeatures2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceFeatures", + icd_term->scanned_icd->lib_name); + + // Write to the VkPhysicalDeviceFeatures2 struct + icd_term->dispatch.GetPhysicalDeviceFeatures(phys_dev_term->phys_dev, &pFeatures->features); + + const VkBaseInStructure *pNext = pFeatures->pNext; + while (pNext != NULL) { + switch (pNext->sType) { + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: { + // Skip the check if VK_KHR_multiview is enabled because it's a device extension + // Write to the VkPhysicalDeviceMultiviewFeaturesKHR struct + VkPhysicalDeviceMultiviewFeaturesKHR *multiview_features = (VkPhysicalDeviceMultiviewFeaturesKHR *)pNext; + multiview_features->multiview = VK_FALSE; + multiview_features->multiviewGeometryShader = VK_FALSE; + multiview_features->multiviewTessellationShader = VK_FALSE; + + pNext = multiview_features->pNext; + break; + } + default: { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceFeatures2: Emulation found unrecognized structure type in pFeatures->pNext - " + "this struct will be ignored"); + + pNext = pNext->pNext; + break; + } + } + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2 *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceProperties2 = icd_term->dispatch.GetPhysicalDeviceProperties2KHR; + } else { + fpGetPhysicalDeviceProperties2 = icd_term->dispatch.GetPhysicalDeviceProperties2; + } + + if (fpGetPhysicalDeviceProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + fpGetPhysicalDeviceProperties2(phys_dev_term->phys_dev, pProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceProperties2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceProperties", + icd_term->scanned_icd->lib_name); + + // Write to the VkPhysicalDeviceProperties2 struct + icd_term->dispatch.GetPhysicalDeviceProperties(phys_dev_term->phys_dev, &pProperties->properties); + + const VkBaseInStructure *pNext = pProperties->pNext; + while (pNext != NULL) { + switch (pNext->sType) { + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: { + VkPhysicalDeviceIDPropertiesKHR *id_properties = (VkPhysicalDeviceIDPropertiesKHR *)pNext; + + // Verify that "VK_KHR_external_memory_capabilities" is enabled + if (icd_term->this_instance->enabled_known_extensions.khr_external_memory_capabilities) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceProperties2: Emulation cannot generate unique IDs for struct " + "VkPhysicalDeviceIDProperties - setting IDs to zero instead"); + + // Write to the VkPhysicalDeviceIDPropertiesKHR struct + memset(id_properties->deviceUUID, 0, VK_UUID_SIZE); + memset(id_properties->driverUUID, 0, VK_UUID_SIZE); + id_properties->deviceLUIDValid = VK_FALSE; + } + + pNext = id_properties->pNext; + break; + } + default: { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceProperties2KHR: Emulation found unrecognized structure type in " + "pProperties->pNext - this struct will be ignored"); + + pNext = pNext->pNext; + break; + } + } + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format, + VkFormatProperties2 *pFormatProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceFormatProperties2 fpGetPhysicalDeviceFormatProperties2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceFormatProperties2KHR; + } else { + fpGetPhysicalDeviceFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceFormatProperties2; + } + + if (fpGetPhysicalDeviceFormatProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + fpGetPhysicalDeviceFormatProperties2(phys_dev_term->phys_dev, format, pFormatProperties); + } else { + // Emulate the call + loader_log( + icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceFormatProperties2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceFormatProperties", + icd_term->scanned_icd->lib_name); + + // Write to the VkFormatProperties2 struct + icd_term->dispatch.GetPhysicalDeviceFormatProperties(phys_dev_term->phys_dev, format, &pFormatProperties->formatProperties); + + if (pFormatProperties->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceFormatProperties2: Emulation found unrecognized structure type in " + "pFormatProperties->pNext - this struct will be ignored"); + } + } +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2KHR *pImageFormatInfo, + VkImageFormatProperties2KHR *pImageFormatProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceImageFormatProperties2 fpGetPhysicalDeviceImageFormatProperties2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceImageFormatProperties2KHR; + } else { + fpGetPhysicalDeviceImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceImageFormatProperties2; + } + + if (fpGetPhysicalDeviceImageFormatProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + return fpGetPhysicalDeviceImageFormatProperties2(phys_dev_term->phys_dev, pImageFormatInfo, pImageFormatProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceImageFormatProperties2: Emulating call in ICD \"%s\" using " + "vkGetPhysicalDeviceImageFormatProperties", + icd_term->scanned_icd->lib_name); + + // If there is more info in either pNext, then this is unsupported + if (pImageFormatInfo->pNext != NULL || pImageFormatProperties->pNext != NULL) { + return VK_ERROR_FORMAT_NOT_SUPPORTED; + } + + // Write to the VkImageFormatProperties2KHR struct + return icd_term->dispatch.GetPhysicalDeviceImageFormatProperties( + phys_dev_term->phys_dev, pImageFormatInfo->format, pImageFormatInfo->type, pImageFormatInfo->tiling, + pImageFormatInfo->usage, pImageFormatInfo->flags, &pImageFormatProperties->imageFormatProperties); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount, VkQueueFamilyProperties2KHR *pQueueFamilyProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceQueueFamilyProperties2 fpGetPhysicalDeviceQueueFamilyProperties2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceQueueFamilyProperties2 = icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties2KHR; + } else { + fpGetPhysicalDeviceQueueFamilyProperties2 = icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties2; + } + + if (fpGetPhysicalDeviceQueueFamilyProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + fpGetPhysicalDeviceQueueFamilyProperties2(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceQueueFamilyProperties2: Emulating call in ICD \"%s\" using " + "vkGetPhysicalDeviceQueueFamilyProperties", + icd_term->scanned_icd->lib_name); + + if (pQueueFamilyProperties == NULL || *pQueueFamilyPropertyCount == 0) { + // Write to pQueueFamilyPropertyCount + icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, NULL); + } else { + // Allocate a temporary array for the output of the old function + VkQueueFamilyProperties *properties = loader_stack_alloc(*pQueueFamilyPropertyCount * sizeof(VkQueueFamilyProperties)); + if (properties == NULL) { + *pQueueFamilyPropertyCount = 0; + loader_log( + icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkGetPhysicalDeviceQueueFamilyProperties2: Out of memory - Failed to allocate array for loader emulation."); + return; + } + + icd_term->dispatch.GetPhysicalDeviceQueueFamilyProperties(phys_dev_term->phys_dev, pQueueFamilyPropertyCount, + properties); + for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; ++i) { + // Write to the VkQueueFamilyProperties2KHR struct + memcpy(&pQueueFamilyProperties[i].queueFamilyProperties, &properties[i], sizeof(VkQueueFamilyProperties)); + + if (pQueueFamilyProperties[i].pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceQueueFamilyProperties2: Emulation found unrecognized structure type in " + "pQueueFamilyProperties[%d].pNext - this struct will be ignored", + i); + } + } + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2 *pMemoryProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceMemoryProperties2 fpGetPhysicalDeviceMemoryProperties2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceMemoryProperties2 = icd_term->dispatch.GetPhysicalDeviceMemoryProperties2KHR; + } else { + fpGetPhysicalDeviceMemoryProperties2 = icd_term->dispatch.GetPhysicalDeviceMemoryProperties2; + } + + if (fpGetPhysicalDeviceMemoryProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + fpGetPhysicalDeviceMemoryProperties2(phys_dev_term->phys_dev, pMemoryProperties); + } else { + // Emulate the call + loader_log( + icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceMemoryProperties2: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceMemoryProperties", + icd_term->scanned_icd->lib_name); + + // Write to the VkPhysicalDeviceMemoryProperties2 struct + icd_term->dispatch.GetPhysicalDeviceMemoryProperties(phys_dev_term->phys_dev, &pMemoryProperties->memoryProperties); + + if (pMemoryProperties->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceMemoryProperties2: Emulation found unrecognized structure type in " + "pMemoryProperties->pNext - this struct will be ignored"); + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount, + VkSparseImageFormatProperties2KHR *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 fpGetPhysicalDeviceSparseImageFormatProperties2 = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + fpGetPhysicalDeviceSparseImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties2KHR; + } else { + fpGetPhysicalDeviceSparseImageFormatProperties2 = icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties2; + } + + if (fpGetPhysicalDeviceSparseImageFormatProperties2 != NULL || !inst->enabled_known_extensions.khr_get_physical_device_properties2) { + // Pass the call to the driver + fpGetPhysicalDeviceSparseImageFormatProperties2(phys_dev_term->phys_dev, pFormatInfo, pPropertyCount, pProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceSparseImageFormatProperties2: Emulating call in ICD \"%s\" using " + "vkGetPhysicalDeviceSparseImageFormatProperties", + icd_term->scanned_icd->lib_name); + + if (pFormatInfo->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceSparseImageFormatProperties2: Emulation found unrecognized structure type in " + "pFormatInfo->pNext - this struct will be ignored"); + } + + if (pProperties == NULL || *pPropertyCount == 0) { + // Write to pPropertyCount + icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties( + phys_dev_term->phys_dev, pFormatInfo->format, pFormatInfo->type, pFormatInfo->samples, pFormatInfo->usage, + pFormatInfo->tiling, pPropertyCount, NULL); + } else { + // Allocate a temporary array for the output of the old function + VkSparseImageFormatProperties *properties = + loader_stack_alloc(*pPropertyCount * sizeof(VkSparseImageMemoryRequirements)); + if (properties == NULL) { + *pPropertyCount = 0; + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkGetPhysicalDeviceSparseImageFormatProperties2: Out of memory - Failed to allocate array for " + "loader emulation."); + return; + } + + icd_term->dispatch.GetPhysicalDeviceSparseImageFormatProperties( + phys_dev_term->phys_dev, pFormatInfo->format, pFormatInfo->type, pFormatInfo->samples, pFormatInfo->usage, + pFormatInfo->tiling, pPropertyCount, properties); + for (uint32_t i = 0; i < *pPropertyCount; ++i) { + // Write to the VkSparseImageFormatProperties2KHR struct + memcpy(&pProperties[i].properties, &properties[i], sizeof(VkSparseImageFormatProperties)); + + if (pProperties[i].pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceSparseImageFormatProperties2: Emulation found unrecognized structure type in " + "pProperties[%d].pNext - this struct will be ignored", + i); + } + } + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo, + VkExternalBufferProperties *pExternalBufferProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceExternalBufferProperties fpGetPhysicalDeviceExternalBufferProperties = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_external_memory_capabilities) { + fpGetPhysicalDeviceExternalBufferProperties = icd_term->dispatch.GetPhysicalDeviceExternalBufferPropertiesKHR; + } else { + fpGetPhysicalDeviceExternalBufferProperties = icd_term->dispatch.GetPhysicalDeviceExternalBufferProperties; + } + + if (fpGetPhysicalDeviceExternalBufferProperties || !inst->enabled_known_extensions.khr_external_memory_capabilities) { + // Pass the call to the driver + fpGetPhysicalDeviceExternalBufferProperties(phys_dev_term->phys_dev, pExternalBufferInfo, pExternalBufferProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalBufferProperties: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name); + + if (pExternalBufferInfo->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalBufferProperties: Emulation found unrecognized structure type in " + "pExternalBufferInfo->pNext - this struct will be ignored"); + } + + // Fill in everything being unsupported + memset(&pExternalBufferProperties->externalMemoryProperties, 0, sizeof(VkExternalMemoryPropertiesKHR)); + + if (pExternalBufferProperties->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalBufferProperties: Emulation found unrecognized structure type in " + "pExternalBufferProperties->pNext - this struct will be ignored"); + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo, + VkExternalSemaphoreProperties *pExternalSemaphoreProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceExternalSemaphoreProperties fpGetPhysicalDeviceExternalSemaphoreProperties = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_external_semaphore_capabilities) { + fpGetPhysicalDeviceExternalSemaphoreProperties = icd_term->dispatch.GetPhysicalDeviceExternalSemaphorePropertiesKHR; + } else { + fpGetPhysicalDeviceExternalSemaphoreProperties = icd_term->dispatch.GetPhysicalDeviceExternalSemaphoreProperties; + } + + if (fpGetPhysicalDeviceExternalSemaphoreProperties != NULL || !inst->enabled_known_extensions.khr_external_semaphore_capabilities) { + // Pass the call to the driver + fpGetPhysicalDeviceExternalSemaphoreProperties(phys_dev_term->phys_dev, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalSemaphoreProperties: Emulating call in ICD \"%s\"", + icd_term->scanned_icd->lib_name); + + if (pExternalSemaphoreInfo->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalSemaphoreProperties: Emulation found unrecognized structure type in " + "pExternalSemaphoreInfo->pNext - this struct will be ignored"); + } + + // Fill in everything being unsupported + pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0; + pExternalSemaphoreProperties->compatibleHandleTypes = 0; + pExternalSemaphoreProperties->externalSemaphoreFeatures = 0; + + if (pExternalSemaphoreProperties->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalSemaphoreProperties: Emulation found unrecognized structure type in " + "pExternalSemaphoreProperties->pNext - this struct will be ignored"); + } + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo, + VkExternalFenceProperties *pExternalFenceProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + const struct loader_instance *inst = icd_term->this_instance; + + // Get the function pointer to use to call into the ICD. 
This could be the core or KHR version + PFN_vkGetPhysicalDeviceExternalFenceProperties fpGetPhysicalDeviceExternalFenceProperties = NULL; + if (inst != NULL && inst->enabled_known_extensions.khr_external_fence_capabilities) { + fpGetPhysicalDeviceExternalFenceProperties = icd_term->dispatch.GetPhysicalDeviceExternalFencePropertiesKHR; + } else { + fpGetPhysicalDeviceExternalFenceProperties = icd_term->dispatch.GetPhysicalDeviceExternalFenceProperties; + } + + if (fpGetPhysicalDeviceExternalFenceProperties != NULL || !inst->enabled_known_extensions.khr_external_fence_capabilities) { + // Pass the call to the driver + fpGetPhysicalDeviceExternalFenceProperties(phys_dev_term->phys_dev, pExternalFenceInfo, pExternalFenceProperties); + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalFenceProperties: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name); + + if (pExternalFenceInfo->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalFenceProperties: Emulation found unrecognized structure type in " + "pExternalFenceInfo->pNext - this struct will be ignored"); + } + + // Fill in everything being unsupported + pExternalFenceProperties->exportFromImportedHandleTypes = 0; + pExternalFenceProperties->compatibleHandleTypes = 0; + pExternalFenceProperties->externalFenceFeatures = 0; + + if (pExternalFenceProperties->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceExternalFenceProperties: Emulation found unrecognized structure type in " + "pExternalFenceProperties->pNext - this struct will be ignored"); + } + } +} diff --git a/src/third_party/vulkan/loader/loader.h b/src/third_party/vulkan/loader/loader.h new file mode 100644 index 0000000..b263955 --- /dev/null +++ b/src/third_party/vulkan/loader/loader.h @@ -0,0 +1,547 @@ +/* + * + * Copyright (c) 2014-2019 The Khronos Group Inc. + * Copyright (c) 2014-2019 Valve Corporation + * Copyright (c) 2014-2019 LunarG, Inc. + * Copyright (C) 2015 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Jon Ashburn + * Author: Courtney Goeltzenleuchter + * Author: Chia-I Wu + * Author: Chia-I Wu + * Author: Mark Lobodzinski + * Author: Lenny Komow + * + */ + +#ifndef LOADER_H +#define LOADER_H + +#include "../vulkan.h" +#include "vk_loader_platform.h" +#include "vk_loader_layer.h" +#include "../vk_layer.h" +#include "../vk_icd.h" +#include +#include "vk_layer_dispatch_table.h" +#include "vk_loader_extensions.h" + +#if defined(__GNUC__) && __GNUC__ >= 4 +#define LOADER_EXPORT __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) +#define LOADER_EXPORT __attribute__((visibility("default"))) +#else +#define LOADER_EXPORT +#endif + +// A debug option to disable allocators at compile time to investigate future issues. 
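+// When set to 1, the loader bypasses any application-supplied VkAllocationCallbacks and
+// falls back to the system allocator.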
+#define DEBUG_DISABLE_APP_ALLOCATORS 0 + +#define MAX_STRING_SIZE 1024 + +// This is defined in vk_layer.h, but if there's problems we need to create the define +// here. +#ifndef MAX_NUM_UNKNOWN_EXTS +#define MAX_NUM_UNKNOWN_EXTS 250 +#endif + +enum layer_type_flags { + VK_LAYER_TYPE_FLAG_INSTANCE_LAYER = 0x1, // If not set, indicates Device layer + VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER = 0x2, // If not set, indicates Implicit layer + VK_LAYER_TYPE_FLAG_META_LAYER = 0x4, // If not set, indicates standard layer +}; + +typedef enum VkStringErrorFlagBits { + VK_STRING_ERROR_NONE = 0x00000000, + VK_STRING_ERROR_LENGTH = 0x00000001, + VK_STRING_ERROR_BAD_DATA = 0x00000002, + VK_STRING_ERROR_NULL_PTR = 0x00000004, +} VkStringErrorFlagBits; +typedef VkFlags VkStringErrorFlags; + +static const int MaxLoaderStringLength = 256; +static const char UTF8_ONE_BYTE_CODE = 0xC0; +static const char UTF8_ONE_BYTE_MASK = 0xE0; +static const char UTF8_TWO_BYTE_CODE = 0xE0; +static const char UTF8_TWO_BYTE_MASK = 0xF0; +static const char UTF8_THREE_BYTE_CODE = 0xF0; +static const char UTF8_THREE_BYTE_MASK = 0xF8; +static const char UTF8_DATA_BYTE_CODE = 0x80; +static const char UTF8_DATA_BYTE_MASK = 0xC0; + +// form of all dynamic lists/arrays +// only the list element should be changed +struct loader_generic_list { + size_t capacity; + uint32_t count; + void *list; +}; + +struct loader_extension_list { + size_t capacity; + uint32_t count; + VkExtensionProperties *list; +}; + +struct loader_dev_ext_props { + VkExtensionProperties props; + uint32_t entrypoint_count; + char **entrypoints; +}; + +struct loader_device_extension_list { + size_t capacity; + uint32_t count; + struct loader_dev_ext_props *list; +}; + +struct loader_name_value { + char name[MAX_STRING_SIZE]; + char value[MAX_STRING_SIZE]; +}; + +struct loader_layer_functions { + char str_gipa[MAX_STRING_SIZE]; + char str_gdpa[MAX_STRING_SIZE]; + char str_negotiate_interface[MAX_STRING_SIZE]; + PFN_vkNegotiateLoaderLayerInterfaceVersion negotiate_layer_interface; + PFN_vkGetInstanceProcAddr get_instance_proc_addr; + PFN_vkGetDeviceProcAddr get_device_proc_addr; + PFN_GetPhysicalDeviceProcAddr get_physical_device_proc_addr; +}; + +struct loader_override_expiration { + uint16_t year; + uint8_t month; + uint8_t day; + uint8_t hour; + uint8_t minute; +}; + +struct loader_layer_properties { + VkLayerProperties info; + enum layer_type_flags type_flags; + uint32_t interface_version; // PFN_vkNegotiateLoaderLayerInterfaceVersion + char lib_name[MAX_STRING_SIZE]; + loader_platform_dl_handle lib_handle; + struct loader_layer_functions functions; + struct loader_extension_list instance_extension_list; + struct loader_device_extension_list device_extension_list; + struct loader_name_value disable_env_var; + struct loader_name_value enable_env_var; + uint32_t num_component_layers; + char (*component_layer_names)[MAX_STRING_SIZE]; + struct { + char enumerate_instance_extension_properties[MAX_STRING_SIZE]; + char enumerate_instance_layer_properties[MAX_STRING_SIZE]; + char enumerate_instance_version[MAX_STRING_SIZE]; + } pre_instance_functions; + uint32_t num_override_paths; + char (*override_paths)[MAX_STRING_SIZE]; + bool is_override; + bool has_expiration; + struct loader_override_expiration expiration; + bool keep; + uint32_t num_blacklist_layers; + char (*blacklist_layer_names)[MAX_STRING_SIZE]; + uint32_t num_app_key_paths; + char (*app_key_paths)[MAX_STRING_SIZE]; +}; + +struct loader_layer_list { + size_t capacity; + uint32_t count; + struct 
loader_layer_properties *list; +}; + +struct loader_dispatch_hash_list { + size_t capacity; + uint32_t count; + uint32_t *index; // index into the dev_ext dispatch table +}; + +// loader_dispatch_hash_entry and loader_dev_ext_dispatch_table.dev_ext have +// one to one correspondence; one loader_dispatch_hash_entry for one dev_ext +// dispatch entry. +// Also have a one to one correspondence with functions in dev_ext_trampoline.c +struct loader_dispatch_hash_entry { + char *func_name; + struct loader_dispatch_hash_list list; // to handle hashing collisions +}; + +typedef VkResult(VKAPI_PTR *PFN_vkDevExt)(VkDevice device); +struct loader_dev_ext_dispatch_table { + PFN_vkDevExt dev_ext[MAX_NUM_UNKNOWN_EXTS]; +}; + +struct loader_dev_dispatch_table { + VkLayerDispatchTable core_dispatch; + struct loader_dev_ext_dispatch_table ext_dispatch; +}; + +// per CreateDevice structure +struct loader_device { + struct loader_dev_dispatch_table loader_dispatch; + VkDevice chain_device; // device object from the dispatch chain + VkDevice icd_device; // device object from the icd + struct loader_physical_device_term *phys_dev_term; + + // List of activated layers. + // app_ is the version based on exactly what the application asked for. + // This is what must be returned to the application on Enumerate calls. + // expanded_ is the version based on expanding meta-layers into their + // individual component layers. This is what is used internally. + struct loader_layer_list app_activated_layer_list; + struct loader_layer_list expanded_activated_layer_list; + + VkAllocationCallbacks alloc_callbacks; + + // List of activated device extensions that have terminators implemented in the loader + struct { + bool khr_swapchain_enabled; + bool khr_display_swapchain_enabled; + bool khr_device_group_enabled; + bool ext_debug_marker_enabled; + bool ext_debug_utils_enabled; + bool ext_full_screen_exclusive_enabled; + } extensions; + + struct loader_device *next; +}; + +// Per ICD information + +// Per ICD structure +struct loader_icd_term { + // pointers to find other structs + const struct loader_scanned_icd *scanned_icd; + const struct loader_instance *this_instance; + struct loader_device *logical_device_list; + VkInstance instance; // instance object from the icd + struct loader_icd_term_dispatch dispatch; + + struct loader_icd_term *next; + + PFN_PhysDevExt phys_dev_ext[MAX_NUM_UNKNOWN_EXTS]; +}; + +// Per ICD library structure +struct loader_icd_tramp_list { + size_t capacity; + uint32_t count; + struct loader_scanned_icd *scanned_list; +}; + +struct loader_instance_dispatch_table { + VkLayerInstanceDispatchTable layer_inst_disp; // must be first entry in structure + + // Physical device functions unknown to the loader + PFN_PhysDevExt phys_dev_ext[MAX_NUM_UNKNOWN_EXTS]; +}; + +// Per instance structure +struct loader_instance { + struct loader_instance_dispatch_table *disp; // must be first entry in structure + + // Vulkan API version the app is intending to use. + uint16_t app_api_major_version; + uint16_t app_api_minor_version; + + // We need to manually track physical devices over time. If the user + // re-queries the information, we don't want to delete old data or + // create new data unless necessary. 
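+    // The *_term fields track the terminator-side wrappers around the physical devices
+    // returned by the ICDs, while the *_tramp fields track the trampoline-side wrappers
+    // that are handed back to the application.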
+ uint32_t total_gpu_count; + uint32_t phys_dev_count_term; + struct loader_physical_device_term **phys_devs_term; + uint32_t phys_dev_count_tramp; + struct loader_physical_device_tramp **phys_devs_tramp; + + // We also need to manually track physical device groups, but we don't need + // loader specific structures since we have that content in the physical + // device stored internal to the public structures. + uint32_t phys_dev_group_count_term; + struct VkPhysicalDeviceGroupProperties **phys_dev_groups_term; + uint32_t phys_dev_group_count_tramp; + struct VkPhysicalDeviceGroupProperties **phys_dev_groups_tramp; + + struct loader_instance *next; + + uint32_t total_icd_count; + struct loader_icd_term *icd_terms; + struct loader_icd_tramp_list icd_tramp_list; + + struct loader_dispatch_hash_entry dev_ext_disp_hash[MAX_NUM_UNKNOWN_EXTS]; + struct loader_dispatch_hash_entry phys_dev_ext_disp_hash[MAX_NUM_UNKNOWN_EXTS]; + + struct loader_msg_callback_map_entry *icd_msg_callback_map; + + struct loader_layer_list instance_layer_list; + bool override_layer_present; + + // List of activated layers. + // app_ is the version based on exactly what the application asked for. + // This is what must be returned to the application on Enumerate calls. + // expanded_ is the version based on expanding meta-layers into their + // individual component layers. This is what is used internally. + struct loader_layer_list app_activated_layer_list; + struct loader_layer_list expanded_activated_layer_list; + + VkInstance instance; // layers/ICD instance returned to trampoline + + struct loader_extension_list ext_list; // icds and loaders extensions + union loader_instance_extension_enables enabled_known_extensions; + + VkLayerDbgFunctionNode *DbgFunctionHead; + uint32_t num_tmp_report_callbacks; + VkDebugReportCallbackCreateInfoEXT *tmp_report_create_infos; + VkDebugReportCallbackEXT *tmp_report_callbacks; + uint32_t num_tmp_messengers; + VkDebugUtilsMessengerCreateInfoEXT *tmp_messenger_create_infos; + VkDebugUtilsMessengerEXT *tmp_messengers; + + VkAllocationCallbacks alloc_callbacks; + + bool wsi_surface_enabled; +#ifdef VK_USE_PLATFORM_WIN32_KHR + bool wsi_win32_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + bool wsi_wayland_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_XCB_KHR + bool wsi_xcb_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_XLIB_KHR + bool wsi_xlib_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + bool wsi_directfb_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_ANDROID_KHR + bool wsi_android_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_MACOS_MVK + bool wsi_macos_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_IOS_MVK + bool wsi_ios_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_GGP + bool wsi_ggp_surface_enabled; +#endif + bool wsi_headless_surface_enabled; +#if defined(VK_USE_PLATFORM_METAL_EXT) + bool wsi_metal_surface_enabled; +#endif +#ifdef VK_USE_PLATFORM_FUCHSIA + bool wsi_imagepipe_surface_enabled; +#endif + bool wsi_display_enabled; + bool wsi_display_props2_enabled; +}; + +// VkPhysicalDevice requires special treatment by loader. Firstly, terminator +// code must be able to get the struct loader_icd_term to call into the proper +// driver (multiple ICD/gpu case). This can be accomplished by wrapping the +// created VkPhysicalDevice in loader terminate_EnumeratePhysicalDevices(). +// Secondly, the loader must be able to handle wrapped by layer VkPhysicalDevice +// in trampoline code. 
This implies, that the loader trampoline code must also +// wrap the VkPhysicalDevice object in trampoline code. Thus, loader has to +// wrap the VkPhysicalDevice created object twice. In trampoline code it can't +// rely on the terminator object wrapping since a layer may also wrap. Since +// trampoline code wraps the VkPhysicalDevice this means all loader trampoline +// code that passes a VkPhysicalDevice should unwrap it. + +// Per enumerated PhysicalDevice structure, used to wrap in trampoline code and +// also same structure used to wrap in terminator code +struct loader_physical_device_tramp { + struct loader_instance_dispatch_table *disp; // must be first entry in structure + struct loader_instance *this_instance; + VkPhysicalDevice phys_dev; // object from layers/loader terminator +}; + +// Per enumerated PhysicalDevice structure, used to wrap in terminator code +struct loader_physical_device_term { + struct loader_instance_dispatch_table *disp; // must be first entry in structure + struct loader_icd_term *this_icd_term; + uint8_t icd_index; + VkPhysicalDevice phys_dev; // object from ICD +}; + +struct loader_struct { + struct loader_instance *instances; +}; + +struct loader_scanned_icd { + char *lib_name; + loader_platform_dl_handle handle; + uint32_t api_version; + uint32_t interface_version; + PFN_vkGetInstanceProcAddr GetInstanceProcAddr; + PFN_GetPhysicalDeviceProcAddr GetPhysicalDeviceProcAddr; + PFN_vkCreateInstance CreateInstance; + PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties; +#if defined(VK_USE_PLATFORM_WIN32_KHR) + PFN_vk_icdEnumerateAdapterPhysicalDevices EnumerateAdapterPhysicalDevices; +#endif +}; + +static inline struct loader_instance *loader_instance(VkInstance instance) { return (struct loader_instance *)instance; } + +static inline VkPhysicalDevice loader_unwrap_physical_device(VkPhysicalDevice physicalDevice) { + struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice; + return phys_dev->phys_dev; +} + +static inline void loader_set_dispatch(void *obj, const void *data) { *((const void **)obj) = data; } + +static inline VkLayerDispatchTable *loader_get_dispatch(const void *obj) { return *((VkLayerDispatchTable **)obj); } + +static inline struct loader_dev_dispatch_table *loader_get_dev_dispatch(const void *obj) { + return *((struct loader_dev_dispatch_table **)obj); +} + +static inline VkLayerInstanceDispatchTable *loader_get_instance_layer_dispatch(const void *obj) { + return *((VkLayerInstanceDispatchTable **)obj); +} + +static inline struct loader_instance_dispatch_table *loader_get_instance_dispatch(const void *obj) { + return *((struct loader_instance_dispatch_table **)obj); +} + +static inline void loader_init_dispatch(void *obj, const void *data) { +#ifdef DEBUG + assert(valid_loader_magic_value(obj) && + "Incompatible ICD, first dword must be initialized to " + "ICD_LOADER_MAGIC. 
See loader/README.md for details."); +#endif + + loader_set_dispatch(obj, data); +} + +// Global variables used across files +extern struct loader_struct loader; +extern THREAD_LOCAL_DECL struct loader_instance *tls_instance; +extern loader_platform_thread_mutex loader_lock; +extern loader_platform_thread_mutex loader_json_lock; +extern loader_platform_thread_mutex loader_preload_icd_lock; + +struct loader_msg_callback_map_entry { + VkDebugReportCallbackEXT icd_obj; + VkDebugReportCallbackEXT loader_obj; +}; + +// Helper function definitions +void *loader_instance_heap_alloc(const struct loader_instance *instance, size_t size, VkSystemAllocationScope allocationScope); +void loader_instance_heap_free(const struct loader_instance *instance, void *pMemory); +void *loader_instance_heap_realloc(const struct loader_instance *instance, void *pMemory, size_t orig_size, size_t size, + VkSystemAllocationScope alloc_scope); +void *loader_instance_tls_heap_alloc(size_t size); +void loader_instance_tls_heap_free(void *pMemory); +void *loader_device_heap_alloc(const struct loader_device *device, size_t size, VkSystemAllocationScope allocationScope); +void loader_device_heap_free(const struct loader_device *device, void *pMemory); +void *loader_device_heap_realloc(const struct loader_device *device, void *pMemory, size_t orig_size, size_t size, + VkSystemAllocationScope alloc_scope); + +void loader_log(const struct loader_instance *inst, VkFlags msg_type, int32_t msg_code, const char *format, ...); + +bool compare_vk_extension_properties(const VkExtensionProperties *op1, const VkExtensionProperties *op2); + +VkResult loaderValidateLayers(const struct loader_instance *inst, const uint32_t layer_count, + const char *const *ppEnabledLayerNames, const struct loader_layer_list *list); + +VkResult loader_validate_instance_extensions(struct loader_instance *inst, const struct loader_extension_list *icd_exts, + const struct loader_layer_list *instance_layer, + const VkInstanceCreateInfo *pCreateInfo); + +void loader_initialize(void); +void loader_preload_icds(void); +void loader_unload_preloaded_icds(void); +bool has_vk_extension_property_array(const VkExtensionProperties *vk_ext_prop, const uint32_t count, + const VkExtensionProperties *ext_array); +bool has_vk_extension_property(const VkExtensionProperties *vk_ext_prop, const struct loader_extension_list *ext_list); + +VkResult loader_add_to_ext_list(const struct loader_instance *inst, struct loader_extension_list *ext_list, + uint32_t prop_list_count, const VkExtensionProperties *props); +VkResult loader_add_to_dev_ext_list(const struct loader_instance *inst, struct loader_device_extension_list *ext_list, + const VkExtensionProperties *props, uint32_t entry_count, char **entrys); +VkResult loader_add_device_extensions(const struct loader_instance *inst, + PFN_vkEnumerateDeviceExtensionProperties fpEnumerateDeviceExtensionProperties, + VkPhysicalDevice physical_device, const char *lib_name, + struct loader_extension_list *ext_list); +VkResult loader_init_generic_list(const struct loader_instance *inst, struct loader_generic_list *list_info, size_t element_size); +void loader_destroy_generic_list(const struct loader_instance *inst, struct loader_generic_list *list); +void loaderDestroyLayerList(const struct loader_instance *inst, struct loader_device *device, struct loader_layer_list *layer_list); +void loaderDeleteLayerListAndProperties(const struct loader_instance *inst, struct loader_layer_list *layer_list); +VkResult loaderAddLayerNameToList(const struct 
loader_instance *inst, const char *name, const enum layer_type_flags type_flags, + const struct loader_layer_list *source_list, struct loader_layer_list *target_list, + struct loader_layer_list *expanded_target_list); +void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list); +VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list); +void loaderScanForLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers); +void loaderScanForImplicitLayers(struct loader_instance *inst, struct loader_layer_list *instance_layers); +bool loaderImplicitLayerIsEnabled(const struct loader_instance *inst, const struct loader_layer_properties *prop); +VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list, + struct loader_extension_list *inst_exts); +struct loader_icd_term *loader_get_icd_and_device(const void *device, struct loader_device **found_dev, uint32_t *icd_index); +void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev); +void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName); +void *loader_get_dev_ext_trampoline(uint32_t index); +bool loader_phys_dev_ext_gpa(struct loader_instance *inst, const char *funcName, bool perform_checking, void **tramp_addr, + void **term_addr); +void *loader_get_phys_dev_ext_tramp(uint32_t index); +void *loader_get_phys_dev_ext_termin(uint32_t index); +struct loader_instance *loader_get_instance(const VkInstance instance); +void loaderDeactivateLayers(const struct loader_instance *instance, struct loader_device *device, struct loader_layer_list *list); +struct loader_device *loader_create_logical_device(const struct loader_instance *inst, const VkAllocationCallbacks *pAllocator); +void loader_add_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term, + struct loader_device *found_dev); +void loader_remove_logical_device(const struct loader_instance *inst, struct loader_icd_term *icd_term, + struct loader_device *found_dev, const VkAllocationCallbacks *pAllocator); +// NOTE: Outside of loader, this entry-point is only provided for error +// cleanup. 
+void loader_destroy_logical_device(const struct loader_instance *inst, struct loader_device *dev, + const VkAllocationCallbacks *pAllocator); + +VkResult loaderEnableInstanceLayers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo, + const struct loader_layer_list *instance_layers); + +VkResult loader_create_instance_chain(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, + struct loader_instance *inst, VkInstance *created_instance); + +void loaderActivateInstanceLayerExtensions(struct loader_instance *inst, VkInstance created_inst); + +VKAPI_ATTR VkResult VKAPI_CALL loader_layer_create_device(VkInstance instance, VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, + PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA); +VKAPI_ATTR void VKAPI_CALL loader_layer_destroy_device(VkDevice device, const VkAllocationCallbacks *pAllocator, + PFN_vkDestroyDevice destroyFunction); + +VkResult loader_create_device_chain(const VkPhysicalDevice pd, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, const struct loader_instance *inst, + struct loader_device *dev, PFN_vkGetInstanceProcAddr callingLayer, + PFN_vkGetDeviceProcAddr *layerNextGDPA); + +VkResult loader_validate_device_extensions(struct loader_instance *this_instance, + const struct loader_layer_list *activated_device_layers, + const struct loader_extension_list *icd_exts, const VkDeviceCreateInfo *pCreateInfo); + +VkResult setupLoaderTrampPhysDevs(VkInstance instance); +VkResult setupLoaderTermPhysDevs(struct loader_instance *inst); + +VkStringErrorFlags vk_string_validate(const int max_length, const char *char_array); + +#endif // LOADER_H diff --git a/src/third_party/vulkan/loader/murmurhash.c b/src/third_party/vulkan/loader/murmurhash.c new file mode 100644 index 0000000..40f0d5e --- /dev/null +++ b/src/third_party/vulkan/loader/murmurhash.c @@ -0,0 +1,98 @@ + +/** + * `murmurhash.h' - murmurhash + * + * copyright (c) 2014 joseph werle + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and/or associated documentation files (the "Materials"), to + * deal in the Materials without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Materials, and to permit persons to whom the Materials are + * furnished to do so, subject to the following conditions: + * + * The above copyright notice(s) and this permission notice shall be included in + * all copies or substantial portions of the Materials. + * + * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE + * USE OR OTHER DEALINGS IN THE MATERIALS. 
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include "murmurhash.h"
+
+uint32_t murmurhash(const char *key, size_t len, uint32_t seed) {
+    uint32_t c1 = 0xcc9e2d51;
+    uint32_t c2 = 0x1b873593;
+    uint32_t r1 = 15;
+    uint32_t r2 = 13;
+    uint32_t m = 5;
+    uint32_t n = 0xe6546b64;
+    uint32_t h = 0;
+    uint32_t k = 0;
+    uint8_t *d = (uint8_t *)key;  // 32 bit extract from `key'
+    const uint32_t *chunks = NULL;
+    const uint8_t *tail = NULL;  // tail - last 8 bytes
+    int i = 0;
+    int l = (int)len / 4;  // chunk length
+
+    h = seed;
+
+    chunks = (const uint32_t *)(d + l * 4);  // body
+    tail = (const uint8_t *)(d + l * 4);     // last 8 byte chunk of `key'
+
+    // for each 4 byte chunk of `key'
+    for (i = -l; i != 0; ++i) {
+        // next 4 byte chunk of `key'
+        k = chunks[i];
+
+        // encode next 4 byte chunk of `key'
+        k *= c1;
+        k = (k << r1) | (k >> (32 - r1));
+        k *= c2;
+
+        // append to hash
+        h ^= k;
+        h = (h << r2) | (h >> (32 - r2));
+        h = h * m + n;
+    }
+
+    k = 0;
+
+    // remainder
+    switch (len & 3) {  // `len % 4'
+        case 3:
+            k ^= (tail[2] << 16);
+            // fall through
+        case 2:
+            k ^= (tail[1] << 8);
+            // fall through
+        case 1:
+            k ^= tail[0];
+            k *= c1;
+            k = (k << r1) | (k >> (32 - r1));
+            k *= c2;
+            h ^= k;
+    }
+
+    h ^= len;
+
+    h ^= (h >> 16);
+    h *= 0x85ebca6b;
+    h ^= (h >> 13);
+    h *= 0xc2b2ae35;
+    h ^= (h >> 16);
+
+    return h;
+}
diff --git a/src/third_party/vulkan/loader/murmurhash.h b/src/third_party/vulkan/loader/murmurhash.h
new file mode 100644
index 0000000..775532e
--- /dev/null
+++ b/src/third_party/vulkan/loader/murmurhash.h
@@ -0,0 +1,52 @@
+
+/**
+ * `murmurhash.h' - murmurhash
+ *
+ * copyright (c) 2014 joseph werle
+ * Copyright (c) 2015-2016 The Khronos Group Inc.
+ * Copyright (c) 2015-2016 Valve Corporation
+ * Copyright (c) 2015-2016 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and/or associated documentation files (the "Materials"), to
+ * deal in the Materials without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Materials, and to permit persons to whom the Materials are
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included in
+ * all copies or substantial portions of the Materials.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ *
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
+ * USE OR OTHER DEALINGS IN THE MATERIALS.
+ */
+
+#ifndef MURMURHASH_H
+#define MURMURHASH_H 1
+
+#include <stdint.h>
+
+#define MURMURHASH_VERSION "0.0.3"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Returns a murmur hash of `key' based on `seed'
+ * using the MurmurHash3 algorithm
+ */
+
+uint32_t murmurhash(const char *key, size_t len, uint32_t seed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/third_party/vulkan/loader/phys_dev_ext.c b/src/third_party/vulkan/loader/phys_dev_ext.c
new file mode 100644
index 0000000..91e0ef8
--- /dev/null
+++ b/src/third_party/vulkan/loader/phys_dev_ext.c
@@ -0,0 +1,1056 @@
+/*
+ *
+ * Copyright (c) 2016-17 The Khronos Group Inc.
+ * Copyright (c) 2016-17 Valve Corporation + * Copyright (c) 2016-17 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Mark Young + * Author: Lenny Komow + * + */ + +// This code is used to enable generic instance extensions which use a physical device +// as the first parameter. If the extension is already known by the loader, it will +// not use this code, but instead use the more direct route. However, if it is +// unknown to the loader, it will use this code. Technically, this is not trampoline +// code since we don't want to optimize it out. + +#include "vk_loader_platform.h" +#include "loader.h" + +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC optimize(3) // force gcc to use tail-calls +#endif + +// Declarations for the trampoline +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp0(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp1(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp2(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp3(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp4(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp5(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp6(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp7(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp8(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp9(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp10(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp11(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp12(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp13(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp14(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp15(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp16(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp17(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp18(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp19(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp20(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp21(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp22(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp23(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp24(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp25(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp26(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp27(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp28(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp29(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp30(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp31(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp32(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTramp33(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp34(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp35(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp36(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp37(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp38(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp39(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp40(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp41(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp42(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp43(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp44(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp45(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp46(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp47(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp48(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp49(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp50(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp51(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp52(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp53(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp54(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp55(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp56(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp57(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp58(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp59(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp60(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp61(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp62(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp63(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp64(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp65(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp66(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp67(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp68(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp69(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp70(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp71(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp72(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp73(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp74(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp75(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp76(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp77(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp78(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp79(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp80(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp81(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp82(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp83(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp84(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp85(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTramp86(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp87(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp88(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp89(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp90(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp91(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp92(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp93(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp94(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp95(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp96(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp97(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp98(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp99(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp100(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp101(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp102(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp103(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp104(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp105(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp106(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp107(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp108(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp109(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp110(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp111(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp112(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp113(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp114(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp115(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp116(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp117(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp118(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp119(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp120(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp121(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp122(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp123(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp124(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp125(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp126(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp127(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp128(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp129(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp130(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp131(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp132(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp133(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp134(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp135(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp136(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp137(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTramp138(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp139(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp140(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp141(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp142(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp143(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp144(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp145(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp146(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp147(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp148(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp149(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp150(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp151(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp152(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp153(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp154(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp155(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp156(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp157(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp158(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp159(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp160(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp161(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp162(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp163(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp164(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp165(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp166(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp167(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp168(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp169(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp170(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp171(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp172(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp173(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp174(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp175(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp176(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp177(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp178(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp179(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp180(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp181(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp182(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp183(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp184(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp185(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp186(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp187(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp188(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp189(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTramp190(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp191(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp192(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp193(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp194(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp195(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp196(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp197(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp198(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp199(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp200(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp201(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp202(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp203(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp204(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp205(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp206(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp207(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp208(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp209(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp210(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp211(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp212(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp213(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp214(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp215(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp216(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp217(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp218(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp219(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp220(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp221(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp222(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp223(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp224(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp225(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp226(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp227(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp228(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp229(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp230(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp231(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp232(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp233(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp234(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp235(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp236(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp237(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp238(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp239(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp240(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp241(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTramp242(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp243(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp244(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp245(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp246(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp247(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp248(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp249(VkPhysicalDevice); + +// Disable clang-format for lists of macros +// clang-format off + +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin0(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin1(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin2(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin3(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin4(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin5(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin6(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin7(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin8(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin9(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin10(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin11(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin12(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin13(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin14(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin15(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin16(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin17(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin18(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin19(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin20(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin21(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin22(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin23(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin24(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin25(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin26(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin27(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin28(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin29(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin30(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin31(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin32(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin33(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin34(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin35(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin36(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin37(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin38(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin39(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin40(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin41(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin42(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTermin43(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin44(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin45(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin46(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin47(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin48(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin49(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin50(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin51(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin52(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin53(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin54(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin55(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin56(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin57(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin58(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin59(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin60(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin61(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin62(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin63(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin64(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin65(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin66(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin67(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin68(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin69(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin70(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin71(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin72(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin73(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin74(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin75(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin76(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin77(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin78(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin79(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin80(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin81(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin82(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin83(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin84(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin85(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin86(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin87(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin88(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin89(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin90(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin91(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin92(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin93(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin94(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTermin95(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin96(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin97(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin98(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin99(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin100(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin101(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin102(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin103(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin104(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin105(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin106(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin107(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin108(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin109(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin110(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin111(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin112(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin113(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin114(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin115(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin116(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin117(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin118(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin119(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin120(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin121(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin122(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin123(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin124(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin125(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin126(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin127(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin128(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin129(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin130(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin131(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin132(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin133(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin134(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin135(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin136(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin137(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin138(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin139(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin140(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin141(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin142(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin143(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin144(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin145(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin146(VkPhysicalDevice); 
+VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin147(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin148(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin149(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin150(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin151(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin152(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin153(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin154(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin155(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin156(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin157(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin158(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin159(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin160(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin161(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin162(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin163(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin164(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin165(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin166(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin167(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin168(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin169(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin170(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin171(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin172(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin173(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin174(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin175(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin176(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin177(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin178(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin179(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin180(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin181(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin182(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin183(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin184(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin185(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin186(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin187(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin188(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin189(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin190(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin191(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin192(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin193(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin194(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin195(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin196(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin197(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTermin198(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin199(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin200(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin201(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin202(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin203(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin204(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin205(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin206(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin207(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin208(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin209(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin210(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin211(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin212(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin213(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin214(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin215(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin216(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin217(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin218(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin219(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin220(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin221(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin222(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin223(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin224(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin225(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin226(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin227(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin228(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin229(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin230(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin231(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin232(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin233(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin234(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin235(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin236(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin237(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin238(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin239(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin240(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin241(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin242(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin243(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin244(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin245(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin246(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin247(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin248(VkPhysicalDevice); +VKAPI_ATTR void VKAPI_CALL 
vkPhysDevExtTermin249(VkPhysicalDevice); + + +void *loader_get_phys_dev_ext_tramp(uint32_t index) { + switch (index) { +#define TRAMP_CASE_HANDLE(num) case num: return vkPhysDevExtTramp##num + TRAMP_CASE_HANDLE(0); + TRAMP_CASE_HANDLE(1); + TRAMP_CASE_HANDLE(2); + TRAMP_CASE_HANDLE(3); + TRAMP_CASE_HANDLE(4); + TRAMP_CASE_HANDLE(5); + TRAMP_CASE_HANDLE(6); + TRAMP_CASE_HANDLE(7); + TRAMP_CASE_HANDLE(8); + TRAMP_CASE_HANDLE(9); + TRAMP_CASE_HANDLE(10); + TRAMP_CASE_HANDLE(11); + TRAMP_CASE_HANDLE(12); + TRAMP_CASE_HANDLE(13); + TRAMP_CASE_HANDLE(14); + TRAMP_CASE_HANDLE(15); + TRAMP_CASE_HANDLE(16); + TRAMP_CASE_HANDLE(17); + TRAMP_CASE_HANDLE(18); + TRAMP_CASE_HANDLE(19); + TRAMP_CASE_HANDLE(20); + TRAMP_CASE_HANDLE(21); + TRAMP_CASE_HANDLE(22); + TRAMP_CASE_HANDLE(23); + TRAMP_CASE_HANDLE(24); + TRAMP_CASE_HANDLE(25); + TRAMP_CASE_HANDLE(26); + TRAMP_CASE_HANDLE(27); + TRAMP_CASE_HANDLE(28); + TRAMP_CASE_HANDLE(29); + TRAMP_CASE_HANDLE(30); + TRAMP_CASE_HANDLE(31); + TRAMP_CASE_HANDLE(32); + TRAMP_CASE_HANDLE(33); + TRAMP_CASE_HANDLE(34); + TRAMP_CASE_HANDLE(35); + TRAMP_CASE_HANDLE(36); + TRAMP_CASE_HANDLE(37); + TRAMP_CASE_HANDLE(38); + TRAMP_CASE_HANDLE(39); + TRAMP_CASE_HANDLE(40); + TRAMP_CASE_HANDLE(41); + TRAMP_CASE_HANDLE(42); + TRAMP_CASE_HANDLE(43); + TRAMP_CASE_HANDLE(44); + TRAMP_CASE_HANDLE(45); + TRAMP_CASE_HANDLE(46); + TRAMP_CASE_HANDLE(47); + TRAMP_CASE_HANDLE(48); + TRAMP_CASE_HANDLE(49); + TRAMP_CASE_HANDLE(50); + TRAMP_CASE_HANDLE(51); + TRAMP_CASE_HANDLE(52); + TRAMP_CASE_HANDLE(53); + TRAMP_CASE_HANDLE(54); + TRAMP_CASE_HANDLE(55); + TRAMP_CASE_HANDLE(56); + TRAMP_CASE_HANDLE(57); + TRAMP_CASE_HANDLE(58); + TRAMP_CASE_HANDLE(59); + TRAMP_CASE_HANDLE(60); + TRAMP_CASE_HANDLE(61); + TRAMP_CASE_HANDLE(62); + TRAMP_CASE_HANDLE(63); + TRAMP_CASE_HANDLE(64); + TRAMP_CASE_HANDLE(65); + TRAMP_CASE_HANDLE(66); + TRAMP_CASE_HANDLE(67); + TRAMP_CASE_HANDLE(68); + TRAMP_CASE_HANDLE(69); + TRAMP_CASE_HANDLE(70); + TRAMP_CASE_HANDLE(71); + TRAMP_CASE_HANDLE(72); + TRAMP_CASE_HANDLE(73); + TRAMP_CASE_HANDLE(74); + TRAMP_CASE_HANDLE(75); + TRAMP_CASE_HANDLE(76); + TRAMP_CASE_HANDLE(77); + TRAMP_CASE_HANDLE(78); + TRAMP_CASE_HANDLE(79); + TRAMP_CASE_HANDLE(80); + TRAMP_CASE_HANDLE(81); + TRAMP_CASE_HANDLE(82); + TRAMP_CASE_HANDLE(83); + TRAMP_CASE_HANDLE(84); + TRAMP_CASE_HANDLE(85); + TRAMP_CASE_HANDLE(86); + TRAMP_CASE_HANDLE(87); + TRAMP_CASE_HANDLE(88); + TRAMP_CASE_HANDLE(89); + TRAMP_CASE_HANDLE(90); + TRAMP_CASE_HANDLE(91); + TRAMP_CASE_HANDLE(92); + TRAMP_CASE_HANDLE(93); + TRAMP_CASE_HANDLE(94); + TRAMP_CASE_HANDLE(95); + TRAMP_CASE_HANDLE(96); + TRAMP_CASE_HANDLE(97); + TRAMP_CASE_HANDLE(98); + TRAMP_CASE_HANDLE(99); + TRAMP_CASE_HANDLE(100); + TRAMP_CASE_HANDLE(101); + TRAMP_CASE_HANDLE(102); + TRAMP_CASE_HANDLE(103); + TRAMP_CASE_HANDLE(104); + TRAMP_CASE_HANDLE(105); + TRAMP_CASE_HANDLE(106); + TRAMP_CASE_HANDLE(107); + TRAMP_CASE_HANDLE(108); + TRAMP_CASE_HANDLE(109); + TRAMP_CASE_HANDLE(110); + TRAMP_CASE_HANDLE(111); + TRAMP_CASE_HANDLE(112); + TRAMP_CASE_HANDLE(113); + TRAMP_CASE_HANDLE(114); + TRAMP_CASE_HANDLE(115); + TRAMP_CASE_HANDLE(116); + TRAMP_CASE_HANDLE(117); + TRAMP_CASE_HANDLE(118); + TRAMP_CASE_HANDLE(119); + TRAMP_CASE_HANDLE(120); + TRAMP_CASE_HANDLE(121); + TRAMP_CASE_HANDLE(122); + TRAMP_CASE_HANDLE(123); + TRAMP_CASE_HANDLE(124); + TRAMP_CASE_HANDLE(125); + TRAMP_CASE_HANDLE(126); + TRAMP_CASE_HANDLE(127); + TRAMP_CASE_HANDLE(128); + TRAMP_CASE_HANDLE(129); + TRAMP_CASE_HANDLE(130); + TRAMP_CASE_HANDLE(131); + TRAMP_CASE_HANDLE(132); + 
TRAMP_CASE_HANDLE(133); + TRAMP_CASE_HANDLE(134); + TRAMP_CASE_HANDLE(135); + TRAMP_CASE_HANDLE(136); + TRAMP_CASE_HANDLE(137); + TRAMP_CASE_HANDLE(138); + TRAMP_CASE_HANDLE(139); + TRAMP_CASE_HANDLE(140); + TRAMP_CASE_HANDLE(141); + TRAMP_CASE_HANDLE(142); + TRAMP_CASE_HANDLE(143); + TRAMP_CASE_HANDLE(144); + TRAMP_CASE_HANDLE(145); + TRAMP_CASE_HANDLE(146); + TRAMP_CASE_HANDLE(147); + TRAMP_CASE_HANDLE(148); + TRAMP_CASE_HANDLE(149); + TRAMP_CASE_HANDLE(150); + TRAMP_CASE_HANDLE(151); + TRAMP_CASE_HANDLE(152); + TRAMP_CASE_HANDLE(153); + TRAMP_CASE_HANDLE(154); + TRAMP_CASE_HANDLE(155); + TRAMP_CASE_HANDLE(156); + TRAMP_CASE_HANDLE(157); + TRAMP_CASE_HANDLE(158); + TRAMP_CASE_HANDLE(159); + TRAMP_CASE_HANDLE(160); + TRAMP_CASE_HANDLE(161); + TRAMP_CASE_HANDLE(162); + TRAMP_CASE_HANDLE(163); + TRAMP_CASE_HANDLE(164); + TRAMP_CASE_HANDLE(165); + TRAMP_CASE_HANDLE(166); + TRAMP_CASE_HANDLE(167); + TRAMP_CASE_HANDLE(168); + TRAMP_CASE_HANDLE(169); + TRAMP_CASE_HANDLE(170); + TRAMP_CASE_HANDLE(171); + TRAMP_CASE_HANDLE(172); + TRAMP_CASE_HANDLE(173); + TRAMP_CASE_HANDLE(174); + TRAMP_CASE_HANDLE(175); + TRAMP_CASE_HANDLE(176); + TRAMP_CASE_HANDLE(177); + TRAMP_CASE_HANDLE(178); + TRAMP_CASE_HANDLE(179); + TRAMP_CASE_HANDLE(180); + TRAMP_CASE_HANDLE(181); + TRAMP_CASE_HANDLE(182); + TRAMP_CASE_HANDLE(183); + TRAMP_CASE_HANDLE(184); + TRAMP_CASE_HANDLE(185); + TRAMP_CASE_HANDLE(186); + TRAMP_CASE_HANDLE(187); + TRAMP_CASE_HANDLE(188); + TRAMP_CASE_HANDLE(189); + TRAMP_CASE_HANDLE(190); + TRAMP_CASE_HANDLE(191); + TRAMP_CASE_HANDLE(192); + TRAMP_CASE_HANDLE(193); + TRAMP_CASE_HANDLE(194); + TRAMP_CASE_HANDLE(195); + TRAMP_CASE_HANDLE(196); + TRAMP_CASE_HANDLE(197); + TRAMP_CASE_HANDLE(198); + TRAMP_CASE_HANDLE(199); + TRAMP_CASE_HANDLE(200); + TRAMP_CASE_HANDLE(201); + TRAMP_CASE_HANDLE(202); + TRAMP_CASE_HANDLE(203); + TRAMP_CASE_HANDLE(204); + TRAMP_CASE_HANDLE(205); + TRAMP_CASE_HANDLE(206); + TRAMP_CASE_HANDLE(207); + TRAMP_CASE_HANDLE(208); + TRAMP_CASE_HANDLE(209); + TRAMP_CASE_HANDLE(210); + TRAMP_CASE_HANDLE(211); + TRAMP_CASE_HANDLE(212); + TRAMP_CASE_HANDLE(213); + TRAMP_CASE_HANDLE(214); + TRAMP_CASE_HANDLE(215); + TRAMP_CASE_HANDLE(216); + TRAMP_CASE_HANDLE(217); + TRAMP_CASE_HANDLE(218); + TRAMP_CASE_HANDLE(219); + TRAMP_CASE_HANDLE(220); + TRAMP_CASE_HANDLE(221); + TRAMP_CASE_HANDLE(222); + TRAMP_CASE_HANDLE(223); + TRAMP_CASE_HANDLE(224); + TRAMP_CASE_HANDLE(225); + TRAMP_CASE_HANDLE(226); + TRAMP_CASE_HANDLE(227); + TRAMP_CASE_HANDLE(228); + TRAMP_CASE_HANDLE(229); + TRAMP_CASE_HANDLE(230); + TRAMP_CASE_HANDLE(231); + TRAMP_CASE_HANDLE(232); + TRAMP_CASE_HANDLE(233); + TRAMP_CASE_HANDLE(234); + TRAMP_CASE_HANDLE(235); + TRAMP_CASE_HANDLE(236); + TRAMP_CASE_HANDLE(237); + TRAMP_CASE_HANDLE(238); + TRAMP_CASE_HANDLE(239); + TRAMP_CASE_HANDLE(240); + TRAMP_CASE_HANDLE(241); + TRAMP_CASE_HANDLE(242); + TRAMP_CASE_HANDLE(243); + TRAMP_CASE_HANDLE(244); + TRAMP_CASE_HANDLE(245); + TRAMP_CASE_HANDLE(246); + TRAMP_CASE_HANDLE(247); + TRAMP_CASE_HANDLE(248); + TRAMP_CASE_HANDLE(249); + } + return NULL; +} + +void *loader_get_phys_dev_ext_termin(uint32_t index) { + switch (index) { +#define TERM_CASE_HANDLE(num) case num: return vkPhysDevExtTermin##num + TERM_CASE_HANDLE(0); + TERM_CASE_HANDLE(1); + TERM_CASE_HANDLE(2); + TERM_CASE_HANDLE(3); + TERM_CASE_HANDLE(4); + TERM_CASE_HANDLE(5); + TERM_CASE_HANDLE(6); + TERM_CASE_HANDLE(7); + TERM_CASE_HANDLE(8); + TERM_CASE_HANDLE(9); + TERM_CASE_HANDLE(10); + TERM_CASE_HANDLE(11); + TERM_CASE_HANDLE(12); + TERM_CASE_HANDLE(13); + 
TERM_CASE_HANDLE(14); + TERM_CASE_HANDLE(15); + TERM_CASE_HANDLE(16); + TERM_CASE_HANDLE(17); + TERM_CASE_HANDLE(18); + TERM_CASE_HANDLE(19); + TERM_CASE_HANDLE(20); + TERM_CASE_HANDLE(21); + TERM_CASE_HANDLE(22); + TERM_CASE_HANDLE(23); + TERM_CASE_HANDLE(24); + TERM_CASE_HANDLE(25); + TERM_CASE_HANDLE(26); + TERM_CASE_HANDLE(27); + TERM_CASE_HANDLE(28); + TERM_CASE_HANDLE(29); + TERM_CASE_HANDLE(30); + TERM_CASE_HANDLE(31); + TERM_CASE_HANDLE(32); + TERM_CASE_HANDLE(33); + TERM_CASE_HANDLE(34); + TERM_CASE_HANDLE(35); + TERM_CASE_HANDLE(36); + TERM_CASE_HANDLE(37); + TERM_CASE_HANDLE(38); + TERM_CASE_HANDLE(39); + TERM_CASE_HANDLE(40); + TERM_CASE_HANDLE(41); + TERM_CASE_HANDLE(42); + TERM_CASE_HANDLE(43); + TERM_CASE_HANDLE(44); + TERM_CASE_HANDLE(45); + TERM_CASE_HANDLE(46); + TERM_CASE_HANDLE(47); + TERM_CASE_HANDLE(48); + TERM_CASE_HANDLE(49); + TERM_CASE_HANDLE(50); + TERM_CASE_HANDLE(51); + TERM_CASE_HANDLE(52); + TERM_CASE_HANDLE(53); + TERM_CASE_HANDLE(54); + TERM_CASE_HANDLE(55); + TERM_CASE_HANDLE(56); + TERM_CASE_HANDLE(57); + TERM_CASE_HANDLE(58); + TERM_CASE_HANDLE(59); + TERM_CASE_HANDLE(60); + TERM_CASE_HANDLE(61); + TERM_CASE_HANDLE(62); + TERM_CASE_HANDLE(63); + TERM_CASE_HANDLE(64); + TERM_CASE_HANDLE(65); + TERM_CASE_HANDLE(66); + TERM_CASE_HANDLE(67); + TERM_CASE_HANDLE(68); + TERM_CASE_HANDLE(69); + TERM_CASE_HANDLE(70); + TERM_CASE_HANDLE(71); + TERM_CASE_HANDLE(72); + TERM_CASE_HANDLE(73); + TERM_CASE_HANDLE(74); + TERM_CASE_HANDLE(75); + TERM_CASE_HANDLE(76); + TERM_CASE_HANDLE(77); + TERM_CASE_HANDLE(78); + TERM_CASE_HANDLE(79); + TERM_CASE_HANDLE(80); + TERM_CASE_HANDLE(81); + TERM_CASE_HANDLE(82); + TERM_CASE_HANDLE(83); + TERM_CASE_HANDLE(84); + TERM_CASE_HANDLE(85); + TERM_CASE_HANDLE(86); + TERM_CASE_HANDLE(87); + TERM_CASE_HANDLE(88); + TERM_CASE_HANDLE(89); + TERM_CASE_HANDLE(90); + TERM_CASE_HANDLE(91); + TERM_CASE_HANDLE(92); + TERM_CASE_HANDLE(93); + TERM_CASE_HANDLE(94); + TERM_CASE_HANDLE(95); + TERM_CASE_HANDLE(96); + TERM_CASE_HANDLE(97); + TERM_CASE_HANDLE(98); + TERM_CASE_HANDLE(99); + TERM_CASE_HANDLE(100); + TERM_CASE_HANDLE(101); + TERM_CASE_HANDLE(102); + TERM_CASE_HANDLE(103); + TERM_CASE_HANDLE(104); + TERM_CASE_HANDLE(105); + TERM_CASE_HANDLE(106); + TERM_CASE_HANDLE(107); + TERM_CASE_HANDLE(108); + TERM_CASE_HANDLE(109); + TERM_CASE_HANDLE(110); + TERM_CASE_HANDLE(111); + TERM_CASE_HANDLE(112); + TERM_CASE_HANDLE(113); + TERM_CASE_HANDLE(114); + TERM_CASE_HANDLE(115); + TERM_CASE_HANDLE(116); + TERM_CASE_HANDLE(117); + TERM_CASE_HANDLE(118); + TERM_CASE_HANDLE(119); + TERM_CASE_HANDLE(120); + TERM_CASE_HANDLE(121); + TERM_CASE_HANDLE(122); + TERM_CASE_HANDLE(123); + TERM_CASE_HANDLE(124); + TERM_CASE_HANDLE(125); + TERM_CASE_HANDLE(126); + TERM_CASE_HANDLE(127); + TERM_CASE_HANDLE(128); + TERM_CASE_HANDLE(129); + TERM_CASE_HANDLE(130); + TERM_CASE_HANDLE(131); + TERM_CASE_HANDLE(132); + TERM_CASE_HANDLE(133); + TERM_CASE_HANDLE(134); + TERM_CASE_HANDLE(135); + TERM_CASE_HANDLE(136); + TERM_CASE_HANDLE(137); + TERM_CASE_HANDLE(138); + TERM_CASE_HANDLE(139); + TERM_CASE_HANDLE(140); + TERM_CASE_HANDLE(141); + TERM_CASE_HANDLE(142); + TERM_CASE_HANDLE(143); + TERM_CASE_HANDLE(144); + TERM_CASE_HANDLE(145); + TERM_CASE_HANDLE(146); + TERM_CASE_HANDLE(147); + TERM_CASE_HANDLE(148); + TERM_CASE_HANDLE(149); + TERM_CASE_HANDLE(150); + TERM_CASE_HANDLE(151); + TERM_CASE_HANDLE(152); + TERM_CASE_HANDLE(153); + TERM_CASE_HANDLE(154); + TERM_CASE_HANDLE(155); + TERM_CASE_HANDLE(156); + TERM_CASE_HANDLE(157); + TERM_CASE_HANDLE(158); + 
TERM_CASE_HANDLE(159); + TERM_CASE_HANDLE(160); + TERM_CASE_HANDLE(161); + TERM_CASE_HANDLE(162); + TERM_CASE_HANDLE(163); + TERM_CASE_HANDLE(164); + TERM_CASE_HANDLE(165); + TERM_CASE_HANDLE(166); + TERM_CASE_HANDLE(167); + TERM_CASE_HANDLE(168); + TERM_CASE_HANDLE(169); + TERM_CASE_HANDLE(170); + TERM_CASE_HANDLE(171); + TERM_CASE_HANDLE(172); + TERM_CASE_HANDLE(173); + TERM_CASE_HANDLE(174); + TERM_CASE_HANDLE(175); + TERM_CASE_HANDLE(176); + TERM_CASE_HANDLE(177); + TERM_CASE_HANDLE(178); + TERM_CASE_HANDLE(179); + TERM_CASE_HANDLE(180); + TERM_CASE_HANDLE(181); + TERM_CASE_HANDLE(182); + TERM_CASE_HANDLE(183); + TERM_CASE_HANDLE(184); + TERM_CASE_HANDLE(185); + TERM_CASE_HANDLE(186); + TERM_CASE_HANDLE(187); + TERM_CASE_HANDLE(188); + TERM_CASE_HANDLE(189); + TERM_CASE_HANDLE(190); + TERM_CASE_HANDLE(191); + TERM_CASE_HANDLE(192); + TERM_CASE_HANDLE(193); + TERM_CASE_HANDLE(194); + TERM_CASE_HANDLE(195); + TERM_CASE_HANDLE(196); + TERM_CASE_HANDLE(197); + TERM_CASE_HANDLE(198); + TERM_CASE_HANDLE(199); + TERM_CASE_HANDLE(200); + TERM_CASE_HANDLE(201); + TERM_CASE_HANDLE(202); + TERM_CASE_HANDLE(203); + TERM_CASE_HANDLE(204); + TERM_CASE_HANDLE(205); + TERM_CASE_HANDLE(206); + TERM_CASE_HANDLE(207); + TERM_CASE_HANDLE(208); + TERM_CASE_HANDLE(209); + TERM_CASE_HANDLE(210); + TERM_CASE_HANDLE(211); + TERM_CASE_HANDLE(212); + TERM_CASE_HANDLE(213); + TERM_CASE_HANDLE(214); + TERM_CASE_HANDLE(215); + TERM_CASE_HANDLE(216); + TERM_CASE_HANDLE(217); + TERM_CASE_HANDLE(218); + TERM_CASE_HANDLE(219); + TERM_CASE_HANDLE(220); + TERM_CASE_HANDLE(221); + TERM_CASE_HANDLE(222); + TERM_CASE_HANDLE(223); + TERM_CASE_HANDLE(224); + TERM_CASE_HANDLE(225); + TERM_CASE_HANDLE(226); + TERM_CASE_HANDLE(227); + TERM_CASE_HANDLE(228); + TERM_CASE_HANDLE(229); + TERM_CASE_HANDLE(230); + TERM_CASE_HANDLE(231); + TERM_CASE_HANDLE(232); + TERM_CASE_HANDLE(233); + TERM_CASE_HANDLE(234); + TERM_CASE_HANDLE(235); + TERM_CASE_HANDLE(236); + TERM_CASE_HANDLE(237); + TERM_CASE_HANDLE(238); + TERM_CASE_HANDLE(239); + TERM_CASE_HANDLE(240); + TERM_CASE_HANDLE(241); + TERM_CASE_HANDLE(242); + TERM_CASE_HANDLE(243); + TERM_CASE_HANDLE(244); + TERM_CASE_HANDLE(245); + TERM_CASE_HANDLE(246); + TERM_CASE_HANDLE(247); + TERM_CASE_HANDLE(248); + TERM_CASE_HANDLE(249); + } + return NULL; +} diff --git a/src/third_party/vulkan/loader/trampoline.c b/src/third_party/vulkan/loader/trampoline.c new file mode 100644 index 0000000..e053080 --- /dev/null +++ b/src/third_party/vulkan/loader/trampoline.c @@ -0,0 +1,2599 @@ +/* + * + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * Copyright (C) 2015 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * Author: Courtney Goeltzenleuchter
+ * Author: Jon Ashburn
+ * Author: Tony Barbour
+ * Author: Chia-I Wu
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdlib.h>
+#include <string.h>
+
+#include "vk_loader_platform.h"
+#include "loader.h"
+#include "debug_utils.h"
+#include "wsi.h"
+#include "vk_loader_extensions.h"
+#include "gpa_helper.h"
+
+
+// Trampoline entrypoints are in this file for core Vulkan commands
+
+// Get an instance level or global level entry point address.
+// @param instance
+// @param pName
+// @return
+//    If instance == NULL, returns global level functions only.
+//    If instance is valid, returns a trampoline entry point for all dispatchable Vulkan
+//    functions, both core and extensions.
+LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *pName) {
+    void *addr;
+
+    addr = globalGetProcAddr(pName);
+    if (instance == VK_NULL_HANDLE || addr != NULL) {
+        return addr;
+    }
+
+    struct loader_instance *ptr_instance = loader_get_instance(instance);
+    if (ptr_instance == NULL) return NULL;
+    // Return trampoline code for non-global entrypoints including any extensions.
+    // Device extensions are returned if a layer or ICD supports the extension.
+    // Instance extensions are returned if the extension is enabled and the
+    // loader or someone else supports the extension
+    return trampolineGetProcAddr(ptr_instance, pName);
+}
+
+// Get a device level or global level entry point address.
+// @param device
+// @param pName
+// @return
+//    If device is valid, returns a device relative entry point for device level
+//    entry points, both core and extensions.
+//    Device relative means call down the device chain.
+LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char *pName) {
+    void *addr;
+
+    // For entrypoints that the loader must handle (i.e. non-dispatchable or create object)
+    // make sure the loader entrypoint is returned
+    addr = loader_non_passthrough_gdpa(pName);
+    if (addr) {
+        return addr;
+    }
+
+    // Although CreateDevice is on the device chain, its dispatchable object isn't
+    // a VkDevice or child of VkDevice, so return NULL.
+ if (!strcmp(pName, "CreateDevice")) return NULL; + + // Return the dispatch table entrypoint for the fastest case + const VkLayerDispatchTable *disp_table = *(VkLayerDispatchTable **)device; + if (disp_table == NULL) return NULL; + + addr = loader_lookup_device_dispatch_table(disp_table, pName); + if (addr) return addr; + + if (disp_table->GetDeviceProcAddr == NULL) return NULL; + return disp_table->GetDeviceProcAddr(device, pName); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, + uint32_t *pPropertyCount, + VkExtensionProperties *pProperties) { + tls_instance = NULL; + LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); + + // We know we need to call at least the terminator + VkResult res = VK_SUCCESS; + VkEnumerateInstanceExtensionPropertiesChain chain_tail = { + .header = + { + .type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES, + .version = VK_CURRENT_CHAIN_VERSION, + .size = sizeof(chain_tail), + }, + .pfnNextLayer = &terminator_EnumerateInstanceExtensionProperties, + .pNextLink = NULL, + }; + VkEnumerateInstanceExtensionPropertiesChain *chain_head = &chain_tail; + + // Get the implicit layers + struct loader_layer_list layers; + memset(&layers, 0, sizeof(layers)); + loaderScanForImplicitLayers(NULL, &layers); + + // We'll need to save the dl handles so we can close them later + loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count); + if (libs == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + size_t lib_count = 0; + + // Prepend layers onto the chain if they implement this entry point + for (uint32_t i = 0; i < layers.count; ++i) { + if (!loaderImplicitLayerIsEnabled(NULL, layers.list + i) || + layers.list[i].pre_instance_functions.enumerate_instance_extension_properties[0] == '\0') { + continue; + } + + loader_platform_dl_handle layer_lib = loader_platform_open_library(layers.list[i].lib_name); + if (layer_lib == NULL) { + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "%s: Unable to load implicit layer library \"%s\"", __FUNCTION__, + layers.list[i].lib_name); + continue; + } + + libs[lib_count++] = layer_lib; + void *pfn = loader_platform_get_proc_address(layer_lib, + layers.list[i].pre_instance_functions.enumerate_instance_extension_properties); + if (pfn == NULL) { + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "%s: Unable to resolve symbol \"%s\" in implicit layer library \"%s\"", __FUNCTION__, + layers.list[i].pre_instance_functions.enumerate_instance_extension_properties, layers.list[i].lib_name); + continue; + } + + VkEnumerateInstanceExtensionPropertiesChain *chain_link = malloc(sizeof(VkEnumerateInstanceExtensionPropertiesChain)); + if (chain_link == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + break; + } + + chain_link->header.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES; + chain_link->header.version = VK_CURRENT_CHAIN_VERSION; + chain_link->header.size = sizeof(*chain_link); + chain_link->pfnNextLayer = pfn; + chain_link->pNextLink = chain_head; + + chain_head = chain_link; + } + + // Call down the chain + if (res == VK_SUCCESS) { + res = chain_head->pfnNextLayer(chain_head->pNextLink, pLayerName, pPropertyCount, pProperties); + } + + // Free up the layers + loaderDeleteLayerListAndProperties(NULL, &layers); + + // Tear down the chain + while (chain_head != &chain_tail) { + VkEnumerateInstanceExtensionPropertiesChain *holder = chain_head; + chain_head = (VkEnumerateInstanceExtensionPropertiesChain 
*)chain_head->pNextLink; + free(holder); + } + + // Close the dl handles + for (size_t i = 0; i < lib_count; ++i) { + loader_platform_close_library(libs[i]); + } + free(libs); + + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pPropertyCount, + VkLayerProperties *pProperties) { + tls_instance = NULL; + LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); + + // We know we need to call at least the terminator + VkResult res = VK_SUCCESS; + VkEnumerateInstanceLayerPropertiesChain chain_tail = { + .header = + { + .type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES, + .version = VK_CURRENT_CHAIN_VERSION, + .size = sizeof(chain_tail), + }, + .pfnNextLayer = &terminator_EnumerateInstanceLayerProperties, + .pNextLink = NULL, + }; + VkEnumerateInstanceLayerPropertiesChain *chain_head = &chain_tail; + + // Get the implicit layers + struct loader_layer_list layers; + memset(&layers, 0, sizeof(layers)); + loaderScanForImplicitLayers(NULL, &layers); + + // We'll need to save the dl handles so we can close them later + loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count); + if (libs == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + size_t lib_count = 0; + + // Prepend layers onto the chain if they implement this entry point + for (uint32_t i = 0; i < layers.count; ++i) { + if (!loaderImplicitLayerIsEnabled(NULL, layers.list + i) || + layers.list[i].pre_instance_functions.enumerate_instance_layer_properties[0] == '\0') { + continue; + } + + loader_platform_dl_handle layer_lib = loader_platform_open_library(layers.list[i].lib_name); + if (layer_lib == NULL) { + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "%s: Unable to load implicit layer library \"%s\"", __FUNCTION__, + layers.list[i].lib_name); + continue; + } + + libs[lib_count++] = layer_lib; + void *pfn = + loader_platform_get_proc_address(layer_lib, layers.list[i].pre_instance_functions.enumerate_instance_layer_properties); + if (pfn == NULL) { + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "%s: Unable to resolve symbol \"%s\" in implicit layer library \"%s\"", __FUNCTION__, + layers.list[i].pre_instance_functions.enumerate_instance_layer_properties, layers.list[i].lib_name); + continue; + } + + VkEnumerateInstanceLayerPropertiesChain *chain_link = malloc(sizeof(VkEnumerateInstanceLayerPropertiesChain)); + if (chain_link == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + break; + } + + chain_link->header.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES; + chain_link->header.version = VK_CURRENT_CHAIN_VERSION; + chain_link->header.size = sizeof(*chain_link); + chain_link->pfnNextLayer = pfn; + chain_link->pNextLink = chain_head; + + chain_head = chain_link; + } + + // Call down the chain + if (res == VK_SUCCESS) { + res = chain_head->pfnNextLayer(chain_head->pNextLink, pPropertyCount, pProperties); + } + + // Free up the layers + loaderDeleteLayerListAndProperties(NULL, &layers); + + // Tear down the chain + while (chain_head != &chain_tail) { + VkEnumerateInstanceLayerPropertiesChain *holder = chain_head; + chain_head = (VkEnumerateInstanceLayerPropertiesChain *)chain_head->pNextLink; + free(holder); + } + + // Close the dl handles + for (size_t i = 0; i < lib_count; ++i) { + loader_platform_close_library(libs[i]); + } + free(libs); + + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(uint32_t* pApiVersion) { + + tls_instance = NULL; + 
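+ // Perform one-time global loader initialization before building the pre-instance call chain below.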
LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); + + // We know we need to call at least the terminator + VkResult res = VK_SUCCESS; + VkEnumerateInstanceVersionChain chain_tail = { + .header = + { + .type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION, + .version = VK_CURRENT_CHAIN_VERSION, + .size = sizeof(chain_tail), + }, + .pfnNextLayer = &terminator_EnumerateInstanceVersion, + .pNextLink = NULL, + }; + VkEnumerateInstanceVersionChain *chain_head = &chain_tail; + + // Get the implicit layers + struct loader_layer_list layers; + memset(&layers, 0, sizeof(layers)); + loaderScanForImplicitLayers(NULL, &layers); + + // We'll need to save the dl handles so we can close them later + loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count); + if (libs == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + size_t lib_count = 0; + + // Prepend layers onto the chain if they implement this entry point + for (uint32_t i = 0; i < layers.count; ++i) { + if (!loaderImplicitLayerIsEnabled(NULL, layers.list + i) || + layers.list[i].pre_instance_functions.enumerate_instance_version[0] == '\0') { + continue; + } + + loader_platform_dl_handle layer_lib = loader_platform_open_library(layers.list[i].lib_name); + if (layer_lib == NULL) { + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, "%s: Unable to load implicit layer library \"%s\"", __FUNCTION__, + layers.list[i].lib_name); + continue; + } + + libs[lib_count++] = layer_lib; + void *pfn = loader_platform_get_proc_address(layer_lib, + layers.list[i].pre_instance_functions.enumerate_instance_version); + if (pfn == NULL) { + loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "%s: Unable to resolve symbol \"%s\" in implicit layer library \"%s\"", __FUNCTION__, + layers.list[i].pre_instance_functions.enumerate_instance_version, layers.list[i].lib_name); + continue; + } + + VkEnumerateInstanceVersionChain *chain_link = malloc(sizeof(VkEnumerateInstanceVersionChain)); + if (chain_link == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + break; + } + + chain_link->header.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION; + chain_link->header.version = VK_CURRENT_CHAIN_VERSION; + chain_link->header.size = sizeof(*chain_link); + chain_link->pfnNextLayer = pfn; + chain_link->pNextLink = chain_head; + + chain_head = chain_link; + } + + // Call down the chain + if (res == VK_SUCCESS) { + res = chain_head->pfnNextLayer(chain_head->pNextLink, pApiVersion); + } + + // Free up the layers + loaderDeleteLayerListAndProperties(NULL, &layers); + + // Tear down the chain + while (chain_head != &chain_tail) { + VkEnumerateInstanceVersionChain *holder = chain_head; + chain_head = (VkEnumerateInstanceVersionChain *)chain_head->pNextLink; + free(holder); + } + + // Close the dl handles + for (size_t i = 0; i < lib_count; ++i) { + loader_platform_close_library(libs[i]); + } + free(libs); + + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) { + struct loader_instance *ptr_instance = NULL; + VkInstance created_instance = VK_NULL_HANDLE; + bool loaderLocked = false; + VkResult res = VK_ERROR_INITIALIZATION_FAILED; + + LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize); + +#if (DEBUG_DISABLE_APP_ALLOCATORS == 1) + { +#else + if (pAllocator) { + ptr_instance = (struct loader_instance *)pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(struct loader_instance), + sizeof(int *), 
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + } else { +#endif + ptr_instance = (struct loader_instance *)malloc(sizeof(struct loader_instance)); + } + + VkInstanceCreateInfo ici = *pCreateInfo; + + if (ptr_instance == NULL) { + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + tls_instance = ptr_instance; + loader_platform_thread_lock_mutex(&loader_lock); + loaderLocked = true; + memset(ptr_instance, 0, sizeof(struct loader_instance)); + if (pAllocator) { + ptr_instance->alloc_callbacks = *pAllocator; + } + + // Save the application version + if (NULL == pCreateInfo || NULL == pCreateInfo->pApplicationInfo || 0 == pCreateInfo->pApplicationInfo->apiVersion) +{ + ptr_instance->app_api_major_version = 1; + ptr_instance->app_api_minor_version = 0; + } else { + ptr_instance->app_api_major_version = VK_VERSION_MAJOR(pCreateInfo->pApplicationInfo->apiVersion); + ptr_instance->app_api_minor_version = VK_VERSION_MINOR(pCreateInfo->pApplicationInfo->apiVersion); + } + + // Look for one or more VK_EXT_debug_report or VK_EXT_debug_utils create info structures + // and setup a callback(s) for each one found. + ptr_instance->num_tmp_report_callbacks = 0; + ptr_instance->tmp_report_create_infos = NULL; + ptr_instance->tmp_report_callbacks = NULL; + ptr_instance->num_tmp_messengers = 0; + ptr_instance->tmp_messenger_create_infos = NULL; + ptr_instance->tmp_messengers = NULL; + + // Handle cases of VK_EXT_debug_utils + if (util_CopyDebugUtilsMessengerCreateInfos(pCreateInfo->pNext, pAllocator, &ptr_instance->num_tmp_messengers, + &ptr_instance->tmp_messenger_create_infos, &ptr_instance->tmp_messengers)) { + // One or more were found, but allocation failed. Therefore, clean up and fail this function: + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } else if (ptr_instance->num_tmp_messengers > 0) { + // Setup the temporary messenger(s) here to catch early issues: + if (util_CreateDebugUtilsMessengers(ptr_instance, pAllocator, ptr_instance->num_tmp_messengers, + ptr_instance->tmp_messenger_create_infos, ptr_instance->tmp_messengers)) { + // Failure of setting up one or more of the messenger. Therefore, clean up and fail this function: + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + } + + // Handle cases of VK_EXT_debug_report + if (util_CopyDebugReportCreateInfos(pCreateInfo->pNext, pAllocator, &ptr_instance->num_tmp_report_callbacks, + &ptr_instance->tmp_report_create_infos, &ptr_instance->tmp_report_callbacks)) { + // One or more were found, but allocation failed. Therefore, clean up and fail this function: + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } else if (ptr_instance->num_tmp_report_callbacks > 0) { + // Setup the temporary callback(s) here to catch early issues: + if (util_CreateDebugReportCallbacks(ptr_instance, pAllocator, ptr_instance->num_tmp_report_callbacks, + ptr_instance->tmp_report_create_infos, ptr_instance->tmp_report_callbacks)) { + // Failure of setting up one or more of the callback. Therefore, clean up and fail this function: + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + } + + // Due to implicit layers need to get layer list even if + // enabledLayerCount == 0 and VK_INSTANCE_LAYERS is unset. For now always + // get layer list via loaderScanForLayers(). 
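+ // (Implicit layers are enabled by their manifests or environment variables rather than by the application, + // so they must be discovered even when no layers were explicitly requested.)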
+ memset(&ptr_instance->instance_layer_list, 0, sizeof(ptr_instance->instance_layer_list)); + loaderScanForLayers(ptr_instance, &ptr_instance->instance_layer_list); + + // Validate the app requested layers to be enabled + if (pCreateInfo->enabledLayerCount > 0) { + res = loaderValidateLayers(ptr_instance, pCreateInfo->enabledLayerCount, pCreateInfo->ppEnabledLayerNames, + &ptr_instance->instance_layer_list); + if (res != VK_SUCCESS) { + goto out; + } + } + + // Scan/discover all ICD libraries + memset(&ptr_instance->icd_tramp_list, 0, sizeof(ptr_instance->icd_tramp_list)); + res = loader_icd_scan(ptr_instance, &ptr_instance->icd_tramp_list); + if (res != VK_SUCCESS) { + goto out; + } + + // Get extensions from all ICD's, merge so no duplicates, then validate + res = loader_get_icd_loader_instance_extensions(ptr_instance, &ptr_instance->icd_tramp_list, &ptr_instance->ext_list); + if (res != VK_SUCCESS) { + goto out; + } + res = loader_validate_instance_extensions(ptr_instance, &ptr_instance->ext_list, &ptr_instance->instance_layer_list, &ici); + if (res != VK_SUCCESS) { + goto out; + } + + ptr_instance->disp = loader_instance_heap_alloc(ptr_instance, sizeof(struct loader_instance_dispatch_table), + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (ptr_instance->disp == NULL) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkCreateInstance: Failed to allocate Loader's full Instance dispatch table."); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memcpy(&ptr_instance->disp->layer_inst_disp, &instance_disp, sizeof(instance_disp)); + + ptr_instance->next = loader.instances; + loader.instances = ptr_instance; + + // Activate any layers on instance chain + res = loaderEnableInstanceLayers(ptr_instance, &ici, &ptr_instance->instance_layer_list); + if (res != VK_SUCCESS) { + goto out; + } + + created_instance = (VkInstance)ptr_instance; + res = loader_create_instance_chain(&ici, pAllocator, ptr_instance, &created_instance); + + if (res == VK_SUCCESS) { + memset(ptr_instance->enabled_known_extensions.padding, 0, sizeof(uint64_t) * 4); + + wsi_create_instance(ptr_instance, &ici); + debug_utils_CreateInstance(ptr_instance, &ici); + extensions_create_instance(ptr_instance, &ici); + + *pInstance = created_instance; + + // Finally have the layers in place and everyone has seen + // the CreateInstance command go by. This allows the layer's + // GetInstanceProcAddr functions to return valid extension functions + // if enabled. 
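+ // Hand the created instance handle to the activated layers so their extension entrypoints can be resolved.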
+ loaderActivateInstanceLayerExtensions(ptr_instance, *pInstance); + } + +out: + + if (NULL != ptr_instance) { + if (res != VK_SUCCESS) { + if (loader.instances == ptr_instance) { + loader.instances = ptr_instance->next; + } + if (NULL != ptr_instance->disp) { + loader_instance_heap_free(ptr_instance, ptr_instance->disp); + } + if (ptr_instance->num_tmp_report_callbacks > 0) { + // Remove temporary VK_EXT_debug_report items + util_DestroyDebugReportCallbacks(ptr_instance, pAllocator, ptr_instance->num_tmp_report_callbacks, + ptr_instance->tmp_report_callbacks); + util_FreeDebugReportCreateInfos(pAllocator, ptr_instance->tmp_report_create_infos, + ptr_instance->tmp_report_callbacks); + } + if (ptr_instance->num_tmp_messengers > 0) { + // Remove temporary VK_EXT_debug_utils items + util_DestroyDebugUtilsMessengers(ptr_instance, pAllocator, ptr_instance->num_tmp_messengers, + ptr_instance->tmp_messengers); + util_FreeDebugUtilsMessengerCreateInfos(pAllocator, ptr_instance->tmp_messenger_create_infos, + ptr_instance->tmp_messengers); + } + + if (NULL != ptr_instance->expanded_activated_layer_list.list) { + loaderDeactivateLayers(ptr_instance, NULL, &ptr_instance->expanded_activated_layer_list); + } + if (NULL != ptr_instance->app_activated_layer_list.list) { + loaderDestroyLayerList(ptr_instance, NULL, &ptr_instance->app_activated_layer_list); + } + + loaderDeleteLayerListAndProperties(ptr_instance, &ptr_instance->instance_layer_list); + loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_tramp_list); + loader_destroy_generic_list(ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list); + + loader_instance_heap_free(ptr_instance, ptr_instance); + } else { + // Remove temporary VK_EXT_debug_report or VK_EXT_debug_utils items + util_DestroyDebugUtilsMessengers(ptr_instance, pAllocator, ptr_instance->num_tmp_messengers, + ptr_instance->tmp_messengers); + util_DestroyDebugReportCallbacks(ptr_instance, pAllocator, ptr_instance->num_tmp_report_callbacks, + ptr_instance->tmp_report_callbacks); + } + + if (loaderLocked) { + loader_platform_thread_unlock_mutex(&loader_lock); + } + } + + return res; +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { + const VkLayerInstanceDispatchTable *disp; + struct loader_instance *ptr_instance = NULL; + bool callback_setup = false; + bool messenger_setup = false; + + if (instance == VK_NULL_HANDLE) { + return; + } + + disp = loader_get_instance_layer_dispatch(instance); + + loader_platform_thread_lock_mutex(&loader_lock); + + ptr_instance = loader_get_instance(instance); + + if (pAllocator) { + ptr_instance->alloc_callbacks = *pAllocator; + } + + if (ptr_instance->num_tmp_messengers > 0) { + // Setup the temporary VK_EXT_debug_utils messenger(s) here to catch cleanup issues: + if (!util_CreateDebugUtilsMessengers(ptr_instance, pAllocator, ptr_instance->num_tmp_messengers, + ptr_instance->tmp_messenger_create_infos, ptr_instance->tmp_messengers)) { + messenger_setup = true; + } + } + + if (ptr_instance->num_tmp_report_callbacks > 0) { + // Setup the temporary VK_EXT_debug_report callback(s) here to catch cleanup issues: + if (!util_CreateDebugReportCallbacks(ptr_instance, pAllocator, ptr_instance->num_tmp_report_callbacks, + ptr_instance->tmp_report_create_infos, ptr_instance->tmp_report_callbacks)) { + callback_setup = true; + } + } + + disp->DestroyInstance(instance, pAllocator); + + if (NULL != ptr_instance->expanded_activated_layer_list.list) { + 
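+ // Deactivate the layers that were activated for this instance now that the call chain has been torn down.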
loaderDeactivateLayers(ptr_instance, NULL, &ptr_instance->expanded_activated_layer_list); + } + if (NULL != ptr_instance->app_activated_layer_list.list) { + loaderDestroyLayerList(ptr_instance, NULL, &ptr_instance->app_activated_layer_list); + } + + if (ptr_instance->phys_devs_tramp) { + for (uint32_t i = 0; i < ptr_instance->phys_dev_count_tramp; i++) { + loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_tramp[i]); + } + loader_instance_heap_free(ptr_instance, ptr_instance->phys_devs_tramp); + } + + if (ptr_instance->phys_dev_groups_tramp) { + for (uint32_t i = 0; i < ptr_instance->phys_dev_group_count_tramp; i++) { + loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_tramp[i]); + } + loader_instance_heap_free(ptr_instance, ptr_instance->phys_dev_groups_tramp); + } + + if (messenger_setup) { + util_DestroyDebugUtilsMessengers(ptr_instance, pAllocator, ptr_instance->num_tmp_messengers, ptr_instance->tmp_messengers); + util_FreeDebugUtilsMessengerCreateInfos(pAllocator, ptr_instance->tmp_messenger_create_infos, ptr_instance->tmp_messengers); + } + + if (callback_setup) { + util_DestroyDebugReportCallbacks(ptr_instance, pAllocator, ptr_instance->num_tmp_report_callbacks, + ptr_instance->tmp_report_callbacks); + util_FreeDebugReportCreateInfos(pAllocator, ptr_instance->tmp_report_create_infos, ptr_instance->tmp_report_callbacks); + } + + loader_instance_heap_free(ptr_instance, ptr_instance->disp); + loader_instance_heap_free(ptr_instance, ptr_instance); + loader_platform_thread_unlock_mutex(&loader_lock); + + // Unload preloaded layers, so if vkEnumerateInstanceExtensionProperties or vkCreateInstance is called again, the ICD's are up + // to date + loader_unload_preloaded_icds(); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, + VkPhysicalDevice *pPhysicalDevices) { + VkResult res = VK_SUCCESS; + uint32_t count; + uint32_t i; + struct loader_instance *inst; + + loader_platform_thread_lock_mutex(&loader_lock); + + inst = loader_get_instance(instance); + if (NULL == inst) { + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + if (NULL == pPhysicalDeviceCount) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkEnumeratePhysicalDevices: Received NULL pointer for physical device count return value."); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + // Setup the trampoline loader physical devices. This will actually + // call down and setup the terminator loader physical devices during the + // process. 
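+ // VK_INCOMPLETE from the setup call below is tolerated; only other failures abort the enumeration.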
+ VkResult setup_res = setupLoaderTrampPhysDevs(instance); + if (setup_res != VK_SUCCESS && setup_res != VK_INCOMPLETE) { + res = setup_res; + goto out; + } + + count = inst->phys_dev_count_tramp; + + // Wrap the PhysDev object for loader usage, return wrapped objects + if (NULL != pPhysicalDevices) { + if (inst->phys_dev_count_tramp > *pPhysicalDeviceCount) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkEnumeratePhysicalDevices: Trimming device count down" + " by application request from %d to %d physical devices", + inst->phys_dev_count_tramp, *pPhysicalDeviceCount); + count = *pPhysicalDeviceCount; + res = VK_INCOMPLETE; + } + for (i = 0; i < count; i++) { + pPhysicalDevices[i] = (VkPhysicalDevice)inst->phys_devs_tramp[i]; + } + } + + *pPhysicalDeviceCount = count; + +out: + + loader_platform_thread_unlock_mutex(&loader_lock); + return res; +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures *pFeatures) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceFeatures(unwrapped_phys_dev, pFeatures); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, + VkFormatProperties *pFormatInfo) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_pd = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceFormatProperties(unwrapped_pd, format, pFormatInfo); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, + VkImageCreateFlags flags, VkImageFormatProperties *pImageFormatProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceImageFormatProperties(unwrapped_phys_dev, format, type, tiling, usage, flags, + pImageFormatProperties); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties *pProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceProperties(unwrapped_phys_dev, pProperties); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, + uint32_t *pQueueFamilyPropertyCount, + VkQueueFamilyProperties *pQueueProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceQueueFamilyProperties(unwrapped_phys_dev, pQueueFamilyPropertyCount, pQueueProperties); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties *pMemoryProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = 
loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceMemoryProperties(unwrapped_phys_dev, pMemoryProperties); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { + loader_platform_thread_lock_mutex(&loader_lock); + VkResult res = loader_layer_create_device(NULL, physicalDevice, pCreateInfo, pAllocator, pDevice, NULL, NULL); + loader_platform_thread_unlock_mutex(&loader_lock); + return res; +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + if (device == VK_NULL_HANDLE) { + return; + } + disp = loader_get_dispatch(device); + + loader_platform_thread_lock_mutex(&loader_lock); + + loader_layer_destroy_device(device, pAllocator, disp->DestroyDevice); + + loader_platform_thread_unlock_mutex(&loader_lock); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, + const char *pLayerName, uint32_t *pPropertyCount, + VkExtensionProperties *pProperties) { + VkResult res = VK_SUCCESS; + struct loader_physical_device_tramp *phys_dev; + const VkLayerInstanceDispatchTable *disp; + phys_dev = (struct loader_physical_device_tramp *)physicalDevice; + + loader_platform_thread_lock_mutex(&loader_lock); + + // always pass this call down the instance chain which will terminate + // in the ICD. This allows layers to filter the extensions coming back + // up the chain. In the terminator we look up layer extensions from the + // manifest file if it wasn't provided by the layer itself. + disp = loader_get_instance_layer_dispatch(physicalDevice); + res = disp->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, pLayerName, pPropertyCount, pProperties); + + loader_platform_thread_unlock_mutex(&loader_lock); + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkLayerProperties *pProperties) { + uint32_t copy_size; + struct loader_physical_device_tramp *phys_dev; + struct loader_layer_list *enabled_layers, layers_list; + memset(&layers_list, 0, sizeof(layers_list)); + loader_platform_thread_lock_mutex(&loader_lock); + + // Don't dispatch this call down the instance chain, want all device layers + // enumerated and instance chain may not contain all device layers + // TODO re-evaluate the above statement we maybe able to start calling + // down the chain + + phys_dev = (struct loader_physical_device_tramp *)physicalDevice; + const struct loader_instance *inst = phys_dev->this_instance; + + uint32_t count = inst->app_activated_layer_list.count; + if (count == 0 || pProperties == NULL) { + *pPropertyCount = count; + loader_platform_thread_unlock_mutex(&loader_lock); + return VK_SUCCESS; + } + enabled_layers = (struct loader_layer_list *)&inst->app_activated_layer_list; + + copy_size = (*pPropertyCount < count) ? 
*pPropertyCount : count; + for (uint32_t i = 0; i < copy_size; i++) { + memcpy(&pProperties[i], &(enabled_layers->list[i].info), sizeof(VkLayerProperties)); + } + *pPropertyCount = copy_size; + + if (copy_size < count) { + loader_platform_thread_unlock_mutex(&loader_lock); + return VK_INCOMPLETE; + } + + loader_platform_thread_unlock_mutex(&loader_lock); + return VK_SUCCESS; +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, + VkQueue *pQueue) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue); + if (pQueue != NULL) { + loader_set_dispatch(*pQueue, disp); + } +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, + VkFence fence) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(queue); + + return disp->QueueSubmit(queue, submitCount, pSubmits, fence); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(queue); + + return disp->QueueWaitIdle(queue); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->DeviceWaitIdle(device); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, + const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeMemory(VkDevice device, VkDeviceMemory mem, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->FreeMemory(device, mem, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, + VkDeviceSize size, VkFlags flags, void **ppData) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->MapMemory(device, mem, offset, size, flags, ppData); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->UnmapMemory(device, mem); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, + const VkMappedMemoryRange *pMemoryRanges) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->FlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, + const VkMappedMemoryRange *pMemoryRanges) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->InvalidateMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory memory, + VkDeviceSize *pCommittedMemoryInBytes) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetDeviceMemoryCommitment(device, memory, pCommittedMemoryInBytes); 
+} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, + VkDeviceSize offset) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->BindBufferMemory(device, buffer, mem, offset); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, + VkDeviceSize offset) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->BindImageMemory(device, image, mem, offset); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, + VkMemoryRequirements *pMemoryRequirements) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(VkDevice device, VkImage image, + VkMemoryRequirements *pMemoryRequirements) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetImageMemoryRequirements(device, image, pMemoryRequirements); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL +vkGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements *pSparseMemoryRequirements) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, + VkImageTiling tiling, uint32_t *pPropertyCount, VkSparseImageFormatProperties *pProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + + disp->GetPhysicalDeviceSparseImageFormatProperties(unwrapped_phys_dev, format, type, samples, usage, tiling, pPropertyCount, + pProperties); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, + const VkBindSparseInfo *pBindInfo, VkFence fence) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(queue); + + return disp->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkFence *pFence) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateFence(device, pCreateInfo, pAllocator, pFence); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyFence(device, fence, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->ResetFences(device, fenceCount, pFences); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) { + const VkLayerDispatchTable 
*disp; + + disp = loader_get_dispatch(device); + + return disp->GetFenceStatus(device, fence); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, + VkBool32 waitAll, uint64_t timeout) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->WaitForFences(device, fenceCount, pFences, waitAll, timeout); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroySemaphore(device, semaphore, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateEvent(device, pCreateInfo, pAllocator, pEvent); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyEvent(device, event, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(VkDevice device, VkEvent event) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->GetEventStatus(device, event); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->SetEvent(device, event); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(VkDevice device, VkEvent event) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->ResetEvent(device, event); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyQueryPool(device, queryPool, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, + uint32_t queryCount, size_t dataSize, void *pData, + VkDeviceSize stride, VkQueryResultFlags flags) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkBuffer 
*pBuffer) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyBuffer(device, buffer, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkBufferView *pView) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateBufferView(device, pCreateInfo, pAllocator, pView); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(VkDevice device, VkBufferView bufferView, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyBufferView(device, bufferView, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkImage *pImage) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateImage(device, pCreateInfo, pAllocator, pImage); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyImage(device, image, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(VkDevice device, VkImage image, + const VkImageSubresource *pSubresource, + VkSubresourceLayout *pLayout) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetImageSubresourceLayout(device, image, pSubresource, pLayout); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkImageView *pView) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateImageView(device, pCreateInfo, pAllocator, pView); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(VkDevice device, VkImageView imageView, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyImageView(device, imageView, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkShaderModule *pShader) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateShaderModule(device, pCreateInfo, pAllocator, pShader); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyShaderModule(device, shaderModule, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkPipelineCache *pPipelineCache) { + const VkLayerDispatchTable *disp; + + disp = 
loader_get_dispatch(device); + + return disp->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyPipelineCache(device, pipelineCache, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, + size_t *pDataSize, void *pData) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->GetPipelineCacheData(device, pipelineCache, pDataSize, pData); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, + uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo *pCreateInfos, + const VkAllocationCallbacks *pAllocator, + VkPipeline *pPipelines) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo *pCreateInfos, + const VkAllocationCallbacks *pAllocator, + VkPipeline *pPipelines) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(VkDevice device, VkPipeline pipeline, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyPipeline(device, pipeline, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkPipelineLayout *pPipelineLayout) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyPipelineLayout(device, pipelineLayout, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateSampler(device, pCreateInfo, pAllocator, pSampler); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySampler(VkDevice device, VkSampler sampler, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); 
+ + disp->DestroySampler(device, sampler, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDevice device, + const VkDescriptorSetLayoutCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDescriptorSetLayout *pSetLayout) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDescriptorPool *pDescriptorPool) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyDescriptorPool(device, descriptorPool, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->ResetDescriptorPool(device, descriptorPool, flags); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice device, + const VkDescriptorSetAllocateInfo *pAllocateInfo, + VkDescriptorSet *pDescriptorSets) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet *pDescriptorSets) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, + const VkWriteDescriptorSet *pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet *pDescriptorCopies) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkFramebuffer *pFramebuffer) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + 
disp->DestroyFramebuffer(device, framebuffer, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkRenderPass *pRenderPass) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyRenderPass(device, renderPass, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(VkDevice device, VkRenderPass renderPass, + VkExtent2D *pGranularity) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->GetRenderAreaGranularity(device, renderPass, pGranularity); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkCommandPool *pCommandPool) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->DestroyCommandPool(device, commandPool, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(VkDevice device, VkCommandPool commandPool, + VkCommandPoolResetFlags flags) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + return disp->ResetCommandPool(device, commandPool, flags); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice device, + const VkCommandBufferAllocateInfo *pAllocateInfo, + VkCommandBuffer *pCommandBuffers) { + const VkLayerDispatchTable *disp; + VkResult res; + + disp = loader_get_dispatch(device); + + res = disp->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers); + if (res == VK_SUCCESS) { + for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) { + if (pCommandBuffers[i]) { + loader_init_dispatch(pCommandBuffers[i], disp); + } + } + } + + return res; +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, + uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(device); + + disp->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo *pBeginInfo) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + return disp->BeginCommandBuffer(commandBuffer, pBeginInfo); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + return disp->EndCommandBuffer(commandBuffer); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { + const 
VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + return disp->ResetCommandBuffer(commandBuffer, flags); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, + uint32_t viewportCount, const VkViewport *pViewports) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, + uint32_t scissorCount, const VkRect2D *pScissors) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetLineWidth(commandBuffer, lineWidth); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, + float depthBiasClamp, float depthBiasSlopeFactor) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetBlendConstants(commandBuffer, blendConstants); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, + float maxDepthBounds) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, + uint32_t compareMask) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, + uint32_t writeMask) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, + uint32_t reference) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetStencilReference(commandBuffer, faceMask, reference); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, + uint32_t firstSet, uint32_t descriptorSetCount, + const VkDescriptorSet *pDescriptorSets, + uint32_t dynamicOffsetCount, 
const uint32_t *pDynamicOffsets) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, + dynamicOffsetCount, pDynamicOffsets); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, + VkIndexType indexType) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, + uint32_t bindingCount, const VkBuffer *pBuffers, + const VkDeviceSize *pOffsets) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, + uint32_t firstVertex, uint32_t firstInstance) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, + uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, + uint32_t firstInstance) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, + uint32_t drawCount, uint32_t stride) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, + VkDeviceSize offset, uint32_t drawCount, uint32_t stride) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdDispatch(commandBuffer, x, y, z); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, + VkDeviceSize offset) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdDispatchIndirect(commandBuffer, buffer, offset); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, + uint32_t regionCount, const VkBufferCopy *pRegions) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, + VkImageLayout srcImageLayout, VkImage dstImage, + VkImageLayout dstImageLayout, uint32_t regionCount, + 
const VkImageCopy *pRegions) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, + VkImageLayout srcImageLayout, VkImage dstImage, + VkImageLayout dstImageLayout, uint32_t regionCount, + const VkImageBlit *pRegions, VkFilter filter) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, + VkImageLayout dstImageLayout, uint32_t regionCount, + const VkBufferImageCopy *pRegions) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, + VkImageLayout srcImageLayout, VkBuffer dstBuffer, + uint32_t regionCount, const VkBufferImageCopy *pRegions) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, + VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, + VkDeviceSize size, uint32_t data) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, + VkImageLayout imageLayout, const VkClearColorValue *pColor, + uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue *pDepthStencil, + uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, + const VkClearAttachment *pAttachments, uint32_t rectCount, + const VkClearRect *pRects) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL 
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, + VkImageLayout srcImageLayout, VkImage dstImage, + VkImageLayout dstImageLayout, uint32_t regionCount, + const VkImageResolve *pRegions) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, + VkPipelineStageFlags stageMask) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdSetEvent(commandBuffer, event, stageMask); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, + VkPipelineStageFlags stageMask) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdResetEvent(commandBuffer, event, stageMask); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, + VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier *pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier *pImageMemoryBarriers) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, + bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier *pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier *pImageMemoryBarriers) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, + bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, + VkFlags flags) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBeginQuery(commandBuffer, queryPool, slot, flags); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdEndQuery(commandBuffer, queryPool, slot); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, + uint32_t firstQuery, uint32_t queryCount) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, + 
VkQueryPool queryPool, uint32_t slot) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, + uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, + VkDeviceSize dstOffset, VkDeviceSize stride, VkFlags flags) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, + VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, + const void *pValues) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo *pRenderPassBegin, + VkSubpassContents contents) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdNextSubpass(commandBuffer, contents); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdEndRenderPass(commandBuffer); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, + const VkCommandBuffer *pCommandBuffers) { + const VkLayerDispatchTable *disp; + + disp = loader_get_dispatch(commandBuffer); + + disp->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers); +} + +// ---- Vulkan core 1.1 trampolines + +VkResult setupLoaderTrampPhysDevGroups(VkInstance instance) { + VkResult res = VK_SUCCESS; + struct loader_instance *inst; + uint32_t total_count = 0; + VkPhysicalDeviceGroupPropertiesKHR **new_phys_dev_groups = NULL; + VkPhysicalDeviceGroupPropertiesKHR *local_phys_dev_groups = NULL; + PFN_vkEnumeratePhysicalDeviceGroups fpEnumeratePhysicalDeviceGroups = NULL; + + inst = loader_get_instance(instance); + if (NULL == inst) { + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + // Get the function pointer to use to call into the ICD. This could be the core or KHR version + if (inst->enabled_known_extensions.khr_device_group_creation) { + fpEnumeratePhysicalDeviceGroups = inst->disp->layer_inst_disp.EnumeratePhysicalDeviceGroupsKHR; + } else { + fpEnumeratePhysicalDeviceGroups = inst->disp->layer_inst_disp.EnumeratePhysicalDeviceGroups; + } + + // Setup the trampoline loader physical devices. This will actually + // call down and setup the terminator loader physical devices during the + // process. 
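+    // Once the trampoline physical devices exist, the remainder of this
+    // function follows the usual Vulkan two-call idiom, which from the
+    // application's side looks roughly like (sType/pNext setup omitted):
+    //
+    //     uint32_t count = 0;
+    //     vkEnumeratePhysicalDeviceGroups(instance, &count, NULL);
+    //     VkPhysicalDeviceGroupProperties *groups = malloc(count * sizeof(*groups));
+    //     vkEnumeratePhysicalDeviceGroups(instance, &count, groups);
+    //
+    // Here the loader issues both calls itself, swaps every ICD physical
+    // device handle in the returned groups for the matching loader
+    // trampoline handle, and reuses group objects left over from a
+    // previous enumeration instead of reallocating them.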
+ VkResult setup_res = setupLoaderTrampPhysDevs(instance); + if (setup_res != VK_SUCCESS && setup_res != VK_INCOMPLETE) { + res = setup_res; + goto out; + } + + // Query how many physical device groups there + res = fpEnumeratePhysicalDeviceGroups(instance, &total_count, NULL); + if (res != VK_SUCCESS) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevGroups: Failed during dispatch call of " + "\'EnumeratePhysicalDeviceGroupsKHR\' to lower layers or " + "loader to get count."); + goto out; + } + + // Create an array for the new physical device groups, which will be stored + // in the instance for the trampoline code. + new_phys_dev_groups = (VkPhysicalDeviceGroupPropertiesKHR **)loader_instance_heap_alloc( + inst, total_count * sizeof(VkPhysicalDeviceGroupPropertiesKHR *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_dev_groups) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevGroups: Failed to allocate new physical device" + " group array of size %d", + total_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memset(new_phys_dev_groups, 0, total_count * sizeof(VkPhysicalDeviceGroupPropertiesKHR *)); + + // Create a temporary array (on the stack) to keep track of the + // returned VkPhysicalDevice values. + local_phys_dev_groups = loader_stack_alloc(sizeof(VkPhysicalDeviceGroupPropertiesKHR) * total_count); + if (NULL == local_phys_dev_groups) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevGroups: Failed to allocate local " + "physical device group array of size %d", + total_count); + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + // Initialize the memory to something valid + memset(local_phys_dev_groups, 0, sizeof(VkPhysicalDeviceGroupPropertiesKHR) * total_count); + for (uint32_t group = 0; group < total_count; group++) { + local_phys_dev_groups[group].sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR; + local_phys_dev_groups[group].pNext = NULL; + local_phys_dev_groups[group].subsetAllocation = false; + } + + // Call down and get the content + fpEnumeratePhysicalDeviceGroups(instance, &total_count, local_phys_dev_groups); + if (VK_SUCCESS != res) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevGroups: Failed during dispatch call of " + "\'EnumeratePhysicalDeviceGroupsKHR\' to lower layers or " + "loader to get content."); + goto out; + } + + // Replace all the physical device IDs with the proper loader values + for (uint32_t group = 0; group < total_count; group++) { + for (uint32_t group_gpu = 0; group_gpu < local_phys_dev_groups[group].physicalDeviceCount; group_gpu++) { + bool found = false; + for (uint32_t tramp_gpu = 0; tramp_gpu < inst->phys_dev_count_tramp; tramp_gpu++) { + if (local_phys_dev_groups[group].physicalDevices[group_gpu] == inst->phys_devs_tramp[tramp_gpu]->phys_dev) { + local_phys_dev_groups[group].physicalDevices[group_gpu] = (VkPhysicalDevice)inst->phys_devs_tramp[tramp_gpu]; + found = true; + break; + } + } + if (!found) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevGroups: Failed to find GPU %d in group %d" + " returned by \'EnumeratePhysicalDeviceGroupsKHR\' in list returned" + " by \'EnumeratePhysicalDevices\'", group_gpu, group); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + } + } + + // Copy or create everything to fill the new array of physical device groups + for (uint32_t new_idx = 0; new_idx < total_count; new_idx++) { + // Check if 
this physical device group with the same contents is already in the old buffer + for (uint32_t old_idx = 0; old_idx < inst->phys_dev_group_count_tramp; old_idx++) { + if (local_phys_dev_groups[new_idx].physicalDeviceCount == inst->phys_dev_groups_tramp[old_idx]->physicalDeviceCount) { + bool found_all_gpus = true; + for (uint32_t old_gpu = 0; old_gpu < inst->phys_dev_groups_tramp[old_idx]->physicalDeviceCount; old_gpu++) { + bool found_gpu = false; + for (uint32_t new_gpu = 0; new_gpu < local_phys_dev_groups[new_idx].physicalDeviceCount; new_gpu++) { + if (local_phys_dev_groups[new_idx].physicalDevices[new_gpu] == inst->phys_dev_groups_tramp[old_idx]->physicalDevices[old_gpu]) { + found_gpu = true; + break; + } + } + + if (!found_gpu) { + found_all_gpus = false; + break; + } + } + if (!found_all_gpus) { + continue; + } else { + new_phys_dev_groups[new_idx] = inst->phys_dev_groups_tramp[old_idx]; + break; + } + } + } + + // If this physical device group isn't in the old buffer, create it + if (NULL == new_phys_dev_groups[new_idx]) { + new_phys_dev_groups[new_idx] = (VkPhysicalDeviceGroupPropertiesKHR *)loader_instance_heap_alloc( + inst, sizeof(VkPhysicalDeviceGroupPropertiesKHR), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + if (NULL == new_phys_dev_groups[new_idx]) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "setupLoaderTrampPhysDevGroups: Failed to allocate " + "physical device group trampoline object %d", + new_idx); + total_count = new_idx; + res = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + memcpy(new_phys_dev_groups[new_idx], &local_phys_dev_groups[new_idx], + sizeof(VkPhysicalDeviceGroupPropertiesKHR)); + } + } + +out: + + if (VK_SUCCESS != res) { + if (NULL != new_phys_dev_groups) { + for (uint32_t i = 0; i < total_count; i++) { + loader_instance_heap_free(inst, new_phys_dev_groups[i]); + } + loader_instance_heap_free(inst, new_phys_dev_groups); + } + total_count = 0; + } else { + // Free everything that didn't carry over to the new array of + // physical device groups + if (NULL != inst->phys_dev_groups_tramp) { + for (uint32_t i = 0; i < inst->phys_dev_group_count_tramp; i++) { + bool found = false; + for (uint32_t j = 0; j < total_count; j++) { + if (inst->phys_dev_groups_tramp[i] == new_phys_dev_groups[j]) { + found = true; + break; + } + } + if (!found) { + loader_instance_heap_free(inst, inst->phys_dev_groups_tramp[i]); + } + } + loader_instance_heap_free(inst, inst->phys_dev_groups_tramp); + } + + // Swap in the new physical device group list + inst->phys_dev_group_count_tramp = total_count; + inst->phys_dev_groups_tramp = new_phys_dev_groups; + } + + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups( + VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties *pPhysicalDeviceGroupProperties) { + VkResult res = VK_SUCCESS; + uint32_t count; + uint32_t i; + struct loader_instance *inst = NULL; + + loader_platform_thread_lock_mutex(&loader_lock); + + inst = loader_get_instance(instance); + if (NULL == inst) { + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + if (NULL == pPhysicalDeviceGroupCount) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "vkEnumeratePhysicalDeviceGroupsKHR: Received NULL pointer for physical " + "device group count return value."); + res = VK_ERROR_INITIALIZATION_FAILED; + goto out; + } + + VkResult setup_res = setupLoaderTrampPhysDevGroups(instance); + if (VK_SUCCESS != setup_res) { + res = setup_res; + goto out; + } + + count = 
inst->phys_dev_group_count_tramp; + + // Wrap the PhysDev object for loader usage, return wrapped objects + if (NULL != pPhysicalDeviceGroupProperties) { + if (inst->phys_dev_group_count_tramp > *pPhysicalDeviceGroupCount) { + loader_log(inst, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkEnumeratePhysicalDeviceGroupsKHR: Trimming device group count down" + " by application request from %d to %d physical device groups", + inst->phys_dev_group_count_tramp, *pPhysicalDeviceGroupCount); + count = *pPhysicalDeviceGroupCount; + res = VK_INCOMPLETE; + } + for (i = 0; i < count; i++) { + memcpy(&pPhysicalDeviceGroupProperties[i], inst->phys_dev_groups_tramp[i], + sizeof(VkPhysicalDeviceGroupPropertiesKHR)); + } + } + + *pPhysicalDeviceGroupCount = count; + +out: + + loader_platform_thread_unlock_mutex(&loader_lock); + return res; +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2 *pFeatures) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + disp->GetPhysicalDeviceFeatures2KHR(unwrapped_phys_dev, pFeatures); + } else { + disp->GetPhysicalDeviceFeatures2(unwrapped_phys_dev, pFeatures); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2 *pProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + disp->GetPhysicalDeviceProperties2KHR(unwrapped_phys_dev, pProperties); + } else { + disp->GetPhysicalDeviceProperties2(unwrapped_phys_dev, pProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format, + VkFormatProperties2 *pFormatProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + disp->GetPhysicalDeviceFormatProperties2KHR(unwrapped_phys_dev, format, pFormatProperties); + } else { + disp->GetPhysicalDeviceFormatProperties2(unwrapped_phys_dev, format, pFormatProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, + VkImageFormatProperties2 *pImageFormatProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL 
&& inst->enabled_known_extensions.khr_get_physical_device_properties2) { + return disp->GetPhysicalDeviceImageFormatProperties2KHR(unwrapped_phys_dev, pImageFormatInfo, pImageFormatProperties); + } else { + return disp->GetPhysicalDeviceImageFormatProperties2(unwrapped_phys_dev, pImageFormatInfo, pImageFormatProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, + uint32_t *pQueueFamilyPropertyCount, + VkQueueFamilyProperties2 *pQueueFamilyProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + disp->GetPhysicalDeviceQueueFamilyProperties2KHR(unwrapped_phys_dev, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } else { + disp->GetPhysicalDeviceQueueFamilyProperties2(unwrapped_phys_dev, pQueueFamilyPropertyCount, pQueueFamilyProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2 *pMemoryProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + disp->GetPhysicalDeviceMemoryProperties2KHR(unwrapped_phys_dev, pMemoryProperties); + } else { + disp->GetPhysicalDeviceMemoryProperties2(unwrapped_phys_dev, pMemoryProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2 *pFormatInfo, uint32_t *pPropertyCount, + VkSparseImageFormatProperties2 *pProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_get_physical_device_properties2) { + disp->GetPhysicalDeviceSparseImageFormatProperties2KHR(unwrapped_phys_dev, pFormatInfo, pPropertyCount, pProperties); + } else { + disp->GetPhysicalDeviceSparseImageFormatProperties2(unwrapped_phys_dev, pFormatInfo, pPropertyCount, pProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo, + VkExternalBufferProperties *pExternalBufferProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_external_memory_capabilities){ + disp->GetPhysicalDeviceExternalBufferPropertiesKHR(unwrapped_phys_dev, 
pExternalBufferInfo, pExternalBufferProperties); + } else { + disp->GetPhysicalDeviceExternalBufferProperties(unwrapped_phys_dev, pExternalBufferInfo, pExternalBufferProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo, + VkExternalSemaphoreProperties *pExternalSemaphoreProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_external_semaphore_capabilities) { + disp->GetPhysicalDeviceExternalSemaphorePropertiesKHR(unwrapped_phys_dev, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } else { + disp->GetPhysicalDeviceExternalSemaphoreProperties(unwrapped_phys_dev, pExternalSemaphoreInfo, pExternalSemaphoreProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo, + VkExternalFenceProperties *pExternalFenceProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(physicalDevice); + const struct loader_instance *inst = ((struct loader_physical_device_tramp*) physicalDevice)->this_instance; + + if (inst != NULL && inst->enabled_known_extensions.khr_external_fence_capabilities) { + disp->GetPhysicalDeviceExternalFencePropertiesKHR(unwrapped_phys_dev, pExternalFenceInfo, pExternalFenceProperties); + } else { + disp->GetPhysicalDeviceExternalFenceProperties(unwrapped_phys_dev, pExternalFenceInfo, pExternalFenceProperties); + } +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->BindBufferMemory2(device, bindInfoCount, pBindInfos); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->BindImageMemory2(device, bindInfoCount, pBindInfos); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask( + VkCommandBuffer commandBuffer, + uint32_t deviceMask) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDeviceMask(commandBuffer, deviceMask); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ) { + const VkLayerDispatchTable *disp = 
loader_get_dispatch(commandBuffer); + disp->CmdDispatchBase(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->TrimCommandPool(device, commandPool, flags); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDeviceQueue2(device, pQueueInfo, pQueue); + if (pQueue != NULL && *pQueue != NULL) { + loader_set_dispatch(*pQueue, disp); + } +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateSamplerYcbcrConversion(device, pCreateInfo, pAllocator, pYcbcrConversion); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL +vkCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate(VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const 
VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void *pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData); +} + +// ---- Vulkan core 1.2 trampolines + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2(VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2(VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdEndRenderPass2(commandBuffer, pSubpassEndInfo); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, + VkBuffer countBuffer, VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, uint32_t stride) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, + VkDeviceSize offset, VkBuffer countBuffer, + VkDeviceSize countBufferOffset, uint32_t maxDrawCount, + uint32_t stride) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t* pValue) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetSemaphoreCounterValue(device, semaphore, pValue); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->WaitSemaphores(device, pWaitInfo, timeout); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return 
disp->SignalSemaphore(device, pSignalInfo); +} + +LOADER_EXPORT VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress(VkDevice device, + const VkBufferDeviceAddressInfo* pInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetBufferDeviceAddress(device, pInfo); +} + +LOADER_EXPORT VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress(VkDevice device, + const VkBufferDeviceAddressInfo* pInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetBufferOpaqueCaptureAddress(device, pInfo); +} + +LOADER_EXPORT VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeviceMemoryOpaqueCaptureAddress(device, pInfo); +} + +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, + uint32_t queryCount) +{ + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->ResetQueryPool(device, queryPool, firstQuery, queryCount); +} diff --git a/src/third_party/vulkan/loader/unknown_ext_chain.c b/src/third_party/vulkan/loader/unknown_ext_chain.c new file mode 100644 index 0000000..1c8560d --- /dev/null +++ b/src/third_party/vulkan/loader/unknown_ext_chain.c @@ -0,0 +1,819 @@ +/* + * Copyright (c) 2017 The Khronos Group Inc. + * Copyright (c) 2017 Valve Corporation + * Copyright (c) 2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author Jon Ashburn + * Author: Lenny Komow + */ + + // This code is used to pass on physical device extensions through the call chain. It must do this without creating a stack frame, + // because the actual parameters of the call are not known. Since the first parameter is known to be a VkPhysicalDevice, it can +// unwrap the physical device, overwriting the wrapped device, and then jump to the next function in the call chain. This code +// attempts to accomplish this by relying on tail-call optimizations, but there is no guarantee that this will work. As a result, +// this code is only compiled on systems where an assembly alternative has not been written. + + #include "vk_loader_platform.h" + #include "loader.h" + + #if defined(__GNUC__) && !defined(__clang__) + #pragma GCC optimize(3) // force gcc to use tail-calls + #endif + + // Trampoline function macro for unknown physical device extension command. + #define PhysDevExtTramp(num) \ + VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTramp##num(VkPhysicalDevice physical_device) { \ + const struct loader_instance_dispatch_table *disp; \ + disp = loader_get_instance_dispatch(physical_device); \ + disp->phys_dev_ext[num](loader_unwrap_physical_device(physical_device)); \ + } + +// Terminator function macro for unknown physical device extension command. 
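+// The terminator is the last link in the chain: it casts the wrapped
+// VkPhysicalDevice back to the loader's terminator structure, logs an
+// error if the ICD never registered an entry point for dispatch slot
+// 'num', and then calls the ICD's function with the unwrapped physical
+// device handle.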
+#define PhysDevExtTermin(num) \ + VKAPI_ATTR void VKAPI_CALL vkPhysDevExtTermin##num(VkPhysicalDevice physical_device) { \ + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physical_device; \ + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; \ + struct loader_instance *inst = (struct loader_instance *)icd_term->this_instance; \ + if (NULL == icd_term->phys_dev_ext[num]) { \ + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, "Extension %s not supported for this physical device", \ + inst->phys_dev_ext_disp_hash[num].func_name); \ + } \ + icd_term->phys_dev_ext[num](phys_dev_term->phys_dev); \ + } + +// Trampoline function macro for unknown physical device extension command. +#define DevExtTramp(num) \ + VKAPI_ATTR void VKAPI_CALL vkdev_ext##num(VkDevice device) { \ + const struct loader_dev_dispatch_table *disp; \ + disp = loader_get_dev_dispatch(device); \ + disp->ext_dispatch.dev_ext[num](device); \ + } + + +// Instantiations of the trampoline +PhysDevExtTramp(0) +PhysDevExtTramp(1) +PhysDevExtTramp(2) +PhysDevExtTramp(3) +PhysDevExtTramp(4) +PhysDevExtTramp(5) +PhysDevExtTramp(6) +PhysDevExtTramp(7) +PhysDevExtTramp(8) +PhysDevExtTramp(9) +PhysDevExtTramp(10) +PhysDevExtTramp(11) +PhysDevExtTramp(12) +PhysDevExtTramp(13) +PhysDevExtTramp(14) +PhysDevExtTramp(15) +PhysDevExtTramp(16) +PhysDevExtTramp(17) +PhysDevExtTramp(18) +PhysDevExtTramp(19) +PhysDevExtTramp(20) +PhysDevExtTramp(21) +PhysDevExtTramp(22) +PhysDevExtTramp(23) +PhysDevExtTramp(24) +PhysDevExtTramp(25) +PhysDevExtTramp(26) +PhysDevExtTramp(27) +PhysDevExtTramp(28) +PhysDevExtTramp(29) +PhysDevExtTramp(30) +PhysDevExtTramp(31) +PhysDevExtTramp(32) +PhysDevExtTramp(33) +PhysDevExtTramp(34) +PhysDevExtTramp(35) +PhysDevExtTramp(36) +PhysDevExtTramp(37) +PhysDevExtTramp(38) +PhysDevExtTramp(39) +PhysDevExtTramp(40) +PhysDevExtTramp(41) +PhysDevExtTramp(42) +PhysDevExtTramp(43) +PhysDevExtTramp(44) +PhysDevExtTramp(45) +PhysDevExtTramp(46) +PhysDevExtTramp(47) +PhysDevExtTramp(48) +PhysDevExtTramp(49) +PhysDevExtTramp(50) +PhysDevExtTramp(51) +PhysDevExtTramp(52) +PhysDevExtTramp(53) +PhysDevExtTramp(54) +PhysDevExtTramp(55) +PhysDevExtTramp(56) +PhysDevExtTramp(57) +PhysDevExtTramp(58) +PhysDevExtTramp(59) +PhysDevExtTramp(60) +PhysDevExtTramp(61) +PhysDevExtTramp(62) +PhysDevExtTramp(63) +PhysDevExtTramp(64) +PhysDevExtTramp(65) +PhysDevExtTramp(66) +PhysDevExtTramp(67) +PhysDevExtTramp(68) +PhysDevExtTramp(69) +PhysDevExtTramp(70) +PhysDevExtTramp(71) +PhysDevExtTramp(72) +PhysDevExtTramp(73) +PhysDevExtTramp(74) +PhysDevExtTramp(75) +PhysDevExtTramp(76) +PhysDevExtTramp(77) +PhysDevExtTramp(78) +PhysDevExtTramp(79) +PhysDevExtTramp(80) +PhysDevExtTramp(81) +PhysDevExtTramp(82) +PhysDevExtTramp(83) +PhysDevExtTramp(84) +PhysDevExtTramp(85) +PhysDevExtTramp(86) +PhysDevExtTramp(87) +PhysDevExtTramp(88) +PhysDevExtTramp(89) +PhysDevExtTramp(90) +PhysDevExtTramp(91) +PhysDevExtTramp(92) +PhysDevExtTramp(93) +PhysDevExtTramp(94) +PhysDevExtTramp(95) +PhysDevExtTramp(96) +PhysDevExtTramp(97) +PhysDevExtTramp(98) +PhysDevExtTramp(99) +PhysDevExtTramp(100) +PhysDevExtTramp(101) +PhysDevExtTramp(102) +PhysDevExtTramp(103) +PhysDevExtTramp(104) +PhysDevExtTramp(105) +PhysDevExtTramp(106) +PhysDevExtTramp(107) +PhysDevExtTramp(108) +PhysDevExtTramp(109) +PhysDevExtTramp(110) +PhysDevExtTramp(111) +PhysDevExtTramp(112) +PhysDevExtTramp(113) +PhysDevExtTramp(114) +PhysDevExtTramp(115) +PhysDevExtTramp(116) +PhysDevExtTramp(117) +PhysDevExtTramp(118) +PhysDevExtTramp(119) 
+PhysDevExtTramp(120) +PhysDevExtTramp(121) +PhysDevExtTramp(122) +PhysDevExtTramp(123) +PhysDevExtTramp(124) +PhysDevExtTramp(125) +PhysDevExtTramp(126) +PhysDevExtTramp(127) +PhysDevExtTramp(128) +PhysDevExtTramp(129) +PhysDevExtTramp(130) +PhysDevExtTramp(131) +PhysDevExtTramp(132) +PhysDevExtTramp(133) +PhysDevExtTramp(134) +PhysDevExtTramp(135) +PhysDevExtTramp(136) +PhysDevExtTramp(137) +PhysDevExtTramp(138) +PhysDevExtTramp(139) +PhysDevExtTramp(140) +PhysDevExtTramp(141) +PhysDevExtTramp(142) +PhysDevExtTramp(143) +PhysDevExtTramp(144) +PhysDevExtTramp(145) +PhysDevExtTramp(146) +PhysDevExtTramp(147) +PhysDevExtTramp(148) +PhysDevExtTramp(149) +PhysDevExtTramp(150) +PhysDevExtTramp(151) +PhysDevExtTramp(152) +PhysDevExtTramp(153) +PhysDevExtTramp(154) +PhysDevExtTramp(155) +PhysDevExtTramp(156) +PhysDevExtTramp(157) +PhysDevExtTramp(158) +PhysDevExtTramp(159) +PhysDevExtTramp(160) +PhysDevExtTramp(161) +PhysDevExtTramp(162) +PhysDevExtTramp(163) +PhysDevExtTramp(164) +PhysDevExtTramp(165) +PhysDevExtTramp(166) +PhysDevExtTramp(167) +PhysDevExtTramp(168) +PhysDevExtTramp(169) +PhysDevExtTramp(170) +PhysDevExtTramp(171) +PhysDevExtTramp(172) +PhysDevExtTramp(173) +PhysDevExtTramp(174) +PhysDevExtTramp(175) +PhysDevExtTramp(176) +PhysDevExtTramp(177) +PhysDevExtTramp(178) +PhysDevExtTramp(179) +PhysDevExtTramp(180) +PhysDevExtTramp(181) +PhysDevExtTramp(182) +PhysDevExtTramp(183) +PhysDevExtTramp(184) +PhysDevExtTramp(185) +PhysDevExtTramp(186) +PhysDevExtTramp(187) +PhysDevExtTramp(188) +PhysDevExtTramp(189) +PhysDevExtTramp(190) +PhysDevExtTramp(191) +PhysDevExtTramp(192) +PhysDevExtTramp(193) +PhysDevExtTramp(194) +PhysDevExtTramp(195) +PhysDevExtTramp(196) +PhysDevExtTramp(197) +PhysDevExtTramp(198) +PhysDevExtTramp(199) +PhysDevExtTramp(200) +PhysDevExtTramp(201) +PhysDevExtTramp(202) +PhysDevExtTramp(203) +PhysDevExtTramp(204) +PhysDevExtTramp(205) +PhysDevExtTramp(206) +PhysDevExtTramp(207) +PhysDevExtTramp(208) +PhysDevExtTramp(209) +PhysDevExtTramp(210) +PhysDevExtTramp(211) +PhysDevExtTramp(212) +PhysDevExtTramp(213) +PhysDevExtTramp(214) +PhysDevExtTramp(215) +PhysDevExtTramp(216) +PhysDevExtTramp(217) +PhysDevExtTramp(218) +PhysDevExtTramp(219) +PhysDevExtTramp(220) +PhysDevExtTramp(221) +PhysDevExtTramp(222) +PhysDevExtTramp(223) +PhysDevExtTramp(224) +PhysDevExtTramp(225) +PhysDevExtTramp(226) +PhysDevExtTramp(227) +PhysDevExtTramp(228) +PhysDevExtTramp(229) +PhysDevExtTramp(230) +PhysDevExtTramp(231) +PhysDevExtTramp(232) +PhysDevExtTramp(233) +PhysDevExtTramp(234) +PhysDevExtTramp(235) +PhysDevExtTramp(236) +PhysDevExtTramp(237) +PhysDevExtTramp(238) +PhysDevExtTramp(239) +PhysDevExtTramp(240) +PhysDevExtTramp(241) +PhysDevExtTramp(242) +PhysDevExtTramp(243) +PhysDevExtTramp(244) +PhysDevExtTramp(245) +PhysDevExtTramp(246) +PhysDevExtTramp(247) +PhysDevExtTramp(248) +PhysDevExtTramp(249) + +// Instantiations of the terminator +PhysDevExtTermin(0) +PhysDevExtTermin(1) +PhysDevExtTermin(2) +PhysDevExtTermin(3) +PhysDevExtTermin(4) +PhysDevExtTermin(5) +PhysDevExtTermin(6) +PhysDevExtTermin(7) +PhysDevExtTermin(8) +PhysDevExtTermin(9) +PhysDevExtTermin(10) +PhysDevExtTermin(11) +PhysDevExtTermin(12) +PhysDevExtTermin(13) +PhysDevExtTermin(14) +PhysDevExtTermin(15) +PhysDevExtTermin(16) +PhysDevExtTermin(17) +PhysDevExtTermin(18) +PhysDevExtTermin(19) +PhysDevExtTermin(20) +PhysDevExtTermin(21) +PhysDevExtTermin(22) +PhysDevExtTermin(23) +PhysDevExtTermin(24) +PhysDevExtTermin(25) +PhysDevExtTermin(26) +PhysDevExtTermin(27) +PhysDevExtTermin(28) +PhysDevExtTermin(29) 
+PhysDevExtTermin(30) +PhysDevExtTermin(31) +PhysDevExtTermin(32) +PhysDevExtTermin(33) +PhysDevExtTermin(34) +PhysDevExtTermin(35) +PhysDevExtTermin(36) +PhysDevExtTermin(37) +PhysDevExtTermin(38) +PhysDevExtTermin(39) +PhysDevExtTermin(40) +PhysDevExtTermin(41) +PhysDevExtTermin(42) +PhysDevExtTermin(43) +PhysDevExtTermin(44) +PhysDevExtTermin(45) +PhysDevExtTermin(46) +PhysDevExtTermin(47) +PhysDevExtTermin(48) +PhysDevExtTermin(49) +PhysDevExtTermin(50) +PhysDevExtTermin(51) +PhysDevExtTermin(52) +PhysDevExtTermin(53) +PhysDevExtTermin(54) +PhysDevExtTermin(55) +PhysDevExtTermin(56) +PhysDevExtTermin(57) +PhysDevExtTermin(58) +PhysDevExtTermin(59) +PhysDevExtTermin(60) +PhysDevExtTermin(61) +PhysDevExtTermin(62) +PhysDevExtTermin(63) +PhysDevExtTermin(64) +PhysDevExtTermin(65) +PhysDevExtTermin(66) +PhysDevExtTermin(67) +PhysDevExtTermin(68) +PhysDevExtTermin(69) +PhysDevExtTermin(70) +PhysDevExtTermin(71) +PhysDevExtTermin(72) +PhysDevExtTermin(73) +PhysDevExtTermin(74) +PhysDevExtTermin(75) +PhysDevExtTermin(76) +PhysDevExtTermin(77) +PhysDevExtTermin(78) +PhysDevExtTermin(79) +PhysDevExtTermin(80) +PhysDevExtTermin(81) +PhysDevExtTermin(82) +PhysDevExtTermin(83) +PhysDevExtTermin(84) +PhysDevExtTermin(85) +PhysDevExtTermin(86) +PhysDevExtTermin(87) +PhysDevExtTermin(88) +PhysDevExtTermin(89) +PhysDevExtTermin(90) +PhysDevExtTermin(91) +PhysDevExtTermin(92) +PhysDevExtTermin(93) +PhysDevExtTermin(94) +PhysDevExtTermin(95) +PhysDevExtTermin(96) +PhysDevExtTermin(97) +PhysDevExtTermin(98) +PhysDevExtTermin(99) +PhysDevExtTermin(100) +PhysDevExtTermin(101) +PhysDevExtTermin(102) +PhysDevExtTermin(103) +PhysDevExtTermin(104) +PhysDevExtTermin(105) +PhysDevExtTermin(106) +PhysDevExtTermin(107) +PhysDevExtTermin(108) +PhysDevExtTermin(109) +PhysDevExtTermin(110) +PhysDevExtTermin(111) +PhysDevExtTermin(112) +PhysDevExtTermin(113) +PhysDevExtTermin(114) +PhysDevExtTermin(115) +PhysDevExtTermin(116) +PhysDevExtTermin(117) +PhysDevExtTermin(118) +PhysDevExtTermin(119) +PhysDevExtTermin(120) +PhysDevExtTermin(121) +PhysDevExtTermin(122) +PhysDevExtTermin(123) +PhysDevExtTermin(124) +PhysDevExtTermin(125) +PhysDevExtTermin(126) +PhysDevExtTermin(127) +PhysDevExtTermin(128) +PhysDevExtTermin(129) +PhysDevExtTermin(130) +PhysDevExtTermin(131) +PhysDevExtTermin(132) +PhysDevExtTermin(133) +PhysDevExtTermin(134) +PhysDevExtTermin(135) +PhysDevExtTermin(136) +PhysDevExtTermin(137) +PhysDevExtTermin(138) +PhysDevExtTermin(139) +PhysDevExtTermin(140) +PhysDevExtTermin(141) +PhysDevExtTermin(142) +PhysDevExtTermin(143) +PhysDevExtTermin(144) +PhysDevExtTermin(145) +PhysDevExtTermin(146) +PhysDevExtTermin(147) +PhysDevExtTermin(148) +PhysDevExtTermin(149) +PhysDevExtTermin(150) +PhysDevExtTermin(151) +PhysDevExtTermin(152) +PhysDevExtTermin(153) +PhysDevExtTermin(154) +PhysDevExtTermin(155) +PhysDevExtTermin(156) +PhysDevExtTermin(157) +PhysDevExtTermin(158) +PhysDevExtTermin(159) +PhysDevExtTermin(160) +PhysDevExtTermin(161) +PhysDevExtTermin(162) +PhysDevExtTermin(163) +PhysDevExtTermin(164) +PhysDevExtTermin(165) +PhysDevExtTermin(166) +PhysDevExtTermin(167) +PhysDevExtTermin(168) +PhysDevExtTermin(169) +PhysDevExtTermin(170) +PhysDevExtTermin(171) +PhysDevExtTermin(172) +PhysDevExtTermin(173) +PhysDevExtTermin(174) +PhysDevExtTermin(175) +PhysDevExtTermin(176) +PhysDevExtTermin(177) +PhysDevExtTermin(178) +PhysDevExtTermin(179) +PhysDevExtTermin(180) +PhysDevExtTermin(181) +PhysDevExtTermin(182) +PhysDevExtTermin(183) +PhysDevExtTermin(184) +PhysDevExtTermin(185) +PhysDevExtTermin(186) 
+PhysDevExtTermin(187) +PhysDevExtTermin(188) +PhysDevExtTermin(189) +PhysDevExtTermin(190) +PhysDevExtTermin(191) +PhysDevExtTermin(192) +PhysDevExtTermin(193) +PhysDevExtTermin(194) +PhysDevExtTermin(195) +PhysDevExtTermin(196) +PhysDevExtTermin(197) +PhysDevExtTermin(198) +PhysDevExtTermin(199) +PhysDevExtTermin(200) +PhysDevExtTermin(201) +PhysDevExtTermin(202) +PhysDevExtTermin(203) +PhysDevExtTermin(204) +PhysDevExtTermin(205) +PhysDevExtTermin(206) +PhysDevExtTermin(207) +PhysDevExtTermin(208) +PhysDevExtTermin(209) +PhysDevExtTermin(210) +PhysDevExtTermin(211) +PhysDevExtTermin(212) +PhysDevExtTermin(213) +PhysDevExtTermin(214) +PhysDevExtTermin(215) +PhysDevExtTermin(216) +PhysDevExtTermin(217) +PhysDevExtTermin(218) +PhysDevExtTermin(219) +PhysDevExtTermin(220) +PhysDevExtTermin(221) +PhysDevExtTermin(222) +PhysDevExtTermin(223) +PhysDevExtTermin(224) +PhysDevExtTermin(225) +PhysDevExtTermin(226) +PhysDevExtTermin(227) +PhysDevExtTermin(228) +PhysDevExtTermin(229) +PhysDevExtTermin(230) +PhysDevExtTermin(231) +PhysDevExtTermin(232) +PhysDevExtTermin(233) +PhysDevExtTermin(234) +PhysDevExtTermin(235) +PhysDevExtTermin(236) +PhysDevExtTermin(237) +PhysDevExtTermin(238) +PhysDevExtTermin(239) +PhysDevExtTermin(240) +PhysDevExtTermin(241) +PhysDevExtTermin(242) +PhysDevExtTermin(243) +PhysDevExtTermin(244) +PhysDevExtTermin(245) +PhysDevExtTermin(246) +PhysDevExtTermin(247) +PhysDevExtTermin(248) +PhysDevExtTermin(249) + +// Instantiations of the device trampoline +DevExtTramp(0) +DevExtTramp(1) +DevExtTramp(2) +DevExtTramp(3) +DevExtTramp(4) +DevExtTramp(5) +DevExtTramp(6) +DevExtTramp(7) +DevExtTramp(8) +DevExtTramp(9) +DevExtTramp(10) +DevExtTramp(11) +DevExtTramp(12) +DevExtTramp(13) +DevExtTramp(14) +DevExtTramp(15) +DevExtTramp(16) +DevExtTramp(17) +DevExtTramp(18) +DevExtTramp(19) +DevExtTramp(20) +DevExtTramp(21) +DevExtTramp(22) +DevExtTramp(23) +DevExtTramp(24) +DevExtTramp(25) +DevExtTramp(26) +DevExtTramp(27) +DevExtTramp(28) +DevExtTramp(29) +DevExtTramp(30) +DevExtTramp(31) +DevExtTramp(32) +DevExtTramp(33) +DevExtTramp(34) +DevExtTramp(35) +DevExtTramp(36) +DevExtTramp(37) +DevExtTramp(38) +DevExtTramp(39) +DevExtTramp(40) +DevExtTramp(41) +DevExtTramp(42) +DevExtTramp(43) +DevExtTramp(44) +DevExtTramp(45) +DevExtTramp(46) +DevExtTramp(47) +DevExtTramp(48) +DevExtTramp(49) +DevExtTramp(50) +DevExtTramp(51) +DevExtTramp(52) +DevExtTramp(53) +DevExtTramp(54) +DevExtTramp(55) +DevExtTramp(56) +DevExtTramp(57) +DevExtTramp(58) +DevExtTramp(59) +DevExtTramp(60) +DevExtTramp(61) +DevExtTramp(62) +DevExtTramp(63) +DevExtTramp(64) +DevExtTramp(65) +DevExtTramp(66) +DevExtTramp(67) +DevExtTramp(68) +DevExtTramp(69) +DevExtTramp(70) +DevExtTramp(71) +DevExtTramp(72) +DevExtTramp(73) +DevExtTramp(74) +DevExtTramp(75) +DevExtTramp(76) +DevExtTramp(77) +DevExtTramp(78) +DevExtTramp(79) +DevExtTramp(80) +DevExtTramp(81) +DevExtTramp(82) +DevExtTramp(83) +DevExtTramp(84) +DevExtTramp(85) +DevExtTramp(86) +DevExtTramp(87) +DevExtTramp(88) +DevExtTramp(89) +DevExtTramp(90) +DevExtTramp(91) +DevExtTramp(92) +DevExtTramp(93) +DevExtTramp(94) +DevExtTramp(95) +DevExtTramp(96) +DevExtTramp(97) +DevExtTramp(98) +DevExtTramp(99) +DevExtTramp(100) +DevExtTramp(101) +DevExtTramp(102) +DevExtTramp(103) +DevExtTramp(104) +DevExtTramp(105) +DevExtTramp(106) +DevExtTramp(107) +DevExtTramp(108) +DevExtTramp(109) +DevExtTramp(110) +DevExtTramp(111) +DevExtTramp(112) +DevExtTramp(113) +DevExtTramp(114) +DevExtTramp(115) +DevExtTramp(116) +DevExtTramp(117) +DevExtTramp(118) +DevExtTramp(119) 
+DevExtTramp(120) +DevExtTramp(121) +DevExtTramp(122) +DevExtTramp(123) +DevExtTramp(124) +DevExtTramp(125) +DevExtTramp(126) +DevExtTramp(127) +DevExtTramp(128) +DevExtTramp(129) +DevExtTramp(130) +DevExtTramp(131) +DevExtTramp(132) +DevExtTramp(133) +DevExtTramp(134) +DevExtTramp(135) +DevExtTramp(136) +DevExtTramp(137) +DevExtTramp(138) +DevExtTramp(139) +DevExtTramp(140) +DevExtTramp(141) +DevExtTramp(142) +DevExtTramp(143) +DevExtTramp(144) +DevExtTramp(145) +DevExtTramp(146) +DevExtTramp(147) +DevExtTramp(148) +DevExtTramp(149) +DevExtTramp(150) +DevExtTramp(151) +DevExtTramp(152) +DevExtTramp(153) +DevExtTramp(154) +DevExtTramp(155) +DevExtTramp(156) +DevExtTramp(157) +DevExtTramp(158) +DevExtTramp(159) +DevExtTramp(160) +DevExtTramp(161) +DevExtTramp(162) +DevExtTramp(163) +DevExtTramp(164) +DevExtTramp(165) +DevExtTramp(166) +DevExtTramp(167) +DevExtTramp(168) +DevExtTramp(169) +DevExtTramp(170) +DevExtTramp(171) +DevExtTramp(172) +DevExtTramp(173) +DevExtTramp(174) +DevExtTramp(175) +DevExtTramp(176) +DevExtTramp(177) +DevExtTramp(178) +DevExtTramp(179) +DevExtTramp(180) +DevExtTramp(181) +DevExtTramp(182) +DevExtTramp(183) +DevExtTramp(184) +DevExtTramp(185) +DevExtTramp(186) +DevExtTramp(187) +DevExtTramp(188) +DevExtTramp(189) +DevExtTramp(190) +DevExtTramp(191) +DevExtTramp(192) +DevExtTramp(193) +DevExtTramp(194) +DevExtTramp(195) +DevExtTramp(196) +DevExtTramp(197) +DevExtTramp(198) +DevExtTramp(199) +DevExtTramp(200) +DevExtTramp(201) +DevExtTramp(202) +DevExtTramp(203) +DevExtTramp(204) +DevExtTramp(205) +DevExtTramp(206) +DevExtTramp(207) +DevExtTramp(208) +DevExtTramp(209) +DevExtTramp(210) +DevExtTramp(211) +DevExtTramp(212) +DevExtTramp(213) +DevExtTramp(214) +DevExtTramp(215) +DevExtTramp(216) +DevExtTramp(217) +DevExtTramp(218) +DevExtTramp(219) +DevExtTramp(220) +DevExtTramp(221) +DevExtTramp(222) +DevExtTramp(223) +DevExtTramp(224) +DevExtTramp(225) +DevExtTramp(226) +DevExtTramp(227) +DevExtTramp(228) +DevExtTramp(229) +DevExtTramp(230) +DevExtTramp(231) +DevExtTramp(232) +DevExtTramp(233) +DevExtTramp(234) +DevExtTramp(235) +DevExtTramp(236) +DevExtTramp(237) +DevExtTramp(238) +DevExtTramp(239) +DevExtTramp(240) +DevExtTramp(241) +DevExtTramp(242) +DevExtTramp(243) +DevExtTramp(244) +DevExtTramp(245) +DevExtTramp(246) +DevExtTramp(247) +DevExtTramp(248) +DevExtTramp(249) diff --git a/src/third_party/vulkan/loader/vk_dispatch_table_helper.h b/src/third_party/vulkan/loader/vk_dispatch_table_helper.h new file mode 100644 index 0000000..819a0e7 --- /dev/null +++ b/src/third_party/vulkan/loader/vk_dispatch_table_helper.h @@ -0,0 +1,951 @@ +#pragma once +// *** THIS FILE IS GENERATED - DO NOT EDIT *** +// See dispatch_helper_generator.py for modifications + +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Author: Courtney Goeltzenleuchter + * Author: Jon Ashburn + * Author: Mark Lobodzinski + */ + +#include "../vulkan.h" +#include "../vk_layer.h" +#include +#include "vk_layer_dispatch_table.h" + +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeviceGroupPresentCapabilitiesKHR(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubGetDeviceGroupPeerMemoryFeaturesKHR(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDispatchBaseKHR(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) { }; +static VKAPI_ATTR void VKAPI_CALL StubTrimCommandPoolKHR(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags) { }; +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryWin32HandleKHR(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryWin32HandlePropertiesKHR(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryFdPropertiesKHR(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties) { return VK_SUCCESS; }; +#ifdef 
VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetSemaphoreWin32HandleKHR(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR void VKAPI_CALL StubUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetSwapchainStatusKHR(VkDevice device, VkSwapchainKHR swapchain) { return VK_SUCCESS; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetFenceFdKHR(VkDevice device, 
const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubAcquireProfilingLockKHR(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubReleaseProfilingLockKHR(VkDevice device) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubGetDescriptorSetLayoutSupportKHR(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t* pValue) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { }; +static VKAPI_ATTR VkDeviceAddress VKAPI_CALL StubGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; }; +static VKAPI_ATTR uint64_t VKAPI_CALL StubGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; }; +static VKAPI_ATTR uint64_t VKAPI_CALL StubGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) { return 0L; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateDeferredOperationKHR(VkDevice 
device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyDeferredOperationKHR(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR uint32_t VKAPI_CALL StubGetDeferredOperationMaxConcurrencyKHR(VkDevice device, VkDeferredOperationKHR operation) { return 0; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeferredOperationResultKHR(VkDevice device, VkDeferredOperationKHR operation) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubDeferredOperationJoinKHR(VkDevice device, VkDeferredOperationKHR operation) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutableStatisticsKHR(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetPipelineExecutableInternalRepresentationsKHR(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR* pCopyBufferInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR* pCopyImageInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR* pBlitImageInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdResolveImage2KHR(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubDebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubDebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets) { }; 
+static VKAPI_ATTR void VKAPI_CALL StubCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride) { }; +static VKAPI_ATTR uint32_t VKAPI_CALL StubGetImageViewHandleNVX(VkDevice device, const VkImageViewHandleInfoNVX* pInfo) { return 0; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetImageViewAddressNVX(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetShaderInfoAMD(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo) { return VK_SUCCESS; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryWin32HandleNV(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR void VKAPI_CALL StubCmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubDisplayPowerControlEXT(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubRegisterDeviceEventEXT(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubRegisterDisplayEventEXT(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetSwapchainCounterEXT(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetRefreshCycleDurationGOOGLE(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* 
pDisplayTimingProperties) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetPastPresentationTimingGOOGLE(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles) { }; +static VKAPI_ATTR void VKAPI_CALL StubSetHdrMetadataEXT(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata) { }; +#ifdef VK_USE_PLATFORM_ANDROID_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryAndroidHardwareBufferANDROID(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_ANDROID_KHR +static VKAPI_ATTR void VKAPI_CALL StubCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetImageDrmFormatModifierPropertiesEXT(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetCoarseSampleOrderNV(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateAccelerationStructureNV(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureMemoryRequirementsNV(VkDevice device, const 
VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingShaderGroupHandlesNV(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCompileDeferredNV(VkDevice device, VkPipeline pipeline, uint32_t shader) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetMemoryHostPointerPropertiesEXT(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetCalibratedTimestampsEXT(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, 
uint32_t firstTask) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void* pCheckpointMarker) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetQueueCheckpointDataNV(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubInitializePerformanceApiINTEL(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubUninitializePerformanceApiINTEL(VkDevice device) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCmdSetPerformanceMarkerINTEL(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCmdSetPerformanceStreamMarkerINTEL(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCmdSetPerformanceOverrideINTEL(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubAcquirePerformanceConfigurationINTEL(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubReleasePerformanceConfigurationINTEL(VkDevice device, VkPerformanceConfigurationINTEL configuration) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubQueueSetPerformanceConfigurationINTEL(VkQueue queue, VkPerformanceConfigurationINTEL configuration) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetPerformanceParameterINTEL(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubSetLocalDimmingAMD(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable) { }; +static VKAPI_ATTR VkDeviceAddress VKAPI_CALL StubGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo* pInfo) { return 0L; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubAcquireFullScreenExclusiveModeEXT(VkDevice device, VkSwapchainKHR swapchain) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubReleaseFullScreenExclusiveModeEXT(VkDevice device, VkSwapchainKHR swapchain) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR VkResult VKAPI_CALL StubGetDeviceGroupSurfacePresentModes2EXT(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes) { return VK_SUCCESS; }; +#endif // VK_USE_PLATFORM_WIN32_KHR +static VKAPI_ATTR void VKAPI_CALL 
StubCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern) { }; +static VKAPI_ATTR void VKAPI_CALL StubResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetGeneratedCommandsMemoryRequirementsNV(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdPreprocessGeneratedCommandsNV(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdExecuteGeneratedCommandsNV(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBindPipelineShaderGroupNV(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateIndirectCommandsLayoutNV(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyIndirectCommandsLayoutNV(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreatePrivateDataSlotEXT(VkDevice device, const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL 
StubDestroyPrivateDataSlotEXT(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubSetPrivateDataEXT(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubGetPrivateDataEXT(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetFragmentShadingRateEnumNV(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateAccelerationStructureKHR(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructuresKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubBuildAccelerationStructuresKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCopyAccelerationStructureToMemoryKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCopyMemoryToAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyAccelerationStructureToMemoryKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdCopyMemoryToAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { }; +static VKAPI_ATTR VkDeviceAddress VKAPI_CALL 
StubGetAccelerationStructureDeviceAddressKHR(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) { return 0L; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetDeviceAccelerationStructureCompatibilityKHR(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility) { }; +static VKAPI_ATTR void VKAPI_CALL StubGetAccelerationStructureBuildSizesKHR(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) { }; +static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth) { }; +static VKAPI_ATTR VkResult VKAPI_CALL StubCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return VK_SUCCESS; }; +static VKAPI_ATTR VkResult VKAPI_CALL StubGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData) { return VK_SUCCESS; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdTraceRaysIndirectKHR(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress) { }; +static VKAPI_ATTR VkDeviceSize VKAPI_CALL StubGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) { return 0L; }; +static VKAPI_ATTR void VKAPI_CALL StubCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) { }; + + + +static inline void layer_init_device_dispatch_table(VkDevice device, VkLayerDispatchTable *table, PFN_vkGetDeviceProcAddr gpa) { + memset(table, 0, sizeof(*table)); + // Device function pointers + table->GetDeviceProcAddr = gpa; + table->DestroyDevice = (PFN_vkDestroyDevice) gpa(device, "vkDestroyDevice"); + table->GetDeviceQueue = (PFN_vkGetDeviceQueue) gpa(device, "vkGetDeviceQueue"); + table->QueueSubmit = (PFN_vkQueueSubmit) gpa(device, "vkQueueSubmit"); + table->QueueWaitIdle = (PFN_vkQueueWaitIdle) gpa(device, "vkQueueWaitIdle"); + table->DeviceWaitIdle = (PFN_vkDeviceWaitIdle) gpa(device, "vkDeviceWaitIdle"); + table->AllocateMemory = (PFN_vkAllocateMemory) gpa(device, "vkAllocateMemory"); + table->FreeMemory = (PFN_vkFreeMemory) gpa(device, "vkFreeMemory"); + table->MapMemory = (PFN_vkMapMemory) gpa(device, "vkMapMemory"); + table->UnmapMemory = (PFN_vkUnmapMemory) gpa(device, "vkUnmapMemory"); + 
table->FlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges) gpa(device, "vkFlushMappedMemoryRanges"); + table->InvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges) gpa(device, "vkInvalidateMappedMemoryRanges"); + table->GetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment) gpa(device, "vkGetDeviceMemoryCommitment"); + table->BindBufferMemory = (PFN_vkBindBufferMemory) gpa(device, "vkBindBufferMemory"); + table->BindImageMemory = (PFN_vkBindImageMemory) gpa(device, "vkBindImageMemory"); + table->GetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements) gpa(device, "vkGetBufferMemoryRequirements"); + table->GetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements) gpa(device, "vkGetImageMemoryRequirements"); + table->GetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements) gpa(device, "vkGetImageSparseMemoryRequirements"); + table->QueueBindSparse = (PFN_vkQueueBindSparse) gpa(device, "vkQueueBindSparse"); + table->CreateFence = (PFN_vkCreateFence) gpa(device, "vkCreateFence"); + table->DestroyFence = (PFN_vkDestroyFence) gpa(device, "vkDestroyFence"); + table->ResetFences = (PFN_vkResetFences) gpa(device, "vkResetFences"); + table->GetFenceStatus = (PFN_vkGetFenceStatus) gpa(device, "vkGetFenceStatus"); + table->WaitForFences = (PFN_vkWaitForFences) gpa(device, "vkWaitForFences"); + table->CreateSemaphore = (PFN_vkCreateSemaphore) gpa(device, "vkCreateSemaphore"); + table->DestroySemaphore = (PFN_vkDestroySemaphore) gpa(device, "vkDestroySemaphore"); + table->CreateEvent = (PFN_vkCreateEvent) gpa(device, "vkCreateEvent"); + table->DestroyEvent = (PFN_vkDestroyEvent) gpa(device, "vkDestroyEvent"); + table->GetEventStatus = (PFN_vkGetEventStatus) gpa(device, "vkGetEventStatus"); + table->SetEvent = (PFN_vkSetEvent) gpa(device, "vkSetEvent"); + table->ResetEvent = (PFN_vkResetEvent) gpa(device, "vkResetEvent"); + table->CreateQueryPool = (PFN_vkCreateQueryPool) gpa(device, "vkCreateQueryPool"); + table->DestroyQueryPool = (PFN_vkDestroyQueryPool) gpa(device, "vkDestroyQueryPool"); + table->GetQueryPoolResults = (PFN_vkGetQueryPoolResults) gpa(device, "vkGetQueryPoolResults"); + table->CreateBuffer = (PFN_vkCreateBuffer) gpa(device, "vkCreateBuffer"); + table->DestroyBuffer = (PFN_vkDestroyBuffer) gpa(device, "vkDestroyBuffer"); + table->CreateBufferView = (PFN_vkCreateBufferView) gpa(device, "vkCreateBufferView"); + table->DestroyBufferView = (PFN_vkDestroyBufferView) gpa(device, "vkDestroyBufferView"); + table->CreateImage = (PFN_vkCreateImage) gpa(device, "vkCreateImage"); + table->DestroyImage = (PFN_vkDestroyImage) gpa(device, "vkDestroyImage"); + table->GetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout) gpa(device, "vkGetImageSubresourceLayout"); + table->CreateImageView = (PFN_vkCreateImageView) gpa(device, "vkCreateImageView"); + table->DestroyImageView = (PFN_vkDestroyImageView) gpa(device, "vkDestroyImageView"); + table->CreateShaderModule = (PFN_vkCreateShaderModule) gpa(device, "vkCreateShaderModule"); + table->DestroyShaderModule = (PFN_vkDestroyShaderModule) gpa(device, "vkDestroyShaderModule"); + table->CreatePipelineCache = (PFN_vkCreatePipelineCache) gpa(device, "vkCreatePipelineCache"); + table->DestroyPipelineCache = (PFN_vkDestroyPipelineCache) gpa(device, "vkDestroyPipelineCache"); + table->GetPipelineCacheData = (PFN_vkGetPipelineCacheData) gpa(device, "vkGetPipelineCacheData"); + table->MergePipelineCaches = (PFN_vkMergePipelineCaches) gpa(device, "vkMergePipelineCaches"); + 
table->CreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines) gpa(device, "vkCreateGraphicsPipelines"); + table->CreateComputePipelines = (PFN_vkCreateComputePipelines) gpa(device, "vkCreateComputePipelines"); + table->DestroyPipeline = (PFN_vkDestroyPipeline) gpa(device, "vkDestroyPipeline"); + table->CreatePipelineLayout = (PFN_vkCreatePipelineLayout) gpa(device, "vkCreatePipelineLayout"); + table->DestroyPipelineLayout = (PFN_vkDestroyPipelineLayout) gpa(device, "vkDestroyPipelineLayout"); + table->CreateSampler = (PFN_vkCreateSampler) gpa(device, "vkCreateSampler"); + table->DestroySampler = (PFN_vkDestroySampler) gpa(device, "vkDestroySampler"); + table->CreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout) gpa(device, "vkCreateDescriptorSetLayout"); + table->DestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout) gpa(device, "vkDestroyDescriptorSetLayout"); + table->CreateDescriptorPool = (PFN_vkCreateDescriptorPool) gpa(device, "vkCreateDescriptorPool"); + table->DestroyDescriptorPool = (PFN_vkDestroyDescriptorPool) gpa(device, "vkDestroyDescriptorPool"); + table->ResetDescriptorPool = (PFN_vkResetDescriptorPool) gpa(device, "vkResetDescriptorPool"); + table->AllocateDescriptorSets = (PFN_vkAllocateDescriptorSets) gpa(device, "vkAllocateDescriptorSets"); + table->FreeDescriptorSets = (PFN_vkFreeDescriptorSets) gpa(device, "vkFreeDescriptorSets"); + table->UpdateDescriptorSets = (PFN_vkUpdateDescriptorSets) gpa(device, "vkUpdateDescriptorSets"); + table->CreateFramebuffer = (PFN_vkCreateFramebuffer) gpa(device, "vkCreateFramebuffer"); + table->DestroyFramebuffer = (PFN_vkDestroyFramebuffer) gpa(device, "vkDestroyFramebuffer"); + table->CreateRenderPass = (PFN_vkCreateRenderPass) gpa(device, "vkCreateRenderPass"); + table->DestroyRenderPass = (PFN_vkDestroyRenderPass) gpa(device, "vkDestroyRenderPass"); + table->GetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity) gpa(device, "vkGetRenderAreaGranularity"); + table->CreateCommandPool = (PFN_vkCreateCommandPool) gpa(device, "vkCreateCommandPool"); + table->DestroyCommandPool = (PFN_vkDestroyCommandPool) gpa(device, "vkDestroyCommandPool"); + table->ResetCommandPool = (PFN_vkResetCommandPool) gpa(device, "vkResetCommandPool"); + table->AllocateCommandBuffers = (PFN_vkAllocateCommandBuffers) gpa(device, "vkAllocateCommandBuffers"); + table->FreeCommandBuffers = (PFN_vkFreeCommandBuffers) gpa(device, "vkFreeCommandBuffers"); + table->BeginCommandBuffer = (PFN_vkBeginCommandBuffer) gpa(device, "vkBeginCommandBuffer"); + table->EndCommandBuffer = (PFN_vkEndCommandBuffer) gpa(device, "vkEndCommandBuffer"); + table->ResetCommandBuffer = (PFN_vkResetCommandBuffer) gpa(device, "vkResetCommandBuffer"); + table->CmdBindPipeline = (PFN_vkCmdBindPipeline) gpa(device, "vkCmdBindPipeline"); + table->CmdSetViewport = (PFN_vkCmdSetViewport) gpa(device, "vkCmdSetViewport"); + table->CmdSetScissor = (PFN_vkCmdSetScissor) gpa(device, "vkCmdSetScissor"); + table->CmdSetLineWidth = (PFN_vkCmdSetLineWidth) gpa(device, "vkCmdSetLineWidth"); + table->CmdSetDepthBias = (PFN_vkCmdSetDepthBias) gpa(device, "vkCmdSetDepthBias"); + table->CmdSetBlendConstants = (PFN_vkCmdSetBlendConstants) gpa(device, "vkCmdSetBlendConstants"); + table->CmdSetDepthBounds = (PFN_vkCmdSetDepthBounds) gpa(device, "vkCmdSetDepthBounds"); + table->CmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask) gpa(device, "vkCmdSetStencilCompareMask"); + table->CmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask) gpa(device, "vkCmdSetStencilWriteMask"); + 
table->CmdSetStencilReference = (PFN_vkCmdSetStencilReference) gpa(device, "vkCmdSetStencilReference"); + table->CmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets) gpa(device, "vkCmdBindDescriptorSets"); + table->CmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer) gpa(device, "vkCmdBindIndexBuffer"); + table->CmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers) gpa(device, "vkCmdBindVertexBuffers"); + table->CmdDraw = (PFN_vkCmdDraw) gpa(device, "vkCmdDraw"); + table->CmdDrawIndexed = (PFN_vkCmdDrawIndexed) gpa(device, "vkCmdDrawIndexed"); + table->CmdDrawIndirect = (PFN_vkCmdDrawIndirect) gpa(device, "vkCmdDrawIndirect"); + table->CmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect) gpa(device, "vkCmdDrawIndexedIndirect"); + table->CmdDispatch = (PFN_vkCmdDispatch) gpa(device, "vkCmdDispatch"); + table->CmdDispatchIndirect = (PFN_vkCmdDispatchIndirect) gpa(device, "vkCmdDispatchIndirect"); + table->CmdCopyBuffer = (PFN_vkCmdCopyBuffer) gpa(device, "vkCmdCopyBuffer"); + table->CmdCopyImage = (PFN_vkCmdCopyImage) gpa(device, "vkCmdCopyImage"); + table->CmdBlitImage = (PFN_vkCmdBlitImage) gpa(device, "vkCmdBlitImage"); + table->CmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage) gpa(device, "vkCmdCopyBufferToImage"); + table->CmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer) gpa(device, "vkCmdCopyImageToBuffer"); + table->CmdUpdateBuffer = (PFN_vkCmdUpdateBuffer) gpa(device, "vkCmdUpdateBuffer"); + table->CmdFillBuffer = (PFN_vkCmdFillBuffer) gpa(device, "vkCmdFillBuffer"); + table->CmdClearColorImage = (PFN_vkCmdClearColorImage) gpa(device, "vkCmdClearColorImage"); + table->CmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage) gpa(device, "vkCmdClearDepthStencilImage"); + table->CmdClearAttachments = (PFN_vkCmdClearAttachments) gpa(device, "vkCmdClearAttachments"); + table->CmdResolveImage = (PFN_vkCmdResolveImage) gpa(device, "vkCmdResolveImage"); + table->CmdSetEvent = (PFN_vkCmdSetEvent) gpa(device, "vkCmdSetEvent"); + table->CmdResetEvent = (PFN_vkCmdResetEvent) gpa(device, "vkCmdResetEvent"); + table->CmdWaitEvents = (PFN_vkCmdWaitEvents) gpa(device, "vkCmdWaitEvents"); + table->CmdPipelineBarrier = (PFN_vkCmdPipelineBarrier) gpa(device, "vkCmdPipelineBarrier"); + table->CmdBeginQuery = (PFN_vkCmdBeginQuery) gpa(device, "vkCmdBeginQuery"); + table->CmdEndQuery = (PFN_vkCmdEndQuery) gpa(device, "vkCmdEndQuery"); + table->CmdResetQueryPool = (PFN_vkCmdResetQueryPool) gpa(device, "vkCmdResetQueryPool"); + table->CmdWriteTimestamp = (PFN_vkCmdWriteTimestamp) gpa(device, "vkCmdWriteTimestamp"); + table->CmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults) gpa(device, "vkCmdCopyQueryPoolResults"); + table->CmdPushConstants = (PFN_vkCmdPushConstants) gpa(device, "vkCmdPushConstants"); + table->CmdBeginRenderPass = (PFN_vkCmdBeginRenderPass) gpa(device, "vkCmdBeginRenderPass"); + table->CmdNextSubpass = (PFN_vkCmdNextSubpass) gpa(device, "vkCmdNextSubpass"); + table->CmdEndRenderPass = (PFN_vkCmdEndRenderPass) gpa(device, "vkCmdEndRenderPass"); + table->CmdExecuteCommands = (PFN_vkCmdExecuteCommands) gpa(device, "vkCmdExecuteCommands"); + table->BindBufferMemory2 = (PFN_vkBindBufferMemory2) gpa(device, "vkBindBufferMemory2"); + table->BindImageMemory2 = (PFN_vkBindImageMemory2) gpa(device, "vkBindImageMemory2"); + table->GetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures) gpa(device, "vkGetDeviceGroupPeerMemoryFeatures"); + table->CmdSetDeviceMask = (PFN_vkCmdSetDeviceMask) gpa(device, "vkCmdSetDeviceMask"); + table->CmdDispatchBase = 
(PFN_vkCmdDispatchBase) gpa(device, "vkCmdDispatchBase"); + table->GetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2) gpa(device, "vkGetImageMemoryRequirements2"); + table->GetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2) gpa(device, "vkGetBufferMemoryRequirements2"); + table->GetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2) gpa(device, "vkGetImageSparseMemoryRequirements2"); + table->TrimCommandPool = (PFN_vkTrimCommandPool) gpa(device, "vkTrimCommandPool"); + table->GetDeviceQueue2 = (PFN_vkGetDeviceQueue2) gpa(device, "vkGetDeviceQueue2"); + table->CreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion) gpa(device, "vkCreateSamplerYcbcrConversion"); + table->DestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion) gpa(device, "vkDestroySamplerYcbcrConversion"); + table->CreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate) gpa(device, "vkCreateDescriptorUpdateTemplate"); + table->DestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate) gpa(device, "vkDestroyDescriptorUpdateTemplate"); + table->UpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate) gpa(device, "vkUpdateDescriptorSetWithTemplate"); + table->GetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport) gpa(device, "vkGetDescriptorSetLayoutSupport"); + table->CmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount) gpa(device, "vkCmdDrawIndirectCount"); + table->CmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount) gpa(device, "vkCmdDrawIndexedIndirectCount"); + table->CreateRenderPass2 = (PFN_vkCreateRenderPass2) gpa(device, "vkCreateRenderPass2"); + table->CmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2) gpa(device, "vkCmdBeginRenderPass2"); + table->CmdNextSubpass2 = (PFN_vkCmdNextSubpass2) gpa(device, "vkCmdNextSubpass2"); + table->CmdEndRenderPass2 = (PFN_vkCmdEndRenderPass2) gpa(device, "vkCmdEndRenderPass2"); + table->ResetQueryPool = (PFN_vkResetQueryPool) gpa(device, "vkResetQueryPool"); + table->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue) gpa(device, "vkGetSemaphoreCounterValue"); + table->WaitSemaphores = (PFN_vkWaitSemaphores) gpa(device, "vkWaitSemaphores"); + table->SignalSemaphore = (PFN_vkSignalSemaphore) gpa(device, "vkSignalSemaphore"); + table->GetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress) gpa(device, "vkGetBufferDeviceAddress"); + table->GetBufferOpaqueCaptureAddress = (PFN_vkGetBufferOpaqueCaptureAddress) gpa(device, "vkGetBufferOpaqueCaptureAddress"); + table->GetDeviceMemoryOpaqueCaptureAddress = (PFN_vkGetDeviceMemoryOpaqueCaptureAddress) gpa(device, "vkGetDeviceMemoryOpaqueCaptureAddress"); + table->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR"); + if (table->CreateSwapchainKHR == nullptr) { table->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)StubCreateSwapchainKHR; } + table->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR"); + if (table->DestroySwapchainKHR == nullptr) { table->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)StubDestroySwapchainKHR; } + table->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR"); + if (table->GetSwapchainImagesKHR == nullptr) { table->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)StubGetSwapchainImagesKHR; } + table->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR"); + if (table->AcquireNextImageKHR == nullptr) { 
table->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)StubAcquireNextImageKHR; } + table->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR"); + if (table->QueuePresentKHR == nullptr) { table->QueuePresentKHR = (PFN_vkQueuePresentKHR)StubQueuePresentKHR; } + table->GetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR) gpa(device, "vkGetDeviceGroupPresentCapabilitiesKHR"); + if (table->GetDeviceGroupPresentCapabilitiesKHR == nullptr) { table->GetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR)StubGetDeviceGroupPresentCapabilitiesKHR; } + table->GetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR) gpa(device, "vkGetDeviceGroupSurfacePresentModesKHR"); + if (table->GetDeviceGroupSurfacePresentModesKHR == nullptr) { table->GetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR)StubGetDeviceGroupSurfacePresentModesKHR; } + table->AcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR) gpa(device, "vkAcquireNextImage2KHR"); + if (table->AcquireNextImage2KHR == nullptr) { table->AcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR)StubAcquireNextImage2KHR; } + table->CreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR) gpa(device, "vkCreateSharedSwapchainsKHR"); + if (table->CreateSharedSwapchainsKHR == nullptr) { table->CreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR)StubCreateSharedSwapchainsKHR; } + table->GetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR) gpa(device, "vkGetDeviceGroupPeerMemoryFeaturesKHR"); + if (table->GetDeviceGroupPeerMemoryFeaturesKHR == nullptr) { table->GetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)StubGetDeviceGroupPeerMemoryFeaturesKHR; } + table->CmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR) gpa(device, "vkCmdSetDeviceMaskKHR"); + if (table->CmdSetDeviceMaskKHR == nullptr) { table->CmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR)StubCmdSetDeviceMaskKHR; } + table->CmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR) gpa(device, "vkCmdDispatchBaseKHR"); + if (table->CmdDispatchBaseKHR == nullptr) { table->CmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)StubCmdDispatchBaseKHR; } + table->TrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR) gpa(device, "vkTrimCommandPoolKHR"); + if (table->TrimCommandPoolKHR == nullptr) { table->TrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR)StubTrimCommandPoolKHR; } +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR) gpa(device, "vkGetMemoryWin32HandleKHR"); + if (table->GetMemoryWin32HandleKHR == nullptr) { table->GetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)StubGetMemoryWin32HandleKHR; } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR) gpa(device, "vkGetMemoryWin32HandlePropertiesKHR"); + if (table->GetMemoryWin32HandlePropertiesKHR == nullptr) { table->GetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR)StubGetMemoryWin32HandlePropertiesKHR; } +#endif // VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryFdKHR = (PFN_vkGetMemoryFdKHR) gpa(device, "vkGetMemoryFdKHR"); + if (table->GetMemoryFdKHR == nullptr) { table->GetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)StubGetMemoryFdKHR; } + table->GetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR) gpa(device, "vkGetMemoryFdPropertiesKHR"); + if (table->GetMemoryFdPropertiesKHR == 
nullptr) { table->GetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)StubGetMemoryFdPropertiesKHR; } +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->ImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR) gpa(device, "vkImportSemaphoreWin32HandleKHR"); + if (table->ImportSemaphoreWin32HandleKHR == nullptr) { table->ImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)StubImportSemaphoreWin32HandleKHR; } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR) gpa(device, "vkGetSemaphoreWin32HandleKHR"); + if (table->GetSemaphoreWin32HandleKHR == nullptr) { table->GetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)StubGetSemaphoreWin32HandleKHR; } +#endif // VK_USE_PLATFORM_WIN32_KHR + table->ImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR) gpa(device, "vkImportSemaphoreFdKHR"); + if (table->ImportSemaphoreFdKHR == nullptr) { table->ImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)StubImportSemaphoreFdKHR; } + table->GetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR) gpa(device, "vkGetSemaphoreFdKHR"); + if (table->GetSemaphoreFdKHR == nullptr) { table->GetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)StubGetSemaphoreFdKHR; } + table->CmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR) gpa(device, "vkCmdPushDescriptorSetKHR"); + if (table->CmdPushDescriptorSetKHR == nullptr) { table->CmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)StubCmdPushDescriptorSetKHR; } + table->CmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR) gpa(device, "vkCmdPushDescriptorSetWithTemplateKHR"); + if (table->CmdPushDescriptorSetWithTemplateKHR == nullptr) { table->CmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)StubCmdPushDescriptorSetWithTemplateKHR; } + table->CreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR) gpa(device, "vkCreateDescriptorUpdateTemplateKHR"); + if (table->CreateDescriptorUpdateTemplateKHR == nullptr) { table->CreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)StubCreateDescriptorUpdateTemplateKHR; } + table->DestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR) gpa(device, "vkDestroyDescriptorUpdateTemplateKHR"); + if (table->DestroyDescriptorUpdateTemplateKHR == nullptr) { table->DestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)StubDestroyDescriptorUpdateTemplateKHR; } + table->UpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR) gpa(device, "vkUpdateDescriptorSetWithTemplateKHR"); + if (table->UpdateDescriptorSetWithTemplateKHR == nullptr) { table->UpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)StubUpdateDescriptorSetWithTemplateKHR; } + table->CreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR) gpa(device, "vkCreateRenderPass2KHR"); + if (table->CreateRenderPass2KHR == nullptr) { table->CreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)StubCreateRenderPass2KHR; } + table->CmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR) gpa(device, "vkCmdBeginRenderPass2KHR"); + if (table->CmdBeginRenderPass2KHR == nullptr) { table->CmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)StubCmdBeginRenderPass2KHR; } + table->CmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR) gpa(device, "vkCmdNextSubpass2KHR"); + if (table->CmdNextSubpass2KHR == nullptr) { table->CmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)StubCmdNextSubpass2KHR; 
} + table->CmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR) gpa(device, "vkCmdEndRenderPass2KHR"); + if (table->CmdEndRenderPass2KHR == nullptr) { table->CmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)StubCmdEndRenderPass2KHR; } + table->GetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR) gpa(device, "vkGetSwapchainStatusKHR"); + if (table->GetSwapchainStatusKHR == nullptr) { table->GetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR)StubGetSwapchainStatusKHR; } +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->ImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR) gpa(device, "vkImportFenceWin32HandleKHR"); + if (table->ImportFenceWin32HandleKHR == nullptr) { table->ImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)StubImportFenceWin32HandleKHR; } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR) gpa(device, "vkGetFenceWin32HandleKHR"); + if (table->GetFenceWin32HandleKHR == nullptr) { table->GetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)StubGetFenceWin32HandleKHR; } +#endif // VK_USE_PLATFORM_WIN32_KHR + table->ImportFenceFdKHR = (PFN_vkImportFenceFdKHR) gpa(device, "vkImportFenceFdKHR"); + if (table->ImportFenceFdKHR == nullptr) { table->ImportFenceFdKHR = (PFN_vkImportFenceFdKHR)StubImportFenceFdKHR; } + table->GetFenceFdKHR = (PFN_vkGetFenceFdKHR) gpa(device, "vkGetFenceFdKHR"); + if (table->GetFenceFdKHR == nullptr) { table->GetFenceFdKHR = (PFN_vkGetFenceFdKHR)StubGetFenceFdKHR; } + table->AcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR) gpa(device, "vkAcquireProfilingLockKHR"); + if (table->AcquireProfilingLockKHR == nullptr) { table->AcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)StubAcquireProfilingLockKHR; } + table->ReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR) gpa(device, "vkReleaseProfilingLockKHR"); + if (table->ReleaseProfilingLockKHR == nullptr) { table->ReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)StubReleaseProfilingLockKHR; } + table->GetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR) gpa(device, "vkGetImageMemoryRequirements2KHR"); + if (table->GetImageMemoryRequirements2KHR == nullptr) { table->GetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)StubGetImageMemoryRequirements2KHR; } + table->GetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR) gpa(device, "vkGetBufferMemoryRequirements2KHR"); + if (table->GetBufferMemoryRequirements2KHR == nullptr) { table->GetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)StubGetBufferMemoryRequirements2KHR; } + table->GetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR) gpa(device, "vkGetImageSparseMemoryRequirements2KHR"); + if (table->GetImageSparseMemoryRequirements2KHR == nullptr) { table->GetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)StubGetImageSparseMemoryRequirements2KHR; } + table->CreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR) gpa(device, "vkCreateSamplerYcbcrConversionKHR"); + if (table->CreateSamplerYcbcrConversionKHR == nullptr) { table->CreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)StubCreateSamplerYcbcrConversionKHR; } + table->DestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR) gpa(device, "vkDestroySamplerYcbcrConversionKHR"); + if (table->DestroySamplerYcbcrConversionKHR == nullptr) { table->DestroySamplerYcbcrConversionKHR = 
(PFN_vkDestroySamplerYcbcrConversionKHR)StubDestroySamplerYcbcrConversionKHR; } + table->BindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR) gpa(device, "vkBindBufferMemory2KHR"); + if (table->BindBufferMemory2KHR == nullptr) { table->BindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)StubBindBufferMemory2KHR; } + table->BindImageMemory2KHR = (PFN_vkBindImageMemory2KHR) gpa(device, "vkBindImageMemory2KHR"); + if (table->BindImageMemory2KHR == nullptr) { table->BindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)StubBindImageMemory2KHR; } + table->GetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR) gpa(device, "vkGetDescriptorSetLayoutSupportKHR"); + if (table->GetDescriptorSetLayoutSupportKHR == nullptr) { table->GetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)StubGetDescriptorSetLayoutSupportKHR; } + table->CmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR) gpa(device, "vkCmdDrawIndirectCountKHR"); + if (table->CmdDrawIndirectCountKHR == nullptr) { table->CmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)StubCmdDrawIndirectCountKHR; } + table->CmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR) gpa(device, "vkCmdDrawIndexedIndirectCountKHR"); + if (table->CmdDrawIndexedIndirectCountKHR == nullptr) { table->CmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)StubCmdDrawIndexedIndirectCountKHR; } + table->GetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR) gpa(device, "vkGetSemaphoreCounterValueKHR"); + if (table->GetSemaphoreCounterValueKHR == nullptr) { table->GetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR)StubGetSemaphoreCounterValueKHR; } + table->WaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR) gpa(device, "vkWaitSemaphoresKHR"); + if (table->WaitSemaphoresKHR == nullptr) { table->WaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)StubWaitSemaphoresKHR; } + table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR) gpa(device, "vkSignalSemaphoreKHR"); + if (table->SignalSemaphoreKHR == nullptr) { table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)StubSignalSemaphoreKHR; } + table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR) gpa(device, "vkCmdSetFragmentShadingRateKHR"); + if (table->CmdSetFragmentShadingRateKHR == nullptr) { table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)StubCmdSetFragmentShadingRateKHR; } + table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR) gpa(device, "vkGetBufferDeviceAddressKHR"); + if (table->GetBufferDeviceAddressKHR == nullptr) { table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)StubGetBufferDeviceAddressKHR; } + table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR) gpa(device, "vkGetBufferOpaqueCaptureAddressKHR"); + if (table->GetBufferOpaqueCaptureAddressKHR == nullptr) { table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)StubGetBufferOpaqueCaptureAddressKHR; } + table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR) gpa(device, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); + if (table->GetDeviceMemoryOpaqueCaptureAddressKHR == nullptr) { table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)StubGetDeviceMemoryOpaqueCaptureAddressKHR; } + table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR) gpa(device, "vkCreateDeferredOperationKHR"); + if (table->CreateDeferredOperationKHR == nullptr) { 
table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)StubCreateDeferredOperationKHR; } + table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR) gpa(device, "vkDestroyDeferredOperationKHR"); + if (table->DestroyDeferredOperationKHR == nullptr) { table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)StubDestroyDeferredOperationKHR; } + table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR) gpa(device, "vkGetDeferredOperationMaxConcurrencyKHR"); + if (table->GetDeferredOperationMaxConcurrencyKHR == nullptr) { table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)StubGetDeferredOperationMaxConcurrencyKHR; } + table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR) gpa(device, "vkGetDeferredOperationResultKHR"); + if (table->GetDeferredOperationResultKHR == nullptr) { table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)StubGetDeferredOperationResultKHR; } + table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR) gpa(device, "vkDeferredOperationJoinKHR"); + if (table->DeferredOperationJoinKHR == nullptr) { table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)StubDeferredOperationJoinKHR; } + table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR) gpa(device, "vkGetPipelineExecutablePropertiesKHR"); + if (table->GetPipelineExecutablePropertiesKHR == nullptr) { table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)StubGetPipelineExecutablePropertiesKHR; } + table->GetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR) gpa(device, "vkGetPipelineExecutableStatisticsKHR"); + if (table->GetPipelineExecutableStatisticsKHR == nullptr) { table->GetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR)StubGetPipelineExecutableStatisticsKHR; } + table->GetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR) gpa(device, "vkGetPipelineExecutableInternalRepresentationsKHR"); + if (table->GetPipelineExecutableInternalRepresentationsKHR == nullptr) { table->GetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)StubGetPipelineExecutableInternalRepresentationsKHR; } + table->CmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR) gpa(device, "vkCmdCopyBuffer2KHR"); + if (table->CmdCopyBuffer2KHR == nullptr) { table->CmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR)StubCmdCopyBuffer2KHR; } + table->CmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR) gpa(device, "vkCmdCopyImage2KHR"); + if (table->CmdCopyImage2KHR == nullptr) { table->CmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR)StubCmdCopyImage2KHR; } + table->CmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR) gpa(device, "vkCmdCopyBufferToImage2KHR"); + if (table->CmdCopyBufferToImage2KHR == nullptr) { table->CmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR)StubCmdCopyBufferToImage2KHR; } + table->CmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR) gpa(device, "vkCmdCopyImageToBuffer2KHR"); + if (table->CmdCopyImageToBuffer2KHR == nullptr) { table->CmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR)StubCmdCopyImageToBuffer2KHR; } + table->CmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR) gpa(device, "vkCmdBlitImage2KHR"); + if (table->CmdBlitImage2KHR == nullptr) { table->CmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR)StubCmdBlitImage2KHR; } + 
table->CmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR) gpa(device, "vkCmdResolveImage2KHR"); + if (table->CmdResolveImage2KHR == nullptr) { table->CmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR)StubCmdResolveImage2KHR; } + table->DebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT) gpa(device, "vkDebugMarkerSetObjectTagEXT"); + if (table->DebugMarkerSetObjectTagEXT == nullptr) { table->DebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)StubDebugMarkerSetObjectTagEXT; } + table->DebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT) gpa(device, "vkDebugMarkerSetObjectNameEXT"); + if (table->DebugMarkerSetObjectNameEXT == nullptr) { table->DebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)StubDebugMarkerSetObjectNameEXT; } + table->CmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT) gpa(device, "vkCmdDebugMarkerBeginEXT"); + if (table->CmdDebugMarkerBeginEXT == nullptr) { table->CmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT)StubCmdDebugMarkerBeginEXT; } + table->CmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT) gpa(device, "vkCmdDebugMarkerEndEXT"); + if (table->CmdDebugMarkerEndEXT == nullptr) { table->CmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT)StubCmdDebugMarkerEndEXT; } + table->CmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT) gpa(device, "vkCmdDebugMarkerInsertEXT"); + if (table->CmdDebugMarkerInsertEXT == nullptr) { table->CmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT)StubCmdDebugMarkerInsertEXT; } + table->CmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT) gpa(device, "vkCmdBindTransformFeedbackBuffersEXT"); + if (table->CmdBindTransformFeedbackBuffersEXT == nullptr) { table->CmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT)StubCmdBindTransformFeedbackBuffersEXT; } + table->CmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT) gpa(device, "vkCmdBeginTransformFeedbackEXT"); + if (table->CmdBeginTransformFeedbackEXT == nullptr) { table->CmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT)StubCmdBeginTransformFeedbackEXT; } + table->CmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT) gpa(device, "vkCmdEndTransformFeedbackEXT"); + if (table->CmdEndTransformFeedbackEXT == nullptr) { table->CmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT)StubCmdEndTransformFeedbackEXT; } + table->CmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT) gpa(device, "vkCmdBeginQueryIndexedEXT"); + if (table->CmdBeginQueryIndexedEXT == nullptr) { table->CmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT)StubCmdBeginQueryIndexedEXT; } + table->CmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT) gpa(device, "vkCmdEndQueryIndexedEXT"); + if (table->CmdEndQueryIndexedEXT == nullptr) { table->CmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT)StubCmdEndQueryIndexedEXT; } + table->CmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT) gpa(device, "vkCmdDrawIndirectByteCountEXT"); + if (table->CmdDrawIndirectByteCountEXT == nullptr) { table->CmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT)StubCmdDrawIndirectByteCountEXT; } + table->GetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX) gpa(device, "vkGetImageViewHandleNVX"); + if (table->GetImageViewHandleNVX == nullptr) { table->GetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX)StubGetImageViewHandleNVX; } + table->GetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX) gpa(device, "vkGetImageViewAddressNVX"); + if 
(table->GetImageViewAddressNVX == nullptr) { table->GetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX)StubGetImageViewAddressNVX; } + table->CmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD) gpa(device, "vkCmdDrawIndirectCountAMD"); + if (table->CmdDrawIndirectCountAMD == nullptr) { table->CmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)StubCmdDrawIndirectCountAMD; } + table->CmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD) gpa(device, "vkCmdDrawIndexedIndirectCountAMD"); + if (table->CmdDrawIndexedIndirectCountAMD == nullptr) { table->CmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)StubCmdDrawIndexedIndirectCountAMD; } + table->GetShaderInfoAMD = (PFN_vkGetShaderInfoAMD) gpa(device, "vkGetShaderInfoAMD"); + if (table->GetShaderInfoAMD == nullptr) { table->GetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)StubGetShaderInfoAMD; } +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV) gpa(device, "vkGetMemoryWin32HandleNV"); + if (table->GetMemoryWin32HandleNV == nullptr) { table->GetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)StubGetMemoryWin32HandleNV; } +#endif // VK_USE_PLATFORM_WIN32_KHR + table->CmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT) gpa(device, "vkCmdBeginConditionalRenderingEXT"); + if (table->CmdBeginConditionalRenderingEXT == nullptr) { table->CmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)StubCmdBeginConditionalRenderingEXT; } + table->CmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT) gpa(device, "vkCmdEndConditionalRenderingEXT"); + if (table->CmdEndConditionalRenderingEXT == nullptr) { table->CmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT)StubCmdEndConditionalRenderingEXT; } + table->CmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV) gpa(device, "vkCmdSetViewportWScalingNV"); + if (table->CmdSetViewportWScalingNV == nullptr) { table->CmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV)StubCmdSetViewportWScalingNV; } + table->DisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT) gpa(device, "vkDisplayPowerControlEXT"); + if (table->DisplayPowerControlEXT == nullptr) { table->DisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT)StubDisplayPowerControlEXT; } + table->RegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT) gpa(device, "vkRegisterDeviceEventEXT"); + if (table->RegisterDeviceEventEXT == nullptr) { table->RegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT)StubRegisterDeviceEventEXT; } + table->RegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT) gpa(device, "vkRegisterDisplayEventEXT"); + if (table->RegisterDisplayEventEXT == nullptr) { table->RegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT)StubRegisterDisplayEventEXT; } + table->GetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT) gpa(device, "vkGetSwapchainCounterEXT"); + if (table->GetSwapchainCounterEXT == nullptr) { table->GetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT)StubGetSwapchainCounterEXT; } + table->GetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE) gpa(device, "vkGetRefreshCycleDurationGOOGLE"); + if (table->GetRefreshCycleDurationGOOGLE == nullptr) { table->GetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE)StubGetRefreshCycleDurationGOOGLE; } + table->GetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE) gpa(device, "vkGetPastPresentationTimingGOOGLE"); + if 
(table->GetPastPresentationTimingGOOGLE == nullptr) { table->GetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE)StubGetPastPresentationTimingGOOGLE; } + table->CmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT) gpa(device, "vkCmdSetDiscardRectangleEXT"); + if (table->CmdSetDiscardRectangleEXT == nullptr) { table->CmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT)StubCmdSetDiscardRectangleEXT; } + table->SetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT) gpa(device, "vkSetHdrMetadataEXT"); + if (table->SetHdrMetadataEXT == nullptr) { table->SetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)StubSetHdrMetadataEXT; } + table->SetDebugUtilsObjectNameEXT = (PFN_vkSetDebugUtilsObjectNameEXT) gpa(device, "vkSetDebugUtilsObjectNameEXT"); + table->SetDebugUtilsObjectTagEXT = (PFN_vkSetDebugUtilsObjectTagEXT) gpa(device, "vkSetDebugUtilsObjectTagEXT"); + table->QueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT) gpa(device, "vkQueueBeginDebugUtilsLabelEXT"); + table->QueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT) gpa(device, "vkQueueEndDebugUtilsLabelEXT"); + table->QueueInsertDebugUtilsLabelEXT = (PFN_vkQueueInsertDebugUtilsLabelEXT) gpa(device, "vkQueueInsertDebugUtilsLabelEXT"); + table->CmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT) gpa(device, "vkCmdBeginDebugUtilsLabelEXT"); + table->CmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT) gpa(device, "vkCmdEndDebugUtilsLabelEXT"); + table->CmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT) gpa(device, "vkCmdInsertDebugUtilsLabelEXT"); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + table->GetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID) gpa(device, "vkGetAndroidHardwareBufferPropertiesANDROID"); + if (table->GetAndroidHardwareBufferPropertiesANDROID == nullptr) { table->GetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)StubGetAndroidHardwareBufferPropertiesANDROID; } +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + table->GetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID) gpa(device, "vkGetMemoryAndroidHardwareBufferANDROID"); + if (table->GetMemoryAndroidHardwareBufferANDROID == nullptr) { table->GetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)StubGetMemoryAndroidHardwareBufferANDROID; } +#endif // VK_USE_PLATFORM_ANDROID_KHR + table->CmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT) gpa(device, "vkCmdSetSampleLocationsEXT"); + if (table->CmdSetSampleLocationsEXT == nullptr) { table->CmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT)StubCmdSetSampleLocationsEXT; } + table->GetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT) gpa(device, "vkGetImageDrmFormatModifierPropertiesEXT"); + if (table->GetImageDrmFormatModifierPropertiesEXT == nullptr) { table->GetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT)StubGetImageDrmFormatModifierPropertiesEXT; } + table->CreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT) gpa(device, "vkCreateValidationCacheEXT"); + if (table->CreateValidationCacheEXT == nullptr) { table->CreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)StubCreateValidationCacheEXT; } + table->DestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT) gpa(device, "vkDestroyValidationCacheEXT"); + if (table->DestroyValidationCacheEXT == nullptr) { 
table->DestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)StubDestroyValidationCacheEXT; } + table->MergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT) gpa(device, "vkMergeValidationCachesEXT"); + if (table->MergeValidationCachesEXT == nullptr) { table->MergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)StubMergeValidationCachesEXT; } + table->GetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT) gpa(device, "vkGetValidationCacheDataEXT"); + if (table->GetValidationCacheDataEXT == nullptr) { table->GetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)StubGetValidationCacheDataEXT; } + table->CmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV) gpa(device, "vkCmdBindShadingRateImageNV"); + if (table->CmdBindShadingRateImageNV == nullptr) { table->CmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)StubCmdBindShadingRateImageNV; } + table->CmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV) gpa(device, "vkCmdSetViewportShadingRatePaletteNV"); + if (table->CmdSetViewportShadingRatePaletteNV == nullptr) { table->CmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)StubCmdSetViewportShadingRatePaletteNV; } + table->CmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV) gpa(device, "vkCmdSetCoarseSampleOrderNV"); + if (table->CmdSetCoarseSampleOrderNV == nullptr) { table->CmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)StubCmdSetCoarseSampleOrderNV; } + table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV) gpa(device, "vkCreateAccelerationStructureNV"); + if (table->CreateAccelerationStructureNV == nullptr) { table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)StubCreateAccelerationStructureNV; } + table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV) gpa(device, "vkDestroyAccelerationStructureNV"); + if (table->DestroyAccelerationStructureNV == nullptr) { table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)StubDestroyAccelerationStructureNV; } + table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV) gpa(device, "vkGetAccelerationStructureMemoryRequirementsNV"); + if (table->GetAccelerationStructureMemoryRequirementsNV == nullptr) { table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)StubGetAccelerationStructureMemoryRequirementsNV; } + table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV) gpa(device, "vkBindAccelerationStructureMemoryNV"); + if (table->BindAccelerationStructureMemoryNV == nullptr) { table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)StubBindAccelerationStructureMemoryNV; } + table->CmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV) gpa(device, "vkCmdBuildAccelerationStructureNV"); + if (table->CmdBuildAccelerationStructureNV == nullptr) { table->CmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)StubCmdBuildAccelerationStructureNV; } + table->CmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV) gpa(device, "vkCmdCopyAccelerationStructureNV"); + if (table->CmdCopyAccelerationStructureNV == nullptr) { table->CmdCopyAccelerationStructureNV = (PFN_vkCmdCopyAccelerationStructureNV)StubCmdCopyAccelerationStructureNV; } + table->CmdTraceRaysNV = (PFN_vkCmdTraceRaysNV) gpa(device, "vkCmdTraceRaysNV"); + if (table->CmdTraceRaysNV 
== nullptr) { table->CmdTraceRaysNV = (PFN_vkCmdTraceRaysNV)StubCmdTraceRaysNV; } + table->CreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV) gpa(device, "vkCreateRayTracingPipelinesNV"); + if (table->CreateRayTracingPipelinesNV == nullptr) { table->CreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)StubCreateRayTracingPipelinesNV; } + table->GetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR) gpa(device, "vkGetRayTracingShaderGroupHandlesKHR"); + if (table->GetRayTracingShaderGroupHandlesKHR == nullptr) { table->GetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)StubGetRayTracingShaderGroupHandlesKHR; } + table->GetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV) gpa(device, "vkGetRayTracingShaderGroupHandlesNV"); + if (table->GetRayTracingShaderGroupHandlesNV == nullptr) { table->GetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)StubGetRayTracingShaderGroupHandlesNV; } + table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV) gpa(device, "vkGetAccelerationStructureHandleNV"); + if (table->GetAccelerationStructureHandleNV == nullptr) { table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)StubGetAccelerationStructureHandleNV; } + table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesNV"); + if (table->CmdWriteAccelerationStructuresPropertiesNV == nullptr) { table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)StubCmdWriteAccelerationStructuresPropertiesNV; } + table->CompileDeferredNV = (PFN_vkCompileDeferredNV) gpa(device, "vkCompileDeferredNV"); + if (table->CompileDeferredNV == nullptr) { table->CompileDeferredNV = (PFN_vkCompileDeferredNV)StubCompileDeferredNV; } + table->GetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT) gpa(device, "vkGetMemoryHostPointerPropertiesEXT"); + if (table->GetMemoryHostPointerPropertiesEXT == nullptr) { table->GetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)StubGetMemoryHostPointerPropertiesEXT; } + table->CmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD) gpa(device, "vkCmdWriteBufferMarkerAMD"); + if (table->CmdWriteBufferMarkerAMD == nullptr) { table->CmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)StubCmdWriteBufferMarkerAMD; } + table->GetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT) gpa(device, "vkGetCalibratedTimestampsEXT"); + if (table->GetCalibratedTimestampsEXT == nullptr) { table->GetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT)StubGetCalibratedTimestampsEXT; } + table->CmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV) gpa(device, "vkCmdDrawMeshTasksNV"); + if (table->CmdDrawMeshTasksNV == nullptr) { table->CmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV)StubCmdDrawMeshTasksNV; } + table->CmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV) gpa(device, "vkCmdDrawMeshTasksIndirectNV"); + if (table->CmdDrawMeshTasksIndirectNV == nullptr) { table->CmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)StubCmdDrawMeshTasksIndirectNV; } + table->CmdDrawMeshTasksIndirectCountNV = (PFN_vkCmdDrawMeshTasksIndirectCountNV) gpa(device, "vkCmdDrawMeshTasksIndirectCountNV"); + if (table->CmdDrawMeshTasksIndirectCountNV == nullptr) { table->CmdDrawMeshTasksIndirectCountNV = 
(PFN_vkCmdDrawMeshTasksIndirectCountNV)StubCmdDrawMeshTasksIndirectCountNV; } + table->CmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV) gpa(device, "vkCmdSetExclusiveScissorNV"); + if (table->CmdSetExclusiveScissorNV == nullptr) { table->CmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)StubCmdSetExclusiveScissorNV; } + table->CmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV) gpa(device, "vkCmdSetCheckpointNV"); + if (table->CmdSetCheckpointNV == nullptr) { table->CmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)StubCmdSetCheckpointNV; } + table->GetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV) gpa(device, "vkGetQueueCheckpointDataNV"); + if (table->GetQueueCheckpointDataNV == nullptr) { table->GetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV)StubGetQueueCheckpointDataNV; } + table->InitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL) gpa(device, "vkInitializePerformanceApiINTEL"); + if (table->InitializePerformanceApiINTEL == nullptr) { table->InitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL)StubInitializePerformanceApiINTEL; } + table->UninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL) gpa(device, "vkUninitializePerformanceApiINTEL"); + if (table->UninitializePerformanceApiINTEL == nullptr) { table->UninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL)StubUninitializePerformanceApiINTEL; } + table->CmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL) gpa(device, "vkCmdSetPerformanceMarkerINTEL"); + if (table->CmdSetPerformanceMarkerINTEL == nullptr) { table->CmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL)StubCmdSetPerformanceMarkerINTEL; } + table->CmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL) gpa(device, "vkCmdSetPerformanceStreamMarkerINTEL"); + if (table->CmdSetPerformanceStreamMarkerINTEL == nullptr) { table->CmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL)StubCmdSetPerformanceStreamMarkerINTEL; } + table->CmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL) gpa(device, "vkCmdSetPerformanceOverrideINTEL"); + if (table->CmdSetPerformanceOverrideINTEL == nullptr) { table->CmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL)StubCmdSetPerformanceOverrideINTEL; } + table->AcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL) gpa(device, "vkAcquirePerformanceConfigurationINTEL"); + if (table->AcquirePerformanceConfigurationINTEL == nullptr) { table->AcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL)StubAcquirePerformanceConfigurationINTEL; } + table->ReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL) gpa(device, "vkReleasePerformanceConfigurationINTEL"); + if (table->ReleasePerformanceConfigurationINTEL == nullptr) { table->ReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL)StubReleasePerformanceConfigurationINTEL; } + table->QueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL) gpa(device, "vkQueueSetPerformanceConfigurationINTEL"); + if (table->QueueSetPerformanceConfigurationINTEL == nullptr) { table->QueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL)StubQueueSetPerformanceConfigurationINTEL; } + table->GetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL) gpa(device, "vkGetPerformanceParameterINTEL"); + if 
(table->GetPerformanceParameterINTEL == nullptr) { table->GetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL)StubGetPerformanceParameterINTEL; } + table->SetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD) gpa(device, "vkSetLocalDimmingAMD"); + if (table->SetLocalDimmingAMD == nullptr) { table->SetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD)StubSetLocalDimmingAMD; } + table->GetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT) gpa(device, "vkGetBufferDeviceAddressEXT"); + if (table->GetBufferDeviceAddressEXT == nullptr) { table->GetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT)StubGetBufferDeviceAddressEXT; } +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->AcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT) gpa(device, "vkAcquireFullScreenExclusiveModeEXT"); + if (table->AcquireFullScreenExclusiveModeEXT == nullptr) { table->AcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT)StubAcquireFullScreenExclusiveModeEXT; } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->ReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT) gpa(device, "vkReleaseFullScreenExclusiveModeEXT"); + if (table->ReleaseFullScreenExclusiveModeEXT == nullptr) { table->ReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT)StubReleaseFullScreenExclusiveModeEXT; } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT) gpa(device, "vkGetDeviceGroupSurfacePresentModes2EXT"); + if (table->GetDeviceGroupSurfacePresentModes2EXT == nullptr) { table->GetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT)StubGetDeviceGroupSurfacePresentModes2EXT; } +#endif // VK_USE_PLATFORM_WIN32_KHR + table->CmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT) gpa(device, "vkCmdSetLineStippleEXT"); + if (table->CmdSetLineStippleEXT == nullptr) { table->CmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)StubCmdSetLineStippleEXT; } + table->ResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT) gpa(device, "vkResetQueryPoolEXT"); + if (table->ResetQueryPoolEXT == nullptr) { table->ResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)StubResetQueryPoolEXT; } + table->CmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT) gpa(device, "vkCmdSetCullModeEXT"); + if (table->CmdSetCullModeEXT == nullptr) { table->CmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)StubCmdSetCullModeEXT; } + table->CmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT) gpa(device, "vkCmdSetFrontFaceEXT"); + if (table->CmdSetFrontFaceEXT == nullptr) { table->CmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)StubCmdSetFrontFaceEXT; } + table->CmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT) gpa(device, "vkCmdSetPrimitiveTopologyEXT"); + if (table->CmdSetPrimitiveTopologyEXT == nullptr) { table->CmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT)StubCmdSetPrimitiveTopologyEXT; } + table->CmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT) gpa(device, "vkCmdSetViewportWithCountEXT"); + if (table->CmdSetViewportWithCountEXT == nullptr) { table->CmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT)StubCmdSetViewportWithCountEXT; } + table->CmdSetScissorWithCountEXT = (PFN_vkCmdSetScissorWithCountEXT) gpa(device, "vkCmdSetScissorWithCountEXT"); + if (table->CmdSetScissorWithCountEXT == nullptr) { table->CmdSetScissorWithCountEXT = 
(PFN_vkCmdSetScissorWithCountEXT)StubCmdSetScissorWithCountEXT; } + table->CmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT) gpa(device, "vkCmdBindVertexBuffers2EXT"); + if (table->CmdBindVertexBuffers2EXT == nullptr) { table->CmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT)StubCmdBindVertexBuffers2EXT; } + table->CmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT) gpa(device, "vkCmdSetDepthTestEnableEXT"); + if (table->CmdSetDepthTestEnableEXT == nullptr) { table->CmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT)StubCmdSetDepthTestEnableEXT; } + table->CmdSetDepthWriteEnableEXT = (PFN_vkCmdSetDepthWriteEnableEXT) gpa(device, "vkCmdSetDepthWriteEnableEXT"); + if (table->CmdSetDepthWriteEnableEXT == nullptr) { table->CmdSetDepthWriteEnableEXT = (PFN_vkCmdSetDepthWriteEnableEXT)StubCmdSetDepthWriteEnableEXT; } + table->CmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT) gpa(device, "vkCmdSetDepthCompareOpEXT"); + if (table->CmdSetDepthCompareOpEXT == nullptr) { table->CmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT)StubCmdSetDepthCompareOpEXT; } + table->CmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT) gpa(device, "vkCmdSetDepthBoundsTestEnableEXT"); + if (table->CmdSetDepthBoundsTestEnableEXT == nullptr) { table->CmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT)StubCmdSetDepthBoundsTestEnableEXT; } + table->CmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT) gpa(device, "vkCmdSetStencilTestEnableEXT"); + if (table->CmdSetStencilTestEnableEXT == nullptr) { table->CmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT)StubCmdSetStencilTestEnableEXT; } + table->CmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT) gpa(device, "vkCmdSetStencilOpEXT"); + if (table->CmdSetStencilOpEXT == nullptr) { table->CmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)StubCmdSetStencilOpEXT; } + table->GetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV) gpa(device, "vkGetGeneratedCommandsMemoryRequirementsNV"); + if (table->GetGeneratedCommandsMemoryRequirementsNV == nullptr) { table->GetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV)StubGetGeneratedCommandsMemoryRequirementsNV; } + table->CmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV) gpa(device, "vkCmdPreprocessGeneratedCommandsNV"); + if (table->CmdPreprocessGeneratedCommandsNV == nullptr) { table->CmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV)StubCmdPreprocessGeneratedCommandsNV; } + table->CmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV) gpa(device, "vkCmdExecuteGeneratedCommandsNV"); + if (table->CmdExecuteGeneratedCommandsNV == nullptr) { table->CmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV)StubCmdExecuteGeneratedCommandsNV; } + table->CmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV) gpa(device, "vkCmdBindPipelineShaderGroupNV"); + if (table->CmdBindPipelineShaderGroupNV == nullptr) { table->CmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV)StubCmdBindPipelineShaderGroupNV; } + table->CreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV) gpa(device, "vkCreateIndirectCommandsLayoutNV"); + if (table->CreateIndirectCommandsLayoutNV == nullptr) { table->CreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV)StubCreateIndirectCommandsLayoutNV; } + table->DestroyIndirectCommandsLayoutNV = 
(PFN_vkDestroyIndirectCommandsLayoutNV) gpa(device, "vkDestroyIndirectCommandsLayoutNV"); + if (table->DestroyIndirectCommandsLayoutNV == nullptr) { table->DestroyIndirectCommandsLayoutNV = (PFN_vkDestroyIndirectCommandsLayoutNV)StubDestroyIndirectCommandsLayoutNV; } + table->CreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT) gpa(device, "vkCreatePrivateDataSlotEXT"); + if (table->CreatePrivateDataSlotEXT == nullptr) { table->CreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)StubCreatePrivateDataSlotEXT; } + table->DestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT) gpa(device, "vkDestroyPrivateDataSlotEXT"); + if (table->DestroyPrivateDataSlotEXT == nullptr) { table->DestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)StubDestroyPrivateDataSlotEXT; } + table->SetPrivateDataEXT = (PFN_vkSetPrivateDataEXT) gpa(device, "vkSetPrivateDataEXT"); + if (table->SetPrivateDataEXT == nullptr) { table->SetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)StubSetPrivateDataEXT; } + table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT) gpa(device, "vkGetPrivateDataEXT"); + if (table->GetPrivateDataEXT == nullptr) { table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)StubGetPrivateDataEXT; } + table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV) gpa(device, "vkCmdSetFragmentShadingRateEnumNV"); + if (table->CmdSetFragmentShadingRateEnumNV == nullptr) { table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)StubCmdSetFragmentShadingRateEnumNV; } + table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR) gpa(device, "vkCreateAccelerationStructureKHR"); + if (table->CreateAccelerationStructureKHR == nullptr) { table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)StubCreateAccelerationStructureKHR; } + table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR) gpa(device, "vkDestroyAccelerationStructureKHR"); + if (table->DestroyAccelerationStructureKHR == nullptr) { table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)StubDestroyAccelerationStructureKHR; } + table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR) gpa(device, "vkCmdBuildAccelerationStructuresKHR"); + if (table->CmdBuildAccelerationStructuresKHR == nullptr) { table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)StubCmdBuildAccelerationStructuresKHR; } + table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR) gpa(device, "vkCmdBuildAccelerationStructuresIndirectKHR"); + if (table->CmdBuildAccelerationStructuresIndirectKHR == nullptr) { table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)StubCmdBuildAccelerationStructuresIndirectKHR; } + table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR) gpa(device, "vkBuildAccelerationStructuresKHR"); + if (table->BuildAccelerationStructuresKHR == nullptr) { table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)StubBuildAccelerationStructuresKHR; } + table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR) gpa(device, "vkCopyAccelerationStructureKHR"); + if (table->CopyAccelerationStructureKHR == nullptr) { table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)StubCopyAccelerationStructureKHR; } + table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR) 
gpa(device, "vkCopyAccelerationStructureToMemoryKHR"); + if (table->CopyAccelerationStructureToMemoryKHR == nullptr) { table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)StubCopyAccelerationStructureToMemoryKHR; } + table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR) gpa(device, "vkCopyMemoryToAccelerationStructureKHR"); + if (table->CopyMemoryToAccelerationStructureKHR == nullptr) { table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)StubCopyMemoryToAccelerationStructureKHR; } + table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkWriteAccelerationStructuresPropertiesKHR"); + if (table->WriteAccelerationStructuresPropertiesKHR == nullptr) { table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)StubWriteAccelerationStructuresPropertiesKHR; } + table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR) gpa(device, "vkCmdCopyAccelerationStructureKHR"); + if (table->CmdCopyAccelerationStructureKHR == nullptr) { table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)StubCmdCopyAccelerationStructureKHR; } + table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR) gpa(device, "vkCmdCopyAccelerationStructureToMemoryKHR"); + if (table->CmdCopyAccelerationStructureToMemoryKHR == nullptr) { table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)StubCmdCopyAccelerationStructureToMemoryKHR; } + table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR) gpa(device, "vkCmdCopyMemoryToAccelerationStructureKHR"); + if (table->CmdCopyMemoryToAccelerationStructureKHR == nullptr) { table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)StubCmdCopyMemoryToAccelerationStructureKHR; } + table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR) gpa(device, "vkGetAccelerationStructureDeviceAddressKHR"); + if (table->GetAccelerationStructureDeviceAddressKHR == nullptr) { table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)StubGetAccelerationStructureDeviceAddressKHR; } + table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR) gpa(device, "vkCmdWriteAccelerationStructuresPropertiesKHR"); + if (table->CmdWriteAccelerationStructuresPropertiesKHR == nullptr) { table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)StubCmdWriteAccelerationStructuresPropertiesKHR; } + table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR) gpa(device, "vkGetDeviceAccelerationStructureCompatibilityKHR"); + if (table->GetDeviceAccelerationStructureCompatibilityKHR == nullptr) { table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)StubGetDeviceAccelerationStructureCompatibilityKHR; } + table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR) gpa(device, "vkGetAccelerationStructureBuildSizesKHR"); + if (table->GetAccelerationStructureBuildSizesKHR == nullptr) { table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)StubGetAccelerationStructureBuildSizesKHR; 
}
+ table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR) gpa(device, "vkCmdTraceRaysKHR");
+ if (table->CmdTraceRaysKHR == nullptr) { table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)StubCmdTraceRaysKHR; }
+ table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR) gpa(device, "vkCreateRayTracingPipelinesKHR");
+ if (table->CreateRayTracingPipelinesKHR == nullptr) { table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)StubCreateRayTracingPipelinesKHR; }
+ table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR) gpa(device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");
+ if (table->GetRayTracingCaptureReplayShaderGroupHandlesKHR == nullptr) { table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)StubGetRayTracingCaptureReplayShaderGroupHandlesKHR; }
+ table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR) gpa(device, "vkCmdTraceRaysIndirectKHR");
+ if (table->CmdTraceRaysIndirectKHR == nullptr) { table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)StubCmdTraceRaysIndirectKHR; }
+ table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR) gpa(device, "vkGetRayTracingShaderGroupStackSizeKHR");
+ if (table->GetRayTracingShaderGroupStackSizeKHR == nullptr) { table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)StubGetRayTracingShaderGroupStackSizeKHR; }
+ table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR) gpa(device, "vkCmdSetRayTracingPipelineStackSizeKHR");
+ if (table->CmdSetRayTracingPipelineStackSizeKHR == nullptr) { table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)StubCmdSetRayTracingPipelineStackSizeKHR; }
+}
+
+
+static inline void layer_init_instance_dispatch_table(VkInstance instance, VkLayerInstanceDispatchTable *table, PFN_vkGetInstanceProcAddr gpa) {
+ memset(table, 0, sizeof(*table));
+ // Instance function pointers
+ table->DestroyInstance = (PFN_vkDestroyInstance) gpa(instance, "vkDestroyInstance");
+ table->EnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices) gpa(instance, "vkEnumeratePhysicalDevices");
+ table->GetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures) gpa(instance, "vkGetPhysicalDeviceFeatures");
+ table->GetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties) gpa(instance, "vkGetPhysicalDeviceFormatProperties");
+ table->GetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties) gpa(instance, "vkGetPhysicalDeviceImageFormatProperties");
+ table->GetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties) gpa(instance, "vkGetPhysicalDeviceProperties");
+ table->GetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties) gpa(instance, "vkGetPhysicalDeviceQueueFamilyProperties");
+ table->GetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties) gpa(instance, "vkGetPhysicalDeviceMemoryProperties");
+ table->GetInstanceProcAddr = gpa;
+ table->EnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties) gpa(instance, "vkEnumerateDeviceExtensionProperties");
+ table->EnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties) gpa(instance, "vkEnumerateDeviceLayerProperties");
+ table->GetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties) gpa(instance, "vkGetPhysicalDeviceSparseImageFormatProperties");
+ table->EnumeratePhysicalDeviceGroups = (PFN_vkEnumeratePhysicalDeviceGroups) gpa(instance, "vkEnumeratePhysicalDeviceGroups");
+ table->GetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2) gpa(instance, "vkGetPhysicalDeviceFeatures2");
+ table->GetPhysicalDeviceProperties2 = (PFN_vkGetPhysicalDeviceProperties2) gpa(instance, "vkGetPhysicalDeviceProperties2");
+ table->GetPhysicalDeviceFormatProperties2 = (PFN_vkGetPhysicalDeviceFormatProperties2) gpa(instance, "vkGetPhysicalDeviceFormatProperties2");
+ table->GetPhysicalDeviceImageFormatProperties2 = (PFN_vkGetPhysicalDeviceImageFormatProperties2) gpa(instance, "vkGetPhysicalDeviceImageFormatProperties2");
+ table->GetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2) gpa(instance, "vkGetPhysicalDeviceQueueFamilyProperties2");
+ table->GetPhysicalDeviceMemoryProperties2 = (PFN_vkGetPhysicalDeviceMemoryProperties2) gpa(instance, "vkGetPhysicalDeviceMemoryProperties2");
+ table->GetPhysicalDeviceSparseImageFormatProperties2 = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2) gpa(instance, "vkGetPhysicalDeviceSparseImageFormatProperties2");
+ table->GetPhysicalDeviceExternalBufferProperties = (PFN_vkGetPhysicalDeviceExternalBufferProperties) gpa(instance, "vkGetPhysicalDeviceExternalBufferProperties");
+ table->GetPhysicalDeviceExternalFenceProperties = (PFN_vkGetPhysicalDeviceExternalFenceProperties) gpa(instance, "vkGetPhysicalDeviceExternalFenceProperties");
+ table->GetPhysicalDeviceExternalSemaphoreProperties = (PFN_vkGetPhysicalDeviceExternalSemaphoreProperties) gpa(instance, "vkGetPhysicalDeviceExternalSemaphoreProperties");
+ table->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR) gpa(instance, "vkDestroySurfaceKHR");
+ table->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
+ table->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ table->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
+ table->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
+ table->GetPhysicalDevicePresentRectanglesKHR = (PFN_vkGetPhysicalDevicePresentRectanglesKHR) gpa(instance, "vkGetPhysicalDevicePresentRectanglesKHR");
+ table->GetPhysicalDeviceDisplayPropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR) gpa(instance, "vkGetPhysicalDeviceDisplayPropertiesKHR");
+ table->GetPhysicalDeviceDisplayPlanePropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR) gpa(instance, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR");
+ table->GetDisplayPlaneSupportedDisplaysKHR = (PFN_vkGetDisplayPlaneSupportedDisplaysKHR) gpa(instance, "vkGetDisplayPlaneSupportedDisplaysKHR");
+ table->GetDisplayModePropertiesKHR = (PFN_vkGetDisplayModePropertiesKHR) gpa(instance, "vkGetDisplayModePropertiesKHR");
+ table->CreateDisplayModeKHR = (PFN_vkCreateDisplayModeKHR) gpa(instance, "vkCreateDisplayModeKHR");
+ table->GetDisplayPlaneCapabilitiesKHR = (PFN_vkGetDisplayPlaneCapabilitiesKHR) gpa(instance, "vkGetDisplayPlaneCapabilitiesKHR");
+ table->CreateDisplayPlaneSurfaceKHR = (PFN_vkCreateDisplayPlaneSurfaceKHR) gpa(instance, "vkCreateDisplayPlaneSurfaceKHR");
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ table->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
+#endif // VK_USE_PLATFORM_XLIB_KHR
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ table->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
+#endif // VK_USE_PLATFORM_XLIB_KHR
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ table->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
+#endif // VK_USE_PLATFORM_XCB_KHR
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ table->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
+#endif // VK_USE_PLATFORM_XCB_KHR
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ table->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ table->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ table->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
+#endif // VK_USE_PLATFORM_ANDROID_KHR
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ table->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
+#endif // VK_USE_PLATFORM_WIN32_KHR
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ table->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
+#endif // VK_USE_PLATFORM_WIN32_KHR
+ table->GetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR) gpa(instance, "vkGetPhysicalDeviceFeatures2KHR");
+ table->GetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR) gpa(instance, "vkGetPhysicalDeviceProperties2KHR");
+ table->GetPhysicalDeviceFormatProperties2KHR = (PFN_vkGetPhysicalDeviceFormatProperties2KHR) gpa(instance, "vkGetPhysicalDeviceFormatProperties2KHR");
+ table->GetPhysicalDeviceImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR) gpa(instance, "vkGetPhysicalDeviceImageFormatProperties2KHR");
+ table->GetPhysicalDeviceQueueFamilyProperties2KHR = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR) gpa(instance, "vkGetPhysicalDeviceQueueFamilyProperties2KHR");
+ table->GetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR) gpa(instance, "vkGetPhysicalDeviceMemoryProperties2KHR");
+ table->GetPhysicalDeviceSparseImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR) gpa(instance, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR");
+ table->EnumeratePhysicalDeviceGroupsKHR = (PFN_vkEnumeratePhysicalDeviceGroupsKHR) gpa(instance, "vkEnumeratePhysicalDeviceGroupsKHR");
+ table->GetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR) gpa(instance, "vkGetPhysicalDeviceExternalBufferPropertiesKHR");
+ table->GetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR) gpa(instance, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
+ table->GetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR) gpa(instance, "vkGetPhysicalDeviceExternalFencePropertiesKHR");
+ table->EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = (PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR) gpa(instance, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
+ table->GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = (PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR) gpa(instance, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR");
+ table->GetPhysicalDeviceSurfaceCapabilities2KHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilities2KHR");
+ table->GetPhysicalDeviceSurfaceFormats2KHR = (PFN_vkGetPhysicalDeviceSurfaceFormats2KHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormats2KHR");
+ table->GetPhysicalDeviceDisplayProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayProperties2KHR) gpa(instance, "vkGetPhysicalDeviceDisplayProperties2KHR");
+ table->GetPhysicalDeviceDisplayPlaneProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR) gpa(instance, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR");
+ table->GetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR) gpa(instance, "vkGetDisplayModeProperties2KHR");
+ table->GetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR) gpa(instance, "vkGetDisplayPlaneCapabilities2KHR");
+ table->GetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR) gpa(instance, "vkGetPhysicalDeviceFragmentShadingRatesKHR");
+ table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT) gpa(instance, "vkCreateDebugReportCallbackEXT");
+ table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT) gpa(instance, "vkDestroyDebugReportCallbackEXT");
+ table->DebugReportMessageEXT = (PFN_vkDebugReportMessageEXT) gpa(instance, "vkDebugReportMessageEXT");
+#ifdef VK_USE_PLATFORM_GGP
+ table->CreateStreamDescriptorSurfaceGGP = (PFN_vkCreateStreamDescriptorSurfaceGGP) gpa(instance, "vkCreateStreamDescriptorSurfaceGGP");
+#endif // VK_USE_PLATFORM_GGP
+ table->GetPhysicalDeviceExternalImageFormatPropertiesNV = (PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV) gpa(instance, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV");
+#ifdef VK_USE_PLATFORM_VI_NN
+ table->CreateViSurfaceNN = (PFN_vkCreateViSurfaceNN) gpa(instance, "vkCreateViSurfaceNN");
+#endif // VK_USE_PLATFORM_VI_NN
+ table->ReleaseDisplayEXT = (PFN_vkReleaseDisplayEXT) gpa(instance, "vkReleaseDisplayEXT");
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ table->AcquireXlibDisplayEXT = (PFN_vkAcquireXlibDisplayEXT) gpa(instance, "vkAcquireXlibDisplayEXT");
+#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ table->GetRandROutputDisplayEXT = (PFN_vkGetRandROutputDisplayEXT) gpa(instance, "vkGetRandROutputDisplayEXT");
+#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
+ table->GetPhysicalDeviceSurfaceCapabilities2EXT = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilities2EXT");
+#ifdef VK_USE_PLATFORM_IOS_MVK
+ table->CreateIOSSurfaceMVK = (PFN_vkCreateIOSSurfaceMVK) gpa(instance, "vkCreateIOSSurfaceMVK");
+#endif // VK_USE_PLATFORM_IOS_MVK
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+ table->CreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK) gpa(instance, "vkCreateMacOSSurfaceMVK");
+#endif // VK_USE_PLATFORM_MACOS_MVK
+ table->CreateDebugUtilsMessengerEXT = (PFN_vkCreateDebugUtilsMessengerEXT) gpa(instance, "vkCreateDebugUtilsMessengerEXT");
+ table->DestroyDebugUtilsMessengerEXT = (PFN_vkDestroyDebugUtilsMessengerEXT) gpa(instance, "vkDestroyDebugUtilsMessengerEXT");
+ table->SubmitDebugUtilsMessageEXT = (PFN_vkSubmitDebugUtilsMessageEXT) gpa(instance, "vkSubmitDebugUtilsMessageEXT");
+ table->GetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT) gpa(instance, "vkGetPhysicalDeviceMultisamplePropertiesEXT");
+ table->GetPhysicalDeviceCalibrateableTimeDomainsEXT = (PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT) gpa(instance, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT");
+#ifdef VK_USE_PLATFORM_FUCHSIA
+ table->CreateImagePipeSurfaceFUCHSIA = (PFN_vkCreateImagePipeSurfaceFUCHSIA) gpa(instance, "vkCreateImagePipeSurfaceFUCHSIA");
+#endif // VK_USE_PLATFORM_FUCHSIA
+#ifdef VK_USE_PLATFORM_METAL_EXT
+ table->CreateMetalSurfaceEXT = (PFN_vkCreateMetalSurfaceEXT) gpa(instance, "vkCreateMetalSurfaceEXT");
+#endif // VK_USE_PLATFORM_METAL_EXT
+ table->GetPhysicalDeviceToolPropertiesEXT = (PFN_vkGetPhysicalDeviceToolPropertiesEXT) gpa(instance, "vkGetPhysicalDeviceToolPropertiesEXT");
+ table->GetPhysicalDeviceCooperativeMatrixPropertiesNV = (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV) gpa(instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV");
+ table->GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = (PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV) gpa(instance, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV");
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ table->GetPhysicalDeviceSurfacePresentModes2EXT = (PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModes2EXT");
+#endif // VK_USE_PLATFORM_WIN32_KHR
+ table->CreateHeadlessSurfaceEXT = (PFN_vkCreateHeadlessSurfaceEXT) gpa(instance, "vkCreateHeadlessSurfaceEXT");
+#ifdef VK_USE_PLATFORM_DIRECTFB_EXT
+ table->CreateDirectFBSurfaceEXT = (PFN_vkCreateDirectFBSurfaceEXT) gpa(instance, "vkCreateDirectFBSurfaceEXT");
+#endif // VK_USE_PLATFORM_DIRECTFB_EXT
+#ifdef VK_USE_PLATFORM_DIRECTFB_EXT
+ table->GetPhysicalDeviceDirectFBPresentationSupportEXT = (PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT) gpa(instance, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT");
+#endif // VK_USE_PLATFORM_DIRECTFB_EXT
+}
diff --git a/src/third_party/vulkan/loader/vk_layer_dispatch_table.h b/src/third_party/vulkan/loader/vk_layer_dispatch_table.h
new file mode 100644
index 0000000..45b20db
--- /dev/null
+++ b/src/third_party/vulkan/loader/vk_layer_dispatch_table.h
@@ -0,0 +1,757 @@
+// *** THIS FILE IS GENERATED - DO NOT EDIT ***
+// See loader_extension_generator.py for modifications
+
+/*
+ * Copyright (c) 2015-2017 The Khronos Group Inc.
+ * Copyright (c) 2015-2017 Valve Corporation
+ * Copyright (c) 2015-2017 LunarG, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Mark Lobodzinski
+ * Author: Mark Young
+ */
+
+#pragma once
+
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
+
+// Instance function pointer dispatch table
+typedef struct VkLayerInstanceDispatchTable_ {
+ // Manually add in GetPhysicalDeviceProcAddr entry
+ PFN_GetPhysicalDeviceProcAddr GetPhysicalDeviceProcAddr;
+
+ // ---- Core 1_0 commands
+ PFN_vkCreateInstance CreateInstance;
+ PFN_vkDestroyInstance DestroyInstance;
+ PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices;
+ PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures;
+ PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties;
+ PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties;
+ PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties;
+ PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties;
+ PFN_vkGetInstanceProcAddr GetInstanceProcAddr;
+ PFN_vkCreateDevice CreateDevice;
+ PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties;
+ PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties;
+ PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties;
+ PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties;
+
+ // ---- Core 1_1 commands
+ PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion;
+ PFN_vkEnumeratePhysicalDeviceGroups EnumeratePhysicalDeviceGroups;
+ PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2;
+ PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2;
+ PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2;
+ PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2;
+ PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 GetPhysicalDeviceSparseImageFormatProperties2;
+ PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties;
+ PFN_vkGetPhysicalDeviceExternalFenceProperties GetPhysicalDeviceExternalFenceProperties;
+ PFN_vkGetPhysicalDeviceExternalSemaphoreProperties GetPhysicalDeviceExternalSemaphoreProperties;
+
+ // ---- VK_KHR_surface extension commands
+ PFN_vkDestroySurfaceKHR DestroySurfaceKHR;
+ PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR;
+ PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR;
+ PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR;
+ PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR;
+
+ // ---- VK_KHR_swapchain extension commands
+ PFN_vkGetPhysicalDevicePresentRectanglesKHR GetPhysicalDevicePresentRectanglesKHR;
+
+ // ---- VK_KHR_display extension commands
+ PFN_vkGetPhysicalDeviceDisplayPropertiesKHR GetPhysicalDeviceDisplayPropertiesKHR;
+ PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR GetPhysicalDeviceDisplayPlanePropertiesKHR;
+ PFN_vkGetDisplayPlaneSupportedDisplaysKHR GetDisplayPlaneSupportedDisplaysKHR;
+ PFN_vkGetDisplayModePropertiesKHR GetDisplayModePropertiesKHR;
+ PFN_vkCreateDisplayModeKHR CreateDisplayModeKHR;
+ PFN_vkGetDisplayPlaneCapabilitiesKHR GetDisplayPlaneCapabilitiesKHR;
+ PFN_vkCreateDisplayPlaneSurfaceKHR CreateDisplayPlaneSurfaceKHR;
+
+ // ---- VK_KHR_xlib_surface extension commands
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR GetPhysicalDeviceXlibPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+
+ // ---- VK_KHR_xcb_surface extension commands
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR;
+#endif // VK_USE_PLATFORM_XCB_KHR
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR GetPhysicalDeviceXcbPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_XCB_KHR
+
+ // ---- VK_KHR_wayland_surface extension commands
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ PFN_vkCreateWaylandSurfaceKHR CreateWaylandSurfaceKHR;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR GetPhysicalDeviceWaylandPresentationSupportKHR;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+
+ // ---- VK_KHR_android_surface extension commands
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+ PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR;
+#endif // VK_USE_PLATFORM_ANDROID_KHR
+
+ // ---- VK_KHR_win32_surface extension commands
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR GetPhysicalDeviceWin32PresentationSupportKHR;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+
+ // ---- VK_KHR_get_physical_device_properties2 extension commands
+ PFN_vkGetPhysicalDeviceFeatures2KHR GetPhysicalDeviceFeatures2KHR;
+ PFN_vkGetPhysicalDeviceProperties2KHR GetPhysicalDeviceProperties2KHR;
+ PFN_vkGetPhysicalDeviceFormatProperties2KHR GetPhysicalDeviceFormatProperties2KHR;
+ PFN_vkGetPhysicalDeviceImageFormatProperties2KHR GetPhysicalDeviceImageFormatProperties2KHR;
+ PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR GetPhysicalDeviceQueueFamilyProperties2KHR;
+ PFN_vkGetPhysicalDeviceMemoryProperties2KHR GetPhysicalDeviceMemoryProperties2KHR;
+ PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR GetPhysicalDeviceSparseImageFormatProperties2KHR;
+
+ // ---- VK_KHR_device_group_creation extension commands
+ PFN_vkEnumeratePhysicalDeviceGroupsKHR EnumeratePhysicalDeviceGroupsKHR;
+
+ // ---- VK_KHR_external_memory_capabilities extension commands
+ PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR GetPhysicalDeviceExternalBufferPropertiesKHR;
+
+ // ---- VK_KHR_external_semaphore_capabilities extension commands
+ PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR GetPhysicalDeviceExternalSemaphorePropertiesKHR;
+
+ // ---- VK_KHR_external_fence_capabilities extension commands
+ PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR GetPhysicalDeviceExternalFencePropertiesKHR;
+
+ // ---- VK_KHR_performance_query extension commands
+ PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;
+ PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;
+
+ // ---- VK_KHR_get_surface_capabilities2 extension commands
+ PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR GetPhysicalDeviceSurfaceCapabilities2KHR;
+ PFN_vkGetPhysicalDeviceSurfaceFormats2KHR GetPhysicalDeviceSurfaceFormats2KHR;
+
+ // ----
VK_KHR_get_display_properties2 extension commands + PFN_vkGetPhysicalDeviceDisplayProperties2KHR GetPhysicalDeviceDisplayProperties2KHR; + PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR GetPhysicalDeviceDisplayPlaneProperties2KHR; + PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR; + PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR; + + // ---- VK_KHR_fragment_shading_rate extension commands + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR GetPhysicalDeviceFragmentShadingRatesKHR; + + // ---- VK_EXT_debug_report extension commands + PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT; + PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT; + PFN_vkDebugReportMessageEXT DebugReportMessageEXT; + + // ---- VK_GGP_stream_descriptor_surface extension commands +#ifdef VK_USE_PLATFORM_GGP + PFN_vkCreateStreamDescriptorSurfaceGGP CreateStreamDescriptorSurfaceGGP; +#endif // VK_USE_PLATFORM_GGP + + // ---- VK_NV_external_memory_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV; + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + PFN_vkCreateViSurfaceNN CreateViSurfaceNN; +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + PFN_vkReleaseDisplayEXT ReleaseDisplayEXT; + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkAcquireXlibDisplayEXT AcquireXlibDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkGetRandROutputDisplayEXT GetRandROutputDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT GetPhysicalDeviceSurfaceCapabilities2EXT; + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + PFN_vkCreateIOSSurfaceMVK CreateIOSSurfaceMVK; +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + PFN_vkCreateMacOSSurfaceMVK CreateMacOSSurfaceMVK; +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT; + PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT; + PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT; + + // ---- VK_EXT_sample_locations extension commands + PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT GetPhysicalDeviceMultisamplePropertiesEXT; + + // ---- VK_EXT_calibrated_timestamps extension commands + PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT GetPhysicalDeviceCalibrateableTimeDomainsEXT; + + // ---- VK_FUCHSIA_imagepipe_surface extension commands +#ifdef VK_USE_PLATFORM_FUCHSIA + PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA; +#endif // VK_USE_PLATFORM_FUCHSIA + + // ---- VK_EXT_metal_surface extension commands +#ifdef VK_USE_PLATFORM_METAL_EXT + PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT; +#endif // VK_USE_PLATFORM_METAL_EXT + + // ---- VK_EXT_tooling_info extension commands + PFN_vkGetPhysicalDeviceToolPropertiesEXT GetPhysicalDeviceToolPropertiesEXT; + + // ---- VK_NV_cooperative_matrix extension commands + PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV GetPhysicalDeviceCooperativeMatrixPropertiesNV; + + // ---- VK_NV_coverage_reduction_mode extension commands + 
PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT GetPhysicalDeviceSurfacePresentModes2EXT; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_headless_surface extension commands + PFN_vkCreateHeadlessSurfaceEXT CreateHeadlessSurfaceEXT; + + // ---- VK_EXT_directfb_surface extension commands +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + PFN_vkCreateDirectFBSurfaceEXT CreateDirectFBSurfaceEXT; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT GetPhysicalDeviceDirectFBPresentationSupportEXT; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +} VkLayerInstanceDispatchTable; + +// Device function pointer dispatch table +typedef struct VkLayerDispatchTable_ { + + // ---- Core 1_0 commands + PFN_vkGetDeviceProcAddr GetDeviceProcAddr; + PFN_vkDestroyDevice DestroyDevice; + PFN_vkGetDeviceQueue GetDeviceQueue; + PFN_vkQueueSubmit QueueSubmit; + PFN_vkQueueWaitIdle QueueWaitIdle; + PFN_vkDeviceWaitIdle DeviceWaitIdle; + PFN_vkAllocateMemory AllocateMemory; + PFN_vkFreeMemory FreeMemory; + PFN_vkMapMemory MapMemory; + PFN_vkUnmapMemory UnmapMemory; + PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges; + PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment; + PFN_vkBindBufferMemory BindBufferMemory; + PFN_vkBindImageMemory BindImageMemory; + PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements; + PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements; + PFN_vkQueueBindSparse QueueBindSparse; + PFN_vkCreateFence CreateFence; + PFN_vkDestroyFence DestroyFence; + PFN_vkResetFences ResetFences; + PFN_vkGetFenceStatus GetFenceStatus; + PFN_vkWaitForFences WaitForFences; + PFN_vkCreateSemaphore CreateSemaphore; + PFN_vkDestroySemaphore DestroySemaphore; + PFN_vkCreateEvent CreateEvent; + PFN_vkDestroyEvent DestroyEvent; + PFN_vkGetEventStatus GetEventStatus; + PFN_vkSetEvent SetEvent; + PFN_vkResetEvent ResetEvent; + PFN_vkCreateQueryPool CreateQueryPool; + PFN_vkDestroyQueryPool DestroyQueryPool; + PFN_vkGetQueryPoolResults GetQueryPoolResults; + PFN_vkCreateBuffer CreateBuffer; + PFN_vkDestroyBuffer DestroyBuffer; + PFN_vkCreateBufferView CreateBufferView; + PFN_vkDestroyBufferView DestroyBufferView; + PFN_vkCreateImage CreateImage; + PFN_vkDestroyImage DestroyImage; + PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout; + PFN_vkCreateImageView CreateImageView; + PFN_vkDestroyImageView DestroyImageView; + PFN_vkCreateShaderModule CreateShaderModule; + PFN_vkDestroyShaderModule DestroyShaderModule; + PFN_vkCreatePipelineCache CreatePipelineCache; + PFN_vkDestroyPipelineCache DestroyPipelineCache; + PFN_vkGetPipelineCacheData GetPipelineCacheData; + PFN_vkMergePipelineCaches MergePipelineCaches; + PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines; + PFN_vkCreateComputePipelines CreateComputePipelines; + PFN_vkDestroyPipeline DestroyPipeline; + PFN_vkCreatePipelineLayout CreatePipelineLayout; + PFN_vkDestroyPipelineLayout DestroyPipelineLayout; + PFN_vkCreateSampler CreateSampler; + PFN_vkDestroySampler DestroySampler; + PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout; + PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout; + 
PFN_vkCreateDescriptorPool CreateDescriptorPool; + PFN_vkDestroyDescriptorPool DestroyDescriptorPool; + PFN_vkResetDescriptorPool ResetDescriptorPool; + PFN_vkAllocateDescriptorSets AllocateDescriptorSets; + PFN_vkFreeDescriptorSets FreeDescriptorSets; + PFN_vkUpdateDescriptorSets UpdateDescriptorSets; + PFN_vkCreateFramebuffer CreateFramebuffer; + PFN_vkDestroyFramebuffer DestroyFramebuffer; + PFN_vkCreateRenderPass CreateRenderPass; + PFN_vkDestroyRenderPass DestroyRenderPass; + PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity; + PFN_vkCreateCommandPool CreateCommandPool; + PFN_vkDestroyCommandPool DestroyCommandPool; + PFN_vkResetCommandPool ResetCommandPool; + PFN_vkAllocateCommandBuffers AllocateCommandBuffers; + PFN_vkFreeCommandBuffers FreeCommandBuffers; + PFN_vkBeginCommandBuffer BeginCommandBuffer; + PFN_vkEndCommandBuffer EndCommandBuffer; + PFN_vkResetCommandBuffer ResetCommandBuffer; + PFN_vkCmdBindPipeline CmdBindPipeline; + PFN_vkCmdSetViewport CmdSetViewport; + PFN_vkCmdSetScissor CmdSetScissor; + PFN_vkCmdSetLineWidth CmdSetLineWidth; + PFN_vkCmdSetDepthBias CmdSetDepthBias; + PFN_vkCmdSetBlendConstants CmdSetBlendConstants; + PFN_vkCmdSetDepthBounds CmdSetDepthBounds; + PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask; + PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask; + PFN_vkCmdSetStencilReference CmdSetStencilReference; + PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets; + PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer; + PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers; + PFN_vkCmdDraw CmdDraw; + PFN_vkCmdDrawIndexed CmdDrawIndexed; + PFN_vkCmdDrawIndirect CmdDrawIndirect; + PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect; + PFN_vkCmdDispatch CmdDispatch; + PFN_vkCmdDispatchIndirect CmdDispatchIndirect; + PFN_vkCmdCopyBuffer CmdCopyBuffer; + PFN_vkCmdCopyImage CmdCopyImage; + PFN_vkCmdBlitImage CmdBlitImage; + PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage; + PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer; + PFN_vkCmdUpdateBuffer CmdUpdateBuffer; + PFN_vkCmdFillBuffer CmdFillBuffer; + PFN_vkCmdClearColorImage CmdClearColorImage; + PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage; + PFN_vkCmdClearAttachments CmdClearAttachments; + PFN_vkCmdResolveImage CmdResolveImage; + PFN_vkCmdSetEvent CmdSetEvent; + PFN_vkCmdResetEvent CmdResetEvent; + PFN_vkCmdWaitEvents CmdWaitEvents; + PFN_vkCmdPipelineBarrier CmdPipelineBarrier; + PFN_vkCmdBeginQuery CmdBeginQuery; + PFN_vkCmdEndQuery CmdEndQuery; + PFN_vkCmdResetQueryPool CmdResetQueryPool; + PFN_vkCmdWriteTimestamp CmdWriteTimestamp; + PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults; + PFN_vkCmdPushConstants CmdPushConstants; + PFN_vkCmdBeginRenderPass CmdBeginRenderPass; + PFN_vkCmdNextSubpass CmdNextSubpass; + PFN_vkCmdEndRenderPass CmdEndRenderPass; + PFN_vkCmdExecuteCommands CmdExecuteCommands; + + // ---- Core 1_1 commands + PFN_vkBindBufferMemory2 BindBufferMemory2; + PFN_vkBindImageMemory2 BindImageMemory2; + PFN_vkGetDeviceGroupPeerMemoryFeatures GetDeviceGroupPeerMemoryFeatures; + PFN_vkCmdSetDeviceMask CmdSetDeviceMask; + PFN_vkCmdDispatchBase CmdDispatchBase; + PFN_vkGetImageMemoryRequirements2 GetImageMemoryRequirements2; + PFN_vkGetBufferMemoryRequirements2 GetBufferMemoryRequirements2; + PFN_vkGetImageSparseMemoryRequirements2 GetImageSparseMemoryRequirements2; + PFN_vkTrimCommandPool TrimCommandPool; + PFN_vkGetDeviceQueue2 GetDeviceQueue2; + PFN_vkCreateSamplerYcbcrConversion CreateSamplerYcbcrConversion; + PFN_vkDestroySamplerYcbcrConversion DestroySamplerYcbcrConversion; + 
PFN_vkCreateDescriptorUpdateTemplate CreateDescriptorUpdateTemplate; + PFN_vkDestroyDescriptorUpdateTemplate DestroyDescriptorUpdateTemplate; + PFN_vkUpdateDescriptorSetWithTemplate UpdateDescriptorSetWithTemplate; + PFN_vkGetDescriptorSetLayoutSupport GetDescriptorSetLayoutSupport; + + // ---- Core 1_2 commands + PFN_vkCmdDrawIndirectCount CmdDrawIndirectCount; + PFN_vkCmdDrawIndexedIndirectCount CmdDrawIndexedIndirectCount; + PFN_vkCreateRenderPass2 CreateRenderPass2; + PFN_vkCmdBeginRenderPass2 CmdBeginRenderPass2; + PFN_vkCmdNextSubpass2 CmdNextSubpass2; + PFN_vkCmdEndRenderPass2 CmdEndRenderPass2; + PFN_vkResetQueryPool ResetQueryPool; + PFN_vkGetSemaphoreCounterValue GetSemaphoreCounterValue; + PFN_vkWaitSemaphores WaitSemaphores; + PFN_vkSignalSemaphore SignalSemaphore; + PFN_vkGetBufferDeviceAddress GetBufferDeviceAddress; + PFN_vkGetBufferOpaqueCaptureAddress GetBufferOpaqueCaptureAddress; + PFN_vkGetDeviceMemoryOpaqueCaptureAddress GetDeviceMemoryOpaqueCaptureAddress; + + // ---- VK_KHR_swapchain extension commands + PFN_vkCreateSwapchainKHR CreateSwapchainKHR; + PFN_vkDestroySwapchainKHR DestroySwapchainKHR; + PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR; + PFN_vkAcquireNextImageKHR AcquireNextImageKHR; + PFN_vkQueuePresentKHR QueuePresentKHR; + PFN_vkGetDeviceGroupPresentCapabilitiesKHR GetDeviceGroupPresentCapabilitiesKHR; + PFN_vkGetDeviceGroupSurfacePresentModesKHR GetDeviceGroupSurfacePresentModesKHR; + PFN_vkAcquireNextImage2KHR AcquireNextImage2KHR; + + // ---- VK_KHR_display_swapchain extension commands + PFN_vkCreateSharedSwapchainsKHR CreateSharedSwapchainsKHR; + + // ---- VK_KHR_device_group extension commands + PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR GetDeviceGroupPeerMemoryFeaturesKHR; + PFN_vkCmdSetDeviceMaskKHR CmdSetDeviceMaskKHR; + PFN_vkCmdDispatchBaseKHR CmdDispatchBaseKHR; + + // ---- VK_KHR_maintenance1 extension commands + PFN_vkTrimCommandPoolKHR TrimCommandPoolKHR; + + // ---- VK_KHR_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleKHR GetMemoryWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandlePropertiesKHR GetMemoryWin32HandlePropertiesKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_memory_fd extension commands + PFN_vkGetMemoryFdKHR GetMemoryFdKHR; + PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR; + + // ---- VK_KHR_external_semaphore_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportSemaphoreWin32HandleKHR ImportSemaphoreWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetSemaphoreWin32HandleKHR GetSemaphoreWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_semaphore_fd extension commands + PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR; + PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR; + + // ---- VK_KHR_push_descriptor extension commands + PFN_vkCmdPushDescriptorSetKHR CmdPushDescriptorSetKHR; + PFN_vkCmdPushDescriptorSetWithTemplateKHR CmdPushDescriptorSetWithTemplateKHR; + + // ---- VK_KHR_descriptor_update_template extension commands + PFN_vkCreateDescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplateKHR; + PFN_vkDestroyDescriptorUpdateTemplateKHR DestroyDescriptorUpdateTemplateKHR; + PFN_vkUpdateDescriptorSetWithTemplateKHR UpdateDescriptorSetWithTemplateKHR; + + // ---- VK_KHR_create_renderpass2 extension commands + PFN_vkCreateRenderPass2KHR CreateRenderPass2KHR; + PFN_vkCmdBeginRenderPass2KHR 
CmdBeginRenderPass2KHR; + PFN_vkCmdNextSubpass2KHR CmdNextSubpass2KHR; + PFN_vkCmdEndRenderPass2KHR CmdEndRenderPass2KHR; + + // ---- VK_KHR_shared_presentable_image extension commands + PFN_vkGetSwapchainStatusKHR GetSwapchainStatusKHR; + + // ---- VK_KHR_external_fence_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportFenceWin32HandleKHR ImportFenceWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetFenceWin32HandleKHR GetFenceWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_fence_fd extension commands + PFN_vkImportFenceFdKHR ImportFenceFdKHR; + PFN_vkGetFenceFdKHR GetFenceFdKHR; + + // ---- VK_KHR_performance_query extension commands + PFN_vkAcquireProfilingLockKHR AcquireProfilingLockKHR; + PFN_vkReleaseProfilingLockKHR ReleaseProfilingLockKHR; + + // ---- VK_KHR_get_memory_requirements2 extension commands + PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2KHR; + PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2KHR; + PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2KHR; + + // ---- VK_KHR_sampler_ycbcr_conversion extension commands + PFN_vkCreateSamplerYcbcrConversionKHR CreateSamplerYcbcrConversionKHR; + PFN_vkDestroySamplerYcbcrConversionKHR DestroySamplerYcbcrConversionKHR; + + // ---- VK_KHR_bind_memory2 extension commands + PFN_vkBindBufferMemory2KHR BindBufferMemory2KHR; + PFN_vkBindImageMemory2KHR BindImageMemory2KHR; + + // ---- VK_KHR_maintenance3 extension commands + PFN_vkGetDescriptorSetLayoutSupportKHR GetDescriptorSetLayoutSupportKHR; + + // ---- VK_KHR_draw_indirect_count extension commands + PFN_vkCmdDrawIndirectCountKHR CmdDrawIndirectCountKHR; + PFN_vkCmdDrawIndexedIndirectCountKHR CmdDrawIndexedIndirectCountKHR; + + // ---- VK_KHR_timeline_semaphore extension commands + PFN_vkGetSemaphoreCounterValueKHR GetSemaphoreCounterValueKHR; + PFN_vkWaitSemaphoresKHR WaitSemaphoresKHR; + PFN_vkSignalSemaphoreKHR SignalSemaphoreKHR; + + // ---- VK_KHR_fragment_shading_rate extension commands + PFN_vkCmdSetFragmentShadingRateKHR CmdSetFragmentShadingRateKHR; + + // ---- VK_KHR_buffer_device_address extension commands + PFN_vkGetBufferDeviceAddressKHR GetBufferDeviceAddressKHR; + PFN_vkGetBufferOpaqueCaptureAddressKHR GetBufferOpaqueCaptureAddressKHR; + PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR GetDeviceMemoryOpaqueCaptureAddressKHR; + + // ---- VK_KHR_deferred_host_operations extension commands + PFN_vkCreateDeferredOperationKHR CreateDeferredOperationKHR; + PFN_vkDestroyDeferredOperationKHR DestroyDeferredOperationKHR; + PFN_vkGetDeferredOperationMaxConcurrencyKHR GetDeferredOperationMaxConcurrencyKHR; + PFN_vkGetDeferredOperationResultKHR GetDeferredOperationResultKHR; + PFN_vkDeferredOperationJoinKHR DeferredOperationJoinKHR; + + // ---- VK_KHR_pipeline_executable_properties extension commands + PFN_vkGetPipelineExecutablePropertiesKHR GetPipelineExecutablePropertiesKHR; + PFN_vkGetPipelineExecutableStatisticsKHR GetPipelineExecutableStatisticsKHR; + PFN_vkGetPipelineExecutableInternalRepresentationsKHR GetPipelineExecutableInternalRepresentationsKHR; + + // ---- VK_KHR_copy_commands2 extension commands + PFN_vkCmdCopyBuffer2KHR CmdCopyBuffer2KHR; + PFN_vkCmdCopyImage2KHR CmdCopyImage2KHR; + PFN_vkCmdCopyBufferToImage2KHR CmdCopyBufferToImage2KHR; + PFN_vkCmdCopyImageToBuffer2KHR CmdCopyImageToBuffer2KHR; + PFN_vkCmdBlitImage2KHR CmdBlitImage2KHR; + PFN_vkCmdResolveImage2KHR CmdResolveImage2KHR; + + // ---- 
VK_EXT_debug_marker extension commands + PFN_vkDebugMarkerSetObjectTagEXT DebugMarkerSetObjectTagEXT; + PFN_vkDebugMarkerSetObjectNameEXT DebugMarkerSetObjectNameEXT; + PFN_vkCmdDebugMarkerBeginEXT CmdDebugMarkerBeginEXT; + PFN_vkCmdDebugMarkerEndEXT CmdDebugMarkerEndEXT; + PFN_vkCmdDebugMarkerInsertEXT CmdDebugMarkerInsertEXT; + + // ---- VK_EXT_transform_feedback extension commands + PFN_vkCmdBindTransformFeedbackBuffersEXT CmdBindTransformFeedbackBuffersEXT; + PFN_vkCmdBeginTransformFeedbackEXT CmdBeginTransformFeedbackEXT; + PFN_vkCmdEndTransformFeedbackEXT CmdEndTransformFeedbackEXT; + PFN_vkCmdBeginQueryIndexedEXT CmdBeginQueryIndexedEXT; + PFN_vkCmdEndQueryIndexedEXT CmdEndQueryIndexedEXT; + PFN_vkCmdDrawIndirectByteCountEXT CmdDrawIndirectByteCountEXT; + + // ---- VK_NVX_image_view_handle extension commands + PFN_vkGetImageViewHandleNVX GetImageViewHandleNVX; + PFN_vkGetImageViewAddressNVX GetImageViewAddressNVX; + + // ---- VK_AMD_draw_indirect_count extension commands + PFN_vkCmdDrawIndirectCountAMD CmdDrawIndirectCountAMD; + PFN_vkCmdDrawIndexedIndirectCountAMD CmdDrawIndexedIndirectCountAMD; + + // ---- VK_AMD_shader_info extension commands + PFN_vkGetShaderInfoAMD GetShaderInfoAMD; + + // ---- VK_NV_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleNV GetMemoryWin32HandleNV; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_conditional_rendering extension commands + PFN_vkCmdBeginConditionalRenderingEXT CmdBeginConditionalRenderingEXT; + PFN_vkCmdEndConditionalRenderingEXT CmdEndConditionalRenderingEXT; + + // ---- VK_NV_clip_space_w_scaling extension commands + PFN_vkCmdSetViewportWScalingNV CmdSetViewportWScalingNV; + + // ---- VK_EXT_display_control extension commands + PFN_vkDisplayPowerControlEXT DisplayPowerControlEXT; + PFN_vkRegisterDeviceEventEXT RegisterDeviceEventEXT; + PFN_vkRegisterDisplayEventEXT RegisterDisplayEventEXT; + PFN_vkGetSwapchainCounterEXT GetSwapchainCounterEXT; + + // ---- VK_GOOGLE_display_timing extension commands + PFN_vkGetRefreshCycleDurationGOOGLE GetRefreshCycleDurationGOOGLE; + PFN_vkGetPastPresentationTimingGOOGLE GetPastPresentationTimingGOOGLE; + + // ---- VK_EXT_discard_rectangles extension commands + PFN_vkCmdSetDiscardRectangleEXT CmdSetDiscardRectangleEXT; + + // ---- VK_EXT_hdr_metadata extension commands + PFN_vkSetHdrMetadataEXT SetHdrMetadataEXT; + + // ---- VK_EXT_debug_utils extension commands + PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT; + PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT; + PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT; + PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT; + PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT; + PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT; + PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT; + PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT; + + // ---- VK_ANDROID_external_memory_android_hardware_buffer extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkGetAndroidHardwareBufferPropertiesANDROID GetAndroidHardwareBufferPropertiesANDROID; +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkGetMemoryAndroidHardwareBufferANDROID GetMemoryAndroidHardwareBufferANDROID; +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_EXT_sample_locations extension commands + PFN_vkCmdSetSampleLocationsEXT CmdSetSampleLocationsEXT; + + // ---- VK_EXT_image_drm_format_modifier extension commands 
+ PFN_vkGetImageDrmFormatModifierPropertiesEXT GetImageDrmFormatModifierPropertiesEXT; + + // ---- VK_EXT_validation_cache extension commands + PFN_vkCreateValidationCacheEXT CreateValidationCacheEXT; + PFN_vkDestroyValidationCacheEXT DestroyValidationCacheEXT; + PFN_vkMergeValidationCachesEXT MergeValidationCachesEXT; + PFN_vkGetValidationCacheDataEXT GetValidationCacheDataEXT; + + // ---- VK_NV_shading_rate_image extension commands + PFN_vkCmdBindShadingRateImageNV CmdBindShadingRateImageNV; + PFN_vkCmdSetViewportShadingRatePaletteNV CmdSetViewportShadingRatePaletteNV; + PFN_vkCmdSetCoarseSampleOrderNV CmdSetCoarseSampleOrderNV; + + // ---- VK_NV_ray_tracing extension commands + PFN_vkCreateAccelerationStructureNV CreateAccelerationStructureNV; + PFN_vkDestroyAccelerationStructureNV DestroyAccelerationStructureNV; + PFN_vkGetAccelerationStructureMemoryRequirementsNV GetAccelerationStructureMemoryRequirementsNV; + PFN_vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV; + PFN_vkCmdBuildAccelerationStructureNV CmdBuildAccelerationStructureNV; + PFN_vkCmdCopyAccelerationStructureNV CmdCopyAccelerationStructureNV; + PFN_vkCmdTraceRaysNV CmdTraceRaysNV; + PFN_vkCreateRayTracingPipelinesNV CreateRayTracingPipelinesNV; + PFN_vkGetRayTracingShaderGroupHandlesKHR GetRayTracingShaderGroupHandlesKHR; + PFN_vkGetRayTracingShaderGroupHandlesNV GetRayTracingShaderGroupHandlesNV; + PFN_vkGetAccelerationStructureHandleNV GetAccelerationStructureHandleNV; + PFN_vkCmdWriteAccelerationStructuresPropertiesNV CmdWriteAccelerationStructuresPropertiesNV; + PFN_vkCompileDeferredNV CompileDeferredNV; + + // ---- VK_EXT_external_memory_host extension commands + PFN_vkGetMemoryHostPointerPropertiesEXT GetMemoryHostPointerPropertiesEXT; + + // ---- VK_AMD_buffer_marker extension commands + PFN_vkCmdWriteBufferMarkerAMD CmdWriteBufferMarkerAMD; + + // ---- VK_EXT_calibrated_timestamps extension commands + PFN_vkGetCalibratedTimestampsEXT GetCalibratedTimestampsEXT; + + // ---- VK_NV_mesh_shader extension commands + PFN_vkCmdDrawMeshTasksNV CmdDrawMeshTasksNV; + PFN_vkCmdDrawMeshTasksIndirectNV CmdDrawMeshTasksIndirectNV; + PFN_vkCmdDrawMeshTasksIndirectCountNV CmdDrawMeshTasksIndirectCountNV; + + // ---- VK_NV_scissor_exclusive extension commands + PFN_vkCmdSetExclusiveScissorNV CmdSetExclusiveScissorNV; + + // ---- VK_NV_device_diagnostic_checkpoints extension commands + PFN_vkCmdSetCheckpointNV CmdSetCheckpointNV; + PFN_vkGetQueueCheckpointDataNV GetQueueCheckpointDataNV; + + // ---- VK_INTEL_performance_query extension commands + PFN_vkInitializePerformanceApiINTEL InitializePerformanceApiINTEL; + PFN_vkUninitializePerformanceApiINTEL UninitializePerformanceApiINTEL; + PFN_vkCmdSetPerformanceMarkerINTEL CmdSetPerformanceMarkerINTEL; + PFN_vkCmdSetPerformanceStreamMarkerINTEL CmdSetPerformanceStreamMarkerINTEL; + PFN_vkCmdSetPerformanceOverrideINTEL CmdSetPerformanceOverrideINTEL; + PFN_vkAcquirePerformanceConfigurationINTEL AcquirePerformanceConfigurationINTEL; + PFN_vkReleasePerformanceConfigurationINTEL ReleasePerformanceConfigurationINTEL; + PFN_vkQueueSetPerformanceConfigurationINTEL QueueSetPerformanceConfigurationINTEL; + PFN_vkGetPerformanceParameterINTEL GetPerformanceParameterINTEL; + + // ---- VK_AMD_display_native_hdr extension commands + PFN_vkSetLocalDimmingAMD SetLocalDimmingAMD; + + // ---- VK_EXT_buffer_device_address extension commands + PFN_vkGetBufferDeviceAddressEXT GetBufferDeviceAddressEXT; + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef 
VK_USE_PLATFORM_WIN32_KHR + PFN_vkAcquireFullScreenExclusiveModeEXT AcquireFullScreenExclusiveModeEXT; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkReleaseFullScreenExclusiveModeEXT ReleaseFullScreenExclusiveModeEXT; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetDeviceGroupSurfacePresentModes2EXT GetDeviceGroupSurfacePresentModes2EXT; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_line_rasterization extension commands + PFN_vkCmdSetLineStippleEXT CmdSetLineStippleEXT; + + // ---- VK_EXT_host_query_reset extension commands + PFN_vkResetQueryPoolEXT ResetQueryPoolEXT; + + // ---- VK_EXT_extended_dynamic_state extension commands + PFN_vkCmdSetCullModeEXT CmdSetCullModeEXT; + PFN_vkCmdSetFrontFaceEXT CmdSetFrontFaceEXT; + PFN_vkCmdSetPrimitiveTopologyEXT CmdSetPrimitiveTopologyEXT; + PFN_vkCmdSetViewportWithCountEXT CmdSetViewportWithCountEXT; + PFN_vkCmdSetScissorWithCountEXT CmdSetScissorWithCountEXT; + PFN_vkCmdBindVertexBuffers2EXT CmdBindVertexBuffers2EXT; + PFN_vkCmdSetDepthTestEnableEXT CmdSetDepthTestEnableEXT; + PFN_vkCmdSetDepthWriteEnableEXT CmdSetDepthWriteEnableEXT; + PFN_vkCmdSetDepthCompareOpEXT CmdSetDepthCompareOpEXT; + PFN_vkCmdSetDepthBoundsTestEnableEXT CmdSetDepthBoundsTestEnableEXT; + PFN_vkCmdSetStencilTestEnableEXT CmdSetStencilTestEnableEXT; + PFN_vkCmdSetStencilOpEXT CmdSetStencilOpEXT; + + // ---- VK_NV_device_generated_commands extension commands + PFN_vkGetGeneratedCommandsMemoryRequirementsNV GetGeneratedCommandsMemoryRequirementsNV; + PFN_vkCmdPreprocessGeneratedCommandsNV CmdPreprocessGeneratedCommandsNV; + PFN_vkCmdExecuteGeneratedCommandsNV CmdExecuteGeneratedCommandsNV; + PFN_vkCmdBindPipelineShaderGroupNV CmdBindPipelineShaderGroupNV; + PFN_vkCreateIndirectCommandsLayoutNV CreateIndirectCommandsLayoutNV; + PFN_vkDestroyIndirectCommandsLayoutNV DestroyIndirectCommandsLayoutNV; + + // ---- VK_EXT_private_data extension commands + PFN_vkCreatePrivateDataSlotEXT CreatePrivateDataSlotEXT; + PFN_vkDestroyPrivateDataSlotEXT DestroyPrivateDataSlotEXT; + PFN_vkSetPrivateDataEXT SetPrivateDataEXT; + PFN_vkGetPrivateDataEXT GetPrivateDataEXT; + + // ---- VK_NV_fragment_shading_rate_enums extension commands + PFN_vkCmdSetFragmentShadingRateEnumNV CmdSetFragmentShadingRateEnumNV; + + // ---- VK_KHR_acceleration_structure extension commands + PFN_vkCreateAccelerationStructureKHR CreateAccelerationStructureKHR; + PFN_vkDestroyAccelerationStructureKHR DestroyAccelerationStructureKHR; + PFN_vkCmdBuildAccelerationStructuresKHR CmdBuildAccelerationStructuresKHR; + PFN_vkCmdBuildAccelerationStructuresIndirectKHR CmdBuildAccelerationStructuresIndirectKHR; + PFN_vkBuildAccelerationStructuresKHR BuildAccelerationStructuresKHR; + PFN_vkCopyAccelerationStructureKHR CopyAccelerationStructureKHR; + PFN_vkCopyAccelerationStructureToMemoryKHR CopyAccelerationStructureToMemoryKHR; + PFN_vkCopyMemoryToAccelerationStructureKHR CopyMemoryToAccelerationStructureKHR; + PFN_vkWriteAccelerationStructuresPropertiesKHR WriteAccelerationStructuresPropertiesKHR; + PFN_vkCmdCopyAccelerationStructureKHR CmdCopyAccelerationStructureKHR; + PFN_vkCmdCopyAccelerationStructureToMemoryKHR CmdCopyAccelerationStructureToMemoryKHR; + PFN_vkCmdCopyMemoryToAccelerationStructureKHR CmdCopyMemoryToAccelerationStructureKHR; + PFN_vkGetAccelerationStructureDeviceAddressKHR GetAccelerationStructureDeviceAddressKHR; + PFN_vkCmdWriteAccelerationStructuresPropertiesKHR CmdWriteAccelerationStructuresPropertiesKHR; + 
PFN_vkGetDeviceAccelerationStructureCompatibilityKHR GetDeviceAccelerationStructureCompatibilityKHR; + PFN_vkGetAccelerationStructureBuildSizesKHR GetAccelerationStructureBuildSizesKHR; + + // ---- VK_KHR_ray_tracing_pipeline extension commands + PFN_vkCmdTraceRaysKHR CmdTraceRaysKHR; + PFN_vkCreateRayTracingPipelinesKHR CreateRayTracingPipelinesKHR; + PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR GetRayTracingCaptureReplayShaderGroupHandlesKHR; + PFN_vkCmdTraceRaysIndirectKHR CmdTraceRaysIndirectKHR; + PFN_vkGetRayTracingShaderGroupStackSizeKHR GetRayTracingShaderGroupStackSizeKHR; + PFN_vkCmdSetRayTracingPipelineStackSizeKHR CmdSetRayTracingPipelineStackSizeKHR; +} VkLayerDispatchTable; + + diff --git a/src/third_party/vulkan/loader/vk_loader_extensions.c b/src/third_party/vulkan/loader/vk_loader_extensions.c new file mode 100644 index 0000000..d9d506d --- /dev/null +++ b/src/third_party/vulkan/loader/vk_loader_extensions.c @@ -0,0 +1,5483 @@ +// *** THIS FILE IS GENERATED - DO NOT EDIT *** +// See loader_extension_generator.py for modifications + +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Author: Mark Lobodzinski + * Author: Mark Young + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include "vk_loader_platform.h" +#include "loader.h" +#include "vk_loader_extensions.h" +#include "../vk_icd.h" +#include "wsi.h" +#include "debug_utils.h" +#include "extension_manual.h" + +// Device extension error function +VKAPI_ATTR VkResult VKAPI_CALL vkDevExtError(VkDevice dev) { + struct loader_device *found_dev; + // The device going in is a trampoline device + struct loader_icd_term *icd_term = loader_get_icd_and_device(dev, &found_dev, NULL); + + if (icd_term) + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "Bad destination in loader trampoline dispatch," + "Are layers and extensions that you are calling enabled?"); + return VK_ERROR_EXTENSION_NOT_PRESENT; +} + +VKAPI_ATTR bool VKAPI_CALL loader_icd_init_entries(struct loader_icd_term *icd_term, VkInstance inst, + const PFN_vkGetInstanceProcAddr fp_gipa) { + +#define LOOKUP_GIPA(func, required) \ + do { \ + icd_term->dispatch.func = (PFN_vk##func)fp_gipa(inst, "vk" #func); \ + if (!icd_term->dispatch.func && required) { \ + loader_log((struct loader_instance *)inst, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, \ + loader_platform_get_proc_address_error("vk" #func)); \ + return false; \ + } \ + } while (0) + + + // ---- Core 1_0 + LOOKUP_GIPA(DestroyInstance, true); + LOOKUP_GIPA(EnumeratePhysicalDevices, true); + LOOKUP_GIPA(GetPhysicalDeviceFeatures, true); + LOOKUP_GIPA(GetPhysicalDeviceFormatProperties, true); + LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties, true); + LOOKUP_GIPA(GetPhysicalDeviceProperties, true); + LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties, true); + LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties, true); + LOOKUP_GIPA(GetDeviceProcAddr, true); + LOOKUP_GIPA(CreateDevice, true); + LOOKUP_GIPA(EnumerateDeviceExtensionProperties, true); + LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties, true); + + // ---- Core 1_1 + LOOKUP_GIPA(EnumeratePhysicalDeviceGroups, false); + LOOKUP_GIPA(GetPhysicalDeviceFeatures2, false); + LOOKUP_GIPA(GetPhysicalDeviceProperties2, false); + LOOKUP_GIPA(GetPhysicalDeviceFormatProperties2, false); + LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties2, false); + LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties2, false); + LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties2, false); + LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties2, false); + LOOKUP_GIPA(GetPhysicalDeviceExternalBufferProperties, false); + LOOKUP_GIPA(GetPhysicalDeviceExternalFenceProperties, false); + LOOKUP_GIPA(GetPhysicalDeviceExternalSemaphoreProperties, false); + + // ---- VK_KHR_surface extension commands + LOOKUP_GIPA(DestroySurfaceKHR, false); + LOOKUP_GIPA(GetPhysicalDeviceSurfaceSupportKHR, false); + LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilitiesKHR, false); + LOOKUP_GIPA(GetPhysicalDeviceSurfaceFormatsKHR, false); + LOOKUP_GIPA(GetPhysicalDeviceSurfacePresentModesKHR, false); + + // ---- VK_KHR_swapchain extension commands + LOOKUP_GIPA(CreateSwapchainKHR, false); + LOOKUP_GIPA(GetDeviceGroupSurfacePresentModesKHR, false); + LOOKUP_GIPA(GetPhysicalDevicePresentRectanglesKHR, false); + + // ---- VK_KHR_display extension commands + LOOKUP_GIPA(GetPhysicalDeviceDisplayPropertiesKHR, false); + LOOKUP_GIPA(GetPhysicalDeviceDisplayPlanePropertiesKHR, false); + LOOKUP_GIPA(GetDisplayPlaneSupportedDisplaysKHR, false); + LOOKUP_GIPA(GetDisplayModePropertiesKHR, false); + LOOKUP_GIPA(CreateDisplayModeKHR, false); + 
LOOKUP_GIPA(GetDisplayPlaneCapabilitiesKHR, false); + LOOKUP_GIPA(CreateDisplayPlaneSurfaceKHR, false); + + // ---- VK_KHR_display_swapchain extension commands + LOOKUP_GIPA(CreateSharedSwapchainsKHR, false); + + // ---- VK_KHR_xlib_surface extension commands +#ifdef VK_USE_PLATFORM_XLIB_KHR + LOOKUP_GIPA(CreateXlibSurfaceKHR, false); +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + LOOKUP_GIPA(GetPhysicalDeviceXlibPresentationSupportKHR, false); +#endif // VK_USE_PLATFORM_XLIB_KHR + + // ---- VK_KHR_xcb_surface extension commands +#ifdef VK_USE_PLATFORM_XCB_KHR + LOOKUP_GIPA(CreateXcbSurfaceKHR, false); +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + LOOKUP_GIPA(GetPhysicalDeviceXcbPresentationSupportKHR, false); +#endif // VK_USE_PLATFORM_XCB_KHR + + // ---- VK_KHR_wayland_surface extension commands +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + LOOKUP_GIPA(CreateWaylandSurfaceKHR, false); +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + LOOKUP_GIPA(GetPhysicalDeviceWaylandPresentationSupportKHR, false); +#endif // VK_USE_PLATFORM_WAYLAND_KHR + + // ---- VK_KHR_android_surface extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + LOOKUP_GIPA(CreateAndroidSurfaceKHR, false); +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_KHR_win32_surface extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + LOOKUP_GIPA(CreateWin32SurfaceKHR, false); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + LOOKUP_GIPA(GetPhysicalDeviceWin32PresentationSupportKHR, false); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_get_physical_device_properties2 extension commands + LOOKUP_GIPA(GetPhysicalDeviceFeatures2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceProperties2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceFormatProperties2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceImageFormatProperties2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyProperties2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceMemoryProperties2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceSparseImageFormatProperties2KHR, false); + + // ---- VK_KHR_device_group_creation extension commands + LOOKUP_GIPA(EnumeratePhysicalDeviceGroupsKHR, false); + + // ---- VK_KHR_external_memory_capabilities extension commands + LOOKUP_GIPA(GetPhysicalDeviceExternalBufferPropertiesKHR, false); + + // ---- VK_KHR_external_semaphore_capabilities extension commands + LOOKUP_GIPA(GetPhysicalDeviceExternalSemaphorePropertiesKHR, false); + + // ---- VK_KHR_external_fence_capabilities extension commands + LOOKUP_GIPA(GetPhysicalDeviceExternalFencePropertiesKHR, false); + + // ---- VK_KHR_performance_query extension commands + LOOKUP_GIPA(EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR, false); + LOOKUP_GIPA(GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR, false); + + // ---- VK_KHR_get_surface_capabilities2 extension commands + LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilities2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceSurfaceFormats2KHR, false); + + // ---- VK_KHR_get_display_properties2 extension commands + LOOKUP_GIPA(GetPhysicalDeviceDisplayProperties2KHR, false); + LOOKUP_GIPA(GetPhysicalDeviceDisplayPlaneProperties2KHR, false); + LOOKUP_GIPA(GetDisplayModeProperties2KHR, false); + LOOKUP_GIPA(GetDisplayPlaneCapabilities2KHR, false); + + // ---- VK_KHR_fragment_shading_rate extension commands + LOOKUP_GIPA(GetPhysicalDeviceFragmentShadingRatesKHR, false); + + // ---- VK_EXT_debug_report extension commands + 
LOOKUP_GIPA(CreateDebugReportCallbackEXT, false); + LOOKUP_GIPA(DestroyDebugReportCallbackEXT, false); + LOOKUP_GIPA(DebugReportMessageEXT, false); + + // ---- VK_EXT_debug_marker extension commands + LOOKUP_GIPA(DebugMarkerSetObjectTagEXT, false); + LOOKUP_GIPA(DebugMarkerSetObjectNameEXT, false); + + // ---- VK_GGP_stream_descriptor_surface extension commands +#ifdef VK_USE_PLATFORM_GGP + LOOKUP_GIPA(CreateStreamDescriptorSurfaceGGP, false); +#endif // VK_USE_PLATFORM_GGP + + // ---- VK_NV_external_memory_capabilities extension commands + LOOKUP_GIPA(GetPhysicalDeviceExternalImageFormatPropertiesNV, false); + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + LOOKUP_GIPA(CreateViSurfaceNN, false); +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + LOOKUP_GIPA(ReleaseDisplayEXT, false); + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + LOOKUP_GIPA(AcquireXlibDisplayEXT, false); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + LOOKUP_GIPA(GetRandROutputDisplayEXT, false); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + LOOKUP_GIPA(GetPhysicalDeviceSurfaceCapabilities2EXT, false); + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + LOOKUP_GIPA(CreateIOSSurfaceMVK, false); +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + LOOKUP_GIPA(CreateMacOSSurfaceMVK, false); +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + LOOKUP_GIPA(SetDebugUtilsObjectNameEXT, false); + LOOKUP_GIPA(SetDebugUtilsObjectTagEXT, false); + LOOKUP_GIPA(QueueBeginDebugUtilsLabelEXT, false); + LOOKUP_GIPA(QueueEndDebugUtilsLabelEXT, false); + LOOKUP_GIPA(QueueInsertDebugUtilsLabelEXT, false); + LOOKUP_GIPA(CmdBeginDebugUtilsLabelEXT, false); + LOOKUP_GIPA(CmdEndDebugUtilsLabelEXT, false); + LOOKUP_GIPA(CmdInsertDebugUtilsLabelEXT, false); + LOOKUP_GIPA(CreateDebugUtilsMessengerEXT, false); + LOOKUP_GIPA(DestroyDebugUtilsMessengerEXT, false); + LOOKUP_GIPA(SubmitDebugUtilsMessageEXT, false); + + // ---- VK_EXT_sample_locations extension commands + LOOKUP_GIPA(GetPhysicalDeviceMultisamplePropertiesEXT, false); + + // ---- VK_EXT_calibrated_timestamps extension commands + LOOKUP_GIPA(GetPhysicalDeviceCalibrateableTimeDomainsEXT, false); + + // ---- VK_FUCHSIA_imagepipe_surface extension commands +#ifdef VK_USE_PLATFORM_FUCHSIA + LOOKUP_GIPA(CreateImagePipeSurfaceFUCHSIA, false); +#endif // VK_USE_PLATFORM_FUCHSIA + + // ---- VK_EXT_metal_surface extension commands +#ifdef VK_USE_PLATFORM_METAL_EXT + LOOKUP_GIPA(CreateMetalSurfaceEXT, false); +#endif // VK_USE_PLATFORM_METAL_EXT + + // ---- VK_EXT_tooling_info extension commands + LOOKUP_GIPA(GetPhysicalDeviceToolPropertiesEXT, false); + + // ---- VK_NV_cooperative_matrix extension commands + LOOKUP_GIPA(GetPhysicalDeviceCooperativeMatrixPropertiesNV, false); + + // ---- VK_NV_coverage_reduction_mode extension commands + LOOKUP_GIPA(GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV, false); + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + LOOKUP_GIPA(GetPhysicalDeviceSurfacePresentModes2EXT, false); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + LOOKUP_GIPA(GetDeviceGroupSurfacePresentModes2EXT, false); +#endif // VK_USE_PLATFORM_WIN32_KHR 
+ + // ---- VK_EXT_headless_surface extension commands + LOOKUP_GIPA(CreateHeadlessSurfaceEXT, false); + + // ---- VK_EXT_directfb_surface extension commands +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + LOOKUP_GIPA(CreateDirectFBSurfaceEXT, false); +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + LOOKUP_GIPA(GetPhysicalDeviceDirectFBPresentationSupportEXT, false); +#endif // VK_USE_PLATFORM_DIRECTFB_EXT + +#undef LOOKUP_GIPA + + return true; +}; + +// Init Device function pointer dispatch table with core commands +VKAPI_ATTR void VKAPI_CALL loader_init_device_dispatch_table(struct loader_dev_dispatch_table *dev_table, PFN_vkGetDeviceProcAddr gpa, + VkDevice dev) { + VkLayerDispatchTable *table = &dev_table->core_dispatch; + for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) dev_table->ext_dispatch.dev_ext[i] = (PFN_vkDevExt)vkDevExtError; + + // ---- Core 1_0 commands + table->GetDeviceProcAddr = gpa; + table->DestroyDevice = (PFN_vkDestroyDevice)gpa(dev, "vkDestroyDevice"); + table->GetDeviceQueue = (PFN_vkGetDeviceQueue)gpa(dev, "vkGetDeviceQueue"); + table->QueueSubmit = (PFN_vkQueueSubmit)gpa(dev, "vkQueueSubmit"); + table->QueueWaitIdle = (PFN_vkQueueWaitIdle)gpa(dev, "vkQueueWaitIdle"); + table->DeviceWaitIdle = (PFN_vkDeviceWaitIdle)gpa(dev, "vkDeviceWaitIdle"); + table->AllocateMemory = (PFN_vkAllocateMemory)gpa(dev, "vkAllocateMemory"); + table->FreeMemory = (PFN_vkFreeMemory)gpa(dev, "vkFreeMemory"); + table->MapMemory = (PFN_vkMapMemory)gpa(dev, "vkMapMemory"); + table->UnmapMemory = (PFN_vkUnmapMemory)gpa(dev, "vkUnmapMemory"); + table->FlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)gpa(dev, "vkFlushMappedMemoryRanges"); + table->InvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)gpa(dev, "vkInvalidateMappedMemoryRanges"); + table->GetDeviceMemoryCommitment = (PFN_vkGetDeviceMemoryCommitment)gpa(dev, "vkGetDeviceMemoryCommitment"); + table->BindBufferMemory = (PFN_vkBindBufferMemory)gpa(dev, "vkBindBufferMemory"); + table->BindImageMemory = (PFN_vkBindImageMemory)gpa(dev, "vkBindImageMemory"); + table->GetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)gpa(dev, "vkGetBufferMemoryRequirements"); + table->GetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)gpa(dev, "vkGetImageMemoryRequirements"); + table->GetImageSparseMemoryRequirements = (PFN_vkGetImageSparseMemoryRequirements)gpa(dev, "vkGetImageSparseMemoryRequirements"); + table->QueueBindSparse = (PFN_vkQueueBindSparse)gpa(dev, "vkQueueBindSparse"); + table->CreateFence = (PFN_vkCreateFence)gpa(dev, "vkCreateFence"); + table->DestroyFence = (PFN_vkDestroyFence)gpa(dev, "vkDestroyFence"); + table->ResetFences = (PFN_vkResetFences)gpa(dev, "vkResetFences"); + table->GetFenceStatus = (PFN_vkGetFenceStatus)gpa(dev, "vkGetFenceStatus"); + table->WaitForFences = (PFN_vkWaitForFences)gpa(dev, "vkWaitForFences"); + table->CreateSemaphore = (PFN_vkCreateSemaphore)gpa(dev, "vkCreateSemaphore"); + table->DestroySemaphore = (PFN_vkDestroySemaphore)gpa(dev, "vkDestroySemaphore"); + table->CreateEvent = (PFN_vkCreateEvent)gpa(dev, "vkCreateEvent"); + table->DestroyEvent = (PFN_vkDestroyEvent)gpa(dev, "vkDestroyEvent"); + table->GetEventStatus = (PFN_vkGetEventStatus)gpa(dev, "vkGetEventStatus"); + table->SetEvent = (PFN_vkSetEvent)gpa(dev, "vkSetEvent"); + table->ResetEvent = (PFN_vkResetEvent)gpa(dev, "vkResetEvent"); + table->CreateQueryPool = (PFN_vkCreateQueryPool)gpa(dev, "vkCreateQueryPool"); + table->DestroyQueryPool = (PFN_vkDestroyQueryPool)gpa(dev, 
"vkDestroyQueryPool"); + table->GetQueryPoolResults = (PFN_vkGetQueryPoolResults)gpa(dev, "vkGetQueryPoolResults"); + table->CreateBuffer = (PFN_vkCreateBuffer)gpa(dev, "vkCreateBuffer"); + table->DestroyBuffer = (PFN_vkDestroyBuffer)gpa(dev, "vkDestroyBuffer"); + table->CreateBufferView = (PFN_vkCreateBufferView)gpa(dev, "vkCreateBufferView"); + table->DestroyBufferView = (PFN_vkDestroyBufferView)gpa(dev, "vkDestroyBufferView"); + table->CreateImage = (PFN_vkCreateImage)gpa(dev, "vkCreateImage"); + table->DestroyImage = (PFN_vkDestroyImage)gpa(dev, "vkDestroyImage"); + table->GetImageSubresourceLayout = (PFN_vkGetImageSubresourceLayout)gpa(dev, "vkGetImageSubresourceLayout"); + table->CreateImageView = (PFN_vkCreateImageView)gpa(dev, "vkCreateImageView"); + table->DestroyImageView = (PFN_vkDestroyImageView)gpa(dev, "vkDestroyImageView"); + table->CreateShaderModule = (PFN_vkCreateShaderModule)gpa(dev, "vkCreateShaderModule"); + table->DestroyShaderModule = (PFN_vkDestroyShaderModule)gpa(dev, "vkDestroyShaderModule"); + table->CreatePipelineCache = (PFN_vkCreatePipelineCache)gpa(dev, "vkCreatePipelineCache"); + table->DestroyPipelineCache = (PFN_vkDestroyPipelineCache)gpa(dev, "vkDestroyPipelineCache"); + table->GetPipelineCacheData = (PFN_vkGetPipelineCacheData)gpa(dev, "vkGetPipelineCacheData"); + table->MergePipelineCaches = (PFN_vkMergePipelineCaches)gpa(dev, "vkMergePipelineCaches"); + table->CreateGraphicsPipelines = (PFN_vkCreateGraphicsPipelines)gpa(dev, "vkCreateGraphicsPipelines"); + table->CreateComputePipelines = (PFN_vkCreateComputePipelines)gpa(dev, "vkCreateComputePipelines"); + table->DestroyPipeline = (PFN_vkDestroyPipeline)gpa(dev, "vkDestroyPipeline"); + table->CreatePipelineLayout = (PFN_vkCreatePipelineLayout)gpa(dev, "vkCreatePipelineLayout"); + table->DestroyPipelineLayout = (PFN_vkDestroyPipelineLayout)gpa(dev, "vkDestroyPipelineLayout"); + table->CreateSampler = (PFN_vkCreateSampler)gpa(dev, "vkCreateSampler"); + table->DestroySampler = (PFN_vkDestroySampler)gpa(dev, "vkDestroySampler"); + table->CreateDescriptorSetLayout = (PFN_vkCreateDescriptorSetLayout)gpa(dev, "vkCreateDescriptorSetLayout"); + table->DestroyDescriptorSetLayout = (PFN_vkDestroyDescriptorSetLayout)gpa(dev, "vkDestroyDescriptorSetLayout"); + table->CreateDescriptorPool = (PFN_vkCreateDescriptorPool)gpa(dev, "vkCreateDescriptorPool"); + table->DestroyDescriptorPool = (PFN_vkDestroyDescriptorPool)gpa(dev, "vkDestroyDescriptorPool"); + table->ResetDescriptorPool = (PFN_vkResetDescriptorPool)gpa(dev, "vkResetDescriptorPool"); + table->AllocateDescriptorSets = (PFN_vkAllocateDescriptorSets)gpa(dev, "vkAllocateDescriptorSets"); + table->FreeDescriptorSets = (PFN_vkFreeDescriptorSets)gpa(dev, "vkFreeDescriptorSets"); + table->UpdateDescriptorSets = (PFN_vkUpdateDescriptorSets)gpa(dev, "vkUpdateDescriptorSets"); + table->CreateFramebuffer = (PFN_vkCreateFramebuffer)gpa(dev, "vkCreateFramebuffer"); + table->DestroyFramebuffer = (PFN_vkDestroyFramebuffer)gpa(dev, "vkDestroyFramebuffer"); + table->CreateRenderPass = (PFN_vkCreateRenderPass)gpa(dev, "vkCreateRenderPass"); + table->DestroyRenderPass = (PFN_vkDestroyRenderPass)gpa(dev, "vkDestroyRenderPass"); + table->GetRenderAreaGranularity = (PFN_vkGetRenderAreaGranularity)gpa(dev, "vkGetRenderAreaGranularity"); + table->CreateCommandPool = (PFN_vkCreateCommandPool)gpa(dev, "vkCreateCommandPool"); + table->DestroyCommandPool = (PFN_vkDestroyCommandPool)gpa(dev, "vkDestroyCommandPool"); + table->ResetCommandPool = (PFN_vkResetCommandPool)gpa(dev, 
"vkResetCommandPool"); + table->AllocateCommandBuffers = (PFN_vkAllocateCommandBuffers)gpa(dev, "vkAllocateCommandBuffers"); + table->FreeCommandBuffers = (PFN_vkFreeCommandBuffers)gpa(dev, "vkFreeCommandBuffers"); + table->BeginCommandBuffer = (PFN_vkBeginCommandBuffer)gpa(dev, "vkBeginCommandBuffer"); + table->EndCommandBuffer = (PFN_vkEndCommandBuffer)gpa(dev, "vkEndCommandBuffer"); + table->ResetCommandBuffer = (PFN_vkResetCommandBuffer)gpa(dev, "vkResetCommandBuffer"); + table->CmdBindPipeline = (PFN_vkCmdBindPipeline)gpa(dev, "vkCmdBindPipeline"); + table->CmdSetViewport = (PFN_vkCmdSetViewport)gpa(dev, "vkCmdSetViewport"); + table->CmdSetScissor = (PFN_vkCmdSetScissor)gpa(dev, "vkCmdSetScissor"); + table->CmdSetLineWidth = (PFN_vkCmdSetLineWidth)gpa(dev, "vkCmdSetLineWidth"); + table->CmdSetDepthBias = (PFN_vkCmdSetDepthBias)gpa(dev, "vkCmdSetDepthBias"); + table->CmdSetBlendConstants = (PFN_vkCmdSetBlendConstants)gpa(dev, "vkCmdSetBlendConstants"); + table->CmdSetDepthBounds = (PFN_vkCmdSetDepthBounds)gpa(dev, "vkCmdSetDepthBounds"); + table->CmdSetStencilCompareMask = (PFN_vkCmdSetStencilCompareMask)gpa(dev, "vkCmdSetStencilCompareMask"); + table->CmdSetStencilWriteMask = (PFN_vkCmdSetStencilWriteMask)gpa(dev, "vkCmdSetStencilWriteMask"); + table->CmdSetStencilReference = (PFN_vkCmdSetStencilReference)gpa(dev, "vkCmdSetStencilReference"); + table->CmdBindDescriptorSets = (PFN_vkCmdBindDescriptorSets)gpa(dev, "vkCmdBindDescriptorSets"); + table->CmdBindIndexBuffer = (PFN_vkCmdBindIndexBuffer)gpa(dev, "vkCmdBindIndexBuffer"); + table->CmdBindVertexBuffers = (PFN_vkCmdBindVertexBuffers)gpa(dev, "vkCmdBindVertexBuffers"); + table->CmdDraw = (PFN_vkCmdDraw)gpa(dev, "vkCmdDraw"); + table->CmdDrawIndexed = (PFN_vkCmdDrawIndexed)gpa(dev, "vkCmdDrawIndexed"); + table->CmdDrawIndirect = (PFN_vkCmdDrawIndirect)gpa(dev, "vkCmdDrawIndirect"); + table->CmdDrawIndexedIndirect = (PFN_vkCmdDrawIndexedIndirect)gpa(dev, "vkCmdDrawIndexedIndirect"); + table->CmdDispatch = (PFN_vkCmdDispatch)gpa(dev, "vkCmdDispatch"); + table->CmdDispatchIndirect = (PFN_vkCmdDispatchIndirect)gpa(dev, "vkCmdDispatchIndirect"); + table->CmdCopyBuffer = (PFN_vkCmdCopyBuffer)gpa(dev, "vkCmdCopyBuffer"); + table->CmdCopyImage = (PFN_vkCmdCopyImage)gpa(dev, "vkCmdCopyImage"); + table->CmdBlitImage = (PFN_vkCmdBlitImage)gpa(dev, "vkCmdBlitImage"); + table->CmdCopyBufferToImage = (PFN_vkCmdCopyBufferToImage)gpa(dev, "vkCmdCopyBufferToImage"); + table->CmdCopyImageToBuffer = (PFN_vkCmdCopyImageToBuffer)gpa(dev, "vkCmdCopyImageToBuffer"); + table->CmdUpdateBuffer = (PFN_vkCmdUpdateBuffer)gpa(dev, "vkCmdUpdateBuffer"); + table->CmdFillBuffer = (PFN_vkCmdFillBuffer)gpa(dev, "vkCmdFillBuffer"); + table->CmdClearColorImage = (PFN_vkCmdClearColorImage)gpa(dev, "vkCmdClearColorImage"); + table->CmdClearDepthStencilImage = (PFN_vkCmdClearDepthStencilImage)gpa(dev, "vkCmdClearDepthStencilImage"); + table->CmdClearAttachments = (PFN_vkCmdClearAttachments)gpa(dev, "vkCmdClearAttachments"); + table->CmdResolveImage = (PFN_vkCmdResolveImage)gpa(dev, "vkCmdResolveImage"); + table->CmdSetEvent = (PFN_vkCmdSetEvent)gpa(dev, "vkCmdSetEvent"); + table->CmdResetEvent = (PFN_vkCmdResetEvent)gpa(dev, "vkCmdResetEvent"); + table->CmdWaitEvents = (PFN_vkCmdWaitEvents)gpa(dev, "vkCmdWaitEvents"); + table->CmdPipelineBarrier = (PFN_vkCmdPipelineBarrier)gpa(dev, "vkCmdPipelineBarrier"); + table->CmdBeginQuery = (PFN_vkCmdBeginQuery)gpa(dev, "vkCmdBeginQuery"); + table->CmdEndQuery = (PFN_vkCmdEndQuery)gpa(dev, "vkCmdEndQuery"); + 
table->CmdResetQueryPool = (PFN_vkCmdResetQueryPool)gpa(dev, "vkCmdResetQueryPool"); + table->CmdWriteTimestamp = (PFN_vkCmdWriteTimestamp)gpa(dev, "vkCmdWriteTimestamp"); + table->CmdCopyQueryPoolResults = (PFN_vkCmdCopyQueryPoolResults)gpa(dev, "vkCmdCopyQueryPoolResults"); + table->CmdPushConstants = (PFN_vkCmdPushConstants)gpa(dev, "vkCmdPushConstants"); + table->CmdBeginRenderPass = (PFN_vkCmdBeginRenderPass)gpa(dev, "vkCmdBeginRenderPass"); + table->CmdNextSubpass = (PFN_vkCmdNextSubpass)gpa(dev, "vkCmdNextSubpass"); + table->CmdEndRenderPass = (PFN_vkCmdEndRenderPass)gpa(dev, "vkCmdEndRenderPass"); + table->CmdExecuteCommands = (PFN_vkCmdExecuteCommands)gpa(dev, "vkCmdExecuteCommands"); + + // ---- Core 1_1 commands + table->BindBufferMemory2 = (PFN_vkBindBufferMemory2)gpa(dev, "vkBindBufferMemory2"); + table->BindImageMemory2 = (PFN_vkBindImageMemory2)gpa(dev, "vkBindImageMemory2"); + table->GetDeviceGroupPeerMemoryFeatures = (PFN_vkGetDeviceGroupPeerMemoryFeatures)gpa(dev, "vkGetDeviceGroupPeerMemoryFeatures"); + table->CmdSetDeviceMask = (PFN_vkCmdSetDeviceMask)gpa(dev, "vkCmdSetDeviceMask"); + table->CmdDispatchBase = (PFN_vkCmdDispatchBase)gpa(dev, "vkCmdDispatchBase"); + table->GetImageMemoryRequirements2 = (PFN_vkGetImageMemoryRequirements2)gpa(dev, "vkGetImageMemoryRequirements2"); + table->GetBufferMemoryRequirements2 = (PFN_vkGetBufferMemoryRequirements2)gpa(dev, "vkGetBufferMemoryRequirements2"); + table->GetImageSparseMemoryRequirements2 = (PFN_vkGetImageSparseMemoryRequirements2)gpa(dev, "vkGetImageSparseMemoryRequirements2"); + table->TrimCommandPool = (PFN_vkTrimCommandPool)gpa(dev, "vkTrimCommandPool"); + table->GetDeviceQueue2 = (PFN_vkGetDeviceQueue2)gpa(dev, "vkGetDeviceQueue2"); + table->CreateSamplerYcbcrConversion = (PFN_vkCreateSamplerYcbcrConversion)gpa(dev, "vkCreateSamplerYcbcrConversion"); + table->DestroySamplerYcbcrConversion = (PFN_vkDestroySamplerYcbcrConversion)gpa(dev, "vkDestroySamplerYcbcrConversion"); + table->CreateDescriptorUpdateTemplate = (PFN_vkCreateDescriptorUpdateTemplate)gpa(dev, "vkCreateDescriptorUpdateTemplate"); + table->DestroyDescriptorUpdateTemplate = (PFN_vkDestroyDescriptorUpdateTemplate)gpa(dev, "vkDestroyDescriptorUpdateTemplate"); + table->UpdateDescriptorSetWithTemplate = (PFN_vkUpdateDescriptorSetWithTemplate)gpa(dev, "vkUpdateDescriptorSetWithTemplate"); + table->GetDescriptorSetLayoutSupport = (PFN_vkGetDescriptorSetLayoutSupport)gpa(dev, "vkGetDescriptorSetLayoutSupport"); + + // ---- Core 1_2 commands + table->CmdDrawIndirectCount = (PFN_vkCmdDrawIndirectCount)gpa(dev, "vkCmdDrawIndirectCount"); + table->CmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount)gpa(dev, "vkCmdDrawIndexedIndirectCount"); + table->CreateRenderPass2 = (PFN_vkCreateRenderPass2)gpa(dev, "vkCreateRenderPass2"); + table->CmdBeginRenderPass2 = (PFN_vkCmdBeginRenderPass2)gpa(dev, "vkCmdBeginRenderPass2"); + table->CmdNextSubpass2 = (PFN_vkCmdNextSubpass2)gpa(dev, "vkCmdNextSubpass2"); + table->CmdEndRenderPass2 = (PFN_vkCmdEndRenderPass2)gpa(dev, "vkCmdEndRenderPass2"); + table->ResetQueryPool = (PFN_vkResetQueryPool)gpa(dev, "vkResetQueryPool"); + table->GetSemaphoreCounterValue = (PFN_vkGetSemaphoreCounterValue)gpa(dev, "vkGetSemaphoreCounterValue"); + table->WaitSemaphores = (PFN_vkWaitSemaphores)gpa(dev, "vkWaitSemaphores"); + table->SignalSemaphore = (PFN_vkSignalSemaphore)gpa(dev, "vkSignalSemaphore"); + table->GetBufferDeviceAddress = (PFN_vkGetBufferDeviceAddress)gpa(dev, "vkGetBufferDeviceAddress"); + 
table->GetBufferOpaqueCaptureAddress = (PFN_vkGetBufferOpaqueCaptureAddress)gpa(dev, "vkGetBufferOpaqueCaptureAddress"); + table->GetDeviceMemoryOpaqueCaptureAddress = (PFN_vkGetDeviceMemoryOpaqueCaptureAddress)gpa(dev, "vkGetDeviceMemoryOpaqueCaptureAddress"); +} + +// Init Device function pointer dispatch table with extension commands +VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct loader_dev_dispatch_table *dev_table, + PFN_vkGetInstanceProcAddr gipa, + PFN_vkGetDeviceProcAddr gdpa, + VkInstance inst, + VkDevice dev) { + VkLayerDispatchTable *table = &dev_table->core_dispatch; + + // ---- VK_KHR_swapchain extension commands + table->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gdpa(dev, "vkCreateSwapchainKHR"); + table->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gdpa(dev, "vkDestroySwapchainKHR"); + table->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gdpa(dev, "vkGetSwapchainImagesKHR"); + table->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gdpa(dev, "vkAcquireNextImageKHR"); + table->QueuePresentKHR = (PFN_vkQueuePresentKHR)gdpa(dev, "vkQueuePresentKHR"); + table->GetDeviceGroupPresentCapabilitiesKHR = (PFN_vkGetDeviceGroupPresentCapabilitiesKHR)gdpa(dev, "vkGetDeviceGroupPresentCapabilitiesKHR"); + table->GetDeviceGroupSurfacePresentModesKHR = (PFN_vkGetDeviceGroupSurfacePresentModesKHR)gdpa(dev, "vkGetDeviceGroupSurfacePresentModesKHR"); + table->AcquireNextImage2KHR = (PFN_vkAcquireNextImage2KHR)gdpa(dev, "vkAcquireNextImage2KHR"); + + // ---- VK_KHR_display_swapchain extension commands + table->CreateSharedSwapchainsKHR = (PFN_vkCreateSharedSwapchainsKHR)gdpa(dev, "vkCreateSharedSwapchainsKHR"); + + // ---- VK_KHR_device_group extension commands + table->GetDeviceGroupPeerMemoryFeaturesKHR = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)gdpa(dev, "vkGetDeviceGroupPeerMemoryFeaturesKHR"); + table->CmdSetDeviceMaskKHR = (PFN_vkCmdSetDeviceMaskKHR)gdpa(dev, "vkCmdSetDeviceMaskKHR"); + table->CmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)gdpa(dev, "vkCmdDispatchBaseKHR"); + + // ---- VK_KHR_maintenance1 extension commands + table->TrimCommandPoolKHR = (PFN_vkTrimCommandPoolKHR)gdpa(dev, "vkTrimCommandPoolKHR"); + + // ---- VK_KHR_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)gdpa(dev, "vkGetMemoryWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryWin32HandlePropertiesKHR = (PFN_vkGetMemoryWin32HandlePropertiesKHR)gdpa(dev, "vkGetMemoryWin32HandlePropertiesKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_memory_fd extension commands + table->GetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)gdpa(dev, "vkGetMemoryFdKHR"); + table->GetMemoryFdPropertiesKHR = (PFN_vkGetMemoryFdPropertiesKHR)gdpa(dev, "vkGetMemoryFdPropertiesKHR"); + + // ---- VK_KHR_external_semaphore_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->ImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)gdpa(dev, "vkImportSemaphoreWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)gdpa(dev, "vkGetSemaphoreWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_semaphore_fd extension commands + table->ImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)gdpa(dev, "vkImportSemaphoreFdKHR"); + table->GetSemaphoreFdKHR = 
(PFN_vkGetSemaphoreFdKHR)gdpa(dev, "vkGetSemaphoreFdKHR"); + + // ---- VK_KHR_push_descriptor extension commands + table->CmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)gdpa(dev, "vkCmdPushDescriptorSetKHR"); + table->CmdPushDescriptorSetWithTemplateKHR = (PFN_vkCmdPushDescriptorSetWithTemplateKHR)gdpa(dev, "vkCmdPushDescriptorSetWithTemplateKHR"); + + // ---- VK_KHR_descriptor_update_template extension commands + table->CreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)gdpa(dev, "vkCreateDescriptorUpdateTemplateKHR"); + table->DestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)gdpa(dev, "vkDestroyDescriptorUpdateTemplateKHR"); + table->UpdateDescriptorSetWithTemplateKHR = (PFN_vkUpdateDescriptorSetWithTemplateKHR)gdpa(dev, "vkUpdateDescriptorSetWithTemplateKHR"); + + // ---- VK_KHR_create_renderpass2 extension commands + table->CreateRenderPass2KHR = (PFN_vkCreateRenderPass2KHR)gdpa(dev, "vkCreateRenderPass2KHR"); + table->CmdBeginRenderPass2KHR = (PFN_vkCmdBeginRenderPass2KHR)gdpa(dev, "vkCmdBeginRenderPass2KHR"); + table->CmdNextSubpass2KHR = (PFN_vkCmdNextSubpass2KHR)gdpa(dev, "vkCmdNextSubpass2KHR"); + table->CmdEndRenderPass2KHR = (PFN_vkCmdEndRenderPass2KHR)gdpa(dev, "vkCmdEndRenderPass2KHR"); + + // ---- VK_KHR_shared_presentable_image extension commands + table->GetSwapchainStatusKHR = (PFN_vkGetSwapchainStatusKHR)gdpa(dev, "vkGetSwapchainStatusKHR"); + + // ---- VK_KHR_external_fence_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->ImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)gdpa(dev, "vkImportFenceWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)gdpa(dev, "vkGetFenceWin32HandleKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_fence_fd extension commands + table->ImportFenceFdKHR = (PFN_vkImportFenceFdKHR)gdpa(dev, "vkImportFenceFdKHR"); + table->GetFenceFdKHR = (PFN_vkGetFenceFdKHR)gdpa(dev, "vkGetFenceFdKHR"); + + // ---- VK_KHR_performance_query extension commands + table->AcquireProfilingLockKHR = (PFN_vkAcquireProfilingLockKHR)gdpa(dev, "vkAcquireProfilingLockKHR"); + table->ReleaseProfilingLockKHR = (PFN_vkReleaseProfilingLockKHR)gdpa(dev, "vkReleaseProfilingLockKHR"); + + // ---- VK_KHR_get_memory_requirements2 extension commands + table->GetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2KHR)gdpa(dev, "vkGetImageMemoryRequirements2KHR"); + table->GetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)gdpa(dev, "vkGetBufferMemoryRequirements2KHR"); + table->GetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)gdpa(dev, "vkGetImageSparseMemoryRequirements2KHR"); + + // ---- VK_KHR_sampler_ycbcr_conversion extension commands + table->CreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)gdpa(dev, "vkCreateSamplerYcbcrConversionKHR"); + table->DestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR)gdpa(dev, "vkDestroySamplerYcbcrConversionKHR"); + + // ---- VK_KHR_bind_memory2 extension commands + table->BindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)gdpa(dev, "vkBindBufferMemory2KHR"); + table->BindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)gdpa(dev, "vkBindImageMemory2KHR"); + + // ---- VK_KHR_maintenance3 extension commands + table->GetDescriptorSetLayoutSupportKHR = (PFN_vkGetDescriptorSetLayoutSupportKHR)gdpa(dev, 
"vkGetDescriptorSetLayoutSupportKHR"); + + // ---- VK_KHR_draw_indirect_count extension commands + table->CmdDrawIndirectCountKHR = (PFN_vkCmdDrawIndirectCountKHR)gdpa(dev, "vkCmdDrawIndirectCountKHR"); + table->CmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)gdpa(dev, "vkCmdDrawIndexedIndirectCountKHR"); + + // ---- VK_KHR_timeline_semaphore extension commands + table->GetSemaphoreCounterValueKHR = (PFN_vkGetSemaphoreCounterValueKHR)gdpa(dev, "vkGetSemaphoreCounterValueKHR"); + table->WaitSemaphoresKHR = (PFN_vkWaitSemaphoresKHR)gdpa(dev, "vkWaitSemaphoresKHR"); + table->SignalSemaphoreKHR = (PFN_vkSignalSemaphoreKHR)gdpa(dev, "vkSignalSemaphoreKHR"); + + // ---- VK_KHR_fragment_shading_rate extension commands + table->CmdSetFragmentShadingRateKHR = (PFN_vkCmdSetFragmentShadingRateKHR)gdpa(dev, "vkCmdSetFragmentShadingRateKHR"); + + // ---- VK_KHR_buffer_device_address extension commands + table->GetBufferDeviceAddressKHR = (PFN_vkGetBufferDeviceAddressKHR)gdpa(dev, "vkGetBufferDeviceAddressKHR"); + table->GetBufferOpaqueCaptureAddressKHR = (PFN_vkGetBufferOpaqueCaptureAddressKHR)gdpa(dev, "vkGetBufferOpaqueCaptureAddressKHR"); + table->GetDeviceMemoryOpaqueCaptureAddressKHR = (PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)gdpa(dev, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); + + // ---- VK_KHR_deferred_host_operations extension commands + table->CreateDeferredOperationKHR = (PFN_vkCreateDeferredOperationKHR)gdpa(dev, "vkCreateDeferredOperationKHR"); + table->DestroyDeferredOperationKHR = (PFN_vkDestroyDeferredOperationKHR)gdpa(dev, "vkDestroyDeferredOperationKHR"); + table->GetDeferredOperationMaxConcurrencyKHR = (PFN_vkGetDeferredOperationMaxConcurrencyKHR)gdpa(dev, "vkGetDeferredOperationMaxConcurrencyKHR"); + table->GetDeferredOperationResultKHR = (PFN_vkGetDeferredOperationResultKHR)gdpa(dev, "vkGetDeferredOperationResultKHR"); + table->DeferredOperationJoinKHR = (PFN_vkDeferredOperationJoinKHR)gdpa(dev, "vkDeferredOperationJoinKHR"); + + // ---- VK_KHR_pipeline_executable_properties extension commands + table->GetPipelineExecutablePropertiesKHR = (PFN_vkGetPipelineExecutablePropertiesKHR)gdpa(dev, "vkGetPipelineExecutablePropertiesKHR"); + table->GetPipelineExecutableStatisticsKHR = (PFN_vkGetPipelineExecutableStatisticsKHR)gdpa(dev, "vkGetPipelineExecutableStatisticsKHR"); + table->GetPipelineExecutableInternalRepresentationsKHR = (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)gdpa(dev, "vkGetPipelineExecutableInternalRepresentationsKHR"); + + // ---- VK_KHR_copy_commands2 extension commands + table->CmdCopyBuffer2KHR = (PFN_vkCmdCopyBuffer2KHR)gdpa(dev, "vkCmdCopyBuffer2KHR"); + table->CmdCopyImage2KHR = (PFN_vkCmdCopyImage2KHR)gdpa(dev, "vkCmdCopyImage2KHR"); + table->CmdCopyBufferToImage2KHR = (PFN_vkCmdCopyBufferToImage2KHR)gdpa(dev, "vkCmdCopyBufferToImage2KHR"); + table->CmdCopyImageToBuffer2KHR = (PFN_vkCmdCopyImageToBuffer2KHR)gdpa(dev, "vkCmdCopyImageToBuffer2KHR"); + table->CmdBlitImage2KHR = (PFN_vkCmdBlitImage2KHR)gdpa(dev, "vkCmdBlitImage2KHR"); + table->CmdResolveImage2KHR = (PFN_vkCmdResolveImage2KHR)gdpa(dev, "vkCmdResolveImage2KHR"); + + // ---- VK_EXT_debug_marker extension commands + table->DebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)gdpa(dev, "vkDebugMarkerSetObjectTagEXT"); + table->DebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)gdpa(dev, "vkDebugMarkerSetObjectNameEXT"); + table->CmdDebugMarkerBeginEXT = (PFN_vkCmdDebugMarkerBeginEXT)gdpa(dev, "vkCmdDebugMarkerBeginEXT"); + 
table->CmdDebugMarkerEndEXT = (PFN_vkCmdDebugMarkerEndEXT)gdpa(dev, "vkCmdDebugMarkerEndEXT"); + table->CmdDebugMarkerInsertEXT = (PFN_vkCmdDebugMarkerInsertEXT)gdpa(dev, "vkCmdDebugMarkerInsertEXT"); + + // ---- VK_EXT_transform_feedback extension commands + table->CmdBindTransformFeedbackBuffersEXT = (PFN_vkCmdBindTransformFeedbackBuffersEXT)gdpa(dev, "vkCmdBindTransformFeedbackBuffersEXT"); + table->CmdBeginTransformFeedbackEXT = (PFN_vkCmdBeginTransformFeedbackEXT)gdpa(dev, "vkCmdBeginTransformFeedbackEXT"); + table->CmdEndTransformFeedbackEXT = (PFN_vkCmdEndTransformFeedbackEXT)gdpa(dev, "vkCmdEndTransformFeedbackEXT"); + table->CmdBeginQueryIndexedEXT = (PFN_vkCmdBeginQueryIndexedEXT)gdpa(dev, "vkCmdBeginQueryIndexedEXT"); + table->CmdEndQueryIndexedEXT = (PFN_vkCmdEndQueryIndexedEXT)gdpa(dev, "vkCmdEndQueryIndexedEXT"); + table->CmdDrawIndirectByteCountEXT = (PFN_vkCmdDrawIndirectByteCountEXT)gdpa(dev, "vkCmdDrawIndirectByteCountEXT"); + + // ---- VK_NVX_image_view_handle extension commands + table->GetImageViewHandleNVX = (PFN_vkGetImageViewHandleNVX)gdpa(dev, "vkGetImageViewHandleNVX"); + table->GetImageViewAddressNVX = (PFN_vkGetImageViewAddressNVX)gdpa(dev, "vkGetImageViewAddressNVX"); + + // ---- VK_AMD_draw_indirect_count extension commands + table->CmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)gdpa(dev, "vkCmdDrawIndirectCountAMD"); + table->CmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)gdpa(dev, "vkCmdDrawIndexedIndirectCountAMD"); + + // ---- VK_AMD_shader_info extension commands + table->GetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)gdpa(dev, "vkGetShaderInfoAMD"); + + // ---- VK_NV_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)gdpa(dev, "vkGetMemoryWin32HandleNV"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_conditional_rendering extension commands + table->CmdBeginConditionalRenderingEXT = (PFN_vkCmdBeginConditionalRenderingEXT)gdpa(dev, "vkCmdBeginConditionalRenderingEXT"); + table->CmdEndConditionalRenderingEXT = (PFN_vkCmdEndConditionalRenderingEXT)gdpa(dev, "vkCmdEndConditionalRenderingEXT"); + + // ---- VK_NV_clip_space_w_scaling extension commands + table->CmdSetViewportWScalingNV = (PFN_vkCmdSetViewportWScalingNV)gdpa(dev, "vkCmdSetViewportWScalingNV"); + + // ---- VK_EXT_display_control extension commands + table->DisplayPowerControlEXT = (PFN_vkDisplayPowerControlEXT)gdpa(dev, "vkDisplayPowerControlEXT"); + table->RegisterDeviceEventEXT = (PFN_vkRegisterDeviceEventEXT)gdpa(dev, "vkRegisterDeviceEventEXT"); + table->RegisterDisplayEventEXT = (PFN_vkRegisterDisplayEventEXT)gdpa(dev, "vkRegisterDisplayEventEXT"); + table->GetSwapchainCounterEXT = (PFN_vkGetSwapchainCounterEXT)gdpa(dev, "vkGetSwapchainCounterEXT"); + + // ---- VK_GOOGLE_display_timing extension commands + table->GetRefreshCycleDurationGOOGLE = (PFN_vkGetRefreshCycleDurationGOOGLE)gdpa(dev, "vkGetRefreshCycleDurationGOOGLE"); + table->GetPastPresentationTimingGOOGLE = (PFN_vkGetPastPresentationTimingGOOGLE)gdpa(dev, "vkGetPastPresentationTimingGOOGLE"); + + // ---- VK_EXT_discard_rectangles extension commands + table->CmdSetDiscardRectangleEXT = (PFN_vkCmdSetDiscardRectangleEXT)gdpa(dev, "vkCmdSetDiscardRectangleEXT"); + + // ---- VK_EXT_hdr_metadata extension commands + table->SetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)gdpa(dev, "vkSetHdrMetadataEXT"); + + // ---- VK_EXT_debug_utils extension commands + table->SetDebugUtilsObjectNameEXT = 
(PFN_vkSetDebugUtilsObjectNameEXT)gipa(inst, "vkSetDebugUtilsObjectNameEXT"); + table->SetDebugUtilsObjectTagEXT = (PFN_vkSetDebugUtilsObjectTagEXT)gipa(inst, "vkSetDebugUtilsObjectTagEXT"); + table->QueueBeginDebugUtilsLabelEXT = (PFN_vkQueueBeginDebugUtilsLabelEXT)gipa(inst, "vkQueueBeginDebugUtilsLabelEXT"); + table->QueueEndDebugUtilsLabelEXT = (PFN_vkQueueEndDebugUtilsLabelEXT)gipa(inst, "vkQueueEndDebugUtilsLabelEXT"); + table->QueueInsertDebugUtilsLabelEXT = (PFN_vkQueueInsertDebugUtilsLabelEXT)gipa(inst, "vkQueueInsertDebugUtilsLabelEXT"); + table->CmdBeginDebugUtilsLabelEXT = (PFN_vkCmdBeginDebugUtilsLabelEXT)gipa(inst, "vkCmdBeginDebugUtilsLabelEXT"); + table->CmdEndDebugUtilsLabelEXT = (PFN_vkCmdEndDebugUtilsLabelEXT)gipa(inst, "vkCmdEndDebugUtilsLabelEXT"); + table->CmdInsertDebugUtilsLabelEXT = (PFN_vkCmdInsertDebugUtilsLabelEXT)gipa(inst, "vkCmdInsertDebugUtilsLabelEXT"); + + // ---- VK_ANDROID_external_memory_android_hardware_buffer extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + table->GetAndroidHardwareBufferPropertiesANDROID = (PFN_vkGetAndroidHardwareBufferPropertiesANDROID)gdpa(dev, "vkGetAndroidHardwareBufferPropertiesANDROID"); +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + table->GetMemoryAndroidHardwareBufferANDROID = (PFN_vkGetMemoryAndroidHardwareBufferANDROID)gdpa(dev, "vkGetMemoryAndroidHardwareBufferANDROID"); +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_EXT_sample_locations extension commands + table->CmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT)gdpa(dev, "vkCmdSetSampleLocationsEXT"); + + // ---- VK_EXT_image_drm_format_modifier extension commands + table->GetImageDrmFormatModifierPropertiesEXT = (PFN_vkGetImageDrmFormatModifierPropertiesEXT)gdpa(dev, "vkGetImageDrmFormatModifierPropertiesEXT"); + + // ---- VK_EXT_validation_cache extension commands + table->CreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)gdpa(dev, "vkCreateValidationCacheEXT"); + table->DestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)gdpa(dev, "vkDestroyValidationCacheEXT"); + table->MergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)gdpa(dev, "vkMergeValidationCachesEXT"); + table->GetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)gdpa(dev, "vkGetValidationCacheDataEXT"); + + // ---- VK_NV_shading_rate_image extension commands + table->CmdBindShadingRateImageNV = (PFN_vkCmdBindShadingRateImageNV)gdpa(dev, "vkCmdBindShadingRateImageNV"); + table->CmdSetViewportShadingRatePaletteNV = (PFN_vkCmdSetViewportShadingRatePaletteNV)gdpa(dev, "vkCmdSetViewportShadingRatePaletteNV"); + table->CmdSetCoarseSampleOrderNV = (PFN_vkCmdSetCoarseSampleOrderNV)gdpa(dev, "vkCmdSetCoarseSampleOrderNV"); + + // ---- VK_NV_ray_tracing extension commands + table->CreateAccelerationStructureNV = (PFN_vkCreateAccelerationStructureNV)gdpa(dev, "vkCreateAccelerationStructureNV"); + table->DestroyAccelerationStructureNV = (PFN_vkDestroyAccelerationStructureNV)gdpa(dev, "vkDestroyAccelerationStructureNV"); + table->GetAccelerationStructureMemoryRequirementsNV = (PFN_vkGetAccelerationStructureMemoryRequirementsNV)gdpa(dev, "vkGetAccelerationStructureMemoryRequirementsNV"); + table->BindAccelerationStructureMemoryNV = (PFN_vkBindAccelerationStructureMemoryNV)gdpa(dev, "vkBindAccelerationStructureMemoryNV"); + table->CmdBuildAccelerationStructureNV = (PFN_vkCmdBuildAccelerationStructureNV)gdpa(dev, "vkCmdBuildAccelerationStructureNV"); + table->CmdCopyAccelerationStructureNV = 
(PFN_vkCmdCopyAccelerationStructureNV)gdpa(dev, "vkCmdCopyAccelerationStructureNV"); + table->CmdTraceRaysNV = (PFN_vkCmdTraceRaysNV)gdpa(dev, "vkCmdTraceRaysNV"); + table->CreateRayTracingPipelinesNV = (PFN_vkCreateRayTracingPipelinesNV)gdpa(dev, "vkCreateRayTracingPipelinesNV"); + table->GetRayTracingShaderGroupHandlesKHR = (PFN_vkGetRayTracingShaderGroupHandlesKHR)gdpa(dev, "vkGetRayTracingShaderGroupHandlesKHR"); + table->GetRayTracingShaderGroupHandlesNV = (PFN_vkGetRayTracingShaderGroupHandlesNV)gdpa(dev, "vkGetRayTracingShaderGroupHandlesNV"); + table->GetAccelerationStructureHandleNV = (PFN_vkGetAccelerationStructureHandleNV)gdpa(dev, "vkGetAccelerationStructureHandleNV"); + table->CmdWriteAccelerationStructuresPropertiesNV = (PFN_vkCmdWriteAccelerationStructuresPropertiesNV)gdpa(dev, "vkCmdWriteAccelerationStructuresPropertiesNV"); + table->CompileDeferredNV = (PFN_vkCompileDeferredNV)gdpa(dev, "vkCompileDeferredNV"); + + // ---- VK_EXT_external_memory_host extension commands + table->GetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)gdpa(dev, "vkGetMemoryHostPointerPropertiesEXT"); + + // ---- VK_AMD_buffer_marker extension commands + table->CmdWriteBufferMarkerAMD = (PFN_vkCmdWriteBufferMarkerAMD)gdpa(dev, "vkCmdWriteBufferMarkerAMD"); + + // ---- VK_EXT_calibrated_timestamps extension commands + table->GetCalibratedTimestampsEXT = (PFN_vkGetCalibratedTimestampsEXT)gdpa(dev, "vkGetCalibratedTimestampsEXT"); + + // ---- VK_NV_mesh_shader extension commands + table->CmdDrawMeshTasksNV = (PFN_vkCmdDrawMeshTasksNV)gdpa(dev, "vkCmdDrawMeshTasksNV"); + table->CmdDrawMeshTasksIndirectNV = (PFN_vkCmdDrawMeshTasksIndirectNV)gdpa(dev, "vkCmdDrawMeshTasksIndirectNV"); + table->CmdDrawMeshTasksIndirectCountNV = (PFN_vkCmdDrawMeshTasksIndirectCountNV)gdpa(dev, "vkCmdDrawMeshTasksIndirectCountNV"); + + // ---- VK_NV_scissor_exclusive extension commands + table->CmdSetExclusiveScissorNV = (PFN_vkCmdSetExclusiveScissorNV)gdpa(dev, "vkCmdSetExclusiveScissorNV"); + + // ---- VK_NV_device_diagnostic_checkpoints extension commands + table->CmdSetCheckpointNV = (PFN_vkCmdSetCheckpointNV)gdpa(dev, "vkCmdSetCheckpointNV"); + table->GetQueueCheckpointDataNV = (PFN_vkGetQueueCheckpointDataNV)gdpa(dev, "vkGetQueueCheckpointDataNV"); + + // ---- VK_INTEL_performance_query extension commands + table->InitializePerformanceApiINTEL = (PFN_vkInitializePerformanceApiINTEL)gdpa(dev, "vkInitializePerformanceApiINTEL"); + table->UninitializePerformanceApiINTEL = (PFN_vkUninitializePerformanceApiINTEL)gdpa(dev, "vkUninitializePerformanceApiINTEL"); + table->CmdSetPerformanceMarkerINTEL = (PFN_vkCmdSetPerformanceMarkerINTEL)gdpa(dev, "vkCmdSetPerformanceMarkerINTEL"); + table->CmdSetPerformanceStreamMarkerINTEL = (PFN_vkCmdSetPerformanceStreamMarkerINTEL)gdpa(dev, "vkCmdSetPerformanceStreamMarkerINTEL"); + table->CmdSetPerformanceOverrideINTEL = (PFN_vkCmdSetPerformanceOverrideINTEL)gdpa(dev, "vkCmdSetPerformanceOverrideINTEL"); + table->AcquirePerformanceConfigurationINTEL = (PFN_vkAcquirePerformanceConfigurationINTEL)gdpa(dev, "vkAcquirePerformanceConfigurationINTEL"); + table->ReleasePerformanceConfigurationINTEL = (PFN_vkReleasePerformanceConfigurationINTEL)gdpa(dev, "vkReleasePerformanceConfigurationINTEL"); + table->QueueSetPerformanceConfigurationINTEL = (PFN_vkQueueSetPerformanceConfigurationINTEL)gdpa(dev, "vkQueueSetPerformanceConfigurationINTEL"); + table->GetPerformanceParameterINTEL = (PFN_vkGetPerformanceParameterINTEL)gdpa(dev, "vkGetPerformanceParameterINTEL"); + + // ---- 
VK_AMD_display_native_hdr extension commands + table->SetLocalDimmingAMD = (PFN_vkSetLocalDimmingAMD)gdpa(dev, "vkSetLocalDimmingAMD"); + + // ---- VK_EXT_buffer_device_address extension commands + table->GetBufferDeviceAddressEXT = (PFN_vkGetBufferDeviceAddressEXT)gdpa(dev, "vkGetBufferDeviceAddressEXT"); + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->AcquireFullScreenExclusiveModeEXT = (PFN_vkAcquireFullScreenExclusiveModeEXT)gdpa(dev, "vkAcquireFullScreenExclusiveModeEXT"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->ReleaseFullScreenExclusiveModeEXT = (PFN_vkReleaseFullScreenExclusiveModeEXT)gdpa(dev, "vkReleaseFullScreenExclusiveModeEXT"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetDeviceGroupSurfacePresentModes2EXT = (PFN_vkGetDeviceGroupSurfacePresentModes2EXT)gdpa(dev, "vkGetDeviceGroupSurfacePresentModes2EXT"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_line_rasterization extension commands + table->CmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)gdpa(dev, "vkCmdSetLineStippleEXT"); + + // ---- VK_EXT_host_query_reset extension commands + table->ResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)gdpa(dev, "vkResetQueryPoolEXT"); + + // ---- VK_EXT_extended_dynamic_state extension commands + table->CmdSetCullModeEXT = (PFN_vkCmdSetCullModeEXT)gdpa(dev, "vkCmdSetCullModeEXT"); + table->CmdSetFrontFaceEXT = (PFN_vkCmdSetFrontFaceEXT)gdpa(dev, "vkCmdSetFrontFaceEXT"); + table->CmdSetPrimitiveTopologyEXT = (PFN_vkCmdSetPrimitiveTopologyEXT)gdpa(dev, "vkCmdSetPrimitiveTopologyEXT"); + table->CmdSetViewportWithCountEXT = (PFN_vkCmdSetViewportWithCountEXT)gdpa(dev, "vkCmdSetViewportWithCountEXT"); + table->CmdSetScissorWithCountEXT = (PFN_vkCmdSetScissorWithCountEXT)gdpa(dev, "vkCmdSetScissorWithCountEXT"); + table->CmdBindVertexBuffers2EXT = (PFN_vkCmdBindVertexBuffers2EXT)gdpa(dev, "vkCmdBindVertexBuffers2EXT"); + table->CmdSetDepthTestEnableEXT = (PFN_vkCmdSetDepthTestEnableEXT)gdpa(dev, "vkCmdSetDepthTestEnableEXT"); + table->CmdSetDepthWriteEnableEXT = (PFN_vkCmdSetDepthWriteEnableEXT)gdpa(dev, "vkCmdSetDepthWriteEnableEXT"); + table->CmdSetDepthCompareOpEXT = (PFN_vkCmdSetDepthCompareOpEXT)gdpa(dev, "vkCmdSetDepthCompareOpEXT"); + table->CmdSetDepthBoundsTestEnableEXT = (PFN_vkCmdSetDepthBoundsTestEnableEXT)gdpa(dev, "vkCmdSetDepthBoundsTestEnableEXT"); + table->CmdSetStencilTestEnableEXT = (PFN_vkCmdSetStencilTestEnableEXT)gdpa(dev, "vkCmdSetStencilTestEnableEXT"); + table->CmdSetStencilOpEXT = (PFN_vkCmdSetStencilOpEXT)gdpa(dev, "vkCmdSetStencilOpEXT"); + + // ---- VK_NV_device_generated_commands extension commands + table->GetGeneratedCommandsMemoryRequirementsNV = (PFN_vkGetGeneratedCommandsMemoryRequirementsNV)gdpa(dev, "vkGetGeneratedCommandsMemoryRequirementsNV"); + table->CmdPreprocessGeneratedCommandsNV = (PFN_vkCmdPreprocessGeneratedCommandsNV)gdpa(dev, "vkCmdPreprocessGeneratedCommandsNV"); + table->CmdExecuteGeneratedCommandsNV = (PFN_vkCmdExecuteGeneratedCommandsNV)gdpa(dev, "vkCmdExecuteGeneratedCommandsNV"); + table->CmdBindPipelineShaderGroupNV = (PFN_vkCmdBindPipelineShaderGroupNV)gdpa(dev, "vkCmdBindPipelineShaderGroupNV"); + table->CreateIndirectCommandsLayoutNV = (PFN_vkCreateIndirectCommandsLayoutNV)gdpa(dev, "vkCreateIndirectCommandsLayoutNV"); + table->DestroyIndirectCommandsLayoutNV = (PFN_vkDestroyIndirectCommandsLayoutNV)gdpa(dev, "vkDestroyIndirectCommandsLayoutNV"); + + // ---- VK_EXT_private_data extension commands 
+ table->CreatePrivateDataSlotEXT = (PFN_vkCreatePrivateDataSlotEXT)gdpa(dev, "vkCreatePrivateDataSlotEXT"); + table->DestroyPrivateDataSlotEXT = (PFN_vkDestroyPrivateDataSlotEXT)gdpa(dev, "vkDestroyPrivateDataSlotEXT"); + table->SetPrivateDataEXT = (PFN_vkSetPrivateDataEXT)gdpa(dev, "vkSetPrivateDataEXT"); + table->GetPrivateDataEXT = (PFN_vkGetPrivateDataEXT)gdpa(dev, "vkGetPrivateDataEXT"); + + // ---- VK_NV_fragment_shading_rate_enums extension commands + table->CmdSetFragmentShadingRateEnumNV = (PFN_vkCmdSetFragmentShadingRateEnumNV)gdpa(dev, "vkCmdSetFragmentShadingRateEnumNV"); + + // ---- VK_KHR_acceleration_structure extension commands + table->CreateAccelerationStructureKHR = (PFN_vkCreateAccelerationStructureKHR)gdpa(dev, "vkCreateAccelerationStructureKHR"); + table->DestroyAccelerationStructureKHR = (PFN_vkDestroyAccelerationStructureKHR)gdpa(dev, "vkDestroyAccelerationStructureKHR"); + table->CmdBuildAccelerationStructuresKHR = (PFN_vkCmdBuildAccelerationStructuresKHR)gdpa(dev, "vkCmdBuildAccelerationStructuresKHR"); + table->CmdBuildAccelerationStructuresIndirectKHR = (PFN_vkCmdBuildAccelerationStructuresIndirectKHR)gdpa(dev, "vkCmdBuildAccelerationStructuresIndirectKHR"); + table->BuildAccelerationStructuresKHR = (PFN_vkBuildAccelerationStructuresKHR)gdpa(dev, "vkBuildAccelerationStructuresKHR"); + table->CopyAccelerationStructureKHR = (PFN_vkCopyAccelerationStructureKHR)gdpa(dev, "vkCopyAccelerationStructureKHR"); + table->CopyAccelerationStructureToMemoryKHR = (PFN_vkCopyAccelerationStructureToMemoryKHR)gdpa(dev, "vkCopyAccelerationStructureToMemoryKHR"); + table->CopyMemoryToAccelerationStructureKHR = (PFN_vkCopyMemoryToAccelerationStructureKHR)gdpa(dev, "vkCopyMemoryToAccelerationStructureKHR"); + table->WriteAccelerationStructuresPropertiesKHR = (PFN_vkWriteAccelerationStructuresPropertiesKHR)gdpa(dev, "vkWriteAccelerationStructuresPropertiesKHR"); + table->CmdCopyAccelerationStructureKHR = (PFN_vkCmdCopyAccelerationStructureKHR)gdpa(dev, "vkCmdCopyAccelerationStructureKHR"); + table->CmdCopyAccelerationStructureToMemoryKHR = (PFN_vkCmdCopyAccelerationStructureToMemoryKHR)gdpa(dev, "vkCmdCopyAccelerationStructureToMemoryKHR"); + table->CmdCopyMemoryToAccelerationStructureKHR = (PFN_vkCmdCopyMemoryToAccelerationStructureKHR)gdpa(dev, "vkCmdCopyMemoryToAccelerationStructureKHR"); + table->GetAccelerationStructureDeviceAddressKHR = (PFN_vkGetAccelerationStructureDeviceAddressKHR)gdpa(dev, "vkGetAccelerationStructureDeviceAddressKHR"); + table->CmdWriteAccelerationStructuresPropertiesKHR = (PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)gdpa(dev, "vkCmdWriteAccelerationStructuresPropertiesKHR"); + table->GetDeviceAccelerationStructureCompatibilityKHR = (PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)gdpa(dev, "vkGetDeviceAccelerationStructureCompatibilityKHR"); + table->GetAccelerationStructureBuildSizesKHR = (PFN_vkGetAccelerationStructureBuildSizesKHR)gdpa(dev, "vkGetAccelerationStructureBuildSizesKHR"); + + // ---- VK_KHR_ray_tracing_pipeline extension commands + table->CmdTraceRaysKHR = (PFN_vkCmdTraceRaysKHR)gdpa(dev, "vkCmdTraceRaysKHR"); + table->CreateRayTracingPipelinesKHR = (PFN_vkCreateRayTracingPipelinesKHR)gdpa(dev, "vkCreateRayTracingPipelinesKHR"); + table->GetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)gdpa(dev, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); + table->CmdTraceRaysIndirectKHR = (PFN_vkCmdTraceRaysIndirectKHR)gdpa(dev, "vkCmdTraceRaysIndirectKHR"); + 
table->GetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)gdpa(dev, "vkGetRayTracingShaderGroupStackSizeKHR"); + table->CmdSetRayTracingPipelineStackSizeKHR = (PFN_vkCmdSetRayTracingPipelineStackSizeKHR)gdpa(dev, "vkCmdSetRayTracingPipelineStackSizeKHR"); +} + +// Init Instance function pointer dispatch table with core commands +VKAPI_ATTR void VKAPI_CALL loader_init_instance_core_dispatch_table(VkLayerInstanceDispatchTable *table, PFN_vkGetInstanceProcAddr gpa, + VkInstance inst) { + + // ---- Core 1_0 commands + table->DestroyInstance = (PFN_vkDestroyInstance)gpa(inst, "vkDestroyInstance"); + table->EnumeratePhysicalDevices = (PFN_vkEnumeratePhysicalDevices)gpa(inst, "vkEnumeratePhysicalDevices"); + table->GetPhysicalDeviceFeatures = (PFN_vkGetPhysicalDeviceFeatures)gpa(inst, "vkGetPhysicalDeviceFeatures"); + table->GetPhysicalDeviceFormatProperties = (PFN_vkGetPhysicalDeviceFormatProperties)gpa(inst, "vkGetPhysicalDeviceFormatProperties"); + table->GetPhysicalDeviceImageFormatProperties = (PFN_vkGetPhysicalDeviceImageFormatProperties)gpa(inst, "vkGetPhysicalDeviceImageFormatProperties"); + table->GetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)gpa(inst, "vkGetPhysicalDeviceProperties"); + table->GetPhysicalDeviceQueueFamilyProperties = (PFN_vkGetPhysicalDeviceQueueFamilyProperties)gpa(inst, "vkGetPhysicalDeviceQueueFamilyProperties"); + table->GetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)gpa(inst, "vkGetPhysicalDeviceMemoryProperties"); + table->GetInstanceProcAddr = gpa; + table->EnumerateDeviceExtensionProperties = (PFN_vkEnumerateDeviceExtensionProperties)gpa(inst, "vkEnumerateDeviceExtensionProperties"); + table->EnumerateDeviceLayerProperties = (PFN_vkEnumerateDeviceLayerProperties)gpa(inst, "vkEnumerateDeviceLayerProperties"); + table->GetPhysicalDeviceSparseImageFormatProperties = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties)gpa(inst, "vkGetPhysicalDeviceSparseImageFormatProperties"); + + // ---- Core 1_1 commands + table->EnumeratePhysicalDeviceGroups = (PFN_vkEnumeratePhysicalDeviceGroups)gpa(inst, "vkEnumeratePhysicalDeviceGroups"); + table->GetPhysicalDeviceFeatures2 = (PFN_vkGetPhysicalDeviceFeatures2)gpa(inst, "vkGetPhysicalDeviceFeatures2"); + table->GetPhysicalDeviceProperties2 = (PFN_vkGetPhysicalDeviceProperties2)gpa(inst, "vkGetPhysicalDeviceProperties2"); + table->GetPhysicalDeviceFormatProperties2 = (PFN_vkGetPhysicalDeviceFormatProperties2)gpa(inst, "vkGetPhysicalDeviceFormatProperties2"); + table->GetPhysicalDeviceImageFormatProperties2 = (PFN_vkGetPhysicalDeviceImageFormatProperties2)gpa(inst, "vkGetPhysicalDeviceImageFormatProperties2"); + table->GetPhysicalDeviceQueueFamilyProperties2 = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2)gpa(inst, "vkGetPhysicalDeviceQueueFamilyProperties2"); + table->GetPhysicalDeviceMemoryProperties2 = (PFN_vkGetPhysicalDeviceMemoryProperties2)gpa(inst, "vkGetPhysicalDeviceMemoryProperties2"); + table->GetPhysicalDeviceSparseImageFormatProperties2 = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)gpa(inst, "vkGetPhysicalDeviceSparseImageFormatProperties2"); + table->GetPhysicalDeviceExternalBufferProperties = (PFN_vkGetPhysicalDeviceExternalBufferProperties)gpa(inst, "vkGetPhysicalDeviceExternalBufferProperties"); + table->GetPhysicalDeviceExternalFenceProperties = (PFN_vkGetPhysicalDeviceExternalFenceProperties)gpa(inst, "vkGetPhysicalDeviceExternalFenceProperties"); + table->GetPhysicalDeviceExternalSemaphoreProperties = 
(PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)gpa(inst, "vkGetPhysicalDeviceExternalSemaphoreProperties");
+}
+
+// Init Instance function pointer dispatch table with extension commands
+VKAPI_ATTR void VKAPI_CALL loader_init_instance_extension_dispatch_table(VkLayerInstanceDispatchTable *table, PFN_vkGetInstanceProcAddr gpa,
+ VkInstance inst) {
+
+ // ---- VK_KHR_surface extension commands
+ table->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(inst, "vkDestroySurfaceKHR");
+ table->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(inst, "vkGetPhysicalDeviceSurfaceSupportKHR");
+ table->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(inst, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ table->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(inst, "vkGetPhysicalDeviceSurfaceFormatsKHR");
+ table->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(inst, "vkGetPhysicalDeviceSurfacePresentModesKHR");
+
+ // ---- VK_KHR_swapchain extension commands
+ table->GetPhysicalDevicePresentRectanglesKHR = (PFN_vkGetPhysicalDevicePresentRectanglesKHR)gpa(inst, "vkGetPhysicalDevicePresentRectanglesKHR");
+
+ // ---- VK_KHR_display extension commands
+ table->GetPhysicalDeviceDisplayPropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)gpa(inst, "vkGetPhysicalDeviceDisplayPropertiesKHR");
+ table->GetPhysicalDeviceDisplayPlanePropertiesKHR = (PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)gpa(inst, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR");
+ table->GetDisplayPlaneSupportedDisplaysKHR = (PFN_vkGetDisplayPlaneSupportedDisplaysKHR)gpa(inst, "vkGetDisplayPlaneSupportedDisplaysKHR");
+ table->GetDisplayModePropertiesKHR = (PFN_vkGetDisplayModePropertiesKHR)gpa(inst, "vkGetDisplayModePropertiesKHR");
+ table->CreateDisplayModeKHR = (PFN_vkCreateDisplayModeKHR)gpa(inst, "vkCreateDisplayModeKHR");
+ table->GetDisplayPlaneCapabilitiesKHR = (PFN_vkGetDisplayPlaneCapabilitiesKHR)gpa(inst, "vkGetDisplayPlaneCapabilitiesKHR");
+ table->CreateDisplayPlaneSurfaceKHR = (PFN_vkCreateDisplayPlaneSurfaceKHR)gpa(inst, "vkCreateDisplayPlaneSurfaceKHR");
+
+ // ---- VK_KHR_xlib_surface extension commands
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ table->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(inst, "vkCreateXlibSurfaceKHR");
+#endif // VK_USE_PLATFORM_XLIB_KHR
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+ table->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(inst, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
+#endif // VK_USE_PLATFORM_XLIB_KHR
+
+ // ---- VK_KHR_xcb_surface extension commands
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ table->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(inst, "vkCreateXcbSurfaceKHR");
+#endif // VK_USE_PLATFORM_XCB_KHR
+#ifdef VK_USE_PLATFORM_XCB_KHR
+ table->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(inst, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
+#endif // VK_USE_PLATFORM_XCB_KHR
+
+ // ---- VK_KHR_wayland_surface extension commands
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ table->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(inst, "vkCreateWaylandSurfaceKHR");
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+ table->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(inst,
"vkGetPhysicalDeviceWaylandPresentationSupportKHR"); +#endif // VK_USE_PLATFORM_WAYLAND_KHR + + // ---- VK_KHR_android_surface extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + table->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(inst, "vkCreateAndroidSurfaceKHR"); +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_KHR_win32_surface extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(inst, "vkCreateWin32SurfaceKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(inst, "vkGetPhysicalDeviceWin32PresentationSupportKHR"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_get_physical_device_properties2 extension commands + table->GetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)gpa(inst, "vkGetPhysicalDeviceFeatures2KHR"); + table->GetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)gpa(inst, "vkGetPhysicalDeviceProperties2KHR"); + table->GetPhysicalDeviceFormatProperties2KHR = (PFN_vkGetPhysicalDeviceFormatProperties2KHR)gpa(inst, "vkGetPhysicalDeviceFormatProperties2KHR"); + table->GetPhysicalDeviceImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)gpa(inst, "vkGetPhysicalDeviceImageFormatProperties2KHR"); + table->GetPhysicalDeviceQueueFamilyProperties2KHR = (PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)gpa(inst, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"); + table->GetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)gpa(inst, "vkGetPhysicalDeviceMemoryProperties2KHR"); + table->GetPhysicalDeviceSparseImageFormatProperties2KHR = (PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)gpa(inst, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"); + + // ---- VK_KHR_device_group_creation extension commands + table->EnumeratePhysicalDeviceGroupsKHR = (PFN_vkEnumeratePhysicalDeviceGroupsKHR)gpa(inst, "vkEnumeratePhysicalDeviceGroupsKHR"); + + // ---- VK_KHR_external_memory_capabilities extension commands + table->GetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)gpa(inst, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); + + // ---- VK_KHR_external_semaphore_capabilities extension commands + table->GetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)gpa(inst, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); + + // ---- VK_KHR_external_fence_capabilities extension commands + table->GetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)gpa(inst, "vkGetPhysicalDeviceExternalFencePropertiesKHR"); + + // ---- VK_KHR_performance_query extension commands + table->EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = (PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)gpa(inst, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"); + table->GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = (PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)gpa(inst, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"); + + // ---- VK_KHR_get_surface_capabilities2 extension commands + table->GetPhysicalDeviceSurfaceCapabilities2KHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)gpa(inst, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"); + 
table->GetPhysicalDeviceSurfaceFormats2KHR = (PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)gpa(inst, "vkGetPhysicalDeviceSurfaceFormats2KHR"); + + // ---- VK_KHR_get_display_properties2 extension commands + table->GetPhysicalDeviceDisplayProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayProperties2KHR)gpa(inst, "vkGetPhysicalDeviceDisplayProperties2KHR"); + table->GetPhysicalDeviceDisplayPlaneProperties2KHR = (PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)gpa(inst, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"); + table->GetDisplayModeProperties2KHR = (PFN_vkGetDisplayModeProperties2KHR)gpa(inst, "vkGetDisplayModeProperties2KHR"); + table->GetDisplayPlaneCapabilities2KHR = (PFN_vkGetDisplayPlaneCapabilities2KHR)gpa(inst, "vkGetDisplayPlaneCapabilities2KHR"); + + // ---- VK_KHR_fragment_shading_rate extension commands + table->GetPhysicalDeviceFragmentShadingRatesKHR = (PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)gpa(inst, "vkGetPhysicalDeviceFragmentShadingRatesKHR"); + + // ---- VK_EXT_debug_report extension commands + table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)gpa(inst, "vkCreateDebugReportCallbackEXT"); + table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)gpa(inst, "vkDestroyDebugReportCallbackEXT"); + table->DebugReportMessageEXT = (PFN_vkDebugReportMessageEXT)gpa(inst, "vkDebugReportMessageEXT"); + + // ---- VK_GGP_stream_descriptor_surface extension commands +#ifdef VK_USE_PLATFORM_GGP + table->CreateStreamDescriptorSurfaceGGP = (PFN_vkCreateStreamDescriptorSurfaceGGP)gpa(inst, "vkCreateStreamDescriptorSurfaceGGP"); +#endif // VK_USE_PLATFORM_GGP + + // ---- VK_NV_external_memory_capabilities extension commands + table->GetPhysicalDeviceExternalImageFormatPropertiesNV = (PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)gpa(inst, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"); + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + table->CreateViSurfaceNN = (PFN_vkCreateViSurfaceNN)gpa(inst, "vkCreateViSurfaceNN"); +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + table->ReleaseDisplayEXT = (PFN_vkReleaseDisplayEXT)gpa(inst, "vkReleaseDisplayEXT"); + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + table->AcquireXlibDisplayEXT = (PFN_vkAcquireXlibDisplayEXT)gpa(inst, "vkAcquireXlibDisplayEXT"); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + table->GetRandROutputDisplayEXT = (PFN_vkGetRandROutputDisplayEXT)gpa(inst, "vkGetRandROutputDisplayEXT"); +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + table->GetPhysicalDeviceSurfaceCapabilities2EXT = (PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)gpa(inst, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"); + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + table->CreateIOSSurfaceMVK = (PFN_vkCreateIOSSurfaceMVK)gpa(inst, "vkCreateIOSSurfaceMVK"); +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + table->CreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK)gpa(inst, "vkCreateMacOSSurfaceMVK"); +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + table->CreateDebugUtilsMessengerEXT = (PFN_vkCreateDebugUtilsMessengerEXT)gpa(inst, "vkCreateDebugUtilsMessengerEXT"); + table->DestroyDebugUtilsMessengerEXT = 
(PFN_vkDestroyDebugUtilsMessengerEXT)gpa(inst, "vkDestroyDebugUtilsMessengerEXT"); + table->SubmitDebugUtilsMessageEXT = (PFN_vkSubmitDebugUtilsMessageEXT)gpa(inst, "vkSubmitDebugUtilsMessageEXT"); + + // ---- VK_EXT_sample_locations extension commands + table->GetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)gpa(inst, "vkGetPhysicalDeviceMultisamplePropertiesEXT"); + + // ---- VK_EXT_calibrated_timestamps extension commands + table->GetPhysicalDeviceCalibrateableTimeDomainsEXT = (PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)gpa(inst, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"); + + // ---- VK_FUCHSIA_imagepipe_surface extension commands +#ifdef VK_USE_PLATFORM_FUCHSIA + table->CreateImagePipeSurfaceFUCHSIA = (PFN_vkCreateImagePipeSurfaceFUCHSIA)gpa(inst, "vkCreateImagePipeSurfaceFUCHSIA"); +#endif // VK_USE_PLATFORM_FUCHSIA + + // ---- VK_EXT_metal_surface extension commands +#ifdef VK_USE_PLATFORM_METAL_EXT + table->CreateMetalSurfaceEXT = (PFN_vkCreateMetalSurfaceEXT)gpa(inst, "vkCreateMetalSurfaceEXT"); +#endif // VK_USE_PLATFORM_METAL_EXT + + // ---- VK_EXT_tooling_info extension commands + table->GetPhysicalDeviceToolPropertiesEXT = (PFN_vkGetPhysicalDeviceToolPropertiesEXT)gpa(inst, "vkGetPhysicalDeviceToolPropertiesEXT"); + + // ---- VK_NV_cooperative_matrix extension commands + table->GetPhysicalDeviceCooperativeMatrixPropertiesNV = (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)gpa(inst, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"); + + // ---- VK_NV_coverage_reduction_mode extension commands + table->GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = (PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)gpa(inst, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"); + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + table->GetPhysicalDeviceSurfacePresentModes2EXT = (PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)gpa(inst, "vkGetPhysicalDeviceSurfacePresentModes2EXT"); +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_headless_surface extension commands + table->CreateHeadlessSurfaceEXT = (PFN_vkCreateHeadlessSurfaceEXT)gpa(inst, "vkCreateHeadlessSurfaceEXT"); + + // ---- VK_EXT_directfb_surface extension commands +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + table->CreateDirectFBSurfaceEXT = (PFN_vkCreateDirectFBSurfaceEXT)gpa(inst, "vkCreateDirectFBSurfaceEXT"); +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + table->GetPhysicalDeviceDirectFBPresentationSupportEXT = (PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT)gpa(inst, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT"); +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +} + +// Device command lookup function +VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDispatchTable *table, const char *name) { + if (!name || name[0] != 'v' || name[1] != 'k') return NULL; + + name += 2; + + // ---- Core 1_0 commands + if (!strcmp(name, "GetDeviceProcAddr")) return (void *)table->GetDeviceProcAddr; + if (!strcmp(name, "DestroyDevice")) return (void *)table->DestroyDevice; + if (!strcmp(name, "GetDeviceQueue")) return (void *)table->GetDeviceQueue; + if (!strcmp(name, "QueueSubmit")) return (void *)table->QueueSubmit; + if (!strcmp(name, "QueueWaitIdle")) return (void *)table->QueueWaitIdle; + if (!strcmp(name, "DeviceWaitIdle")) return (void *)table->DeviceWaitIdle; + if (!strcmp(name, "AllocateMemory")) return (void 
*)table->AllocateMemory; + if (!strcmp(name, "FreeMemory")) return (void *)table->FreeMemory; + if (!strcmp(name, "MapMemory")) return (void *)table->MapMemory; + if (!strcmp(name, "UnmapMemory")) return (void *)table->UnmapMemory; + if (!strcmp(name, "FlushMappedMemoryRanges")) return (void *)table->FlushMappedMemoryRanges; + if (!strcmp(name, "InvalidateMappedMemoryRanges")) return (void *)table->InvalidateMappedMemoryRanges; + if (!strcmp(name, "GetDeviceMemoryCommitment")) return (void *)table->GetDeviceMemoryCommitment; + if (!strcmp(name, "BindBufferMemory")) return (void *)table->BindBufferMemory; + if (!strcmp(name, "BindImageMemory")) return (void *)table->BindImageMemory; + if (!strcmp(name, "GetBufferMemoryRequirements")) return (void *)table->GetBufferMemoryRequirements; + if (!strcmp(name, "GetImageMemoryRequirements")) return (void *)table->GetImageMemoryRequirements; + if (!strcmp(name, "GetImageSparseMemoryRequirements")) return (void *)table->GetImageSparseMemoryRequirements; + if (!strcmp(name, "QueueBindSparse")) return (void *)table->QueueBindSparse; + if (!strcmp(name, "CreateFence")) return (void *)table->CreateFence; + if (!strcmp(name, "DestroyFence")) return (void *)table->DestroyFence; + if (!strcmp(name, "ResetFences")) return (void *)table->ResetFences; + if (!strcmp(name, "GetFenceStatus")) return (void *)table->GetFenceStatus; + if (!strcmp(name, "WaitForFences")) return (void *)table->WaitForFences; + if (!strcmp(name, "CreateSemaphore")) return (void *)table->CreateSemaphore; + if (!strcmp(name, "DestroySemaphore")) return (void *)table->DestroySemaphore; + if (!strcmp(name, "CreateEvent")) return (void *)table->CreateEvent; + if (!strcmp(name, "DestroyEvent")) return (void *)table->DestroyEvent; + if (!strcmp(name, "GetEventStatus")) return (void *)table->GetEventStatus; + if (!strcmp(name, "SetEvent")) return (void *)table->SetEvent; + if (!strcmp(name, "ResetEvent")) return (void *)table->ResetEvent; + if (!strcmp(name, "CreateQueryPool")) return (void *)table->CreateQueryPool; + if (!strcmp(name, "DestroyQueryPool")) return (void *)table->DestroyQueryPool; + if (!strcmp(name, "GetQueryPoolResults")) return (void *)table->GetQueryPoolResults; + if (!strcmp(name, "CreateBuffer")) return (void *)table->CreateBuffer; + if (!strcmp(name, "DestroyBuffer")) return (void *)table->DestroyBuffer; + if (!strcmp(name, "CreateBufferView")) return (void *)table->CreateBufferView; + if (!strcmp(name, "DestroyBufferView")) return (void *)table->DestroyBufferView; + if (!strcmp(name, "CreateImage")) return (void *)table->CreateImage; + if (!strcmp(name, "DestroyImage")) return (void *)table->DestroyImage; + if (!strcmp(name, "GetImageSubresourceLayout")) return (void *)table->GetImageSubresourceLayout; + if (!strcmp(name, "CreateImageView")) return (void *)table->CreateImageView; + if (!strcmp(name, "DestroyImageView")) return (void *)table->DestroyImageView; + if (!strcmp(name, "CreateShaderModule")) return (void *)table->CreateShaderModule; + if (!strcmp(name, "DestroyShaderModule")) return (void *)table->DestroyShaderModule; + if (!strcmp(name, "CreatePipelineCache")) return (void *)table->CreatePipelineCache; + if (!strcmp(name, "DestroyPipelineCache")) return (void *)table->DestroyPipelineCache; + if (!strcmp(name, "GetPipelineCacheData")) return (void *)table->GetPipelineCacheData; + if (!strcmp(name, "MergePipelineCaches")) return (void *)table->MergePipelineCaches; + if (!strcmp(name, "CreateGraphicsPipelines")) return (void *)table->CreateGraphicsPipelines; + if 
(!strcmp(name, "CreateComputePipelines")) return (void *)table->CreateComputePipelines; + if (!strcmp(name, "DestroyPipeline")) return (void *)table->DestroyPipeline; + if (!strcmp(name, "CreatePipelineLayout")) return (void *)table->CreatePipelineLayout; + if (!strcmp(name, "DestroyPipelineLayout")) return (void *)table->DestroyPipelineLayout; + if (!strcmp(name, "CreateSampler")) return (void *)table->CreateSampler; + if (!strcmp(name, "DestroySampler")) return (void *)table->DestroySampler; + if (!strcmp(name, "CreateDescriptorSetLayout")) return (void *)table->CreateDescriptorSetLayout; + if (!strcmp(name, "DestroyDescriptorSetLayout")) return (void *)table->DestroyDescriptorSetLayout; + if (!strcmp(name, "CreateDescriptorPool")) return (void *)table->CreateDescriptorPool; + if (!strcmp(name, "DestroyDescriptorPool")) return (void *)table->DestroyDescriptorPool; + if (!strcmp(name, "ResetDescriptorPool")) return (void *)table->ResetDescriptorPool; + if (!strcmp(name, "AllocateDescriptorSets")) return (void *)table->AllocateDescriptorSets; + if (!strcmp(name, "FreeDescriptorSets")) return (void *)table->FreeDescriptorSets; + if (!strcmp(name, "UpdateDescriptorSets")) return (void *)table->UpdateDescriptorSets; + if (!strcmp(name, "CreateFramebuffer")) return (void *)table->CreateFramebuffer; + if (!strcmp(name, "DestroyFramebuffer")) return (void *)table->DestroyFramebuffer; + if (!strcmp(name, "CreateRenderPass")) return (void *)table->CreateRenderPass; + if (!strcmp(name, "DestroyRenderPass")) return (void *)table->DestroyRenderPass; + if (!strcmp(name, "GetRenderAreaGranularity")) return (void *)table->GetRenderAreaGranularity; + if (!strcmp(name, "CreateCommandPool")) return (void *)table->CreateCommandPool; + if (!strcmp(name, "DestroyCommandPool")) return (void *)table->DestroyCommandPool; + if (!strcmp(name, "ResetCommandPool")) return (void *)table->ResetCommandPool; + if (!strcmp(name, "AllocateCommandBuffers")) return (void *)table->AllocateCommandBuffers; + if (!strcmp(name, "FreeCommandBuffers")) return (void *)table->FreeCommandBuffers; + if (!strcmp(name, "BeginCommandBuffer")) return (void *)table->BeginCommandBuffer; + if (!strcmp(name, "EndCommandBuffer")) return (void *)table->EndCommandBuffer; + if (!strcmp(name, "ResetCommandBuffer")) return (void *)table->ResetCommandBuffer; + if (!strcmp(name, "CmdBindPipeline")) return (void *)table->CmdBindPipeline; + if (!strcmp(name, "CmdSetViewport")) return (void *)table->CmdSetViewport; + if (!strcmp(name, "CmdSetScissor")) return (void *)table->CmdSetScissor; + if (!strcmp(name, "CmdSetLineWidth")) return (void *)table->CmdSetLineWidth; + if (!strcmp(name, "CmdSetDepthBias")) return (void *)table->CmdSetDepthBias; + if (!strcmp(name, "CmdSetBlendConstants")) return (void *)table->CmdSetBlendConstants; + if (!strcmp(name, "CmdSetDepthBounds")) return (void *)table->CmdSetDepthBounds; + if (!strcmp(name, "CmdSetStencilCompareMask")) return (void *)table->CmdSetStencilCompareMask; + if (!strcmp(name, "CmdSetStencilWriteMask")) return (void *)table->CmdSetStencilWriteMask; + if (!strcmp(name, "CmdSetStencilReference")) return (void *)table->CmdSetStencilReference; + if (!strcmp(name, "CmdBindDescriptorSets")) return (void *)table->CmdBindDescriptorSets; + if (!strcmp(name, "CmdBindIndexBuffer")) return (void *)table->CmdBindIndexBuffer; + if (!strcmp(name, "CmdBindVertexBuffers")) return (void *)table->CmdBindVertexBuffers; + if (!strcmp(name, "CmdDraw")) return (void *)table->CmdDraw; + if (!strcmp(name, "CmdDrawIndexed")) 
return (void *)table->CmdDrawIndexed; + if (!strcmp(name, "CmdDrawIndirect")) return (void *)table->CmdDrawIndirect; + if (!strcmp(name, "CmdDrawIndexedIndirect")) return (void *)table->CmdDrawIndexedIndirect; + if (!strcmp(name, "CmdDispatch")) return (void *)table->CmdDispatch; + if (!strcmp(name, "CmdDispatchIndirect")) return (void *)table->CmdDispatchIndirect; + if (!strcmp(name, "CmdCopyBuffer")) return (void *)table->CmdCopyBuffer; + if (!strcmp(name, "CmdCopyImage")) return (void *)table->CmdCopyImage; + if (!strcmp(name, "CmdBlitImage")) return (void *)table->CmdBlitImage; + if (!strcmp(name, "CmdCopyBufferToImage")) return (void *)table->CmdCopyBufferToImage; + if (!strcmp(name, "CmdCopyImageToBuffer")) return (void *)table->CmdCopyImageToBuffer; + if (!strcmp(name, "CmdUpdateBuffer")) return (void *)table->CmdUpdateBuffer; + if (!strcmp(name, "CmdFillBuffer")) return (void *)table->CmdFillBuffer; + if (!strcmp(name, "CmdClearColorImage")) return (void *)table->CmdClearColorImage; + if (!strcmp(name, "CmdClearDepthStencilImage")) return (void *)table->CmdClearDepthStencilImage; + if (!strcmp(name, "CmdClearAttachments")) return (void *)table->CmdClearAttachments; + if (!strcmp(name, "CmdResolveImage")) return (void *)table->CmdResolveImage; + if (!strcmp(name, "CmdSetEvent")) return (void *)table->CmdSetEvent; + if (!strcmp(name, "CmdResetEvent")) return (void *)table->CmdResetEvent; + if (!strcmp(name, "CmdWaitEvents")) return (void *)table->CmdWaitEvents; + if (!strcmp(name, "CmdPipelineBarrier")) return (void *)table->CmdPipelineBarrier; + if (!strcmp(name, "CmdBeginQuery")) return (void *)table->CmdBeginQuery; + if (!strcmp(name, "CmdEndQuery")) return (void *)table->CmdEndQuery; + if (!strcmp(name, "CmdResetQueryPool")) return (void *)table->CmdResetQueryPool; + if (!strcmp(name, "CmdWriteTimestamp")) return (void *)table->CmdWriteTimestamp; + if (!strcmp(name, "CmdCopyQueryPoolResults")) return (void *)table->CmdCopyQueryPoolResults; + if (!strcmp(name, "CmdPushConstants")) return (void *)table->CmdPushConstants; + if (!strcmp(name, "CmdBeginRenderPass")) return (void *)table->CmdBeginRenderPass; + if (!strcmp(name, "CmdNextSubpass")) return (void *)table->CmdNextSubpass; + if (!strcmp(name, "CmdEndRenderPass")) return (void *)table->CmdEndRenderPass; + if (!strcmp(name, "CmdExecuteCommands")) return (void *)table->CmdExecuteCommands; + + // ---- Core 1_1 commands + if (!strcmp(name, "BindBufferMemory2")) return (void *)table->BindBufferMemory2; + if (!strcmp(name, "BindImageMemory2")) return (void *)table->BindImageMemory2; + if (!strcmp(name, "GetDeviceGroupPeerMemoryFeatures")) return (void *)table->GetDeviceGroupPeerMemoryFeatures; + if (!strcmp(name, "CmdSetDeviceMask")) return (void *)table->CmdSetDeviceMask; + if (!strcmp(name, "CmdDispatchBase")) return (void *)table->CmdDispatchBase; + if (!strcmp(name, "GetImageMemoryRequirements2")) return (void *)table->GetImageMemoryRequirements2; + if (!strcmp(name, "GetBufferMemoryRequirements2")) return (void *)table->GetBufferMemoryRequirements2; + if (!strcmp(name, "GetImageSparseMemoryRequirements2")) return (void *)table->GetImageSparseMemoryRequirements2; + if (!strcmp(name, "TrimCommandPool")) return (void *)table->TrimCommandPool; + if (!strcmp(name, "GetDeviceQueue2")) return (void *)table->GetDeviceQueue2; + if (!strcmp(name, "CreateSamplerYcbcrConversion")) return (void *)table->CreateSamplerYcbcrConversion; + if (!strcmp(name, "DestroySamplerYcbcrConversion")) return (void *)table->DestroySamplerYcbcrConversion; + 
if (!strcmp(name, "CreateDescriptorUpdateTemplate")) return (void *)table->CreateDescriptorUpdateTemplate; + if (!strcmp(name, "DestroyDescriptorUpdateTemplate")) return (void *)table->DestroyDescriptorUpdateTemplate; + if (!strcmp(name, "UpdateDescriptorSetWithTemplate")) return (void *)table->UpdateDescriptorSetWithTemplate; + if (!strcmp(name, "GetDescriptorSetLayoutSupport")) return (void *)table->GetDescriptorSetLayoutSupport; + + // ---- Core 1_2 commands + if (!strcmp(name, "CmdDrawIndirectCount")) return (void *)table->CmdDrawIndirectCount; + if (!strcmp(name, "CmdDrawIndexedIndirectCount")) return (void *)table->CmdDrawIndexedIndirectCount; + if (!strcmp(name, "CreateRenderPass2")) return (void *)table->CreateRenderPass2; + if (!strcmp(name, "CmdBeginRenderPass2")) return (void *)table->CmdBeginRenderPass2; + if (!strcmp(name, "CmdNextSubpass2")) return (void *)table->CmdNextSubpass2; + if (!strcmp(name, "CmdEndRenderPass2")) return (void *)table->CmdEndRenderPass2; + if (!strcmp(name, "ResetQueryPool")) return (void *)table->ResetQueryPool; + if (!strcmp(name, "GetSemaphoreCounterValue")) return (void *)table->GetSemaphoreCounterValue; + if (!strcmp(name, "WaitSemaphores")) return (void *)table->WaitSemaphores; + if (!strcmp(name, "SignalSemaphore")) return (void *)table->SignalSemaphore; + if (!strcmp(name, "GetBufferDeviceAddress")) return (void *)table->GetBufferDeviceAddress; + if (!strcmp(name, "GetBufferOpaqueCaptureAddress")) return (void *)table->GetBufferOpaqueCaptureAddress; + if (!strcmp(name, "GetDeviceMemoryOpaqueCaptureAddress")) return (void *)table->GetDeviceMemoryOpaqueCaptureAddress; + + // ---- VK_KHR_swapchain extension commands + if (!strcmp(name, "CreateSwapchainKHR")) return (void *)table->CreateSwapchainKHR; + if (!strcmp(name, "DestroySwapchainKHR")) return (void *)table->DestroySwapchainKHR; + if (!strcmp(name, "GetSwapchainImagesKHR")) return (void *)table->GetSwapchainImagesKHR; + if (!strcmp(name, "AcquireNextImageKHR")) return (void *)table->AcquireNextImageKHR; + if (!strcmp(name, "QueuePresentKHR")) return (void *)table->QueuePresentKHR; + if (!strcmp(name, "GetDeviceGroupPresentCapabilitiesKHR")) return (void *)table->GetDeviceGroupPresentCapabilitiesKHR; + if (!strcmp(name, "GetDeviceGroupSurfacePresentModesKHR")) return (void *)table->GetDeviceGroupSurfacePresentModesKHR; + if (!strcmp(name, "AcquireNextImage2KHR")) return (void *)table->AcquireNextImage2KHR; + + // ---- VK_KHR_display_swapchain extension commands + if (!strcmp(name, "CreateSharedSwapchainsKHR")) return (void *)table->CreateSharedSwapchainsKHR; + + // ---- VK_KHR_device_group extension commands + if (!strcmp(name, "GetDeviceGroupPeerMemoryFeaturesKHR")) return (void *)table->GetDeviceGroupPeerMemoryFeaturesKHR; + if (!strcmp(name, "CmdSetDeviceMaskKHR")) return (void *)table->CmdSetDeviceMaskKHR; + if (!strcmp(name, "CmdDispatchBaseKHR")) return (void *)table->CmdDispatchBaseKHR; + + // ---- VK_KHR_maintenance1 extension commands + if (!strcmp(name, "TrimCommandPoolKHR")) return (void *)table->TrimCommandPoolKHR; + + // ---- VK_KHR_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetMemoryWin32HandleKHR")) return (void *)table->GetMemoryWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetMemoryWin32HandlePropertiesKHR")) return (void *)table->GetMemoryWin32HandlePropertiesKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_memory_fd extension commands + if 
(!strcmp(name, "GetMemoryFdKHR")) return (void *)table->GetMemoryFdKHR; + if (!strcmp(name, "GetMemoryFdPropertiesKHR")) return (void *)table->GetMemoryFdPropertiesKHR; + + // ---- VK_KHR_external_semaphore_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "ImportSemaphoreWin32HandleKHR")) return (void *)table->ImportSemaphoreWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetSemaphoreWin32HandleKHR")) return (void *)table->GetSemaphoreWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_semaphore_fd extension commands + if (!strcmp(name, "ImportSemaphoreFdKHR")) return (void *)table->ImportSemaphoreFdKHR; + if (!strcmp(name, "GetSemaphoreFdKHR")) return (void *)table->GetSemaphoreFdKHR; + + // ---- VK_KHR_push_descriptor extension commands + if (!strcmp(name, "CmdPushDescriptorSetKHR")) return (void *)table->CmdPushDescriptorSetKHR; + if (!strcmp(name, "CmdPushDescriptorSetWithTemplateKHR")) return (void *)table->CmdPushDescriptorSetWithTemplateKHR; + + // ---- VK_KHR_descriptor_update_template extension commands + if (!strcmp(name, "CreateDescriptorUpdateTemplateKHR")) return (void *)table->CreateDescriptorUpdateTemplateKHR; + if (!strcmp(name, "DestroyDescriptorUpdateTemplateKHR")) return (void *)table->DestroyDescriptorUpdateTemplateKHR; + if (!strcmp(name, "UpdateDescriptorSetWithTemplateKHR")) return (void *)table->UpdateDescriptorSetWithTemplateKHR; + + // ---- VK_KHR_create_renderpass2 extension commands + if (!strcmp(name, "CreateRenderPass2KHR")) return (void *)table->CreateRenderPass2KHR; + if (!strcmp(name, "CmdBeginRenderPass2KHR")) return (void *)table->CmdBeginRenderPass2KHR; + if (!strcmp(name, "CmdNextSubpass2KHR")) return (void *)table->CmdNextSubpass2KHR; + if (!strcmp(name, "CmdEndRenderPass2KHR")) return (void *)table->CmdEndRenderPass2KHR; + + // ---- VK_KHR_shared_presentable_image extension commands + if (!strcmp(name, "GetSwapchainStatusKHR")) return (void *)table->GetSwapchainStatusKHR; + + // ---- VK_KHR_external_fence_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "ImportFenceWin32HandleKHR")) return (void *)table->ImportFenceWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetFenceWin32HandleKHR")) return (void *)table->GetFenceWin32HandleKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_fence_fd extension commands + if (!strcmp(name, "ImportFenceFdKHR")) return (void *)table->ImportFenceFdKHR; + if (!strcmp(name, "GetFenceFdKHR")) return (void *)table->GetFenceFdKHR; + + // ---- VK_KHR_performance_query extension commands + if (!strcmp(name, "AcquireProfilingLockKHR")) return (void *)table->AcquireProfilingLockKHR; + if (!strcmp(name, "ReleaseProfilingLockKHR")) return (void *)table->ReleaseProfilingLockKHR; + + // ---- VK_KHR_get_memory_requirements2 extension commands + if (!strcmp(name, "GetImageMemoryRequirements2KHR")) return (void *)table->GetImageMemoryRequirements2KHR; + if (!strcmp(name, "GetBufferMemoryRequirements2KHR")) return (void *)table->GetBufferMemoryRequirements2KHR; + if (!strcmp(name, "GetImageSparseMemoryRequirements2KHR")) return (void *)table->GetImageSparseMemoryRequirements2KHR; + + // ---- VK_KHR_sampler_ycbcr_conversion extension commands + if (!strcmp(name, "CreateSamplerYcbcrConversionKHR")) return (void *)table->CreateSamplerYcbcrConversionKHR; + if (!strcmp(name, "DestroySamplerYcbcrConversionKHR")) 
return (void *)table->DestroySamplerYcbcrConversionKHR; + + // ---- VK_KHR_bind_memory2 extension commands + if (!strcmp(name, "BindBufferMemory2KHR")) return (void *)table->BindBufferMemory2KHR; + if (!strcmp(name, "BindImageMemory2KHR")) return (void *)table->BindImageMemory2KHR; + + // ---- VK_KHR_maintenance3 extension commands + if (!strcmp(name, "GetDescriptorSetLayoutSupportKHR")) return (void *)table->GetDescriptorSetLayoutSupportKHR; + + // ---- VK_KHR_draw_indirect_count extension commands + if (!strcmp(name, "CmdDrawIndirectCountKHR")) return (void *)table->CmdDrawIndirectCountKHR; + if (!strcmp(name, "CmdDrawIndexedIndirectCountKHR")) return (void *)table->CmdDrawIndexedIndirectCountKHR; + + // ---- VK_KHR_timeline_semaphore extension commands + if (!strcmp(name, "GetSemaphoreCounterValueKHR")) return (void *)table->GetSemaphoreCounterValueKHR; + if (!strcmp(name, "WaitSemaphoresKHR")) return (void *)table->WaitSemaphoresKHR; + if (!strcmp(name, "SignalSemaphoreKHR")) return (void *)table->SignalSemaphoreKHR; + + // ---- VK_KHR_fragment_shading_rate extension commands + if (!strcmp(name, "CmdSetFragmentShadingRateKHR")) return (void *)table->CmdSetFragmentShadingRateKHR; + + // ---- VK_KHR_buffer_device_address extension commands + if (!strcmp(name, "GetBufferDeviceAddressKHR")) return (void *)table->GetBufferDeviceAddressKHR; + if (!strcmp(name, "GetBufferOpaqueCaptureAddressKHR")) return (void *)table->GetBufferOpaqueCaptureAddressKHR; + if (!strcmp(name, "GetDeviceMemoryOpaqueCaptureAddressKHR")) return (void *)table->GetDeviceMemoryOpaqueCaptureAddressKHR; + + // ---- VK_KHR_deferred_host_operations extension commands + if (!strcmp(name, "CreateDeferredOperationKHR")) return (void *)table->CreateDeferredOperationKHR; + if (!strcmp(name, "DestroyDeferredOperationKHR")) return (void *)table->DestroyDeferredOperationKHR; + if (!strcmp(name, "GetDeferredOperationMaxConcurrencyKHR")) return (void *)table->GetDeferredOperationMaxConcurrencyKHR; + if (!strcmp(name, "GetDeferredOperationResultKHR")) return (void *)table->GetDeferredOperationResultKHR; + if (!strcmp(name, "DeferredOperationJoinKHR")) return (void *)table->DeferredOperationJoinKHR; + + // ---- VK_KHR_pipeline_executable_properties extension commands + if (!strcmp(name, "GetPipelineExecutablePropertiesKHR")) return (void *)table->GetPipelineExecutablePropertiesKHR; + if (!strcmp(name, "GetPipelineExecutableStatisticsKHR")) return (void *)table->GetPipelineExecutableStatisticsKHR; + if (!strcmp(name, "GetPipelineExecutableInternalRepresentationsKHR")) return (void *)table->GetPipelineExecutableInternalRepresentationsKHR; + + // ---- VK_KHR_copy_commands2 extension commands + if (!strcmp(name, "CmdCopyBuffer2KHR")) return (void *)table->CmdCopyBuffer2KHR; + if (!strcmp(name, "CmdCopyImage2KHR")) return (void *)table->CmdCopyImage2KHR; + if (!strcmp(name, "CmdCopyBufferToImage2KHR")) return (void *)table->CmdCopyBufferToImage2KHR; + if (!strcmp(name, "CmdCopyImageToBuffer2KHR")) return (void *)table->CmdCopyImageToBuffer2KHR; + if (!strcmp(name, "CmdBlitImage2KHR")) return (void *)table->CmdBlitImage2KHR; + if (!strcmp(name, "CmdResolveImage2KHR")) return (void *)table->CmdResolveImage2KHR; + + // ---- VK_EXT_debug_marker extension commands + if (!strcmp(name, "DebugMarkerSetObjectTagEXT")) return (void *)table->DebugMarkerSetObjectTagEXT; + if (!strcmp(name, "DebugMarkerSetObjectNameEXT")) return (void *)table->DebugMarkerSetObjectNameEXT; + if (!strcmp(name, "CmdDebugMarkerBeginEXT")) return (void 
*)table->CmdDebugMarkerBeginEXT; + if (!strcmp(name, "CmdDebugMarkerEndEXT")) return (void *)table->CmdDebugMarkerEndEXT; + if (!strcmp(name, "CmdDebugMarkerInsertEXT")) return (void *)table->CmdDebugMarkerInsertEXT; + + // ---- VK_EXT_transform_feedback extension commands + if (!strcmp(name, "CmdBindTransformFeedbackBuffersEXT")) return (void *)table->CmdBindTransformFeedbackBuffersEXT; + if (!strcmp(name, "CmdBeginTransformFeedbackEXT")) return (void *)table->CmdBeginTransformFeedbackEXT; + if (!strcmp(name, "CmdEndTransformFeedbackEXT")) return (void *)table->CmdEndTransformFeedbackEXT; + if (!strcmp(name, "CmdBeginQueryIndexedEXT")) return (void *)table->CmdBeginQueryIndexedEXT; + if (!strcmp(name, "CmdEndQueryIndexedEXT")) return (void *)table->CmdEndQueryIndexedEXT; + if (!strcmp(name, "CmdDrawIndirectByteCountEXT")) return (void *)table->CmdDrawIndirectByteCountEXT; + + // ---- VK_NVX_image_view_handle extension commands + if (!strcmp(name, "GetImageViewHandleNVX")) return (void *)table->GetImageViewHandleNVX; + if (!strcmp(name, "GetImageViewAddressNVX")) return (void *)table->GetImageViewAddressNVX; + + // ---- VK_AMD_draw_indirect_count extension commands + if (!strcmp(name, "CmdDrawIndirectCountAMD")) return (void *)table->CmdDrawIndirectCountAMD; + if (!strcmp(name, "CmdDrawIndexedIndirectCountAMD")) return (void *)table->CmdDrawIndexedIndirectCountAMD; + + // ---- VK_AMD_shader_info extension commands + if (!strcmp(name, "GetShaderInfoAMD")) return (void *)table->GetShaderInfoAMD; + + // ---- VK_NV_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetMemoryWin32HandleNV")) return (void *)table->GetMemoryWin32HandleNV; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_conditional_rendering extension commands + if (!strcmp(name, "CmdBeginConditionalRenderingEXT")) return (void *)table->CmdBeginConditionalRenderingEXT; + if (!strcmp(name, "CmdEndConditionalRenderingEXT")) return (void *)table->CmdEndConditionalRenderingEXT; + + // ---- VK_NV_clip_space_w_scaling extension commands + if (!strcmp(name, "CmdSetViewportWScalingNV")) return (void *)table->CmdSetViewportWScalingNV; + + // ---- VK_EXT_display_control extension commands + if (!strcmp(name, "DisplayPowerControlEXT")) return (void *)table->DisplayPowerControlEXT; + if (!strcmp(name, "RegisterDeviceEventEXT")) return (void *)table->RegisterDeviceEventEXT; + if (!strcmp(name, "RegisterDisplayEventEXT")) return (void *)table->RegisterDisplayEventEXT; + if (!strcmp(name, "GetSwapchainCounterEXT")) return (void *)table->GetSwapchainCounterEXT; + + // ---- VK_GOOGLE_display_timing extension commands + if (!strcmp(name, "GetRefreshCycleDurationGOOGLE")) return (void *)table->GetRefreshCycleDurationGOOGLE; + if (!strcmp(name, "GetPastPresentationTimingGOOGLE")) return (void *)table->GetPastPresentationTimingGOOGLE; + + // ---- VK_EXT_discard_rectangles extension commands + if (!strcmp(name, "CmdSetDiscardRectangleEXT")) return (void *)table->CmdSetDiscardRectangleEXT; + + // ---- VK_EXT_hdr_metadata extension commands + if (!strcmp(name, "SetHdrMetadataEXT")) return (void *)table->SetHdrMetadataEXT; + + // ---- VK_EXT_debug_utils extension commands + if (!strcmp(name, "SetDebugUtilsObjectNameEXT")) return (void *)table->SetDebugUtilsObjectNameEXT; + if (!strcmp(name, "SetDebugUtilsObjectTagEXT")) return (void *)table->SetDebugUtilsObjectTagEXT; + if (!strcmp(name, "QueueBeginDebugUtilsLabelEXT")) return (void *)table->QueueBeginDebugUtilsLabelEXT; + if (!strcmp(name, 
"QueueEndDebugUtilsLabelEXT")) return (void *)table->QueueEndDebugUtilsLabelEXT; + if (!strcmp(name, "QueueInsertDebugUtilsLabelEXT")) return (void *)table->QueueInsertDebugUtilsLabelEXT; + if (!strcmp(name, "CmdBeginDebugUtilsLabelEXT")) return (void *)table->CmdBeginDebugUtilsLabelEXT; + if (!strcmp(name, "CmdEndDebugUtilsLabelEXT")) return (void *)table->CmdEndDebugUtilsLabelEXT; + if (!strcmp(name, "CmdInsertDebugUtilsLabelEXT")) return (void *)table->CmdInsertDebugUtilsLabelEXT; + + // ---- VK_ANDROID_external_memory_android_hardware_buffer extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (!strcmp(name, "GetAndroidHardwareBufferPropertiesANDROID")) return (void *)table->GetAndroidHardwareBufferPropertiesANDROID; +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (!strcmp(name, "GetMemoryAndroidHardwareBufferANDROID")) return (void *)table->GetMemoryAndroidHardwareBufferANDROID; +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_EXT_sample_locations extension commands + if (!strcmp(name, "CmdSetSampleLocationsEXT")) return (void *)table->CmdSetSampleLocationsEXT; + + // ---- VK_EXT_image_drm_format_modifier extension commands + if (!strcmp(name, "GetImageDrmFormatModifierPropertiesEXT")) return (void *)table->GetImageDrmFormatModifierPropertiesEXT; + + // ---- VK_EXT_validation_cache extension commands + if (!strcmp(name, "CreateValidationCacheEXT")) return (void *)table->CreateValidationCacheEXT; + if (!strcmp(name, "DestroyValidationCacheEXT")) return (void *)table->DestroyValidationCacheEXT; + if (!strcmp(name, "MergeValidationCachesEXT")) return (void *)table->MergeValidationCachesEXT; + if (!strcmp(name, "GetValidationCacheDataEXT")) return (void *)table->GetValidationCacheDataEXT; + + // ---- VK_NV_shading_rate_image extension commands + if (!strcmp(name, "CmdBindShadingRateImageNV")) return (void *)table->CmdBindShadingRateImageNV; + if (!strcmp(name, "CmdSetViewportShadingRatePaletteNV")) return (void *)table->CmdSetViewportShadingRatePaletteNV; + if (!strcmp(name, "CmdSetCoarseSampleOrderNV")) return (void *)table->CmdSetCoarseSampleOrderNV; + + // ---- VK_NV_ray_tracing extension commands + if (!strcmp(name, "CreateAccelerationStructureNV")) return (void *)table->CreateAccelerationStructureNV; + if (!strcmp(name, "DestroyAccelerationStructureNV")) return (void *)table->DestroyAccelerationStructureNV; + if (!strcmp(name, "GetAccelerationStructureMemoryRequirementsNV")) return (void *)table->GetAccelerationStructureMemoryRequirementsNV; + if (!strcmp(name, "BindAccelerationStructureMemoryNV")) return (void *)table->BindAccelerationStructureMemoryNV; + if (!strcmp(name, "CmdBuildAccelerationStructureNV")) return (void *)table->CmdBuildAccelerationStructureNV; + if (!strcmp(name, "CmdCopyAccelerationStructureNV")) return (void *)table->CmdCopyAccelerationStructureNV; + if (!strcmp(name, "CmdTraceRaysNV")) return (void *)table->CmdTraceRaysNV; + if (!strcmp(name, "CreateRayTracingPipelinesNV")) return (void *)table->CreateRayTracingPipelinesNV; + if (!strcmp(name, "GetRayTracingShaderGroupHandlesKHR")) return (void *)table->GetRayTracingShaderGroupHandlesKHR; + if (!strcmp(name, "GetRayTracingShaderGroupHandlesNV")) return (void *)table->GetRayTracingShaderGroupHandlesNV; + if (!strcmp(name, "GetAccelerationStructureHandleNV")) return (void *)table->GetAccelerationStructureHandleNV; + if (!strcmp(name, "CmdWriteAccelerationStructuresPropertiesNV")) return (void *)table->CmdWriteAccelerationStructuresPropertiesNV; + if (!strcmp(name, 
"CompileDeferredNV")) return (void *)table->CompileDeferredNV; + + // ---- VK_EXT_external_memory_host extension commands + if (!strcmp(name, "GetMemoryHostPointerPropertiesEXT")) return (void *)table->GetMemoryHostPointerPropertiesEXT; + + // ---- VK_AMD_buffer_marker extension commands + if (!strcmp(name, "CmdWriteBufferMarkerAMD")) return (void *)table->CmdWriteBufferMarkerAMD; + + // ---- VK_EXT_calibrated_timestamps extension commands + if (!strcmp(name, "GetCalibratedTimestampsEXT")) return (void *)table->GetCalibratedTimestampsEXT; + + // ---- VK_NV_mesh_shader extension commands + if (!strcmp(name, "CmdDrawMeshTasksNV")) return (void *)table->CmdDrawMeshTasksNV; + if (!strcmp(name, "CmdDrawMeshTasksIndirectNV")) return (void *)table->CmdDrawMeshTasksIndirectNV; + if (!strcmp(name, "CmdDrawMeshTasksIndirectCountNV")) return (void *)table->CmdDrawMeshTasksIndirectCountNV; + + // ---- VK_NV_scissor_exclusive extension commands + if (!strcmp(name, "CmdSetExclusiveScissorNV")) return (void *)table->CmdSetExclusiveScissorNV; + + // ---- VK_NV_device_diagnostic_checkpoints extension commands + if (!strcmp(name, "CmdSetCheckpointNV")) return (void *)table->CmdSetCheckpointNV; + if (!strcmp(name, "GetQueueCheckpointDataNV")) return (void *)table->GetQueueCheckpointDataNV; + + // ---- VK_INTEL_performance_query extension commands + if (!strcmp(name, "InitializePerformanceApiINTEL")) return (void *)table->InitializePerformanceApiINTEL; + if (!strcmp(name, "UninitializePerformanceApiINTEL")) return (void *)table->UninitializePerformanceApiINTEL; + if (!strcmp(name, "CmdSetPerformanceMarkerINTEL")) return (void *)table->CmdSetPerformanceMarkerINTEL; + if (!strcmp(name, "CmdSetPerformanceStreamMarkerINTEL")) return (void *)table->CmdSetPerformanceStreamMarkerINTEL; + if (!strcmp(name, "CmdSetPerformanceOverrideINTEL")) return (void *)table->CmdSetPerformanceOverrideINTEL; + if (!strcmp(name, "AcquirePerformanceConfigurationINTEL")) return (void *)table->AcquirePerformanceConfigurationINTEL; + if (!strcmp(name, "ReleasePerformanceConfigurationINTEL")) return (void *)table->ReleasePerformanceConfigurationINTEL; + if (!strcmp(name, "QueueSetPerformanceConfigurationINTEL")) return (void *)table->QueueSetPerformanceConfigurationINTEL; + if (!strcmp(name, "GetPerformanceParameterINTEL")) return (void *)table->GetPerformanceParameterINTEL; + + // ---- VK_AMD_display_native_hdr extension commands + if (!strcmp(name, "SetLocalDimmingAMD")) return (void *)table->SetLocalDimmingAMD; + + // ---- VK_EXT_buffer_device_address extension commands + if (!strcmp(name, "GetBufferDeviceAddressEXT")) return (void *)table->GetBufferDeviceAddressEXT; + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "AcquireFullScreenExclusiveModeEXT")) return (void *)table->AcquireFullScreenExclusiveModeEXT; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "ReleaseFullScreenExclusiveModeEXT")) return (void *)table->ReleaseFullScreenExclusiveModeEXT; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetDeviceGroupSurfacePresentModes2EXT")) return (void *)table->GetDeviceGroupSurfacePresentModes2EXT; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_line_rasterization extension commands + if (!strcmp(name, "CmdSetLineStippleEXT")) return (void *)table->CmdSetLineStippleEXT; + + // ---- VK_EXT_host_query_reset extension commands + if (!strcmp(name, "ResetQueryPoolEXT")) return (void 
*)table->ResetQueryPoolEXT; + + // ---- VK_EXT_extended_dynamic_state extension commands + if (!strcmp(name, "CmdSetCullModeEXT")) return (void *)table->CmdSetCullModeEXT; + if (!strcmp(name, "CmdSetFrontFaceEXT")) return (void *)table->CmdSetFrontFaceEXT; + if (!strcmp(name, "CmdSetPrimitiveTopologyEXT")) return (void *)table->CmdSetPrimitiveTopologyEXT; + if (!strcmp(name, "CmdSetViewportWithCountEXT")) return (void *)table->CmdSetViewportWithCountEXT; + if (!strcmp(name, "CmdSetScissorWithCountEXT")) return (void *)table->CmdSetScissorWithCountEXT; + if (!strcmp(name, "CmdBindVertexBuffers2EXT")) return (void *)table->CmdBindVertexBuffers2EXT; + if (!strcmp(name, "CmdSetDepthTestEnableEXT")) return (void *)table->CmdSetDepthTestEnableEXT; + if (!strcmp(name, "CmdSetDepthWriteEnableEXT")) return (void *)table->CmdSetDepthWriteEnableEXT; + if (!strcmp(name, "CmdSetDepthCompareOpEXT")) return (void *)table->CmdSetDepthCompareOpEXT; + if (!strcmp(name, "CmdSetDepthBoundsTestEnableEXT")) return (void *)table->CmdSetDepthBoundsTestEnableEXT; + if (!strcmp(name, "CmdSetStencilTestEnableEXT")) return (void *)table->CmdSetStencilTestEnableEXT; + if (!strcmp(name, "CmdSetStencilOpEXT")) return (void *)table->CmdSetStencilOpEXT; + + // ---- VK_NV_device_generated_commands extension commands + if (!strcmp(name, "GetGeneratedCommandsMemoryRequirementsNV")) return (void *)table->GetGeneratedCommandsMemoryRequirementsNV; + if (!strcmp(name, "CmdPreprocessGeneratedCommandsNV")) return (void *)table->CmdPreprocessGeneratedCommandsNV; + if (!strcmp(name, "CmdExecuteGeneratedCommandsNV")) return (void *)table->CmdExecuteGeneratedCommandsNV; + if (!strcmp(name, "CmdBindPipelineShaderGroupNV")) return (void *)table->CmdBindPipelineShaderGroupNV; + if (!strcmp(name, "CreateIndirectCommandsLayoutNV")) return (void *)table->CreateIndirectCommandsLayoutNV; + if (!strcmp(name, "DestroyIndirectCommandsLayoutNV")) return (void *)table->DestroyIndirectCommandsLayoutNV; + + // ---- VK_EXT_private_data extension commands + if (!strcmp(name, "CreatePrivateDataSlotEXT")) return (void *)table->CreatePrivateDataSlotEXT; + if (!strcmp(name, "DestroyPrivateDataSlotEXT")) return (void *)table->DestroyPrivateDataSlotEXT; + if (!strcmp(name, "SetPrivateDataEXT")) return (void *)table->SetPrivateDataEXT; + if (!strcmp(name, "GetPrivateDataEXT")) return (void *)table->GetPrivateDataEXT; + + // ---- VK_NV_fragment_shading_rate_enums extension commands + if (!strcmp(name, "CmdSetFragmentShadingRateEnumNV")) return (void *)table->CmdSetFragmentShadingRateEnumNV; + + // ---- VK_KHR_acceleration_structure extension commands + if (!strcmp(name, "CreateAccelerationStructureKHR")) return (void *)table->CreateAccelerationStructureKHR; + if (!strcmp(name, "DestroyAccelerationStructureKHR")) return (void *)table->DestroyAccelerationStructureKHR; + if (!strcmp(name, "CmdBuildAccelerationStructuresKHR")) return (void *)table->CmdBuildAccelerationStructuresKHR; + if (!strcmp(name, "CmdBuildAccelerationStructuresIndirectKHR")) return (void *)table->CmdBuildAccelerationStructuresIndirectKHR; + if (!strcmp(name, "BuildAccelerationStructuresKHR")) return (void *)table->BuildAccelerationStructuresKHR; + if (!strcmp(name, "CopyAccelerationStructureKHR")) return (void *)table->CopyAccelerationStructureKHR; + if (!strcmp(name, "CopyAccelerationStructureToMemoryKHR")) return (void *)table->CopyAccelerationStructureToMemoryKHR; + if (!strcmp(name, "CopyMemoryToAccelerationStructureKHR")) return (void *)table->CopyMemoryToAccelerationStructureKHR; + if 
(!strcmp(name, "WriteAccelerationStructuresPropertiesKHR")) return (void *)table->WriteAccelerationStructuresPropertiesKHR; + if (!strcmp(name, "CmdCopyAccelerationStructureKHR")) return (void *)table->CmdCopyAccelerationStructureKHR; + if (!strcmp(name, "CmdCopyAccelerationStructureToMemoryKHR")) return (void *)table->CmdCopyAccelerationStructureToMemoryKHR; + if (!strcmp(name, "CmdCopyMemoryToAccelerationStructureKHR")) return (void *)table->CmdCopyMemoryToAccelerationStructureKHR; + if (!strcmp(name, "GetAccelerationStructureDeviceAddressKHR")) return (void *)table->GetAccelerationStructureDeviceAddressKHR; + if (!strcmp(name, "CmdWriteAccelerationStructuresPropertiesKHR")) return (void *)table->CmdWriteAccelerationStructuresPropertiesKHR; + if (!strcmp(name, "GetDeviceAccelerationStructureCompatibilityKHR")) return (void *)table->GetDeviceAccelerationStructureCompatibilityKHR; + if (!strcmp(name, "GetAccelerationStructureBuildSizesKHR")) return (void *)table->GetAccelerationStructureBuildSizesKHR; + + // ---- VK_KHR_ray_tracing_pipeline extension commands + if (!strcmp(name, "CmdTraceRaysKHR")) return (void *)table->CmdTraceRaysKHR; + if (!strcmp(name, "CreateRayTracingPipelinesKHR")) return (void *)table->CreateRayTracingPipelinesKHR; + if (!strcmp(name, "GetRayTracingCaptureReplayShaderGroupHandlesKHR")) return (void *)table->GetRayTracingCaptureReplayShaderGroupHandlesKHR; + if (!strcmp(name, "CmdTraceRaysIndirectKHR")) return (void *)table->CmdTraceRaysIndirectKHR; + if (!strcmp(name, "GetRayTracingShaderGroupStackSizeKHR")) return (void *)table->GetRayTracingShaderGroupStackSizeKHR; + if (!strcmp(name, "CmdSetRayTracingPipelineStackSizeKHR")) return (void *)table->CmdSetRayTracingPipelineStackSizeKHR; + + return NULL; +} + +// Instance command lookup function +VKAPI_ATTR void* VKAPI_CALL loader_lookup_instance_dispatch_table(const VkLayerInstanceDispatchTable *table, const char *name, + bool *found_name) { + if (!name || name[0] != 'v' || name[1] != 'k') { + *found_name = false; + return NULL; + } + + *found_name = true; + name += 2; + + // ---- Core 1_0 commands + if (!strcmp(name, "DestroyInstance")) return (void *)table->DestroyInstance; + if (!strcmp(name, "EnumeratePhysicalDevices")) return (void *)table->EnumeratePhysicalDevices; + if (!strcmp(name, "GetPhysicalDeviceFeatures")) return (void *)table->GetPhysicalDeviceFeatures; + if (!strcmp(name, "GetPhysicalDeviceFormatProperties")) return (void *)table->GetPhysicalDeviceFormatProperties; + if (!strcmp(name, "GetPhysicalDeviceImageFormatProperties")) return (void *)table->GetPhysicalDeviceImageFormatProperties; + if (!strcmp(name, "GetPhysicalDeviceProperties")) return (void *)table->GetPhysicalDeviceProperties; + if (!strcmp(name, "GetPhysicalDeviceQueueFamilyProperties")) return (void *)table->GetPhysicalDeviceQueueFamilyProperties; + if (!strcmp(name, "GetPhysicalDeviceMemoryProperties")) return (void *)table->GetPhysicalDeviceMemoryProperties; + if (!strcmp(name, "GetInstanceProcAddr")) return (void *)table->GetInstanceProcAddr; + if (!strcmp(name, "EnumerateDeviceExtensionProperties")) return (void *)table->EnumerateDeviceExtensionProperties; + if (!strcmp(name, "EnumerateDeviceLayerProperties")) return (void *)table->EnumerateDeviceLayerProperties; + if (!strcmp(name, "GetPhysicalDeviceSparseImageFormatProperties")) return (void *)table->GetPhysicalDeviceSparseImageFormatProperties; + + // ---- Core 1_1 commands + if (!strcmp(name, "EnumeratePhysicalDeviceGroups")) return (void *)table->EnumeratePhysicalDeviceGroups; 
+ if (!strcmp(name, "GetPhysicalDeviceFeatures2")) return (void *)table->GetPhysicalDeviceFeatures2; + if (!strcmp(name, "GetPhysicalDeviceProperties2")) return (void *)table->GetPhysicalDeviceProperties2; + if (!strcmp(name, "GetPhysicalDeviceFormatProperties2")) return (void *)table->GetPhysicalDeviceFormatProperties2; + if (!strcmp(name, "GetPhysicalDeviceImageFormatProperties2")) return (void *)table->GetPhysicalDeviceImageFormatProperties2; + if (!strcmp(name, "GetPhysicalDeviceQueueFamilyProperties2")) return (void *)table->GetPhysicalDeviceQueueFamilyProperties2; + if (!strcmp(name, "GetPhysicalDeviceMemoryProperties2")) return (void *)table->GetPhysicalDeviceMemoryProperties2; + if (!strcmp(name, "GetPhysicalDeviceSparseImageFormatProperties2")) return (void *)table->GetPhysicalDeviceSparseImageFormatProperties2; + if (!strcmp(name, "GetPhysicalDeviceExternalBufferProperties")) return (void *)table->GetPhysicalDeviceExternalBufferProperties; + if (!strcmp(name, "GetPhysicalDeviceExternalFenceProperties")) return (void *)table->GetPhysicalDeviceExternalFenceProperties; + if (!strcmp(name, "GetPhysicalDeviceExternalSemaphoreProperties")) return (void *)table->GetPhysicalDeviceExternalSemaphoreProperties; + + // ---- VK_KHR_surface extension commands + if (!strcmp(name, "DestroySurfaceKHR")) return (void *)table->DestroySurfaceKHR; + if (!strcmp(name, "GetPhysicalDeviceSurfaceSupportKHR")) return (void *)table->GetPhysicalDeviceSurfaceSupportKHR; + if (!strcmp(name, "GetPhysicalDeviceSurfaceCapabilitiesKHR")) return (void *)table->GetPhysicalDeviceSurfaceCapabilitiesKHR; + if (!strcmp(name, "GetPhysicalDeviceSurfaceFormatsKHR")) return (void *)table->GetPhysicalDeviceSurfaceFormatsKHR; + if (!strcmp(name, "GetPhysicalDeviceSurfacePresentModesKHR")) return (void *)table->GetPhysicalDeviceSurfacePresentModesKHR; + + // ---- VK_KHR_swapchain extension commands + if (!strcmp(name, "GetPhysicalDevicePresentRectanglesKHR")) return (void *)table->GetPhysicalDevicePresentRectanglesKHR; + + // ---- VK_KHR_display extension commands + if (!strcmp(name, "GetPhysicalDeviceDisplayPropertiesKHR")) return (void *)table->GetPhysicalDeviceDisplayPropertiesKHR; + if (!strcmp(name, "GetPhysicalDeviceDisplayPlanePropertiesKHR")) return (void *)table->GetPhysicalDeviceDisplayPlanePropertiesKHR; + if (!strcmp(name, "GetDisplayPlaneSupportedDisplaysKHR")) return (void *)table->GetDisplayPlaneSupportedDisplaysKHR; + if (!strcmp(name, "GetDisplayModePropertiesKHR")) return (void *)table->GetDisplayModePropertiesKHR; + if (!strcmp(name, "CreateDisplayModeKHR")) return (void *)table->CreateDisplayModeKHR; + if (!strcmp(name, "GetDisplayPlaneCapabilitiesKHR")) return (void *)table->GetDisplayPlaneCapabilitiesKHR; + if (!strcmp(name, "CreateDisplayPlaneSurfaceKHR")) return (void *)table->CreateDisplayPlaneSurfaceKHR; + + // ---- VK_KHR_xlib_surface extension commands +#ifdef VK_USE_PLATFORM_XLIB_KHR + if (!strcmp(name, "CreateXlibSurfaceKHR")) return (void *)table->CreateXlibSurfaceKHR; +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + if (!strcmp(name, "GetPhysicalDeviceXlibPresentationSupportKHR")) return (void *)table->GetPhysicalDeviceXlibPresentationSupportKHR; +#endif // VK_USE_PLATFORM_XLIB_KHR + + // ---- VK_KHR_xcb_surface extension commands +#ifdef VK_USE_PLATFORM_XCB_KHR + if (!strcmp(name, "CreateXcbSurfaceKHR")) return (void *)table->CreateXcbSurfaceKHR; +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + if (!strcmp(name, 
"GetPhysicalDeviceXcbPresentationSupportKHR")) return (void *)table->GetPhysicalDeviceXcbPresentationSupportKHR; +#endif // VK_USE_PLATFORM_XCB_KHR + + // ---- VK_KHR_wayland_surface extension commands +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + if (!strcmp(name, "CreateWaylandSurfaceKHR")) return (void *)table->CreateWaylandSurfaceKHR; +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + if (!strcmp(name, "GetPhysicalDeviceWaylandPresentationSupportKHR")) return (void *)table->GetPhysicalDeviceWaylandPresentationSupportKHR; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + + // ---- VK_KHR_android_surface extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (!strcmp(name, "CreateAndroidSurfaceKHR")) return (void *)table->CreateAndroidSurfaceKHR; +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_KHR_win32_surface extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "CreateWin32SurfaceKHR")) return (void *)table->CreateWin32SurfaceKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetPhysicalDeviceWin32PresentationSupportKHR")) return (void *)table->GetPhysicalDeviceWin32PresentationSupportKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_get_physical_device_properties2 extension commands + if (!strcmp(name, "GetPhysicalDeviceFeatures2KHR")) return (void *)table->GetPhysicalDeviceFeatures2KHR; + if (!strcmp(name, "GetPhysicalDeviceProperties2KHR")) return (void *)table->GetPhysicalDeviceProperties2KHR; + if (!strcmp(name, "GetPhysicalDeviceFormatProperties2KHR")) return (void *)table->GetPhysicalDeviceFormatProperties2KHR; + if (!strcmp(name, "GetPhysicalDeviceImageFormatProperties2KHR")) return (void *)table->GetPhysicalDeviceImageFormatProperties2KHR; + if (!strcmp(name, "GetPhysicalDeviceQueueFamilyProperties2KHR")) return (void *)table->GetPhysicalDeviceQueueFamilyProperties2KHR; + if (!strcmp(name, "GetPhysicalDeviceMemoryProperties2KHR")) return (void *)table->GetPhysicalDeviceMemoryProperties2KHR; + if (!strcmp(name, "GetPhysicalDeviceSparseImageFormatProperties2KHR")) return (void *)table->GetPhysicalDeviceSparseImageFormatProperties2KHR; + + // ---- VK_KHR_device_group_creation extension commands + if (!strcmp(name, "EnumeratePhysicalDeviceGroupsKHR")) return (void *)table->EnumeratePhysicalDeviceGroupsKHR; + + // ---- VK_KHR_external_memory_capabilities extension commands + if (!strcmp(name, "GetPhysicalDeviceExternalBufferPropertiesKHR")) return (void *)table->GetPhysicalDeviceExternalBufferPropertiesKHR; + + // ---- VK_KHR_external_semaphore_capabilities extension commands + if (!strcmp(name, "GetPhysicalDeviceExternalSemaphorePropertiesKHR")) return (void *)table->GetPhysicalDeviceExternalSemaphorePropertiesKHR; + + // ---- VK_KHR_external_fence_capabilities extension commands + if (!strcmp(name, "GetPhysicalDeviceExternalFencePropertiesKHR")) return (void *)table->GetPhysicalDeviceExternalFencePropertiesKHR; + + // ---- VK_KHR_performance_query extension commands + if (!strcmp(name, "EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR")) return (void *)table->EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; + if (!strcmp(name, "GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR")) return (void *)table->GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR; + + // ---- VK_KHR_get_surface_capabilities2 extension commands + if (!strcmp(name, "GetPhysicalDeviceSurfaceCapabilities2KHR")) return (void *)table->GetPhysicalDeviceSurfaceCapabilities2KHR; + if 
(!strcmp(name, "GetPhysicalDeviceSurfaceFormats2KHR")) return (void *)table->GetPhysicalDeviceSurfaceFormats2KHR; + + // ---- VK_KHR_get_display_properties2 extension commands + if (!strcmp(name, "GetPhysicalDeviceDisplayProperties2KHR")) return (void *)table->GetPhysicalDeviceDisplayProperties2KHR; + if (!strcmp(name, "GetPhysicalDeviceDisplayPlaneProperties2KHR")) return (void *)table->GetPhysicalDeviceDisplayPlaneProperties2KHR; + if (!strcmp(name, "GetDisplayModeProperties2KHR")) return (void *)table->GetDisplayModeProperties2KHR; + if (!strcmp(name, "GetDisplayPlaneCapabilities2KHR")) return (void *)table->GetDisplayPlaneCapabilities2KHR; + + // ---- VK_KHR_fragment_shading_rate extension commands + if (!strcmp(name, "GetPhysicalDeviceFragmentShadingRatesKHR")) return (void *)table->GetPhysicalDeviceFragmentShadingRatesKHR; + + // ---- VK_EXT_debug_report extension commands + if (!strcmp(name, "CreateDebugReportCallbackEXT")) return (void *)table->CreateDebugReportCallbackEXT; + if (!strcmp(name, "DestroyDebugReportCallbackEXT")) return (void *)table->DestroyDebugReportCallbackEXT; + if (!strcmp(name, "DebugReportMessageEXT")) return (void *)table->DebugReportMessageEXT; + + // ---- VK_GGP_stream_descriptor_surface extension commands +#ifdef VK_USE_PLATFORM_GGP + if (!strcmp(name, "CreateStreamDescriptorSurfaceGGP")) return (void *)table->CreateStreamDescriptorSurfaceGGP; +#endif // VK_USE_PLATFORM_GGP + + // ---- VK_NV_external_memory_capabilities extension commands + if (!strcmp(name, "GetPhysicalDeviceExternalImageFormatPropertiesNV")) return (void *)table->GetPhysicalDeviceExternalImageFormatPropertiesNV; + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + if (!strcmp(name, "CreateViSurfaceNN")) return (void *)table->CreateViSurfaceNN; +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + if (!strcmp(name, "ReleaseDisplayEXT")) return (void *)table->ReleaseDisplayEXT; + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + if (!strcmp(name, "AcquireXlibDisplayEXT")) return (void *)table->AcquireXlibDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + if (!strcmp(name, "GetRandROutputDisplayEXT")) return (void *)table->GetRandROutputDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + if (!strcmp(name, "GetPhysicalDeviceSurfaceCapabilities2EXT")) return (void *)table->GetPhysicalDeviceSurfaceCapabilities2EXT; + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + if (!strcmp(name, "CreateIOSSurfaceMVK")) return (void *)table->CreateIOSSurfaceMVK; +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + if (!strcmp(name, "CreateMacOSSurfaceMVK")) return (void *)table->CreateMacOSSurfaceMVK; +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + if (!strcmp(name, "CreateDebugUtilsMessengerEXT")) return (void *)table->CreateDebugUtilsMessengerEXT; + if (!strcmp(name, "DestroyDebugUtilsMessengerEXT")) return (void *)table->DestroyDebugUtilsMessengerEXT; + if (!strcmp(name, "SubmitDebugUtilsMessageEXT")) return (void *)table->SubmitDebugUtilsMessageEXT; + + // ---- VK_EXT_sample_locations extension commands + if (!strcmp(name, "GetPhysicalDeviceMultisamplePropertiesEXT")) return (void *)table->GetPhysicalDeviceMultisamplePropertiesEXT; + + // 
---- VK_EXT_calibrated_timestamps extension commands + if (!strcmp(name, "GetPhysicalDeviceCalibrateableTimeDomainsEXT")) return (void *)table->GetPhysicalDeviceCalibrateableTimeDomainsEXT; + + // ---- VK_FUCHSIA_imagepipe_surface extension commands +#ifdef VK_USE_PLATFORM_FUCHSIA + if (!strcmp(name, "CreateImagePipeSurfaceFUCHSIA")) return (void *)table->CreateImagePipeSurfaceFUCHSIA; +#endif // VK_USE_PLATFORM_FUCHSIA + + // ---- VK_EXT_metal_surface extension commands +#ifdef VK_USE_PLATFORM_METAL_EXT + if (!strcmp(name, "CreateMetalSurfaceEXT")) return (void *)table->CreateMetalSurfaceEXT; +#endif // VK_USE_PLATFORM_METAL_EXT + + // ---- VK_EXT_tooling_info extension commands + if (!strcmp(name, "GetPhysicalDeviceToolPropertiesEXT")) return (void *)table->GetPhysicalDeviceToolPropertiesEXT; + + // ---- VK_NV_cooperative_matrix extension commands + if (!strcmp(name, "GetPhysicalDeviceCooperativeMatrixPropertiesNV")) return (void *)table->GetPhysicalDeviceCooperativeMatrixPropertiesNV; + + // ---- VK_NV_coverage_reduction_mode extension commands + if (!strcmp(name, "GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV")) return (void *)table->GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp(name, "GetPhysicalDeviceSurfacePresentModes2EXT")) return (void *)table->GetPhysicalDeviceSurfacePresentModes2EXT; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_headless_surface extension commands + if (!strcmp(name, "CreateHeadlessSurfaceEXT")) return (void *)table->CreateHeadlessSurfaceEXT; + + // ---- VK_EXT_directfb_surface extension commands +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + if (!strcmp(name, "CreateDirectFBSurfaceEXT")) return (void *)table->CreateDirectFBSurfaceEXT; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + if (!strcmp(name, "GetPhysicalDeviceDirectFBPresentationSupportEXT")) return (void *)table->GetPhysicalDeviceDirectFBPresentationSupportEXT; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT + + *found_name = false; + return NULL; +} + + +// ---- VK_KHR_device_group extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL GetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDeviceGroupPeerMemoryFeaturesKHR(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDeviceMaskKHR(commandBuffer, deviceMask); +} + +VKAPI_ATTR void VKAPI_CALL CmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDispatchBaseKHR(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ); +} + + +// ---- VK_KHR_maintenance1 extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL TrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + 
disp->TrimCommandPoolKHR(device, commandPool, flags); +} + + +// ---- VK_KHR_external_memory_win32 extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryWin32HandleKHR( + VkDevice device, + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryWin32HandleKHR(device, pGetWin32HandleInfo, pHandle); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryWin32HandlePropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle, + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryWin32HandlePropertiesKHR(device, handleType, handle, pMemoryWin32HandleProperties); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR + +// ---- VK_KHR_external_memory_fd extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryFdKHR(device, pGetFdInfo, pFd); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryFdPropertiesKHR(device, handleType, fd, pMemoryFdProperties); +} + + +// ---- VK_KHR_external_semaphore_win32 extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreWin32HandleKHR( + VkDevice device, + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR( + VkDevice device, + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR + +// ---- VK_KHR_external_semaphore_fd extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetSemaphoreFdKHR(device, pGetFdInfo, pFd); +} + + +// ---- VK_KHR_push_descriptor extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdPushDescriptorSetKHR(commandBuffer, 
pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites); +} + +VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData); +} + + +// ---- VK_KHR_descriptor_update_template extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate); +} + +VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); +} + +VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData); +} + + +// ---- VK_KHR_create_renderpass2 extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass); +} + +VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo); +} + + +// ---- VK_KHR_shared_presentable_image extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetSwapchainStatusKHR(device, swapchain); +} + + +// ---- VK_KHR_external_fence_win32 extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR( + VkDevice 
device, + const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR( + VkDevice device, + const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR + +// ---- VK_KHR_external_fence_fd extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->ImportFenceFdKHR(device, pImportFenceFdInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetFenceFdKHR(device, pGetFdInfo, pFd); +} + + +// ---- VK_KHR_performance_query extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(unwrapped_phys_dev, queueFamilyIndex, pCounterCount, pCounters, pCounterDescriptions); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"); + } + return icd_term->dispatch.EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(phys_dev_term->phys_dev, queueFamilyIndex, pCounterCount, pCounters, pCounterDescriptions); +} + +VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(unwrapped_phys_dev, pPerformanceQueryCreateInfo, pNumPasses); +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, 
+ const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"); + } + icd_term->dispatch.GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(phys_dev_term->phys_dev, pPerformanceQueryCreateInfo, pNumPasses); +} + +VKAPI_ATTR VkResult VKAPI_CALL AcquireProfilingLockKHR( + VkDevice device, + const VkAcquireProfilingLockInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->AcquireProfilingLockKHR(device, pInfo); +} + +VKAPI_ATTR void VKAPI_CALL ReleaseProfilingLockKHR( + VkDevice device) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->ReleaseProfilingLockKHR(device); +} + + +// ---- VK_KHR_get_memory_requirements2 extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements); +} + +VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements); +} + +VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements); +} + + +// ---- VK_KHR_sampler_ycbcr_conversion extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateSamplerYcbcrConversionKHR(device, pCreateInfo, pAllocator, pYcbcrConversion); +} + +VKAPI_ATTR void VKAPI_CALL DestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator); +} + + +// ---- VK_KHR_bind_memory2 extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->BindBufferMemory2KHR(device, bindInfoCount, pBindInfos); +} + +VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + 
const VkBindImageMemoryInfo* pBindInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->BindImageMemory2KHR(device, bindInfoCount, pBindInfos); +} + + +// ---- VK_KHR_maintenance3 extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport); +} + + +// ---- VK_KHR_draw_indirect_count extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + +VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + + +// ---- VK_KHR_timeline_semaphore extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreCounterValueKHR( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetSemaphoreCounterValueKHR(device, semaphore, pValue); +} + +VKAPI_ATTR VkResult VKAPI_CALL WaitSemaphoresKHR( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->WaitSemaphoresKHR(device, pWaitInfo, timeout); +} + +VKAPI_ATTR VkResult VKAPI_CALL SignalSemaphoreKHR( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->SignalSemaphoreKHR(device, pSignalInfo); +} + + +// ---- VK_KHR_fragment_shading_rate extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceFragmentShadingRatesKHR(unwrapped_phys_dev, pFragmentShadingRateCount, pFragmentShadingRates); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceFragmentShadingRatesKHR) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with 
VkPhysicalDevice does not support GetPhysicalDeviceFragmentShadingRatesKHR"); + } + return icd_term->dispatch.GetPhysicalDeviceFragmentShadingRatesKHR(phys_dev_term->phys_dev, pFragmentShadingRateCount, pFragmentShadingRates); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetFragmentShadingRateKHR( + VkCommandBuffer commandBuffer, + const VkExtent2D* pFragmentSize, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetFragmentShadingRateKHR(commandBuffer, pFragmentSize, combinerOps); +} + + +// ---- VK_KHR_buffer_device_address extension trampoline/terminators + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetBufferDeviceAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetBufferDeviceAddressKHR(device, pInfo); +} + +VKAPI_ATTR uint64_t VKAPI_CALL GetBufferOpaqueCaptureAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetBufferOpaqueCaptureAddressKHR(device, pInfo); +} + +VKAPI_ATTR uint64_t VKAPI_CALL GetDeviceMemoryOpaqueCaptureAddressKHR( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeviceMemoryOpaqueCaptureAddressKHR(device, pInfo); +} + + +// ---- VK_KHR_deferred_host_operations extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateDeferredOperationKHR( + VkDevice device, + const VkAllocationCallbacks* pAllocator, + VkDeferredOperationKHR* pDeferredOperation) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateDeferredOperationKHR(device, pAllocator, pDeferredOperation); +} + +VKAPI_ATTR void VKAPI_CALL DestroyDeferredOperationKHR( + VkDevice device, + VkDeferredOperationKHR operation, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyDeferredOperationKHR(device, operation, pAllocator); +} + +VKAPI_ATTR uint32_t VKAPI_CALL GetDeferredOperationMaxConcurrencyKHR( + VkDevice device, + VkDeferredOperationKHR operation) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeferredOperationMaxConcurrencyKHR(device, operation); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetDeferredOperationResultKHR( + VkDevice device, + VkDeferredOperationKHR operation) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeferredOperationResultKHR(device, operation); +} + +VKAPI_ATTR VkResult VKAPI_CALL DeferredOperationJoinKHR( + VkDevice device, + VkDeferredOperationKHR operation) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->DeferredOperationJoinKHR(device, operation); +} + + +// ---- VK_KHR_pipeline_executable_properties extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPipelineExecutablePropertiesKHR( + VkDevice device, + const VkPipelineInfoKHR* pPipelineInfo, + uint32_t* pExecutableCount, + VkPipelineExecutablePropertiesKHR* pProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetPipelineExecutablePropertiesKHR(device, pPipelineInfo, pExecutableCount, pProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetPipelineExecutableStatisticsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + 
uint32_t* pStatisticCount, + VkPipelineExecutableStatisticKHR* pStatistics) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetPipelineExecutableStatisticsKHR(device, pExecutableInfo, pStatisticCount, pStatistics); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetPipelineExecutableInternalRepresentationsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pInternalRepresentationCount, + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetPipelineExecutableInternalRepresentationsKHR(device, pExecutableInfo, pInternalRepresentationCount, pInternalRepresentations); +} + + +// ---- VK_KHR_copy_commands2 extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferInfo2KHR* pCopyBufferInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyBuffer2KHR(commandBuffer, pCopyBufferInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageInfo2KHR* pCopyImageInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyImage2KHR(commandBuffer, pCopyImageInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdBlitImage2KHR( + VkCommandBuffer commandBuffer, + const VkBlitImageInfo2KHR* pBlitImageInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBlitImage2KHR(commandBuffer, pBlitImageInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdResolveImage2KHR( + VkCommandBuffer commandBuffer, + const VkResolveImageInfo2KHR* pResolveImageInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdResolveImage2KHR(commandBuffer, pResolveImageInfo); +} + + +// ---- VK_EXT_debug_marker extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + VkDebugMarkerObjectTagInfoEXT local_tag_info; + memcpy(&local_tag_info, pTagInfo, sizeof(VkDebugMarkerObjectTagInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. 
+ if (pTagInfo->objectType == VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT) { + struct loader_physical_device_tramp *phys_dev_tramp = (struct loader_physical_device_tramp *)(uintptr_t)pTagInfo->object; + local_tag_info.object = (uint64_t)(uintptr_t)phys_dev_tramp->phys_dev; + } + return disp->DebugMarkerSetObjectTagEXT(device, &local_tag_info); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_DebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.DebugMarkerSetObjectTagEXT) { + VkDebugMarkerObjectTagInfoEXT local_tag_info; + memcpy(&local_tag_info, pTagInfo, sizeof(VkDebugMarkerObjectTagInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. + if (pTagInfo->objectType == VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)(uintptr_t)pTagInfo->object; + local_tag_info.object = (uint64_t)(uintptr_t)phys_dev_term->phys_dev; + // If this is a KHR_surface, and the ICD has created its own, we have to replace it with the proper one for the next call. + } else if (pTagInfo->objectType == VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT) { + if (NULL != icd_term && NULL != icd_term->dispatch.CreateSwapchainKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pTagInfo->object; + if (NULL != icd_surface->real_icd_surfaces) { + local_tag_info.object = (uint64_t)icd_surface->real_icd_surfaces[icd_index]; + } + } + } + return icd_term->dispatch.DebugMarkerSetObjectTagEXT(device, &local_tag_info); + } else { + return VK_SUCCESS; + } +} + +VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + VkDebugMarkerObjectNameInfoEXT local_name_info; + memcpy(&local_name_info, pNameInfo, sizeof(VkDebugMarkerObjectNameInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. + if (pNameInfo->objectType == VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT) { + struct loader_physical_device_tramp *phys_dev_tramp = (struct loader_physical_device_tramp *)(uintptr_t)pNameInfo->object; + local_name_info.object = (uint64_t)(uintptr_t)phys_dev_tramp->phys_dev; + } + return disp->DebugMarkerSetObjectNameEXT(device, &local_name_info); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_DebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.DebugMarkerSetObjectNameEXT) { + VkDebugMarkerObjectNameInfoEXT local_name_info; + memcpy(&local_name_info, pNameInfo, sizeof(VkDebugMarkerObjectNameInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. 
+ if (pNameInfo->objectType == VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)(uintptr_t)pNameInfo->object; + local_name_info.object = (uint64_t)(uintptr_t)phys_dev_term->phys_dev; + // If this is a KHR_surface, and the ICD has created its own, we have to replace it with the proper one for the next call. + } else if (pNameInfo->objectType == VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT) { + if (NULL != icd_term && NULL != icd_term->dispatch.CreateSwapchainKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pNameInfo->object; + if (NULL != icd_surface->real_icd_surfaces) { + local_name_info.object = (uint64_t)icd_surface->real_icd_surfaces[icd_index]; + } + } + } + return icd_term->dispatch.DebugMarkerSetObjectNameEXT(device, &local_name_info); + } else { + return VK_SUCCESS; + } +} + +VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDebugMarkerEndEXT(commandBuffer); +} + +VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo); +} + + +// ---- VK_EXT_transform_feedback extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBindTransformFeedbackBuffersEXT(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes); +} + +VKAPI_ATTR void VKAPI_CALL CmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBeginTransformFeedbackEXT(commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets); +} + +VKAPI_ATTR void VKAPI_CALL CmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdEndTransformFeedbackEXT(commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets); +} + +VKAPI_ATTR void VKAPI_CALL CmdBeginQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags, + uint32_t index) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, index); +} + +VKAPI_ATTR void VKAPI_CALL CmdEndQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + uint32_t index) { + const 
VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdEndQueryIndexedEXT(commandBuffer, queryPool, query, index); +} + +VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndirectByteCountEXT(commandBuffer, instanceCount, firstInstance, counterBuffer, counterBufferOffset, counterOffset, vertexStride); +} + + +// ---- VK_NVX_image_view_handle extension trampoline/terminators + +VKAPI_ATTR uint32_t VKAPI_CALL GetImageViewHandleNVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetImageViewHandleNVX(device, pInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetImageViewAddressNVX( + VkDevice device, + VkImageView imageView, + VkImageViewAddressPropertiesNVX* pProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetImageViewAddressNVX(device, imageView, pProperties); +} + + +// ---- VK_AMD_draw_indirect_count extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + +VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + + +// ---- VK_AMD_shader_info extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo); +} + + +// ---- VK_NV_external_memory_win32 extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryWin32HandleNV( + VkDevice device, + VkDeviceMemory memory, + VkExternalMemoryHandleTypeFlagsNV handleType, + HANDLE* pHandle) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryWin32HandleNV(device, memory, handleType, pHandle); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR + +// ---- VK_NN_vi_surface extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_VI_NN +VKAPI_ATTR VkResult VKAPI_CALL CreateViSurfaceNN( + VkInstance instance, + const VkViSurfaceCreateInfoNN* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface) { +#error("Not implemented. 
Likely needs to be manually generated!"); + return disp->CreateViSurfaceNN(instance, pCreateInfo, pAllocator, pSurface); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateViSurfaceNN( + VkInstance instance, + const VkViSurfaceCreateInfoNN* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface) { +#error("Not implemented. Likely needs to be manually generated!"); +} + +#endif // VK_USE_PLATFORM_VI_NN + +// ---- VK_EXT_conditional_rendering extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBeginConditionalRenderingEXT(commandBuffer, pConditionalRenderingBegin); +} + +VKAPI_ATTR void VKAPI_CALL CmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdEndConditionalRenderingEXT(commandBuffer); +} + + +// ---- VK_NV_clip_space_w_scaling extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetViewportWScalingNV(commandBuffer, firstViewport, viewportCount, pViewportWScalings); +} + + +// ---- VK_EXT_display_control extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL DisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->DisplayPowerControlEXT(device, display, pDisplayPowerInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL RegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->RegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence); +} + +VKAPI_ATTR VkResult VKAPI_CALL RegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->RegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetSwapchainCounterEXT(device, swapchain, counter, pCounterValue); +} + + +// ---- VK_GOOGLE_display_timing extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings) { + const 
VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings); +} + + +// ---- VK_EXT_discard_rectangles extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount, pDiscardRectangles); +} + + +// ---- VK_EXT_hdr_metadata extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL SetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->SetHdrMetadataEXT(device, swapchainCount, pSwapchains, pMetadata); +} + + +// ---- VK_EXT_debug_utils extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + VkDebugUtilsObjectNameInfoEXT local_name_info; + memcpy(&local_name_info, pNameInfo, sizeof(VkDebugUtilsObjectNameInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. + if (pNameInfo->objectType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) { + struct loader_physical_device_tramp *phys_dev_tramp = (struct loader_physical_device_tramp *)(uintptr_t)pNameInfo->objectHandle; + local_name_info.objectHandle = (uint64_t)(uintptr_t)phys_dev_tramp->phys_dev; + } + if (disp->SetDebugUtilsObjectNameEXT != NULL) { + return disp->SetDebugUtilsObjectNameEXT(device, &local_name_info); + } else { + return VK_SUCCESS; + } +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_SetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.SetDebugUtilsObjectNameEXT) { + VkDebugUtilsObjectNameInfoEXT local_name_info; + memcpy(&local_name_info, pNameInfo, sizeof(VkDebugUtilsObjectNameInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. + if (pNameInfo->objectType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)(uintptr_t)pNameInfo->objectHandle; + local_name_info.objectHandle = (uint64_t)(uintptr_t)phys_dev_term->phys_dev; + // If this is a KHR_surface, and the ICD has created its own, we have to replace it with the proper one for the next call. 
+ } else if (pNameInfo->objectType == VK_OBJECT_TYPE_SURFACE_KHR) { + if (NULL != icd_term && NULL != icd_term->dispatch.CreateSwapchainKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pNameInfo->objectHandle; + if (NULL != icd_surface->real_icd_surfaces) { + local_name_info.objectHandle = (uint64_t)icd_surface->real_icd_surfaces[icd_index]; + } + } + } + return icd_term->dispatch.SetDebugUtilsObjectNameEXT(device, &local_name_info); + } else { + return VK_SUCCESS; + } +} + +VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + VkDebugUtilsObjectTagInfoEXT local_tag_info; + memcpy(&local_tag_info, pTagInfo, sizeof(VkDebugUtilsObjectTagInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. + if (pTagInfo->objectType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) { + struct loader_physical_device_tramp *phys_dev_tramp = (struct loader_physical_device_tramp *)(uintptr_t)pTagInfo->objectHandle; + local_tag_info.objectHandle = (uint64_t)(uintptr_t)phys_dev_tramp->phys_dev; + } + if (disp->SetDebugUtilsObjectTagEXT != NULL) { + return disp->SetDebugUtilsObjectTagEXT(device, &local_tag_info); + } else { + return VK_SUCCESS; + } +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_SetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.SetDebugUtilsObjectTagEXT) { + VkDebugUtilsObjectTagInfoEXT local_tag_info; + memcpy(&local_tag_info, pTagInfo, sizeof(VkDebugUtilsObjectTagInfoEXT)); + // If this is a physical device, we have to replace it with the proper one for the next call. + if (pTagInfo->objectType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)(uintptr_t)pTagInfo->objectHandle; + local_tag_info.objectHandle = (uint64_t)(uintptr_t)phys_dev_term->phys_dev; + // If this is a KHR_surface, and the ICD has created its own, we have to replace it with the proper one for the next call. 
+ } else if (pTagInfo->objectType == VK_OBJECT_TYPE_SURFACE_KHR) { + if (NULL != icd_term && NULL != icd_term->dispatch.CreateSwapchainKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pTagInfo->objectHandle; + if (NULL != icd_surface->real_icd_surfaces) { + local_tag_info.objectHandle = (uint64_t)icd_surface->real_icd_surfaces[icd_index]; + } + } + } + return icd_term->dispatch.SetDebugUtilsObjectTagEXT(device, &local_tag_info); + } else { + return VK_SUCCESS; + } +} + +VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(queue); + if (disp->QueueBeginDebugUtilsLabelEXT != NULL) { + disp->QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_QueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(queue, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.QueueBeginDebugUtilsLabelEXT) { + icd_term->dispatch.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT( + VkQueue queue) { + const VkLayerDispatchTable *disp = loader_get_dispatch(queue); + if (disp->QueueEndDebugUtilsLabelEXT != NULL) { + disp->QueueEndDebugUtilsLabelEXT(queue); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_QueueEndDebugUtilsLabelEXT( + VkQueue queue) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(queue, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.QueueEndDebugUtilsLabelEXT) { + icd_term->dispatch.QueueEndDebugUtilsLabelEXT(queue); + } +} + +VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(queue); + if (disp->QueueInsertDebugUtilsLabelEXT != NULL) { + disp->QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_QueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(queue, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.QueueInsertDebugUtilsLabelEXT) { + icd_term->dispatch.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + if (disp->CmdBeginDebugUtilsLabelEXT != NULL) { + disp->CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_CmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(commandBuffer, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.CmdBeginDebugUtilsLabelEXT) { + icd_term->dispatch.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + if 
(disp->CmdEndDebugUtilsLabelEXT != NULL) { + disp->CmdEndDebugUtilsLabelEXT(commandBuffer); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_CmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(commandBuffer, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.CmdEndDebugUtilsLabelEXT) { + icd_term->dispatch.CmdEndDebugUtilsLabelEXT(commandBuffer); + } +} + +VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + if (disp->CmdInsertDebugUtilsLabelEXT != NULL) { + disp->CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo); + } +} + +VKAPI_ATTR void VKAPI_CALL terminator_CmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(commandBuffer, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.CmdInsertDebugUtilsLabelEXT) { + icd_term->dispatch.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo); + } +} + + +// ---- VK_ANDROID_external_memory_android_hardware_buffer extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetAndroidHardwareBufferPropertiesANDROID(device, buffer, pProperties); +} + +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryAndroidHardwareBufferANDROID(device, pInfo, pBuffer); +} + +#endif // VK_USE_PLATFORM_ANDROID_KHR + +// ---- VK_EXT_sample_locations extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo); +} + +VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + disp->GetPhysicalDeviceMultisamplePropertiesEXT(unwrapped_phys_dev, samples, pMultisampleProperties); +} + +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceMultisamplePropertiesEXT) 
{ + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceMultisamplePropertiesEXT"); + } + icd_term->dispatch.GetPhysicalDeviceMultisamplePropertiesEXT(phys_dev_term->phys_dev, samples, pMultisampleProperties); +} + + +// ---- VK_EXT_image_drm_format_modifier extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetImageDrmFormatModifierPropertiesEXT( + VkDevice device, + VkImage image, + VkImageDrmFormatModifierPropertiesEXT* pProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetImageDrmFormatModifierPropertiesEXT(device, image, pProperties); +} + + +// ---- VK_EXT_validation_cache extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache); +} + +VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyValidationCacheEXT(device, validationCache, pAllocator); +} + +VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->MergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetValidationCacheDataEXT(device, validationCache, pDataSize, pData); +} + + +// ---- VK_NV_shading_rate_image extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdBindShadingRateImageNV( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBindShadingRateImageNV(commandBuffer, imageView, imageLayout); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetViewportShadingRatePaletteNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkShadingRatePaletteNV* pShadingRatePalettes) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetViewportShadingRatePaletteNV(commandBuffer, firstViewport, viewportCount, pShadingRatePalettes); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetCoarseSampleOrderNV( + VkCommandBuffer commandBuffer, + VkCoarseSampleOrderTypeNV sampleOrderType, + uint32_t customSampleOrderCount, + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetCoarseSampleOrderNV(commandBuffer, sampleOrderType, customSampleOrderCount, pCustomSampleOrders); +} + + +// ---- VK_NV_ray_tracing extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + 
VkAccelerationStructureNV* pAccelerationStructure) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateAccelerationStructureNV(device, pCreateInfo, pAllocator, pAccelerationStructure); +} + +VKAPI_ATTR void VKAPI_CALL DestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyAccelerationStructureNV(device, accelerationStructure, pAllocator); +} + +VKAPI_ATTR void VKAPI_CALL GetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2KHR* pMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetAccelerationStructureMemoryRequirementsNV(device, pInfo, pMemoryRequirements); +} + +VKAPI_ATTR VkResult VKAPI_CALL BindAccelerationStructureMemoryNV( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->BindAccelerationStructureMemoryNV(device, bindInfoCount, pBindInfos); +} + +VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkBuffer scratch, + VkDeviceSize scratchOffset) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBuildAccelerationStructureNV(commandBuffer, pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeKHR mode) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyAccelerationStructureNV(commandBuffer, dst, src, mode); +} + +VKAPI_ATTR void VKAPI_CALL CmdTraceRaysNV( + VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdTraceRaysNV(commandBuffer, raygenShaderBindingTableBuffer, raygenShaderBindingOffset, missShaderBindingTableBuffer, missShaderBindingOffset, missShaderBindingStride, hitShaderBindingTableBuffer, hitShaderBindingOffset, hitShaderBindingStride, callableShaderBindingTableBuffer, callableShaderBindingOffset, callableShaderBindingStride, width, height, depth); +} + +VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return 
disp->CreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetRayTracingShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetRayTracingShaderGroupHandlesNV(device, pipeline, firstGroup, groupCount, dataSize, pData); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + size_t dataSize, + void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetAccelerationStructureHandleNV(device, accelerationStructure, dataSize, pData); +} + +VKAPI_ATTR void VKAPI_CALL CmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdWriteAccelerationStructuresPropertiesNV(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery); +} + +VKAPI_ATTR VkResult VKAPI_CALL CompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CompileDeferredNV(device, pipeline, shader); +} + + +// ---- VK_EXT_external_memory_host extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties); +} + + +// ---- VK_AMD_buffer_marker extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker); +} + + +// ---- VK_EXT_calibrated_timestamps extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainEXT* pTimeDomains) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceCalibrateableTimeDomainsEXT(unwrapped_phys_dev, pTimeDomainCount, pTimeDomains); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice 
physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainEXT* pTimeDomains) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceCalibrateableTimeDomainsEXT) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceCalibrateableTimeDomainsEXT"); + } + return icd_term->dispatch.GetPhysicalDeviceCalibrateableTimeDomainsEXT(phys_dev_term->phys_dev, pTimeDomainCount, pTimeDomains); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetCalibratedTimestampsEXT( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoEXT* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetCalibratedTimestampsEXT(device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation); +} + + +// ---- VK_NV_mesh_shader extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksNV( + VkCommandBuffer commandBuffer, + uint32_t taskCount, + uint32_t firstTask) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawMeshTasksNV(commandBuffer, taskCount, firstTask); +} + +VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawMeshTasksIndirectNV(commandBuffer, buffer, offset, drawCount, stride); +} + +VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectCountNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdDrawMeshTasksIndirectCountNV(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride); +} + + +// ---- VK_NV_scissor_exclusive extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetExclusiveScissorNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkRect2D* pExclusiveScissors) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetExclusiveScissorNV(commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissors); +} + + +// ---- VK_NV_device_diagnostic_checkpoints extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetCheckpointNV(commandBuffer, pCheckpointMarker); +} + +VKAPI_ATTR void VKAPI_CALL GetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(queue); + disp->GetQueueCheckpointDataNV(queue, pCheckpointDataCount, pCheckpointData); +} + + +// ---- VK_INTEL_performance_query extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL InitializePerformanceApiINTEL( + VkDevice device, + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return 
disp->InitializePerformanceApiINTEL(device, pInitializeInfo); +} + +VKAPI_ATTR void VKAPI_CALL UninitializePerformanceApiINTEL( + VkDevice device) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->UninitializePerformanceApiINTEL(device); +} + +VKAPI_ATTR VkResult VKAPI_CALL CmdSetPerformanceMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceMarkerInfoINTEL* pMarkerInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + return disp->CmdSetPerformanceMarkerINTEL(commandBuffer, pMarkerInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL CmdSetPerformanceStreamMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + return disp->CmdSetPerformanceStreamMarkerINTEL(commandBuffer, pMarkerInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL CmdSetPerformanceOverrideINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceOverrideInfoINTEL* pOverrideInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + return disp->CmdSetPerformanceOverrideINTEL(commandBuffer, pOverrideInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL AcquirePerformanceConfigurationINTEL( + VkDevice device, + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, + VkPerformanceConfigurationINTEL* pConfiguration) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->AcquirePerformanceConfigurationINTEL(device, pAcquireInfo, pConfiguration); +} + +VKAPI_ATTR VkResult VKAPI_CALL ReleasePerformanceConfigurationINTEL( + VkDevice device, + VkPerformanceConfigurationINTEL configuration) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->ReleasePerformanceConfigurationINTEL(device, configuration); +} + +VKAPI_ATTR VkResult VKAPI_CALL QueueSetPerformanceConfigurationINTEL( + VkQueue queue, + VkPerformanceConfigurationINTEL configuration) { + const VkLayerDispatchTable *disp = loader_get_dispatch(queue); + return disp->QueueSetPerformanceConfigurationINTEL(queue, configuration); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetPerformanceParameterINTEL( + VkDevice device, + VkPerformanceParameterTypeINTEL parameter, + VkPerformanceValueINTEL* pValue) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetPerformanceParameterINTEL(device, parameter, pValue); +} + + +// ---- VK_AMD_display_native_hdr extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL SetLocalDimmingAMD( + VkDevice device, + VkSwapchainKHR swapChain, + VkBool32 localDimmingEnable) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->SetLocalDimmingAMD(device, swapChain, localDimmingEnable); +} + + +// ---- VK_EXT_buffer_device_address extension trampoline/terminators + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetBufferDeviceAddressEXT( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetBufferDeviceAddressEXT(device, pInfo); +} + + +// ---- VK_NV_cooperative_matrix extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceCooperativeMatrixPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesNV* pProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = 
loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceCooperativeMatrixPropertiesNV(unwrapped_phys_dev, pPropertyCount, pProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceCooperativeMatrixPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesNV* pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceCooperativeMatrixPropertiesNV) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceCooperativeMatrixPropertiesNV"); + } + return icd_term->dispatch.GetPhysicalDeviceCooperativeMatrixPropertiesNV(phys_dev_term->phys_dev, pPropertyCount, pProperties); +} + + +// ---- VK_NV_coverage_reduction_mode extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + VkPhysicalDevice physicalDevice, + uint32_t* pCombinationCount, + VkFramebufferMixedSamplesCombinationNV* pCombinations) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(unwrapped_phys_dev, pCombinationCount, pCombinations); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + VkPhysicalDevice physicalDevice, + uint32_t* pCombinationCount, + VkFramebufferMixedSamplesCombinationNV* pCombinations) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"); + } + return icd_term->dispatch.GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(phys_dev_term->phys_dev, pCombinationCount, pCombinations); +} + + +// ---- VK_EXT_full_screen_exclusive extension trampoline/terminators + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL AcquireFullScreenExclusiveModeEXT( + VkDevice device, + VkSwapchainKHR swapchain) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->AcquireFullScreenExclusiveModeEXT(device, swapchain); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL ReleaseFullScreenExclusiveModeEXT( + VkDevice device, + VkSwapchainKHR swapchain) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->ReleaseFullScreenExclusiveModeEXT(device, swapchain); +} + +#endif // VK_USE_PLATFORM_WIN32_KHR + +// ---- VK_EXT_line_rasterization extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetLineStippleEXT( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetLineStippleEXT(commandBuffer, lineStippleFactor, lineStipplePattern); +} + + 
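(Aside, not part of the vendored file: a minimal sketch of how an application reaches a trampoline such as CmdSetLineStippleEXT above. Resolving a device-level extension command through vkGetInstanceProcAddr returns the loader trampoline, which looks up the device dispatch table via loader_get_dispatch and forwards the call. The instance and command buffer handles below are assumed to already exist, and VK_EXT_line_rasterization is assumed to have been enabled at device creation.)

    #include <vulkan/vulkan.h>

    static void record_stippled_lines(VkInstance instance, VkCommandBuffer cmd) {
        /* Resolving a device-level extension command via vkGetInstanceProcAddr
           yields the loader trampoline generated in this file. */
        PFN_vkCmdSetLineStippleEXT pfn_cmd_set_line_stipple =
            (PFN_vkCmdSetLineStippleEXT)vkGetInstanceProcAddr(instance, "vkCmdSetLineStippleEXT");
        if (pfn_cmd_set_line_stipple != NULL) {
            /* factor = 4, pattern = 0xAAAA: each stipple bit is repeated four times. */
            pfn_cmd_set_line_stipple(cmd, 4, 0xAAAA);
        }
    }
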
+// ---- VK_EXT_host_query_reset extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL ResetQueryPoolEXT( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->ResetQueryPoolEXT(device, queryPool, firstQuery, queryCount); +} + + +// ---- VK_EXT_extended_dynamic_state extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetCullModeEXT( + VkCommandBuffer commandBuffer, + VkCullModeFlags cullMode) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetCullModeEXT(commandBuffer, cullMode); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetFrontFaceEXT( + VkCommandBuffer commandBuffer, + VkFrontFace frontFace) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetFrontFaceEXT(commandBuffer, frontFace); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetPrimitiveTopologyEXT( + VkCommandBuffer commandBuffer, + VkPrimitiveTopology primitiveTopology) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetPrimitiveTopologyEXT(commandBuffer, primitiveTopology); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetViewportWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t viewportCount, + const VkViewport* pViewports) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetViewportWithCountEXT(commandBuffer, viewportCount, pViewports); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetScissorWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t scissorCount, + const VkRect2D* pScissors) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetScissorWithCountEXT(commandBuffer, scissorCount, pScissors); +} + +VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers2EXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes, + const VkDeviceSize* pStrides) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBindVertexBuffers2EXT(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes, pStrides); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetDepthTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthTestEnable) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDepthTestEnableEXT(commandBuffer, depthTestEnable); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetDepthWriteEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthWriteEnable) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDepthWriteEnableEXT(commandBuffer, depthWriteEnable); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetDepthCompareOpEXT( + VkCommandBuffer commandBuffer, + VkCompareOp depthCompareOp) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDepthCompareOpEXT(commandBuffer, depthCompareOp); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetDepthBoundsTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBoundsTestEnable) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetDepthBoundsTestEnableEXT(commandBuffer, depthBoundsTestEnable); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetStencilTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 stencilTestEnable) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetStencilTestEnableEXT(commandBuffer, 
stencilTestEnable); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetStencilOpEXT( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + VkStencilOp failOp, + VkStencilOp passOp, + VkStencilOp depthFailOp, + VkCompareOp compareOp) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetStencilOpEXT(commandBuffer, faceMask, failOp, passOp, depthFailOp, compareOp); +} + + +// ---- VK_NV_device_generated_commands extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL GetGeneratedCommandsMemoryRequirementsNV( + VkDevice device, + const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2* pMemoryRequirements) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetGeneratedCommandsMemoryRequirementsNV(device, pInfo, pMemoryRequirements); +} + +VKAPI_ATTR void VKAPI_CALL CmdPreprocessGeneratedCommandsNV( + VkCommandBuffer commandBuffer, + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdPreprocessGeneratedCommandsNV(commandBuffer, pGeneratedCommandsInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdExecuteGeneratedCommandsNV( + VkCommandBuffer commandBuffer, + VkBool32 isPreprocessed, + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdExecuteGeneratedCommandsNV(commandBuffer, isPreprocessed, pGeneratedCommandsInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdBindPipelineShaderGroupNV( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline, + uint32_t groupIndex) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBindPipelineShaderGroupNV(commandBuffer, pipelineBindPoint, pipeline, groupIndex); +} + +VKAPI_ATTR VkResult VKAPI_CALL CreateIndirectCommandsLayoutNV( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutNV* pIndirectCommandsLayout) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateIndirectCommandsLayoutNV(device, pCreateInfo, pAllocator, pIndirectCommandsLayout); +} + +VKAPI_ATTR void VKAPI_CALL DestroyIndirectCommandsLayoutNV( + VkDevice device, + VkIndirectCommandsLayoutNV indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyIndirectCommandsLayoutNV(device, indirectCommandsLayout, pAllocator); +} + + +// ---- VK_EXT_private_data extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreatePrivateDataSlotEXT( + VkDevice device, + const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPrivateDataSlotEXT* pPrivateDataSlot) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreatePrivateDataSlotEXT(device, pCreateInfo, pAllocator, pPrivateDataSlot); +} + +VKAPI_ATTR void VKAPI_CALL DestroyPrivateDataSlotEXT( + VkDevice device, + VkPrivateDataSlotEXT privateDataSlot, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyPrivateDataSlotEXT(device, privateDataSlot, pAllocator); +} + +VKAPI_ATTR VkResult VKAPI_CALL SetPrivateDataEXT( + VkDevice device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t data) { + const 
VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->SetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, data); +} + +VKAPI_ATTR void VKAPI_CALL GetPrivateDataEXT( + VkDevice device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, pData); +} + + +// ---- VK_NV_fragment_shading_rate_enums extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdSetFragmentShadingRateEnumNV( + VkCommandBuffer commandBuffer, + VkFragmentShadingRateNV shadingRate, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetFragmentShadingRateEnumNV(commandBuffer, shadingRate, combinerOps); +} + + +// ---- VK_KHR_acceleration_structure extension trampoline/terminators + +VKAPI_ATTR VkResult VKAPI_CALL CreateAccelerationStructureKHR( + VkDevice device, + const VkAccelerationStructureCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureKHR* pAccelerationStructure) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateAccelerationStructureKHR(device, pCreateInfo, pAllocator, pAccelerationStructure); +} + +VKAPI_ATTR void VKAPI_CALL DestroyAccelerationStructureKHR( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->DestroyAccelerationStructureKHR(device, accelerationStructure, pAllocator); +} + +VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructuresKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBuildAccelerationStructuresKHR(commandBuffer, infoCount, pInfos, ppBuildRangeInfos); +} + +VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructuresIndirectKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkDeviceAddress* pIndirectDeviceAddresses, + const uint32_t* pIndirectStrides, + const uint32_t* const* ppMaxPrimitiveCounts) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdBuildAccelerationStructuresIndirectKHR(commandBuffer, infoCount, pInfos, pIndirectDeviceAddresses, pIndirectStrides, ppMaxPrimitiveCounts); +} + +VKAPI_ATTR VkResult VKAPI_CALL BuildAccelerationStructuresKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->BuildAccelerationStructuresKHR(device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos); +} + +VKAPI_ATTR VkResult VKAPI_CALL CopyAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CopyAccelerationStructureKHR(device, deferredOperation, pInfo); +} + +VKAPI_ATTR VkResult 
VKAPI_CALL CopyAccelerationStructureToMemoryKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CopyAccelerationStructureToMemoryKHR(device, deferredOperation, pInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL CopyMemoryToAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CopyMemoryToAccelerationStructureKHR(device, deferredOperation, pInfo); +} + +VKAPI_ATTR VkResult VKAPI_CALL WriteAccelerationStructuresPropertiesKHR( + VkDevice device, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->WriteAccelerationStructuresPropertiesKHR(device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyAccelerationStructureKHR(commandBuffer, pInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureToMemoryKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyAccelerationStructureToMemoryKHR(commandBuffer, pInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdCopyMemoryToAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdCopyMemoryToAccelerationStructureKHR(commandBuffer, pInfo); +} + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetAccelerationStructureDeviceAddressKHR(device, pInfo); +} + +VKAPI_ATTR void VKAPI_CALL CmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdWriteAccelerationStructuresPropertiesKHR(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery); +} + +VKAPI_ATTR void VKAPI_CALL GetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionInfoKHR* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetDeviceAccelerationStructureCompatibilityKHR(device, pVersionInfo, pCompatibility); +} + +VKAPI_ATTR void VKAPI_CALL GetAccelerationStructureBuildSizesKHR( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, + const uint32_t* pMaxPrimitiveCounts, + 
VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + disp->GetAccelerationStructureBuildSizesKHR(device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo); +} + + +// ---- VK_KHR_ray_tracing_pipeline extension trampoline/terminators + +VKAPI_ATTR void VKAPI_CALL CmdTraceRaysKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdTraceRaysKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth); +} + +VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->CreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); +} + +VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingCaptureReplayShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetRayTracingCaptureReplayShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData); +} + +VKAPI_ATTR void VKAPI_CALL CmdTraceRaysIndirectKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + VkDeviceAddress indirectDeviceAddress) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdTraceRaysIndirectKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, indirectDeviceAddress); +} + +VKAPI_ATTR VkDeviceSize VKAPI_CALL GetRayTracingShaderGroupStackSizeKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t group, + VkShaderGroupShaderKHR groupShader) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetRayTracingShaderGroupStackSizeKHR(device, pipeline, group, groupShader); +} + +VKAPI_ATTR void VKAPI_CALL CmdSetRayTracingPipelineStackSizeKHR( + VkCommandBuffer commandBuffer, + uint32_t pipelineStackSize) { + const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer); + disp->CmdSetRayTracingPipelineStackSizeKHR(commandBuffer, pipelineStackSize); +} + +// GPA helpers for extensions +bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *name, void **addr) { + *addr = NULL; + + + // ---- VK_KHR_get_physical_device_properties2 extension commands + if (!strcmp("vkGetPhysicalDeviceFeatures2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? 
(void *)vkGetPhysicalDeviceFeatures2 + : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceProperties2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? (void *)vkGetPhysicalDeviceProperties2 + : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceFormatProperties2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? (void *)vkGetPhysicalDeviceFormatProperties2 + : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceImageFormatProperties2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? (void *)vkGetPhysicalDeviceImageFormatProperties2 + : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceQueueFamilyProperties2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? (void *)vkGetPhysicalDeviceQueueFamilyProperties2 + : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceMemoryProperties2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? (void *)vkGetPhysicalDeviceMemoryProperties2 + : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceSparseImageFormatProperties2KHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 == 1) + ? (void *)vkGetPhysicalDeviceSparseImageFormatProperties2 + : NULL; + return true; + } + + // ---- VK_KHR_device_group extension commands + if (!strcmp("vkGetDeviceGroupPeerMemoryFeaturesKHR", name)) { + *addr = (void *)GetDeviceGroupPeerMemoryFeaturesKHR; + return true; + } + if (!strcmp("vkCmdSetDeviceMaskKHR", name)) { + *addr = (void *)CmdSetDeviceMaskKHR; + return true; + } + if (!strcmp("vkCmdDispatchBaseKHR", name)) { + *addr = (void *)CmdDispatchBaseKHR; + return true; + } + + // ---- VK_KHR_maintenance1 extension commands + if (!strcmp("vkTrimCommandPoolKHR", name)) { + *addr = (void *)TrimCommandPoolKHR; + return true; + } + + // ---- VK_KHR_device_group_creation extension commands + if (!strcmp("vkEnumeratePhysicalDeviceGroupsKHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_device_group_creation == 1) + ? (void *)vkEnumeratePhysicalDeviceGroups + : NULL; + return true; + } + + // ---- VK_KHR_external_memory_capabilities extension commands + if (!strcmp("vkGetPhysicalDeviceExternalBufferPropertiesKHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_external_memory_capabilities == 1) + ? 
(void *)vkGetPhysicalDeviceExternalBufferProperties + : NULL; + return true; + } + + // ---- VK_KHR_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetMemoryWin32HandleKHR", name)) { + *addr = (void *)GetMemoryWin32HandleKHR; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetMemoryWin32HandlePropertiesKHR", name)) { + *addr = (void *)GetMemoryWin32HandlePropertiesKHR; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_memory_fd extension commands + if (!strcmp("vkGetMemoryFdKHR", name)) { + *addr = (void *)GetMemoryFdKHR; + return true; + } + if (!strcmp("vkGetMemoryFdPropertiesKHR", name)) { + *addr = (void *)GetMemoryFdPropertiesKHR; + return true; + } + + // ---- VK_KHR_external_semaphore_capabilities extension commands + if (!strcmp("vkGetPhysicalDeviceExternalSemaphorePropertiesKHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_external_semaphore_capabilities == 1) + ? (void *)vkGetPhysicalDeviceExternalSemaphoreProperties + : NULL; + return true; + } + + // ---- VK_KHR_external_semaphore_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkImportSemaphoreWin32HandleKHR", name)) { + *addr = (void *)ImportSemaphoreWin32HandleKHR; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetSemaphoreWin32HandleKHR", name)) { + *addr = (void *)GetSemaphoreWin32HandleKHR; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_semaphore_fd extension commands + if (!strcmp("vkImportSemaphoreFdKHR", name)) { + *addr = (void *)ImportSemaphoreFdKHR; + return true; + } + if (!strcmp("vkGetSemaphoreFdKHR", name)) { + *addr = (void *)GetSemaphoreFdKHR; + return true; + } + + // ---- VK_KHR_push_descriptor extension commands + if (!strcmp("vkCmdPushDescriptorSetKHR", name)) { + *addr = (void *)CmdPushDescriptorSetKHR; + return true; + } + if (!strcmp("vkCmdPushDescriptorSetWithTemplateKHR", name)) { + *addr = (void *)CmdPushDescriptorSetWithTemplateKHR; + return true; + } + + // ---- VK_KHR_descriptor_update_template extension commands + if (!strcmp("vkCreateDescriptorUpdateTemplateKHR", name)) { + *addr = (void *)CreateDescriptorUpdateTemplateKHR; + return true; + } + if (!strcmp("vkDestroyDescriptorUpdateTemplateKHR", name)) { + *addr = (void *)DestroyDescriptorUpdateTemplateKHR; + return true; + } + if (!strcmp("vkUpdateDescriptorSetWithTemplateKHR", name)) { + *addr = (void *)UpdateDescriptorSetWithTemplateKHR; + return true; + } + + // ---- VK_KHR_create_renderpass2 extension commands + if (!strcmp("vkCreateRenderPass2KHR", name)) { + *addr = (void *)CreateRenderPass2KHR; + return true; + } + if (!strcmp("vkCmdBeginRenderPass2KHR", name)) { + *addr = (void *)CmdBeginRenderPass2KHR; + return true; + } + if (!strcmp("vkCmdNextSubpass2KHR", name)) { + *addr = (void *)CmdNextSubpass2KHR; + return true; + } + if (!strcmp("vkCmdEndRenderPass2KHR", name)) { + *addr = (void *)CmdEndRenderPass2KHR; + return true; + } + + // ---- VK_KHR_shared_presentable_image extension commands + if (!strcmp("vkGetSwapchainStatusKHR", name)) { + *addr = (void *)GetSwapchainStatusKHR; + return true; + } + + // ---- VK_KHR_external_fence_capabilities extension commands + if (!strcmp("vkGetPhysicalDeviceExternalFencePropertiesKHR", name)) { + *addr = (ptr_instance->enabled_known_extensions.khr_external_fence_capabilities == 1) + ? 
(void *)vkGetPhysicalDeviceExternalFenceProperties + : NULL; + return true; + } + + // ---- VK_KHR_external_fence_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkImportFenceWin32HandleKHR", name)) { + *addr = (void *)ImportFenceWin32HandleKHR; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetFenceWin32HandleKHR", name)) { + *addr = (void *)GetFenceWin32HandleKHR; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_external_fence_fd extension commands + if (!strcmp("vkImportFenceFdKHR", name)) { + *addr = (void *)ImportFenceFdKHR; + return true; + } + if (!strcmp("vkGetFenceFdKHR", name)) { + *addr = (void *)GetFenceFdKHR; + return true; + } + + // ---- VK_KHR_performance_query extension commands + if (!strcmp("vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR", name)) { + *addr = (void *)EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; + return true; + } + if (!strcmp("vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR", name)) { + *addr = (void *)GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR; + return true; + } + if (!strcmp("vkAcquireProfilingLockKHR", name)) { + *addr = (void *)AcquireProfilingLockKHR; + return true; + } + if (!strcmp("vkReleaseProfilingLockKHR", name)) { + *addr = (void *)ReleaseProfilingLockKHR; + return true; + } + + // ---- VK_KHR_get_memory_requirements2 extension commands + if (!strcmp("vkGetImageMemoryRequirements2KHR", name)) { + *addr = (void *)GetImageMemoryRequirements2KHR; + return true; + } + if (!strcmp("vkGetBufferMemoryRequirements2KHR", name)) { + *addr = (void *)GetBufferMemoryRequirements2KHR; + return true; + } + if (!strcmp("vkGetImageSparseMemoryRequirements2KHR", name)) { + *addr = (void *)GetImageSparseMemoryRequirements2KHR; + return true; + } + + // ---- VK_KHR_sampler_ycbcr_conversion extension commands + if (!strcmp("vkCreateSamplerYcbcrConversionKHR", name)) { + *addr = (void *)CreateSamplerYcbcrConversionKHR; + return true; + } + if (!strcmp("vkDestroySamplerYcbcrConversionKHR", name)) { + *addr = (void *)DestroySamplerYcbcrConversionKHR; + return true; + } + + // ---- VK_KHR_bind_memory2 extension commands + if (!strcmp("vkBindBufferMemory2KHR", name)) { + *addr = (void *)BindBufferMemory2KHR; + return true; + } + if (!strcmp("vkBindImageMemory2KHR", name)) { + *addr = (void *)BindImageMemory2KHR; + return true; + } + + // ---- VK_KHR_maintenance3 extension commands + if (!strcmp("vkGetDescriptorSetLayoutSupportKHR", name)) { + *addr = (void *)GetDescriptorSetLayoutSupportKHR; + return true; + } + + // ---- VK_KHR_draw_indirect_count extension commands + if (!strcmp("vkCmdDrawIndirectCountKHR", name)) { + *addr = (void *)CmdDrawIndirectCountKHR; + return true; + } + if (!strcmp("vkCmdDrawIndexedIndirectCountKHR", name)) { + *addr = (void *)CmdDrawIndexedIndirectCountKHR; + return true; + } + + // ---- VK_KHR_timeline_semaphore extension commands + if (!strcmp("vkGetSemaphoreCounterValueKHR", name)) { + *addr = (void *)GetSemaphoreCounterValueKHR; + return true; + } + if (!strcmp("vkWaitSemaphoresKHR", name)) { + *addr = (void *)WaitSemaphoresKHR; + return true; + } + if (!strcmp("vkSignalSemaphoreKHR", name)) { + *addr = (void *)SignalSemaphoreKHR; + return true; + } + + // ---- VK_KHR_fragment_shading_rate extension commands + if (!strcmp("vkGetPhysicalDeviceFragmentShadingRatesKHR", name)) { + *addr = (void *)GetPhysicalDeviceFragmentShadingRatesKHR; + return true; + } + if 
(!strcmp("vkCmdSetFragmentShadingRateKHR", name)) { + *addr = (void *)CmdSetFragmentShadingRateKHR; + return true; + } + + // ---- VK_KHR_buffer_device_address extension commands + if (!strcmp("vkGetBufferDeviceAddressKHR", name)) { + *addr = (void *)GetBufferDeviceAddressKHR; + return true; + } + if (!strcmp("vkGetBufferOpaqueCaptureAddressKHR", name)) { + *addr = (void *)GetBufferOpaqueCaptureAddressKHR; + return true; + } + if (!strcmp("vkGetDeviceMemoryOpaqueCaptureAddressKHR", name)) { + *addr = (void *)GetDeviceMemoryOpaqueCaptureAddressKHR; + return true; + } + + // ---- VK_KHR_deferred_host_operations extension commands + if (!strcmp("vkCreateDeferredOperationKHR", name)) { + *addr = (void *)CreateDeferredOperationKHR; + return true; + } + if (!strcmp("vkDestroyDeferredOperationKHR", name)) { + *addr = (void *)DestroyDeferredOperationKHR; + return true; + } + if (!strcmp("vkGetDeferredOperationMaxConcurrencyKHR", name)) { + *addr = (void *)GetDeferredOperationMaxConcurrencyKHR; + return true; + } + if (!strcmp("vkGetDeferredOperationResultKHR", name)) { + *addr = (void *)GetDeferredOperationResultKHR; + return true; + } + if (!strcmp("vkDeferredOperationJoinKHR", name)) { + *addr = (void *)DeferredOperationJoinKHR; + return true; + } + + // ---- VK_KHR_pipeline_executable_properties extension commands + if (!strcmp("vkGetPipelineExecutablePropertiesKHR", name)) { + *addr = (void *)GetPipelineExecutablePropertiesKHR; + return true; + } + if (!strcmp("vkGetPipelineExecutableStatisticsKHR", name)) { + *addr = (void *)GetPipelineExecutableStatisticsKHR; + return true; + } + if (!strcmp("vkGetPipelineExecutableInternalRepresentationsKHR", name)) { + *addr = (void *)GetPipelineExecutableInternalRepresentationsKHR; + return true; + } + + // ---- VK_KHR_copy_commands2 extension commands + if (!strcmp("vkCmdCopyBuffer2KHR", name)) { + *addr = (void *)CmdCopyBuffer2KHR; + return true; + } + if (!strcmp("vkCmdCopyImage2KHR", name)) { + *addr = (void *)CmdCopyImage2KHR; + return true; + } + if (!strcmp("vkCmdCopyBufferToImage2KHR", name)) { + *addr = (void *)CmdCopyBufferToImage2KHR; + return true; + } + if (!strcmp("vkCmdCopyImageToBuffer2KHR", name)) { + *addr = (void *)CmdCopyImageToBuffer2KHR; + return true; + } + if (!strcmp("vkCmdBlitImage2KHR", name)) { + *addr = (void *)CmdBlitImage2KHR; + return true; + } + if (!strcmp("vkCmdResolveImage2KHR", name)) { + *addr = (void *)CmdResolveImage2KHR; + return true; + } + + // ---- VK_EXT_debug_marker extension commands + if (!strcmp("vkDebugMarkerSetObjectTagEXT", name)) { + *addr = (void *)DebugMarkerSetObjectTagEXT; + return true; + } + if (!strcmp("vkDebugMarkerSetObjectNameEXT", name)) { + *addr = (void *)DebugMarkerSetObjectNameEXT; + return true; + } + if (!strcmp("vkCmdDebugMarkerBeginEXT", name)) { + *addr = (void *)CmdDebugMarkerBeginEXT; + return true; + } + if (!strcmp("vkCmdDebugMarkerEndEXT", name)) { + *addr = (void *)CmdDebugMarkerEndEXT; + return true; + } + if (!strcmp("vkCmdDebugMarkerInsertEXT", name)) { + *addr = (void *)CmdDebugMarkerInsertEXT; + return true; + } + + // ---- VK_EXT_transform_feedback extension commands + if (!strcmp("vkCmdBindTransformFeedbackBuffersEXT", name)) { + *addr = (void *)CmdBindTransformFeedbackBuffersEXT; + return true; + } + if (!strcmp("vkCmdBeginTransformFeedbackEXT", name)) { + *addr = (void *)CmdBeginTransformFeedbackEXT; + return true; + } + if (!strcmp("vkCmdEndTransformFeedbackEXT", name)) { + *addr = (void *)CmdEndTransformFeedbackEXT; + return true; + } + if 
(!strcmp("vkCmdBeginQueryIndexedEXT", name)) { + *addr = (void *)CmdBeginQueryIndexedEXT; + return true; + } + if (!strcmp("vkCmdEndQueryIndexedEXT", name)) { + *addr = (void *)CmdEndQueryIndexedEXT; + return true; + } + if (!strcmp("vkCmdDrawIndirectByteCountEXT", name)) { + *addr = (void *)CmdDrawIndirectByteCountEXT; + return true; + } + + // ---- VK_NVX_image_view_handle extension commands + if (!strcmp("vkGetImageViewHandleNVX", name)) { + *addr = (void *)GetImageViewHandleNVX; + return true; + } + if (!strcmp("vkGetImageViewAddressNVX", name)) { + *addr = (void *)GetImageViewAddressNVX; + return true; + } + + // ---- VK_AMD_draw_indirect_count extension commands + if (!strcmp("vkCmdDrawIndirectCountAMD", name)) { + *addr = (void *)CmdDrawIndirectCountAMD; + return true; + } + if (!strcmp("vkCmdDrawIndexedIndirectCountAMD", name)) { + *addr = (void *)CmdDrawIndexedIndirectCountAMD; + return true; + } + + // ---- VK_AMD_shader_info extension commands + if (!strcmp("vkGetShaderInfoAMD", name)) { + *addr = (void *)GetShaderInfoAMD; + return true; + } + + // ---- VK_NV_external_memory_capabilities extension commands + if (!strcmp("vkGetPhysicalDeviceExternalImageFormatPropertiesNV", name)) { + *addr = (ptr_instance->enabled_known_extensions.nv_external_memory_capabilities == 1) + ? (void *)GetPhysicalDeviceExternalImageFormatPropertiesNV + : NULL; + return true; + } + + // ---- VK_NV_external_memory_win32 extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetMemoryWin32HandleNV", name)) { + *addr = (void *)GetMemoryWin32HandleNV; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + if (!strcmp("vkCreateViSurfaceNN", name)) { + *addr = (ptr_instance->enabled_known_extensions.nn_vi_surface == 1) + ? (void *)CreateViSurfaceNN + : NULL; + return true; + } +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_conditional_rendering extension commands + if (!strcmp("vkCmdBeginConditionalRenderingEXT", name)) { + *addr = (void *)CmdBeginConditionalRenderingEXT; + return true; + } + if (!strcmp("vkCmdEndConditionalRenderingEXT", name)) { + *addr = (void *)CmdEndConditionalRenderingEXT; + return true; + } + + // ---- VK_NV_clip_space_w_scaling extension commands + if (!strcmp("vkCmdSetViewportWScalingNV", name)) { + *addr = (void *)CmdSetViewportWScalingNV; + return true; + } + + // ---- VK_EXT_direct_mode_display extension commands + if (!strcmp("vkReleaseDisplayEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_direct_mode_display == 1) + ? (void *)ReleaseDisplayEXT + : NULL; + return true; + } + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + if (!strcmp("vkAcquireXlibDisplayEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_acquire_xlib_display == 1) + ? (void *)AcquireXlibDisplayEXT + : NULL; + return true; + } +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + if (!strcmp("vkGetRandROutputDisplayEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_acquire_xlib_display == 1) + ? (void *)GetRandROutputDisplayEXT + : NULL; + return true; + } +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + if (!strcmp("vkGetPhysicalDeviceSurfaceCapabilities2EXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_display_surface_counter == 1) + ? 
(void *)GetPhysicalDeviceSurfaceCapabilities2EXT + : NULL; + return true; + } + + // ---- VK_EXT_display_control extension commands + if (!strcmp("vkDisplayPowerControlEXT", name)) { + *addr = (void *)DisplayPowerControlEXT; + return true; + } + if (!strcmp("vkRegisterDeviceEventEXT", name)) { + *addr = (void *)RegisterDeviceEventEXT; + return true; + } + if (!strcmp("vkRegisterDisplayEventEXT", name)) { + *addr = (void *)RegisterDisplayEventEXT; + return true; + } + if (!strcmp("vkGetSwapchainCounterEXT", name)) { + *addr = (void *)GetSwapchainCounterEXT; + return true; + } + + // ---- VK_GOOGLE_display_timing extension commands + if (!strcmp("vkGetRefreshCycleDurationGOOGLE", name)) { + *addr = (void *)GetRefreshCycleDurationGOOGLE; + return true; + } + if (!strcmp("vkGetPastPresentationTimingGOOGLE", name)) { + *addr = (void *)GetPastPresentationTimingGOOGLE; + return true; + } + + // ---- VK_EXT_discard_rectangles extension commands + if (!strcmp("vkCmdSetDiscardRectangleEXT", name)) { + *addr = (void *)CmdSetDiscardRectangleEXT; + return true; + } + + // ---- VK_EXT_hdr_metadata extension commands + if (!strcmp("vkSetHdrMetadataEXT", name)) { + *addr = (void *)SetHdrMetadataEXT; + return true; + } + + // ---- VK_EXT_debug_utils extension commands + if (!strcmp("vkSetDebugUtilsObjectNameEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)SetDebugUtilsObjectNameEXT + : NULL; + return true; + } + if (!strcmp("vkSetDebugUtilsObjectTagEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)SetDebugUtilsObjectTagEXT + : NULL; + return true; + } + if (!strcmp("vkQueueBeginDebugUtilsLabelEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)QueueBeginDebugUtilsLabelEXT + : NULL; + return true; + } + if (!strcmp("vkQueueEndDebugUtilsLabelEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)QueueEndDebugUtilsLabelEXT + : NULL; + return true; + } + if (!strcmp("vkQueueInsertDebugUtilsLabelEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)QueueInsertDebugUtilsLabelEXT + : NULL; + return true; + } + if (!strcmp("vkCmdBeginDebugUtilsLabelEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)CmdBeginDebugUtilsLabelEXT + : NULL; + return true; + } + if (!strcmp("vkCmdEndDebugUtilsLabelEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? (void *)CmdEndDebugUtilsLabelEXT + : NULL; + return true; + } + if (!strcmp("vkCmdInsertDebugUtilsLabelEXT", name)) { + *addr = (ptr_instance->enabled_known_extensions.ext_debug_utils == 1) + ? 
(void *)CmdInsertDebugUtilsLabelEXT + : NULL; + return true; + } + + // ---- VK_ANDROID_external_memory_android_hardware_buffer extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (!strcmp("vkGetAndroidHardwareBufferPropertiesANDROID", name)) { + *addr = (void *)GetAndroidHardwareBufferPropertiesANDROID; + return true; + } +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (!strcmp("vkGetMemoryAndroidHardwareBufferANDROID", name)) { + *addr = (void *)GetMemoryAndroidHardwareBufferANDROID; + return true; + } +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_EXT_sample_locations extension commands + if (!strcmp("vkCmdSetSampleLocationsEXT", name)) { + *addr = (void *)CmdSetSampleLocationsEXT; + return true; + } + if (!strcmp("vkGetPhysicalDeviceMultisamplePropertiesEXT", name)) { + *addr = (void *)GetPhysicalDeviceMultisamplePropertiesEXT; + return true; + } + + // ---- VK_EXT_image_drm_format_modifier extension commands + if (!strcmp("vkGetImageDrmFormatModifierPropertiesEXT", name)) { + *addr = (void *)GetImageDrmFormatModifierPropertiesEXT; + return true; + } + + // ---- VK_EXT_validation_cache extension commands + if (!strcmp("vkCreateValidationCacheEXT", name)) { + *addr = (void *)CreateValidationCacheEXT; + return true; + } + if (!strcmp("vkDestroyValidationCacheEXT", name)) { + *addr = (void *)DestroyValidationCacheEXT; + return true; + } + if (!strcmp("vkMergeValidationCachesEXT", name)) { + *addr = (void *)MergeValidationCachesEXT; + return true; + } + if (!strcmp("vkGetValidationCacheDataEXT", name)) { + *addr = (void *)GetValidationCacheDataEXT; + return true; + } + + // ---- VK_NV_shading_rate_image extension commands + if (!strcmp("vkCmdBindShadingRateImageNV", name)) { + *addr = (void *)CmdBindShadingRateImageNV; + return true; + } + if (!strcmp("vkCmdSetViewportShadingRatePaletteNV", name)) { + *addr = (void *)CmdSetViewportShadingRatePaletteNV; + return true; + } + if (!strcmp("vkCmdSetCoarseSampleOrderNV", name)) { + *addr = (void *)CmdSetCoarseSampleOrderNV; + return true; + } + + // ---- VK_NV_ray_tracing extension commands + if (!strcmp("vkCreateAccelerationStructureNV", name)) { + *addr = (void *)CreateAccelerationStructureNV; + return true; + } + if (!strcmp("vkDestroyAccelerationStructureNV", name)) { + *addr = (void *)DestroyAccelerationStructureNV; + return true; + } + if (!strcmp("vkGetAccelerationStructureMemoryRequirementsNV", name)) { + *addr = (void *)GetAccelerationStructureMemoryRequirementsNV; + return true; + } + if (!strcmp("vkBindAccelerationStructureMemoryNV", name)) { + *addr = (void *)BindAccelerationStructureMemoryNV; + return true; + } + if (!strcmp("vkCmdBuildAccelerationStructureNV", name)) { + *addr = (void *)CmdBuildAccelerationStructureNV; + return true; + } + if (!strcmp("vkCmdCopyAccelerationStructureNV", name)) { + *addr = (void *)CmdCopyAccelerationStructureNV; + return true; + } + if (!strcmp("vkCmdTraceRaysNV", name)) { + *addr = (void *)CmdTraceRaysNV; + return true; + } + if (!strcmp("vkCreateRayTracingPipelinesNV", name)) { + *addr = (void *)CreateRayTracingPipelinesNV; + return true; + } + if (!strcmp("vkGetRayTracingShaderGroupHandlesKHR", name)) { + *addr = (void *)GetRayTracingShaderGroupHandlesKHR; + return true; + } + if (!strcmp("vkGetRayTracingShaderGroupHandlesNV", name)) { + *addr = (void *)GetRayTracingShaderGroupHandlesNV; + return true; + } + if (!strcmp("vkGetAccelerationStructureHandleNV", name)) { + *addr = (void *)GetAccelerationStructureHandleNV; + return true; + } + if 
(!strcmp("vkCmdWriteAccelerationStructuresPropertiesNV", name)) { + *addr = (void *)CmdWriteAccelerationStructuresPropertiesNV; + return true; + } + if (!strcmp("vkCompileDeferredNV", name)) { + *addr = (void *)CompileDeferredNV; + return true; + } + + // ---- VK_EXT_external_memory_host extension commands + if (!strcmp("vkGetMemoryHostPointerPropertiesEXT", name)) { + *addr = (void *)GetMemoryHostPointerPropertiesEXT; + return true; + } + + // ---- VK_AMD_buffer_marker extension commands + if (!strcmp("vkCmdWriteBufferMarkerAMD", name)) { + *addr = (void *)CmdWriteBufferMarkerAMD; + return true; + } + + // ---- VK_EXT_calibrated_timestamps extension commands + if (!strcmp("vkGetPhysicalDeviceCalibrateableTimeDomainsEXT", name)) { + *addr = (void *)GetPhysicalDeviceCalibrateableTimeDomainsEXT; + return true; + } + if (!strcmp("vkGetCalibratedTimestampsEXT", name)) { + *addr = (void *)GetCalibratedTimestampsEXT; + return true; + } + + // ---- VK_NV_mesh_shader extension commands + if (!strcmp("vkCmdDrawMeshTasksNV", name)) { + *addr = (void *)CmdDrawMeshTasksNV; + return true; + } + if (!strcmp("vkCmdDrawMeshTasksIndirectNV", name)) { + *addr = (void *)CmdDrawMeshTasksIndirectNV; + return true; + } + if (!strcmp("vkCmdDrawMeshTasksIndirectCountNV", name)) { + *addr = (void *)CmdDrawMeshTasksIndirectCountNV; + return true; + } + + // ---- VK_NV_scissor_exclusive extension commands + if (!strcmp("vkCmdSetExclusiveScissorNV", name)) { + *addr = (void *)CmdSetExclusiveScissorNV; + return true; + } + + // ---- VK_NV_device_diagnostic_checkpoints extension commands + if (!strcmp("vkCmdSetCheckpointNV", name)) { + *addr = (void *)CmdSetCheckpointNV; + return true; + } + if (!strcmp("vkGetQueueCheckpointDataNV", name)) { + *addr = (void *)GetQueueCheckpointDataNV; + return true; + } + + // ---- VK_INTEL_performance_query extension commands + if (!strcmp("vkInitializePerformanceApiINTEL", name)) { + *addr = (void *)InitializePerformanceApiINTEL; + return true; + } + if (!strcmp("vkUninitializePerformanceApiINTEL", name)) { + *addr = (void *)UninitializePerformanceApiINTEL; + return true; + } + if (!strcmp("vkCmdSetPerformanceMarkerINTEL", name)) { + *addr = (void *)CmdSetPerformanceMarkerINTEL; + return true; + } + if (!strcmp("vkCmdSetPerformanceStreamMarkerINTEL", name)) { + *addr = (void *)CmdSetPerformanceStreamMarkerINTEL; + return true; + } + if (!strcmp("vkCmdSetPerformanceOverrideINTEL", name)) { + *addr = (void *)CmdSetPerformanceOverrideINTEL; + return true; + } + if (!strcmp("vkAcquirePerformanceConfigurationINTEL", name)) { + *addr = (void *)AcquirePerformanceConfigurationINTEL; + return true; + } + if (!strcmp("vkReleasePerformanceConfigurationINTEL", name)) { + *addr = (void *)ReleasePerformanceConfigurationINTEL; + return true; + } + if (!strcmp("vkQueueSetPerformanceConfigurationINTEL", name)) { + *addr = (void *)QueueSetPerformanceConfigurationINTEL; + return true; + } + if (!strcmp("vkGetPerformanceParameterINTEL", name)) { + *addr = (void *)GetPerformanceParameterINTEL; + return true; + } + + // ---- VK_AMD_display_native_hdr extension commands + if (!strcmp("vkSetLocalDimmingAMD", name)) { + *addr = (void *)SetLocalDimmingAMD; + return true; + } + + // ---- VK_EXT_buffer_device_address extension commands + if (!strcmp("vkGetBufferDeviceAddressEXT", name)) { + *addr = (void *)GetBufferDeviceAddressEXT; + return true; + } + + // ---- VK_EXT_tooling_info extension commands + if (!strcmp("vkGetPhysicalDeviceToolPropertiesEXT", name)) { + *addr = (void 
*)GetPhysicalDeviceToolPropertiesEXT; + return true; + } + + // ---- VK_NV_cooperative_matrix extension commands + if (!strcmp("vkGetPhysicalDeviceCooperativeMatrixPropertiesNV", name)) { + *addr = (void *)GetPhysicalDeviceCooperativeMatrixPropertiesNV; + return true; + } + + // ---- VK_NV_coverage_reduction_mode extension commands + if (!strcmp("vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV", name)) { + *addr = (void *)GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; + return true; + } + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetPhysicalDeviceSurfacePresentModes2EXT", name)) { + *addr = (void *)GetPhysicalDeviceSurfacePresentModes2EXT; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkAcquireFullScreenExclusiveModeEXT", name)) { + *addr = (void *)AcquireFullScreenExclusiveModeEXT; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkReleaseFullScreenExclusiveModeEXT", name)) { + *addr = (void *)ReleaseFullScreenExclusiveModeEXT; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (!strcmp("vkGetDeviceGroupSurfacePresentModes2EXT", name)) { + *addr = (void *)GetDeviceGroupSurfacePresentModes2EXT; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_line_rasterization extension commands + if (!strcmp("vkCmdSetLineStippleEXT", name)) { + *addr = (void *)CmdSetLineStippleEXT; + return true; + } + + // ---- VK_EXT_host_query_reset extension commands + if (!strcmp("vkResetQueryPoolEXT", name)) { + *addr = (void *)ResetQueryPoolEXT; + return true; + } + + // ---- VK_EXT_extended_dynamic_state extension commands + if (!strcmp("vkCmdSetCullModeEXT", name)) { + *addr = (void *)CmdSetCullModeEXT; + return true; + } + if (!strcmp("vkCmdSetFrontFaceEXT", name)) { + *addr = (void *)CmdSetFrontFaceEXT; + return true; + } + if (!strcmp("vkCmdSetPrimitiveTopologyEXT", name)) { + *addr = (void *)CmdSetPrimitiveTopologyEXT; + return true; + } + if (!strcmp("vkCmdSetViewportWithCountEXT", name)) { + *addr = (void *)CmdSetViewportWithCountEXT; + return true; + } + if (!strcmp("vkCmdSetScissorWithCountEXT", name)) { + *addr = (void *)CmdSetScissorWithCountEXT; + return true; + } + if (!strcmp("vkCmdBindVertexBuffers2EXT", name)) { + *addr = (void *)CmdBindVertexBuffers2EXT; + return true; + } + if (!strcmp("vkCmdSetDepthTestEnableEXT", name)) { + *addr = (void *)CmdSetDepthTestEnableEXT; + return true; + } + if (!strcmp("vkCmdSetDepthWriteEnableEXT", name)) { + *addr = (void *)CmdSetDepthWriteEnableEXT; + return true; + } + if (!strcmp("vkCmdSetDepthCompareOpEXT", name)) { + *addr = (void *)CmdSetDepthCompareOpEXT; + return true; + } + if (!strcmp("vkCmdSetDepthBoundsTestEnableEXT", name)) { + *addr = (void *)CmdSetDepthBoundsTestEnableEXT; + return true; + } + if (!strcmp("vkCmdSetStencilTestEnableEXT", name)) { + *addr = (void *)CmdSetStencilTestEnableEXT; + return true; + } + if (!strcmp("vkCmdSetStencilOpEXT", name)) { + *addr = (void *)CmdSetStencilOpEXT; + return true; + } + + // ---- VK_NV_device_generated_commands extension commands + if (!strcmp("vkGetGeneratedCommandsMemoryRequirementsNV", name)) { + *addr = (void *)GetGeneratedCommandsMemoryRequirementsNV; + return true; + } + if (!strcmp("vkCmdPreprocessGeneratedCommandsNV", name)) { + *addr = (void *)CmdPreprocessGeneratedCommandsNV; + return true; + } + if 
(!strcmp("vkCmdExecuteGeneratedCommandsNV", name)) { + *addr = (void *)CmdExecuteGeneratedCommandsNV; + return true; + } + if (!strcmp("vkCmdBindPipelineShaderGroupNV", name)) { + *addr = (void *)CmdBindPipelineShaderGroupNV; + return true; + } + if (!strcmp("vkCreateIndirectCommandsLayoutNV", name)) { + *addr = (void *)CreateIndirectCommandsLayoutNV; + return true; + } + if (!strcmp("vkDestroyIndirectCommandsLayoutNV", name)) { + *addr = (void *)DestroyIndirectCommandsLayoutNV; + return true; + } + + // ---- VK_EXT_private_data extension commands + if (!strcmp("vkCreatePrivateDataSlotEXT", name)) { + *addr = (void *)CreatePrivateDataSlotEXT; + return true; + } + if (!strcmp("vkDestroyPrivateDataSlotEXT", name)) { + *addr = (void *)DestroyPrivateDataSlotEXT; + return true; + } + if (!strcmp("vkSetPrivateDataEXT", name)) { + *addr = (void *)SetPrivateDataEXT; + return true; + } + if (!strcmp("vkGetPrivateDataEXT", name)) { + *addr = (void *)GetPrivateDataEXT; + return true; + } + + // ---- VK_NV_fragment_shading_rate_enums extension commands + if (!strcmp("vkCmdSetFragmentShadingRateEnumNV", name)) { + *addr = (void *)CmdSetFragmentShadingRateEnumNV; + return true; + } + + // ---- VK_KHR_acceleration_structure extension commands + if (!strcmp("vkCreateAccelerationStructureKHR", name)) { + *addr = (void *)CreateAccelerationStructureKHR; + return true; + } + if (!strcmp("vkDestroyAccelerationStructureKHR", name)) { + *addr = (void *)DestroyAccelerationStructureKHR; + return true; + } + if (!strcmp("vkCmdBuildAccelerationStructuresKHR", name)) { + *addr = (void *)CmdBuildAccelerationStructuresKHR; + return true; + } + if (!strcmp("vkCmdBuildAccelerationStructuresIndirectKHR", name)) { + *addr = (void *)CmdBuildAccelerationStructuresIndirectKHR; + return true; + } + if (!strcmp("vkBuildAccelerationStructuresKHR", name)) { + *addr = (void *)BuildAccelerationStructuresKHR; + return true; + } + if (!strcmp("vkCopyAccelerationStructureKHR", name)) { + *addr = (void *)CopyAccelerationStructureKHR; + return true; + } + if (!strcmp("vkCopyAccelerationStructureToMemoryKHR", name)) { + *addr = (void *)CopyAccelerationStructureToMemoryKHR; + return true; + } + if (!strcmp("vkCopyMemoryToAccelerationStructureKHR", name)) { + *addr = (void *)CopyMemoryToAccelerationStructureKHR; + return true; + } + if (!strcmp("vkWriteAccelerationStructuresPropertiesKHR", name)) { + *addr = (void *)WriteAccelerationStructuresPropertiesKHR; + return true; + } + if (!strcmp("vkCmdCopyAccelerationStructureKHR", name)) { + *addr = (void *)CmdCopyAccelerationStructureKHR; + return true; + } + if (!strcmp("vkCmdCopyAccelerationStructureToMemoryKHR", name)) { + *addr = (void *)CmdCopyAccelerationStructureToMemoryKHR; + return true; + } + if (!strcmp("vkCmdCopyMemoryToAccelerationStructureKHR", name)) { + *addr = (void *)CmdCopyMemoryToAccelerationStructureKHR; + return true; + } + if (!strcmp("vkGetAccelerationStructureDeviceAddressKHR", name)) { + *addr = (void *)GetAccelerationStructureDeviceAddressKHR; + return true; + } + if (!strcmp("vkCmdWriteAccelerationStructuresPropertiesKHR", name)) { + *addr = (void *)CmdWriteAccelerationStructuresPropertiesKHR; + return true; + } + if (!strcmp("vkGetDeviceAccelerationStructureCompatibilityKHR", name)) { + *addr = (void *)GetDeviceAccelerationStructureCompatibilityKHR; + return true; + } + if (!strcmp("vkGetAccelerationStructureBuildSizesKHR", name)) { + *addr = (void *)GetAccelerationStructureBuildSizesKHR; + return true; + } + + // ---- VK_KHR_ray_tracing_pipeline extension commands 
+ if (!strcmp("vkCmdTraceRaysKHR", name)) { + *addr = (void *)CmdTraceRaysKHR; + return true; + } + if (!strcmp("vkCreateRayTracingPipelinesKHR", name)) { + *addr = (void *)CreateRayTracingPipelinesKHR; + return true; + } + if (!strcmp("vkGetRayTracingCaptureReplayShaderGroupHandlesKHR", name)) { + *addr = (void *)GetRayTracingCaptureReplayShaderGroupHandlesKHR; + return true; + } + if (!strcmp("vkCmdTraceRaysIndirectKHR", name)) { + *addr = (void *)CmdTraceRaysIndirectKHR; + return true; + } + if (!strcmp("vkGetRayTracingShaderGroupStackSizeKHR", name)) { + *addr = (void *)GetRayTracingShaderGroupStackSizeKHR; + return true; + } + if (!strcmp("vkCmdSetRayTracingPipelineStackSizeKHR", name)) { + *addr = (void *)CmdSetRayTracingPipelineStackSizeKHR; + return true; + } + return false; +} + +// A function that can be used to query enabled extensions during a vkCreateInstance call +void extensions_create_instance(struct loader_instance *ptr_instance, const VkInstanceCreateInfo *pCreateInfo) { + for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { + + // ---- VK_KHR_get_physical_device_properties2 extension commands + if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.khr_get_physical_device_properties2 = 1; + + // ---- VK_KHR_device_group_creation extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.khr_device_group_creation = 1; + + // ---- VK_KHR_external_memory_capabilities extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.khr_external_memory_capabilities = 1; + + // ---- VK_KHR_external_semaphore_capabilities extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.khr_external_semaphore_capabilities = 1; + + // ---- VK_KHR_external_fence_capabilities extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.khr_external_fence_capabilities = 1; + + // ---- VK_NV_external_memory_capabilities extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.nv_external_memory_capabilities = 1; + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_NN_VI_SURFACE_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.nn_vi_surface = 1; +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.ext_direct_mode_display = 1; + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.ext_acquire_xlib_display = 1; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension 
commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.ext_display_surface_counter = 1; + + // ---- VK_EXT_debug_utils extension commands + } else if (0 == strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) { + ptr_instance->enabled_known_extensions.ext_debug_utils = 1; + } + } +} + +// Some device commands still need a terminator because the loader needs to unwrap something about them. +// In many cases, the item needing unwrapping is a VkPhysicalDevice or VkSurfaceKHR object. But there may be other items +// in the future. +PFN_vkVoidFunction get_extension_device_proc_terminator(struct loader_device *dev, const char *pName) { + PFN_vkVoidFunction addr = NULL; + + // ---- VK_KHR_swapchain extension commands + if (dev->extensions.khr_swapchain_enabled) { + if(!strcmp(pName, "vkCreateSwapchainKHR")) { + addr = (PFN_vkVoidFunction)terminator_CreateSwapchainKHR; + } else if(!strcmp(pName, "vkGetDeviceGroupSurfacePresentModesKHR")) { + addr = (PFN_vkVoidFunction)terminator_GetDeviceGroupSurfacePresentModesKHR; + } + } + + // ---- VK_KHR_display_swapchain extension commands + if (dev->extensions.khr_display_swapchain_enabled) { + if(!strcmp(pName, "vkCreateSharedSwapchainsKHR")) { + addr = (PFN_vkVoidFunction)terminator_CreateSharedSwapchainsKHR; + } + } + + // ---- VK_EXT_debug_marker extension commands + if (dev->extensions.ext_debug_marker_enabled) { + if(!strcmp(pName, "vkDebugMarkerSetObjectTagEXT")) { + addr = (PFN_vkVoidFunction)terminator_DebugMarkerSetObjectTagEXT; + } else if(!strcmp(pName, "vkDebugMarkerSetObjectNameEXT")) { + addr = (PFN_vkVoidFunction)terminator_DebugMarkerSetObjectNameEXT; + } + } + + // ---- VK_EXT_debug_utils extension commands + if (dev->extensions.ext_debug_utils_enabled) { + if(!strcmp(pName, "vkSetDebugUtilsObjectNameEXT")) { + addr = (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectNameEXT; + } else if(!strcmp(pName, "vkSetDebugUtilsObjectTagEXT")) { + addr = (PFN_vkVoidFunction)terminator_SetDebugUtilsObjectTagEXT; + } else if(!strcmp(pName, "vkQueueBeginDebugUtilsLabelEXT")) { + addr = (PFN_vkVoidFunction)terminator_QueueBeginDebugUtilsLabelEXT; + } else if(!strcmp(pName, "vkQueueEndDebugUtilsLabelEXT")) { + addr = (PFN_vkVoidFunction)terminator_QueueEndDebugUtilsLabelEXT; + } else if(!strcmp(pName, "vkQueueInsertDebugUtilsLabelEXT")) { + addr = (PFN_vkVoidFunction)terminator_QueueInsertDebugUtilsLabelEXT; + } else if(!strcmp(pName, "vkCmdBeginDebugUtilsLabelEXT")) { + addr = (PFN_vkVoidFunction)terminator_CmdBeginDebugUtilsLabelEXT; + } else if(!strcmp(pName, "vkCmdEndDebugUtilsLabelEXT")) { + addr = (PFN_vkVoidFunction)terminator_CmdEndDebugUtilsLabelEXT; + } else if(!strcmp(pName, "vkCmdInsertDebugUtilsLabelEXT")) { + addr = (PFN_vkVoidFunction)terminator_CmdInsertDebugUtilsLabelEXT; + } + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_full_screen_exclusive extension commands + if (dev->extensions.ext_full_screen_exclusive_enabled && dev->extensions.khr_device_group_enabled) { + if(!strcmp(pName, "vkGetDeviceGroupSurfacePresentModes2EXT")) { + addr = (PFN_vkVoidFunction)terminator_GetDeviceGroupSurfacePresentModes2EXT; + } + } +#endif // None + return addr; +} + +// This table contains the loader's instance dispatch table, which contains +// default functions if no instance layers are activated. This contains +// pointers to "terminator functions". 
+const VkLayerInstanceDispatchTable instance_disp = { + + // ---- Core 1_0 commands + .DestroyInstance = terminator_DestroyInstance, + .EnumeratePhysicalDevices = terminator_EnumeratePhysicalDevices, + .GetPhysicalDeviceFeatures = terminator_GetPhysicalDeviceFeatures, + .GetPhysicalDeviceFormatProperties = terminator_GetPhysicalDeviceFormatProperties, + .GetPhysicalDeviceImageFormatProperties = terminator_GetPhysicalDeviceImageFormatProperties, + .GetPhysicalDeviceProperties = terminator_GetPhysicalDeviceProperties, + .GetPhysicalDeviceQueueFamilyProperties = terminator_GetPhysicalDeviceQueueFamilyProperties, + .GetPhysicalDeviceMemoryProperties = terminator_GetPhysicalDeviceMemoryProperties, + .GetInstanceProcAddr = vkGetInstanceProcAddr, + .EnumerateDeviceExtensionProperties = terminator_EnumerateDeviceExtensionProperties, + .EnumerateDeviceLayerProperties = terminator_EnumerateDeviceLayerProperties, + .GetPhysicalDeviceSparseImageFormatProperties = terminator_GetPhysicalDeviceSparseImageFormatProperties, + + // ---- Core 1_1 commands + .EnumeratePhysicalDeviceGroups = terminator_EnumeratePhysicalDeviceGroups, + .GetPhysicalDeviceFeatures2 = terminator_GetPhysicalDeviceFeatures2, + .GetPhysicalDeviceProperties2 = terminator_GetPhysicalDeviceProperties2, + .GetPhysicalDeviceFormatProperties2 = terminator_GetPhysicalDeviceFormatProperties2, + .GetPhysicalDeviceImageFormatProperties2 = terminator_GetPhysicalDeviceImageFormatProperties2, + .GetPhysicalDeviceQueueFamilyProperties2 = terminator_GetPhysicalDeviceQueueFamilyProperties2, + .GetPhysicalDeviceMemoryProperties2 = terminator_GetPhysicalDeviceMemoryProperties2, + .GetPhysicalDeviceSparseImageFormatProperties2 = terminator_GetPhysicalDeviceSparseImageFormatProperties2, + .GetPhysicalDeviceExternalBufferProperties = terminator_GetPhysicalDeviceExternalBufferProperties, + .GetPhysicalDeviceExternalFenceProperties = terminator_GetPhysicalDeviceExternalFenceProperties, + .GetPhysicalDeviceExternalSemaphoreProperties = terminator_GetPhysicalDeviceExternalSemaphoreProperties, + + // ---- VK_KHR_surface extension commands + .DestroySurfaceKHR = terminator_DestroySurfaceKHR, + .GetPhysicalDeviceSurfaceSupportKHR = terminator_GetPhysicalDeviceSurfaceSupportKHR, + .GetPhysicalDeviceSurfaceCapabilitiesKHR = terminator_GetPhysicalDeviceSurfaceCapabilitiesKHR, + .GetPhysicalDeviceSurfaceFormatsKHR = terminator_GetPhysicalDeviceSurfaceFormatsKHR, + .GetPhysicalDeviceSurfacePresentModesKHR = terminator_GetPhysicalDeviceSurfacePresentModesKHR, + + // ---- VK_KHR_swapchain extension commands + .GetPhysicalDevicePresentRectanglesKHR = terminator_GetPhysicalDevicePresentRectanglesKHR, + + // ---- VK_KHR_display extension commands + .GetPhysicalDeviceDisplayPropertiesKHR = terminator_GetPhysicalDeviceDisplayPropertiesKHR, + .GetPhysicalDeviceDisplayPlanePropertiesKHR = terminator_GetPhysicalDeviceDisplayPlanePropertiesKHR, + .GetDisplayPlaneSupportedDisplaysKHR = terminator_GetDisplayPlaneSupportedDisplaysKHR, + .GetDisplayModePropertiesKHR = terminator_GetDisplayModePropertiesKHR, + .CreateDisplayModeKHR = terminator_CreateDisplayModeKHR, + .GetDisplayPlaneCapabilitiesKHR = terminator_GetDisplayPlaneCapabilitiesKHR, + .CreateDisplayPlaneSurfaceKHR = terminator_CreateDisplayPlaneSurfaceKHR, + + // ---- VK_KHR_xlib_surface extension commands +#ifdef VK_USE_PLATFORM_XLIB_KHR + .CreateXlibSurfaceKHR = terminator_CreateXlibSurfaceKHR, +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + .GetPhysicalDeviceXlibPresentationSupportKHR = 
terminator_GetPhysicalDeviceXlibPresentationSupportKHR, +#endif // VK_USE_PLATFORM_XLIB_KHR + + // ---- VK_KHR_xcb_surface extension commands +#ifdef VK_USE_PLATFORM_XCB_KHR + .CreateXcbSurfaceKHR = terminator_CreateXcbSurfaceKHR, +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + .GetPhysicalDeviceXcbPresentationSupportKHR = terminator_GetPhysicalDeviceXcbPresentationSupportKHR, +#endif // VK_USE_PLATFORM_XCB_KHR + + // ---- VK_KHR_wayland_surface extension commands +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + .CreateWaylandSurfaceKHR = terminator_CreateWaylandSurfaceKHR, +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + .GetPhysicalDeviceWaylandPresentationSupportKHR = terminator_GetPhysicalDeviceWaylandPresentationSupportKHR, +#endif // VK_USE_PLATFORM_WAYLAND_KHR + + // ---- VK_KHR_android_surface extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + .CreateAndroidSurfaceKHR = terminator_CreateAndroidSurfaceKHR, +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_KHR_win32_surface extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + .CreateWin32SurfaceKHR = terminator_CreateWin32SurfaceKHR, +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + .GetPhysicalDeviceWin32PresentationSupportKHR = terminator_GetPhysicalDeviceWin32PresentationSupportKHR, +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_get_physical_device_properties2 extension commands + .GetPhysicalDeviceFeatures2KHR = terminator_GetPhysicalDeviceFeatures2, + .GetPhysicalDeviceProperties2KHR = terminator_GetPhysicalDeviceProperties2, + .GetPhysicalDeviceFormatProperties2KHR = terminator_GetPhysicalDeviceFormatProperties2, + .GetPhysicalDeviceImageFormatProperties2KHR = terminator_GetPhysicalDeviceImageFormatProperties2, + .GetPhysicalDeviceQueueFamilyProperties2KHR = terminator_GetPhysicalDeviceQueueFamilyProperties2, + .GetPhysicalDeviceMemoryProperties2KHR = terminator_GetPhysicalDeviceMemoryProperties2, + .GetPhysicalDeviceSparseImageFormatProperties2KHR = terminator_GetPhysicalDeviceSparseImageFormatProperties2, + + // ---- VK_KHR_device_group_creation extension commands + .EnumeratePhysicalDeviceGroupsKHR = terminator_EnumeratePhysicalDeviceGroups, + + // ---- VK_KHR_external_memory_capabilities extension commands + .GetPhysicalDeviceExternalBufferPropertiesKHR = terminator_GetPhysicalDeviceExternalBufferProperties, + + // ---- VK_KHR_external_semaphore_capabilities extension commands + .GetPhysicalDeviceExternalSemaphorePropertiesKHR = terminator_GetPhysicalDeviceExternalSemaphoreProperties, + + // ---- VK_KHR_external_fence_capabilities extension commands + .GetPhysicalDeviceExternalFencePropertiesKHR = terminator_GetPhysicalDeviceExternalFenceProperties, + + // ---- VK_KHR_performance_query extension commands + .EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = terminator_EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR, + .GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = terminator_GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR, + + // ---- VK_KHR_get_surface_capabilities2 extension commands + .GetPhysicalDeviceSurfaceCapabilities2KHR = terminator_GetPhysicalDeviceSurfaceCapabilities2KHR, + .GetPhysicalDeviceSurfaceFormats2KHR = terminator_GetPhysicalDeviceSurfaceFormats2KHR, + + // ---- VK_KHR_get_display_properties2 extension commands + .GetPhysicalDeviceDisplayProperties2KHR = terminator_GetPhysicalDeviceDisplayProperties2KHR, + .GetPhysicalDeviceDisplayPlaneProperties2KHR = 
terminator_GetPhysicalDeviceDisplayPlaneProperties2KHR, + .GetDisplayModeProperties2KHR = terminator_GetDisplayModeProperties2KHR, + .GetDisplayPlaneCapabilities2KHR = terminator_GetDisplayPlaneCapabilities2KHR, + + // ---- VK_KHR_fragment_shading_rate extension commands + .GetPhysicalDeviceFragmentShadingRatesKHR = terminator_GetPhysicalDeviceFragmentShadingRatesKHR, + + // ---- VK_EXT_debug_report extension commands + .CreateDebugReportCallbackEXT = terminator_CreateDebugReportCallbackEXT, + .DestroyDebugReportCallbackEXT = terminator_DestroyDebugReportCallbackEXT, + .DebugReportMessageEXT = terminator_DebugReportMessageEXT, + + // ---- VK_GGP_stream_descriptor_surface extension commands +#ifdef VK_USE_PLATFORM_GGP + .CreateStreamDescriptorSurfaceGGP = terminator_CreateStreamDescriptorSurfaceGGP, +#endif // VK_USE_PLATFORM_GGP + + // ---- VK_NV_external_memory_capabilities extension commands + .GetPhysicalDeviceExternalImageFormatPropertiesNV = terminator_GetPhysicalDeviceExternalImageFormatPropertiesNV, + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + .CreateViSurfaceNN = terminator_CreateViSurfaceNN, +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + .ReleaseDisplayEXT = terminator_ReleaseDisplayEXT, + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + .AcquireXlibDisplayEXT = terminator_AcquireXlibDisplayEXT, +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + .GetRandROutputDisplayEXT = terminator_GetRandROutputDisplayEXT, +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + .GetPhysicalDeviceSurfaceCapabilities2EXT = terminator_GetPhysicalDeviceSurfaceCapabilities2EXT, + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + .CreateIOSSurfaceMVK = terminator_CreateIOSSurfaceMVK, +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + .CreateMacOSSurfaceMVK = terminator_CreateMacOSSurfaceMVK, +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + .CreateDebugUtilsMessengerEXT = terminator_CreateDebugUtilsMessengerEXT, + .DestroyDebugUtilsMessengerEXT = terminator_DestroyDebugUtilsMessengerEXT, + .SubmitDebugUtilsMessageEXT = terminator_SubmitDebugUtilsMessageEXT, + + // ---- VK_EXT_sample_locations extension commands + .GetPhysicalDeviceMultisamplePropertiesEXT = terminator_GetPhysicalDeviceMultisamplePropertiesEXT, + + // ---- VK_EXT_calibrated_timestamps extension commands + .GetPhysicalDeviceCalibrateableTimeDomainsEXT = terminator_GetPhysicalDeviceCalibrateableTimeDomainsEXT, + + // ---- VK_FUCHSIA_imagepipe_surface extension commands +#ifdef VK_USE_PLATFORM_FUCHSIA + .CreateImagePipeSurfaceFUCHSIA = terminator_CreateImagePipeSurfaceFUCHSIA, +#endif // VK_USE_PLATFORM_FUCHSIA + + // ---- VK_EXT_metal_surface extension commands +#ifdef VK_USE_PLATFORM_METAL_EXT + .CreateMetalSurfaceEXT = terminator_CreateMetalSurfaceEXT, +#endif // VK_USE_PLATFORM_METAL_EXT + + // ---- VK_EXT_tooling_info extension commands + .GetPhysicalDeviceToolPropertiesEXT = terminator_GetPhysicalDeviceToolPropertiesEXT, + + // ---- VK_NV_cooperative_matrix extension commands + .GetPhysicalDeviceCooperativeMatrixPropertiesNV = terminator_GetPhysicalDeviceCooperativeMatrixPropertiesNV, + + // ---- VK_NV_coverage_reduction_mode extension commands + 
.GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = terminator_GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV, + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + .GetPhysicalDeviceSurfacePresentModes2EXT = terminator_GetPhysicalDeviceSurfacePresentModes2EXT, +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_headless_surface extension commands + .CreateHeadlessSurfaceEXT = terminator_CreateHeadlessSurfaceEXT, + + // ---- VK_EXT_directfb_surface extension commands +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + .CreateDirectFBSurfaceEXT = terminator_CreateDirectFBSurfaceEXT, +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + .GetPhysicalDeviceDirectFBPresentationSupportEXT = terminator_GetPhysicalDeviceDirectFBPresentationSupportEXT, +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +}; + +// A null-terminated list of all of the instance extensions supported by the loader. +// If an instance extension name is not in this list, but it is exported by one or more of the +// ICDs detected by the loader, then the extension name not in the list will be filtered out +// before passing the list of extensions to the application. +const char *const LOADER_INSTANCE_EXTENSIONS[] = { + VK_KHR_SURFACE_EXTENSION_NAME, + VK_KHR_DISPLAY_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_XLIB_KHR + VK_KHR_XLIB_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + VK_KHR_XCB_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + VK_KHR_WIN32_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_WIN32_KHR + VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, + VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME, + VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, + VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME, + VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME, + VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME, + VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME, + VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME, + VK_EXT_DEBUG_REPORT_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_GGP + VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_GGP + VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, + VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_VI_NN + VK_NN_VI_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_VI_NN + VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME, + VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_IOS_MVK + VK_MVK_IOS_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_MACOS_MVK + VK_MVK_MACOS_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_MACOS_MVK + VK_EXT_DEBUG_UTILS_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_FUCHSIA + VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_FUCHSIA +#ifdef VK_USE_PLATFORM_METAL_EXT + VK_EXT_METAL_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_METAL_EXT + VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME, + VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME, +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + VK_EXT_DIRECTFB_SURFACE_EXTENSION_NAME, +#endif // VK_USE_PLATFORM_DIRECTFB_EXT + NULL }; + diff --git 
a/src/third_party/vulkan/loader/vk_loader_extensions.h b/src/third_party/vulkan/loader/vk_loader_extensions.h new file mode 100644 index 0000000..84e2150 --- /dev/null +++ b/src/third_party/vulkan/loader/vk_loader_extensions.h @@ -0,0 +1,462 @@ +// *** THIS FILE IS GENERATED - DO NOT EDIT *** +// See loader_extension_generator.py for modifications + +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Mark Lobodzinski + * Author: Mark Young + */ + +#pragma once + +// Structures defined externally, but used here +struct loader_instance; +struct loader_device; +struct loader_icd_term; +struct loader_dev_dispatch_table; + +// Device extension error function +VKAPI_ATTR VkResult VKAPI_CALL vkDevExtError(VkDevice dev); + +// Extension interception for vkGetInstanceProcAddr function, so we can return +// the appropriate information for any instance extensions we know about. +bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *name, void **addr); + +// Extension interception for vkCreateInstance function, so we can properly +// detect and enable any instance extension information for extensions we know +// about. +void extensions_create_instance(struct loader_instance *ptr_instance, const VkInstanceCreateInfo *pCreateInfo); + +// Extension interception for vkGetDeviceProcAddr function, so we can return +// an appropriate terminator if this is one of those few device commands requiring +// a terminator. +PFN_vkVoidFunction get_extension_device_proc_terminator(struct loader_device *dev, const char *pName); + +// Dispatch table properly filled in with appropriate terminators for the +// supported extensions. +extern const VkLayerInstanceDispatchTable instance_disp; + +// Array of extension strings for instance extensions we support. 
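/* Editor's note: hypothetical sketch, not part of this patch. The extern declared just
 * below exposes the NULL-terminated LOADER_INSTANCE_EXTENSIONS table defined in
 * vk_loader_extensions.c; scanning such a table when filtering ICD-reported extension
 * names can look like the generic helper here (assumes <stdbool.h> and <string.h>). */
static inline bool extension_list_contains(const char *const *list, const char *name) {
    // Walk the list until the NULL sentinel, comparing each entry against the query.
    for (; *list != NULL; ++list) {
        if (strcmp(*list, name) == 0) return true;
    }
    return false;
}
/* Example call: extension_list_contains(LOADER_INSTANCE_EXTENSIONS, VK_KHR_SURFACE_EXTENSION_NAME). */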
+extern const char *const LOADER_INSTANCE_EXTENSIONS[]; + +VKAPI_ATTR bool VKAPI_CALL loader_icd_init_entries(struct loader_icd_term *icd_term, VkInstance inst, + const PFN_vkGetInstanceProcAddr fp_gipa); + +// Init Device function pointer dispatch table with core commands +VKAPI_ATTR void VKAPI_CALL loader_init_device_dispatch_table(struct loader_dev_dispatch_table *dev_table, PFN_vkGetDeviceProcAddr gpa, + VkDevice dev); + +// Init Device function pointer dispatch table with extension commands +VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct loader_dev_dispatch_table *dev_table, + PFN_vkGetInstanceProcAddr gipa, + PFN_vkGetDeviceProcAddr gdpa, + VkInstance inst, + VkDevice dev); + +// Init Instance function pointer dispatch table with core commands +VKAPI_ATTR void VKAPI_CALL loader_init_instance_core_dispatch_table(VkLayerInstanceDispatchTable *table, PFN_vkGetInstanceProcAddr gpa, + VkInstance inst); + +// Init Instance function pointer dispatch table with core commands +VKAPI_ATTR void VKAPI_CALL loader_init_instance_extension_dispatch_table(VkLayerInstanceDispatchTable *table, PFN_vkGetInstanceProcAddr gpa, + VkInstance inst); + +// Device command lookup function +VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDispatchTable *table, const char *name); + +// Instance command lookup function +VKAPI_ATTR void* VKAPI_CALL loader_lookup_instance_dispatch_table(const VkLayerInstanceDispatchTable *table, const char *name, + bool *found_name); + +VKAPI_ATTR bool VKAPI_CALL loader_icd_init_entries(struct loader_icd_term *icd_term, VkInstance inst, + const PFN_vkGetInstanceProcAddr fp_gipa); + +// Loader core instance terminators +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); +VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL terminator_GetInstanceProcAddr( + VkInstance instance, + const char* pName); +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const 
VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceExtensionProperties( + const VkEnumerateInstanceExtensionPropertiesChain* chain, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties( + const VkEnumerateInstanceLayerPropertiesChain* chain, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceVersion( + const VkEnumerateInstanceVersionChain* chain, + uint32_t* pApiVersion); +VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumeratePhysicalDeviceGroups( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFeatures2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceFormatProperties2( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + 
VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +// ICD function pointer dispatch table +struct loader_icd_term_dispatch { + + // ---- Core 1_0 commands + PFN_vkCreateInstance CreateInstance; + PFN_vkDestroyInstance DestroyInstance; + PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices; + PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures; + PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties; + PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties; + PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties; + PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties; + PFN_vkGetDeviceProcAddr GetDeviceProcAddr; + PFN_vkCreateDevice CreateDevice; + PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties; + PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties; + PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties; + + // ---- Core 1_1 commands + PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion; + PFN_vkEnumeratePhysicalDeviceGroups EnumeratePhysicalDeviceGroups; + PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2; + PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2; + PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2; + PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2; + PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 GetPhysicalDeviceSparseImageFormatProperties2; + PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties; + PFN_vkGetPhysicalDeviceExternalFenceProperties GetPhysicalDeviceExternalFenceProperties; + PFN_vkGetPhysicalDeviceExternalSemaphoreProperties GetPhysicalDeviceExternalSemaphoreProperties; + + // ---- VK_KHR_surface extension commands + PFN_vkDestroySurfaceKHR DestroySurfaceKHR; + PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR; + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR; + + // ---- VK_KHR_swapchain extension commands + PFN_vkCreateSwapchainKHR CreateSwapchainKHR; + PFN_vkGetDeviceGroupSurfacePresentModesKHR GetDeviceGroupSurfacePresentModesKHR; + PFN_vkGetPhysicalDevicePresentRectanglesKHR GetPhysicalDevicePresentRectanglesKHR; + + // ---- VK_KHR_display extension commands + PFN_vkGetPhysicalDeviceDisplayPropertiesKHR GetPhysicalDeviceDisplayPropertiesKHR; + PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR GetPhysicalDeviceDisplayPlanePropertiesKHR; + PFN_vkGetDisplayPlaneSupportedDisplaysKHR GetDisplayPlaneSupportedDisplaysKHR; + PFN_vkGetDisplayModePropertiesKHR GetDisplayModePropertiesKHR; + PFN_vkCreateDisplayModeKHR CreateDisplayModeKHR; + PFN_vkGetDisplayPlaneCapabilitiesKHR GetDisplayPlaneCapabilitiesKHR; + PFN_vkCreateDisplayPlaneSurfaceKHR CreateDisplayPlaneSurfaceKHR; + + // ---- VK_KHR_display_swapchain extension commands + PFN_vkCreateSharedSwapchainsKHR CreateSharedSwapchainsKHR; 
+ + // ---- VK_KHR_xlib_surface extension commands +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR; +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR GetPhysicalDeviceXlibPresentationSupportKHR; +#endif // VK_USE_PLATFORM_XLIB_KHR + + // ---- VK_KHR_xcb_surface extension commands +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR; +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR GetPhysicalDeviceXcbPresentationSupportKHR; +#endif // VK_USE_PLATFORM_XCB_KHR + + // ---- VK_KHR_wayland_surface extension commands +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkCreateWaylandSurfaceKHR CreateWaylandSurfaceKHR; +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR GetPhysicalDeviceWaylandPresentationSupportKHR; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + + // ---- VK_KHR_android_surface extension commands +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR; +#endif // VK_USE_PLATFORM_ANDROID_KHR + + // ---- VK_KHR_win32_surface extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR GetPhysicalDeviceWin32PresentationSupportKHR; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_KHR_get_physical_device_properties2 extension commands + PFN_vkGetPhysicalDeviceFeatures2KHR GetPhysicalDeviceFeatures2KHR; + PFN_vkGetPhysicalDeviceProperties2KHR GetPhysicalDeviceProperties2KHR; + PFN_vkGetPhysicalDeviceFormatProperties2KHR GetPhysicalDeviceFormatProperties2KHR; + PFN_vkGetPhysicalDeviceImageFormatProperties2KHR GetPhysicalDeviceImageFormatProperties2KHR; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR GetPhysicalDeviceQueueFamilyProperties2KHR; + PFN_vkGetPhysicalDeviceMemoryProperties2KHR GetPhysicalDeviceMemoryProperties2KHR; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR GetPhysicalDeviceSparseImageFormatProperties2KHR; + + // ---- VK_KHR_device_group_creation extension commands + PFN_vkEnumeratePhysicalDeviceGroupsKHR EnumeratePhysicalDeviceGroupsKHR; + + // ---- VK_KHR_external_memory_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR GetPhysicalDeviceExternalBufferPropertiesKHR; + + // ---- VK_KHR_external_semaphore_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR GetPhysicalDeviceExternalSemaphorePropertiesKHR; + + // ---- VK_KHR_external_fence_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR GetPhysicalDeviceExternalFencePropertiesKHR; + + // ---- VK_KHR_performance_query extension commands + PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR; + PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR GetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR; + + // ---- VK_KHR_get_surface_capabilities2 extension commands + PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR GetPhysicalDeviceSurfaceCapabilities2KHR; + PFN_vkGetPhysicalDeviceSurfaceFormats2KHR GetPhysicalDeviceSurfaceFormats2KHR; + + // ---- VK_KHR_get_display_properties2 extension commands + PFN_vkGetPhysicalDeviceDisplayProperties2KHR 
GetPhysicalDeviceDisplayProperties2KHR; + PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR GetPhysicalDeviceDisplayPlaneProperties2KHR; + PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR; + PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR; + + // ---- VK_KHR_fragment_shading_rate extension commands + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR GetPhysicalDeviceFragmentShadingRatesKHR; + + // ---- VK_EXT_debug_report extension commands + PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT; + PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT; + PFN_vkDebugReportMessageEXT DebugReportMessageEXT; + + // ---- VK_EXT_debug_marker extension commands + PFN_vkDebugMarkerSetObjectTagEXT DebugMarkerSetObjectTagEXT; + PFN_vkDebugMarkerSetObjectNameEXT DebugMarkerSetObjectNameEXT; + + // ---- VK_GGP_stream_descriptor_surface extension commands +#ifdef VK_USE_PLATFORM_GGP + PFN_vkCreateStreamDescriptorSurfaceGGP CreateStreamDescriptorSurfaceGGP; +#endif // VK_USE_PLATFORM_GGP + + // ---- VK_NV_external_memory_capabilities extension commands + PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV; + + // ---- VK_NN_vi_surface extension commands +#ifdef VK_USE_PLATFORM_VI_NN + PFN_vkCreateViSurfaceNN CreateViSurfaceNN; +#endif // VK_USE_PLATFORM_VI_NN + + // ---- VK_EXT_direct_mode_display extension commands + PFN_vkReleaseDisplayEXT ReleaseDisplayEXT; + + // ---- VK_EXT_acquire_xlib_display extension commands +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkAcquireXlibDisplayEXT AcquireXlibDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkGetRandROutputDisplayEXT GetRandROutputDisplayEXT; +#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT + + // ---- VK_EXT_display_surface_counter extension commands + PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT GetPhysicalDeviceSurfaceCapabilities2EXT; + + // ---- VK_MVK_ios_surface extension commands +#ifdef VK_USE_PLATFORM_IOS_MVK + PFN_vkCreateIOSSurfaceMVK CreateIOSSurfaceMVK; +#endif // VK_USE_PLATFORM_IOS_MVK + + // ---- VK_MVK_macos_surface extension commands +#ifdef VK_USE_PLATFORM_MACOS_MVK + PFN_vkCreateMacOSSurfaceMVK CreateMacOSSurfaceMVK; +#endif // VK_USE_PLATFORM_MACOS_MVK + + // ---- VK_EXT_debug_utils extension commands + PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT; + PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT; + PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT; + PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT; + PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT; + PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT; + PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT; + PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT; + PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT; + PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT; + PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT; + + // ---- VK_EXT_sample_locations extension commands + PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT GetPhysicalDeviceMultisamplePropertiesEXT; + + // ---- VK_EXT_calibrated_timestamps extension commands + PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT GetPhysicalDeviceCalibrateableTimeDomainsEXT; + + // ---- VK_FUCHSIA_imagepipe_surface extension commands +#ifdef VK_USE_PLATFORM_FUCHSIA + PFN_vkCreateImagePipeSurfaceFUCHSIA CreateImagePipeSurfaceFUCHSIA; +#endif // 
VK_USE_PLATFORM_FUCHSIA + + // ---- VK_EXT_metal_surface extension commands +#ifdef VK_USE_PLATFORM_METAL_EXT + PFN_vkCreateMetalSurfaceEXT CreateMetalSurfaceEXT; +#endif // VK_USE_PLATFORM_METAL_EXT + + // ---- VK_EXT_tooling_info extension commands + PFN_vkGetPhysicalDeviceToolPropertiesEXT GetPhysicalDeviceToolPropertiesEXT; + + // ---- VK_NV_cooperative_matrix extension commands + PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV GetPhysicalDeviceCooperativeMatrixPropertiesNV; + + // ---- VK_NV_coverage_reduction_mode extension commands + PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV GetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV; + + // ---- VK_EXT_full_screen_exclusive extension commands +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT GetPhysicalDeviceSurfacePresentModes2EXT; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetDeviceGroupSurfacePresentModes2EXT GetDeviceGroupSurfacePresentModes2EXT; +#endif // VK_USE_PLATFORM_WIN32_KHR + + // ---- VK_EXT_headless_surface extension commands + PFN_vkCreateHeadlessSurfaceEXT CreateHeadlessSurfaceEXT; + + // ---- VK_EXT_directfb_surface extension commands +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + PFN_vkCreateDirectFBSurfaceEXT CreateDirectFBSurfaceEXT; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT GetPhysicalDeviceDirectFBPresentationSupportEXT; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +}; + +union loader_instance_extension_enables { + struct { + uint8_t khr_get_physical_device_properties2 : 1; + uint8_t khr_device_group_creation : 1; + uint8_t khr_external_memory_capabilities : 1; + uint8_t khr_external_semaphore_capabilities : 1; + uint8_t khr_external_fence_capabilities : 1; + uint8_t ext_debug_report : 1; + uint8_t nv_external_memory_capabilities : 1; + uint8_t nn_vi_surface : 1; + uint8_t ext_direct_mode_display : 1; + uint8_t ext_acquire_xlib_display : 1; + uint8_t ext_display_surface_counter : 1; + uint8_t ext_debug_utils : 1; + }; + uint64_t padding[4]; +}; + + diff --git a/src/third_party/vulkan/loader/vk_loader_layer.h b/src/third_party/vulkan/loader/vk_loader_layer.h new file mode 100644 index 0000000..dfcf5b2 --- /dev/null +++ b/src/third_party/vulkan/loader/vk_loader_layer.h @@ -0,0 +1,46 @@ +/* +* +* Copyright (c) 2016 The Khronos Group Inc. +* Copyright (c) 2016 Valve Corporation +* Copyright (c) 2016 LunarG, Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+* +* Author: Mark Lobodzinski +* +*/ +#pragma once + +// Linked list node for tree of debug callbacks +typedef struct VkDebugReportContent { + VkDebugReportCallbackEXT msgCallback; + PFN_vkDebugReportCallbackEXT pfnMsgCallback; + VkFlags msgFlags; +} VkDebugReportContent; + +typedef struct VkDebugUtilsMessengerContent { + VkDebugUtilsMessengerEXT messenger; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; +} VkDebugUtilsMessengerContent; + +typedef struct VkLayerDbgFunctionNode_ { + bool is_messenger; + union { + VkDebugReportContent report; + VkDebugUtilsMessengerContent messenger; + }; + void *pUserData; + struct VkLayerDbgFunctionNode_ *pNext; +} VkLayerDbgFunctionNode; diff --git a/src/third_party/vulkan/loader/vk_loader_platform.h b/src/third_party/vulkan/loader/vk_loader_platform.h new file mode 100644 index 0000000..c018f51 --- /dev/null +++ b/src/third_party/vulkan/loader/vk_loader_platform.h @@ -0,0 +1,432 @@ +/* + * + * Copyright (c) 2015-2018 The Khronos Group Inc. + * Copyright (c) 2015-2018 Valve Corporation + * Copyright (c) 2015-2018 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Ian Elliot + * Author: Jon Ashburn + * Author: Lenny Komow + * + */ +#pragma once + +#if defined(_WIN32) +// WinSock2.h must be included *BEFORE* windows.h +#include +#endif // _WIN32 + +#if defined(__Fuchsia__) +#include "dlopen_fuchsia.h" +#endif // defined(__Fuchsia__) + +#include "../vk_platform.h" +#include "../vk_sdk_platform.h" + +#if defined(__linux__) || defined(__APPLE__) || defined(__Fuchsia__) +/* Linux-specific common code: */ + +// Headers: +//#ifndef _GNU_SOURCE +//#define _GNU_SOURCE 1 +//#endif +#include +// Note: The following file is for dynamic loading: +#include +#include +#include +#include +#include +#include +#include + +// VK Library Filenames, Paths, etc.: +#define PATH_SEPARATOR ':' +#define DIRECTORY_SYMBOL '/' + +#define VULKAN_DIR "vulkan/" +#define VULKAN_ICDCONF_DIR "icd.d" +#define VULKAN_ICD_DIR "icd" +#define VULKAN_SETTINGSCONF_DIR "settings.d" +#define VULKAN_ELAYERCONF_DIR "explicit_layer.d" +#define VULKAN_ILAYERCONF_DIR "implicit_layer.d" +#define VULKAN_LAYER_DIR "layer" + +#define VK_DRIVERS_INFO_RELATIVE_DIR VULKAN_DIR VULKAN_ICDCONF_DIR +#define VK_SETTINGS_INFO_RELATIVE_DIR VULKAN_DIR VULKAN_SETTINGSCONF_DIR +#define VK_ELAYERS_INFO_RELATIVE_DIR VULKAN_DIR VULKAN_ELAYERCONF_DIR +#define VK_ILAYERS_INFO_RELATIVE_DIR VULKAN_DIR VULKAN_ILAYERCONF_DIR + +#define VK_DRIVERS_INFO_REGISTRY_LOC "" +#define VK_SETTINGS_INFO_REGISTRY_LOC "" +#define VK_ELAYERS_INFO_REGISTRY_LOC "" +#define VK_ILAYERS_INFO_REGISTRY_LOC "" + +#if !defined(DEFAULT_VK_LAYERS_PATH) +#define DEFAULT_VK_LAYERS_PATH "" +#endif +#if !defined(LAYERS_SOURCE_PATH) +#define LAYERS_SOURCE_PATH NULL +#endif +#define LAYERS_PATH_ENV "VK_LAYER_PATH" +#define ENABLED_LAYERS_ENV "VK_INSTANCE_LAYERS" + +// C99: +#define PRINTF_SIZE_T_SPECIFIER "%zu" + +// 
File IO +static inline bool loader_platform_file_exists(const char *path) { + if (access(path, F_OK)) + return false; + else + return true; +} + +static inline bool loader_platform_is_path_absolute(const char *path) { + if (path[0] == '/') + return true; + else + return false; +} + +static inline char *loader_platform_dirname(char *path) { return dirname(path); } + +#if defined(__linux__) + +// find application path + name. Path cannot be longer than 1024, returns NULL if it is greater than that. +static inline char *loader_platform_executable_path(char *buffer, size_t size) { + ssize_t count = readlink("/proc/self/exe", buffer, size); + if (count == -1) return NULL; + if (count == 0) return NULL; + buffer[count] = '\0'; + return buffer; +} +#elif defined(__APPLE__) // defined(__linux__) +#include <libproc.h> +static inline char *loader_platform_executable_path(char *buffer, size_t size) { + pid_t pid = getpid(); + int ret = proc_pidpath(pid, buffer, size); + if (ret <= 0) return NULL; + buffer[ret] = '\0'; + return buffer; +} +#elif defined(__Fuchsia__) +static inline char *loader_platform_executable_path(char *buffer, size_t size) { return NULL; } +#endif // defined (__APPLE__) + +// Compatibility with compilers that don't support __has_feature +#ifndef __has_feature +#define __has_feature(x) 0 +#endif + +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +#define LOADER_ADDRESS_SANITIZER +#endif + +// Dynamic Loading of libraries: +typedef void *loader_platform_dl_handle; +// When loading the library, we use RTLD_LAZY so that not all symbols have to be +// resolved at this time (which improves performance). Note that if not all symbols +// can be resolved, this could cause crashes later. Use the LD_BIND_NOW environment +// variable to force all symbols to be resolved here.
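/* Editor's note: usage sketch only, not loader code and not part of this patch. It
 * illustrates the dlopen mode defined just below and the POSIX wrappers that follow it;
 * the library path is made up, "vk_icdGetInstanceProcAddr" is the standard ICD entry
 * point, and <stdio.h> is assumed for fprintf():
 *
 *   loader_platform_dl_handle lib = loader_platform_open_library("libexample_icd.so");
 *   if (lib == NULL) {
 *       fprintf(stderr, "%s\n", loader_platform_open_library_error("libexample_icd.so"));
 *   } else {
 *       void *gipa = loader_platform_get_proc_address(lib, "vk_icdGetInstanceProcAddr");
 *       if (gipa == NULL)
 *           fprintf(stderr, "%s\n", loader_platform_get_proc_address_error("vk_icdGetInstanceProcAddr"));
 *       loader_platform_close_library(lib);
 *   }
 */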
+#define LOADER_DLOPEN_MODE (RTLD_LAZY | RTLD_LOCAL) + +#if defined(__Fuchsia__) +static inline loader_platform_dl_handle loader_platform_open_driver(const char *libPath) { + return dlopen_fuchsia(libPath, LOADER_DLOPEN_MODE, true); +} +static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) { + return dlopen_fuchsia(libPath, LOADER_DLOPEN_MODE, false); +} +#else +static inline loader_platform_dl_handle loader_platform_open_library(const char *libPath) { + return dlopen(libPath, LOADER_DLOPEN_MODE); +} +#endif + +static inline const char *loader_platform_open_library_error(const char *libPath) { +#ifdef __Fuchsia__ + return dlerror_fuchsia(); +#else + return dlerror(); +#endif +} +static inline void loader_platform_close_library(loader_platform_dl_handle library) { dlclose(library); } +static inline void *loader_platform_get_proc_address(loader_platform_dl_handle library, const char *name) { + assert(library); + assert(name); + return dlsym(library, name); +} +static inline const char *loader_platform_get_proc_address_error(const char *name) { return dlerror(); } + +// Threads: +typedef pthread_t loader_platform_thread; +#define THREAD_LOCAL_DECL __thread + +// The once init functionality is not used on Linux +#define LOADER_PLATFORM_THREAD_ONCE_DECLARATION(var) +#define LOADER_PLATFORM_THREAD_ONCE_DEFINITION(var) +#define LOADER_PLATFORM_THREAD_ONCE(ctl, func) + +// Thread IDs: +typedef pthread_t loader_platform_thread_id; +static inline loader_platform_thread_id loader_platform_get_thread_id() { return pthread_self(); } + +// Thread mutex: +typedef pthread_mutex_t loader_platform_thread_mutex; +static inline void loader_platform_thread_create_mutex(loader_platform_thread_mutex *pMutex) { pthread_mutex_init(pMutex, NULL); } +static inline void loader_platform_thread_lock_mutex(loader_platform_thread_mutex *pMutex) { pthread_mutex_lock(pMutex); } +static inline void loader_platform_thread_unlock_mutex(loader_platform_thread_mutex *pMutex) { pthread_mutex_unlock(pMutex); } +static inline void loader_platform_thread_delete_mutex(loader_platform_thread_mutex *pMutex) { pthread_mutex_destroy(pMutex); } +typedef pthread_cond_t loader_platform_thread_cond; +static inline void loader_platform_thread_init_cond(loader_platform_thread_cond *pCond) { pthread_cond_init(pCond, NULL); } +static inline void loader_platform_thread_cond_wait(loader_platform_thread_cond *pCond, loader_platform_thread_mutex *pMutex) { + pthread_cond_wait(pCond, pMutex); +} +static inline void loader_platform_thread_cond_broadcast(loader_platform_thread_cond *pCond) { pthread_cond_broadcast(pCond); } + +#define loader_stack_alloc(size) alloca(size) + +#elif defined(_WIN32) // defined(__linux__) +/* Windows-specific common code: */ +// WinBase.h defines CreateSemaphore and synchapi.h defines CreateEvent +// undefine them to avoid conflicts with VkLayerDispatchTable struct members. 
+#ifdef CreateSemaphore +#undef CreateSemaphore +#endif +#ifdef CreateEvent +#undef CreateEvent +#endif +#include +#include +#include +#include +#include +#include +#include +#ifdef __cplusplus +#include +#include +#endif // __cplusplus + +// VK Library Filenames, Paths, etc.: +#define PATH_SEPARATOR ';' +#define DIRECTORY_SYMBOL '\\' +#define DEFAULT_VK_REGISTRY_HIVE HKEY_LOCAL_MACHINE +#define DEFAULT_VK_REGISTRY_HIVE_STR "HKEY_LOCAL_MACHINE" +#define SECONDARY_VK_REGISTRY_HIVE HKEY_CURRENT_USER +#define SECONDARY_VK_REGISTRY_HIVE_STR "HKEY_CURRENT_USER" + +#define VK_DRIVERS_INFO_RELATIVE_DIR "" +#define VK_SETTINGS_INFO_RELATIVE_DIR "" +#define VK_ELAYERS_INFO_RELATIVE_DIR "" +#define VK_ILAYERS_INFO_RELATIVE_DIR "" + +#ifdef _WIN64 +#define HKR_VK_DRIVER_NAME API_NAME "DriverName" +#else +#define HKR_VK_DRIVER_NAME API_NAME "DriverNameWow" +#endif +#define VK_DRIVERS_INFO_REGISTRY_LOC "SOFTWARE\\Khronos\\" API_NAME "\\Drivers" +#define VK_SETTINGS_INFO_REGISTRY_LOC "SOFTWARE\\Khronos\\" API_NAME "\\Settings" +#define VK_ELAYERS_INFO_REGISTRY_LOC "SOFTWARE\\Khronos\\" API_NAME "\\ExplicitLayers" +#define VK_ILAYERS_INFO_REGISTRY_LOC "SOFTWARE\\Khronos\\" API_NAME "\\ImplicitLayers" + +#if !defined(DEFAULT_VK_LAYERS_PATH) +#define DEFAULT_VK_LAYERS_PATH "" +#endif +#if !defined(LAYERS_SOURCE_PATH) +#define LAYERS_SOURCE_PATH NULL +#endif +#define LAYERS_PATH_ENV "VK_LAYER_PATH" +#define ENABLED_LAYERS_ENV "VK_INSTANCE_LAYERS" + +#define PRINTF_SIZE_T_SPECIFIER "%Iu" + +#if defined(_WIN32) +// Get the key for the plug n play driver registry +// The string returned by this function should NOT be freed +static inline const char *LoaderPnpDriverRegistry() { + BOOL is_wow; + IsWow64Process(GetCurrentProcess(), &is_wow); + return is_wow ? "VulkanDriverNameWow" : "VulkanDriverName"; +} +static inline const wchar_t *LoaderPnpDriverRegistryWide() { + BOOL is_wow; + IsWow64Process(GetCurrentProcess(), &is_wow); + return is_wow ? L"VulkanDriverNameWow" : L"VulkanDriverName"; +} + +// Get the key for the plug 'n play explicit layer registry +// The string returned by this function should NOT be freed +static inline const char *LoaderPnpELayerRegistry() { + BOOL is_wow; + IsWow64Process(GetCurrentProcess(), &is_wow); + return is_wow ? "VulkanExplicitLayersWow" : "VulkanExplicitLayers"; +} +static inline const wchar_t *LoaderPnpELayerRegistryWide() { + BOOL is_wow; + IsWow64Process(GetCurrentProcess(), &is_wow); + return is_wow ? L"VulkanExplicitLayersWow" : L"VulkanExplicitLayers"; +} + +// Get the key for the plug 'n play implicit layer registry +// The string returned by this function should NOT be freed +static inline const char *LoaderPnpILayerRegistry() { + BOOL is_wow; + IsWow64Process(GetCurrentProcess(), &is_wow); + return is_wow ? "VulkanImplicitLayersWow" : "VulkanImplicitLayers"; +} +static inline const wchar_t *LoaderPnpILayerRegistryWide() { + BOOL is_wow; + IsWow64Process(GetCurrentProcess(), &is_wow); + return is_wow ? L"VulkanImplicitLayersWow" : L"VulkanImplicitLayers"; +} +#endif + +// File IO +static bool loader_platform_file_exists(const char *path) { + if ((_access(path, 0)) == -1) + return false; + else + return true; +} + +static bool loader_platform_is_path_absolute(const char *path) { + if (!path || !*path) { + return false; + } + if (*path == DIRECTORY_SYMBOL || path[1] == ':') { + return true; + } + return false; +} + +// WIN32 runtime doesn't have dirname(). 
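/* Editor's note (illustrative, not part of this patch): expected behavior of the
 * replacement defined below --
 *   char p[] = "C:\\VulkanSDK\\icd.json";
 *   loader_platform_dirname(p);   // p now holds "C:\\VulkanSDK"
 * A path that contains no DIRECTORY_SYMBOL is returned unchanged. */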
+static inline char *loader_platform_dirname(char *path) { + char *current, *next; + + // TODO/TBD: Do we need to deal with the Windows's ":" character? + + for (current = path; *current != '\0'; current = next) { + next = strchr(current, DIRECTORY_SYMBOL); + if (next == NULL) { + if (current != path) *(current - 1) = '\0'; + return path; + } else { + // Point one character past the DIRECTORY_SYMBOL: + next++; + } + } + return path; +} + +static inline char *loader_platform_executable_path(char *buffer, size_t size) { + DWORD ret = GetModuleFileName(NULL, buffer, (DWORD)size); + if (ret == 0) return NULL; + if (ret > size) return NULL; + buffer[ret] = '\0'; + return buffer; +} + +// Dynamic Loading: +typedef HMODULE loader_platform_dl_handle; +static loader_platform_dl_handle loader_platform_open_library(const char *lib_path) { + // Try loading the library the original way first. + loader_platform_dl_handle lib_handle = LoadLibrary(lib_path); + if (lib_handle == NULL && GetLastError() == ERROR_MOD_NOT_FOUND) { + // If that failed, then try loading it with broader search folders. + lib_handle = LoadLibraryEx(lib_path, NULL, LOAD_LIBRARY_SEARCH_DEFAULT_DIRS | LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR); + } + return lib_handle; +} +static char *loader_platform_open_library_error(const char *libPath) { + static char errorMsg[164]; + (void)snprintf(errorMsg, 163, "Failed to open dynamic library \"%s\" with error %lu", libPath, GetLastError()); + return errorMsg; +} +static void loader_platform_close_library(loader_platform_dl_handle library) { FreeLibrary(library); } +static void *loader_platform_get_proc_address(loader_platform_dl_handle library, const char *name) { + assert(library); + assert(name); + return (void *)GetProcAddress(library, name); +} +static char *loader_platform_get_proc_address_error(const char *name) { + static char errorMsg[120]; + (void)snprintf(errorMsg, 119, "Failed to find function \"%s\" in dynamic library", name); + return errorMsg; +} + +// Threads: +typedef HANDLE loader_platform_thread; + +// __declspec(thread) is not supported by MinGW compiler (ignored with warning or +// cause error depending on compiler switches) +// +// __thread should be used instead +// +// __MINGW32__ defined for both 32 and 64 bit MinGW compilers, so it is enough to +// detect any (32 or 64) flavor of MinGW compiler. +// +// @note __GNUC__ could be used as a more generic way to detect _any_ +// GCC[-compatible] compiler on Windows, but this fix was tested +// only with MinGW, so keep it explicit at the moment. +#if defined(__MINGW32__) +#define THREAD_LOCAL_DECL __thread +#else +#define THREAD_LOCAL_DECL __declspec(thread) +#endif + +// The once init functionality is not used when building a DLL on Windows. This is because there is no way to clean up the +// resources allocated by anything allocated by once init. This isn't a problem for static libraries, but it is for dynamic +// ones. When building a DLL, we use DllMain() instead to allow properly cleaning up resources. 
+#define LOADER_PLATFORM_THREAD_ONCE_DECLARATION(var) +#define LOADER_PLATFORM_THREAD_ONCE_DEFINITION(var) +#define LOADER_PLATFORM_THREAD_ONCE(ctl, func) + +// Thread IDs: +typedef DWORD loader_platform_thread_id; +static loader_platform_thread_id loader_platform_get_thread_id() { return GetCurrentThreadId(); } + +// Thread mutex: +typedef CRITICAL_SECTION loader_platform_thread_mutex; +static void loader_platform_thread_create_mutex(loader_platform_thread_mutex *pMutex) { InitializeCriticalSection(pMutex); } +static void loader_platform_thread_lock_mutex(loader_platform_thread_mutex *pMutex) { EnterCriticalSection(pMutex); } +static void loader_platform_thread_unlock_mutex(loader_platform_thread_mutex *pMutex) { LeaveCriticalSection(pMutex); } +static void loader_platform_thread_delete_mutex(loader_platform_thread_mutex *pMutex) { DeleteCriticalSection(pMutex); } +typedef CONDITION_VARIABLE loader_platform_thread_cond; +static void loader_platform_thread_init_cond(loader_platform_thread_cond *pCond) { InitializeConditionVariable(pCond); } +static void loader_platform_thread_cond_wait(loader_platform_thread_cond *pCond, loader_platform_thread_mutex *pMutex) { + SleepConditionVariableCS(pCond, pMutex, INFINITE); +} +static void loader_platform_thread_cond_broadcast(loader_platform_thread_cond *pCond) { WakeAllConditionVariable(pCond); } + +#define loader_stack_alloc(size) _alloca(size) +#else // defined(_WIN32) + +#error The "loader_platform.h" file must be modified for this OS. + +// NOTE: In order to support another OS, an #elif needs to be added (above the +// "#else // defined(_WIN32)") for that OS, and OS-specific versions of the +// contents of this file must be created. + +// NOTE: Other OS-specific changes are also needed for this OS. Search for +// files with "WIN32" in it, as a quick way to find files that must be changed. + +#endif // defined(_WIN32) + +// returns true if the given string appears to be a relative or absolute +// path, as opposed to a bare filename. +static inline bool loader_platform_is_path(const char *path) { return strchr(path, DIRECTORY_SYMBOL) != NULL; } diff --git a/src/third_party/vulkan/loader/vk_object_types.h b/src/third_party/vulkan/loader/vk_object_types.h new file mode 100644 index 0000000..fce02a5 --- /dev/null +++ b/src/third_party/vulkan/loader/vk_object_types.h @@ -0,0 +1,387 @@ +// *** THIS FILE IS GENERATED - DO NOT EDIT *** +// See helper_file_generator.py for modifications + + +/*************************************************************************** + * + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * Copyright (c) 2015-2017 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Author: Mark Lobodzinski + * Author: Courtney Goeltzenleuchter + * Author: Tobin Ehlis + * Author: Chris Forbes + * Author: John Zulauf + * + ****************************************************************************/ + + +#pragma once + +#include "../vulkan.h" + +// Object Type enum for validation layer internal object handling +typedef enum VulkanObjectType { + kVulkanObjectTypeUnknown = 0, + kVulkanObjectTypeBuffer = 1, + kVulkanObjectTypeImage = 2, + kVulkanObjectTypeInstance = 3, + kVulkanObjectTypePhysicalDevice = 4, + kVulkanObjectTypeDevice = 5, + kVulkanObjectTypeQueue = 6, + kVulkanObjectTypeSemaphore = 7, + kVulkanObjectTypeCommandBuffer = 8, + kVulkanObjectTypeFence = 9, + kVulkanObjectTypeDeviceMemory = 10, + kVulkanObjectTypeEvent = 11, + kVulkanObjectTypeQueryPool = 12, + kVulkanObjectTypeBufferView = 13, + kVulkanObjectTypeImageView = 14, + kVulkanObjectTypeShaderModule = 15, + kVulkanObjectTypePipelineCache = 16, + kVulkanObjectTypePipelineLayout = 17, + kVulkanObjectTypePipeline = 18, + kVulkanObjectTypeRenderPass = 19, + kVulkanObjectTypeDescriptorSetLayout = 20, + kVulkanObjectTypeSampler = 21, + kVulkanObjectTypeDescriptorSet = 22, + kVulkanObjectTypeDescriptorPool = 23, + kVulkanObjectTypeFramebuffer = 24, + kVulkanObjectTypeCommandPool = 25, + kVulkanObjectTypeSamplerYcbcrConversion = 26, + kVulkanObjectTypeDescriptorUpdateTemplate = 27, + kVulkanObjectTypeSurfaceKHR = 28, + kVulkanObjectTypeSwapchainKHR = 29, + kVulkanObjectTypeDisplayKHR = 30, + kVulkanObjectTypeDisplayModeKHR = 31, + kVulkanObjectTypeDeferredOperationKHR = 32, + kVulkanObjectTypeDebugReportCallbackEXT = 33, + kVulkanObjectTypeDebugUtilsMessengerEXT = 34, + kVulkanObjectTypeValidationCacheEXT = 35, + kVulkanObjectTypeAccelerationStructureNV = 36, + kVulkanObjectTypePerformanceConfigurationINTEL = 37, + kVulkanObjectTypeIndirectCommandsLayoutNV = 38, + kVulkanObjectTypePrivateDataSlotEXT = 39, + kVulkanObjectTypeAccelerationStructureKHR = 40, + kVulkanObjectTypeMax = 41, + // Aliases for backwards compatibilty of "promoted" types + kVulkanObjectTypeDescriptorUpdateTemplateKHR = kVulkanObjectTypeDescriptorUpdateTemplate, + kVulkanObjectTypeSamplerYcbcrConversionKHR = kVulkanObjectTypeSamplerYcbcrConversion, +} VulkanObjectType; + +// Array of object name strings for OBJECT_TYPE enum conversion +static const char * const object_string[kVulkanObjectTypeMax] = { + "Unknown", + "Buffer", + "Image", + "Instance", + "PhysicalDevice", + "Device", + "Queue", + "Semaphore", + "CommandBuffer", + "Fence", + "DeviceMemory", + "Event", + "QueryPool", + "BufferView", + "ImageView", + "ShaderModule", + "PipelineCache", + "PipelineLayout", + "Pipeline", + "RenderPass", + "DescriptorSetLayout", + "Sampler", + "DescriptorSet", + "DescriptorPool", + "Framebuffer", + "CommandPool", + "SamplerYcbcrConversion", + "DescriptorUpdateTemplate", + "SurfaceKHR", + "SwapchainKHR", + "DisplayKHR", + "DisplayModeKHR", + "DeferredOperationKHR", + "DebugReportCallbackEXT", + "DebugUtilsMessengerEXT", + "ValidationCacheEXT", + "AccelerationStructureNV", + "PerformanceConfigurationINTEL", + "IndirectCommandsLayoutNV", + "PrivateDataSlotEXT", + "AccelerationStructureKHR", +}; + +// Helper array to get Vulkan VK_EXT_debug_report object type enum from the internal layers version +const VkDebugReportObjectTypeEXT get_debug_report_enum[] = { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeUnknown + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, // kVulkanObjectTypeBuffer + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, // 
kVulkanObjectTypeImage + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, // kVulkanObjectTypeInstance + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, // kVulkanObjectTypePhysicalDevice + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, // kVulkanObjectTypeDevice + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, // kVulkanObjectTypeQueue + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, // kVulkanObjectTypeSemaphore + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, // kVulkanObjectTypeCommandBuffer + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, // kVulkanObjectTypeFence + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, // kVulkanObjectTypeDeviceMemory + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, // kVulkanObjectTypeEvent + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, // kVulkanObjectTypeQueryPool + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, // kVulkanObjectTypeBufferView + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, // kVulkanObjectTypeImageView + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, // kVulkanObjectTypeShaderModule + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, // kVulkanObjectTypePipelineCache + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, // kVulkanObjectTypePipelineLayout + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, // kVulkanObjectTypePipeline + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, // kVulkanObjectTypeRenderPass + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, // kVulkanObjectTypeDescriptorSetLayout + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, // kVulkanObjectTypeSampler + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, // kVulkanObjectTypeDescriptorSet + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, // kVulkanObjectTypeDescriptorPool + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, // kVulkanObjectTypeFramebuffer + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, // kVulkanObjectTypeCommandPool + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, // kVulkanObjectTypeSamplerYcbcrConversion + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, // kVulkanObjectTypeDescriptorUpdateTemplate + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, // kVulkanObjectTypeSurfaceKHR + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, // kVulkanObjectTypeSwapchainKHR + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT, // kVulkanObjectTypeDisplayKHR + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT, // kVulkanObjectTypeDisplayModeKHR + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeDeferredOperationKHR + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, // kVulkanObjectTypeDebugReportCallbackEXT + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeDebugUtilsMessengerEXT + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, // kVulkanObjectTypeValidationCacheEXT + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT, // kVulkanObjectTypeAccelerationStructureNV + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypePerformanceConfigurationINTEL + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypeIndirectCommandsLayoutNV + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, // kVulkanObjectTypePrivateDataSlotEXT + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, // kVulkanObjectTypeAccelerationStructureKHR +}; + +// Helper array to get Official Vulkan VkObjectType enum from the internal layers version +const VkObjectType get_object_type_enum[] = { + VK_OBJECT_TYPE_UNKNOWN, // kVulkanObjectTypeUnknown + VK_OBJECT_TYPE_BUFFER, // kVulkanObjectTypeBuffer + VK_OBJECT_TYPE_IMAGE, // kVulkanObjectTypeImage + VK_OBJECT_TYPE_INSTANCE, // 
kVulkanObjectTypeInstance + VK_OBJECT_TYPE_PHYSICAL_DEVICE, // kVulkanObjectTypePhysicalDevice + VK_OBJECT_TYPE_DEVICE, // kVulkanObjectTypeDevice + VK_OBJECT_TYPE_QUEUE, // kVulkanObjectTypeQueue + VK_OBJECT_TYPE_SEMAPHORE, // kVulkanObjectTypeSemaphore + VK_OBJECT_TYPE_COMMAND_BUFFER, // kVulkanObjectTypeCommandBuffer + VK_OBJECT_TYPE_FENCE, // kVulkanObjectTypeFence + VK_OBJECT_TYPE_DEVICE_MEMORY, // kVulkanObjectTypeDeviceMemory + VK_OBJECT_TYPE_EVENT, // kVulkanObjectTypeEvent + VK_OBJECT_TYPE_QUERY_POOL, // kVulkanObjectTypeQueryPool + VK_OBJECT_TYPE_BUFFER_VIEW, // kVulkanObjectTypeBufferView + VK_OBJECT_TYPE_IMAGE_VIEW, // kVulkanObjectTypeImageView + VK_OBJECT_TYPE_SHADER_MODULE, // kVulkanObjectTypeShaderModule + VK_OBJECT_TYPE_PIPELINE_CACHE, // kVulkanObjectTypePipelineCache + VK_OBJECT_TYPE_PIPELINE_LAYOUT, // kVulkanObjectTypePipelineLayout + VK_OBJECT_TYPE_PIPELINE, // kVulkanObjectTypePipeline + VK_OBJECT_TYPE_RENDER_PASS, // kVulkanObjectTypeRenderPass + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, // kVulkanObjectTypeDescriptorSetLayout + VK_OBJECT_TYPE_SAMPLER, // kVulkanObjectTypeSampler + VK_OBJECT_TYPE_DESCRIPTOR_SET, // kVulkanObjectTypeDescriptorSet + VK_OBJECT_TYPE_DESCRIPTOR_POOL, // kVulkanObjectTypeDescriptorPool + VK_OBJECT_TYPE_FRAMEBUFFER, // kVulkanObjectTypeFramebuffer + VK_OBJECT_TYPE_COMMAND_POOL, // kVulkanObjectTypeCommandPool + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, // kVulkanObjectTypeSamplerYcbcrConversion + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, // kVulkanObjectTypeDescriptorUpdateTemplate + VK_OBJECT_TYPE_SURFACE_KHR, // kVulkanObjectTypeSurfaceKHR + VK_OBJECT_TYPE_SWAPCHAIN_KHR, // kVulkanObjectTypeSwapchainKHR + VK_OBJECT_TYPE_DISPLAY_KHR, // kVulkanObjectTypeDisplayKHR + VK_OBJECT_TYPE_DISPLAY_MODE_KHR, // kVulkanObjectTypeDisplayModeKHR + VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR, // kVulkanObjectTypeDeferredOperationKHR + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT, // kVulkanObjectTypeDebugReportCallbackEXT + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, // kVulkanObjectTypeDebugUtilsMessengerEXT + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT, // kVulkanObjectTypeValidationCacheEXT + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV, // kVulkanObjectTypeAccelerationStructureNV + VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL, // kVulkanObjectTypePerformanceConfigurationINTEL + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV, // kVulkanObjectTypeIndirectCommandsLayoutNV + VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT, // kVulkanObjectTypePrivateDataSlotEXT + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, // kVulkanObjectTypeAccelerationStructureKHR +}; + +// Helper function to convert from VkDebugReportObjectTypeEXT to VkObjectType +static inline VkObjectType convertDebugReportObjectToCoreObject(VkDebugReportObjectTypeEXT debug_report_obj){ + if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) { + return VK_OBJECT_TYPE_UNKNOWN; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) { + return VK_OBJECT_TYPE_UNKNOWN; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT) { + return VK_OBJECT_TYPE_INSTANCE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT) { + return VK_OBJECT_TYPE_PHYSICAL_DEVICE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT) { + return VK_OBJECT_TYPE_DEVICE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT) { + return VK_OBJECT_TYPE_QUEUE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT) { + return 
VK_OBJECT_TYPE_SEMAPHORE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT) { + return VK_OBJECT_TYPE_COMMAND_BUFFER; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT) { + return VK_OBJECT_TYPE_FENCE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT) { + return VK_OBJECT_TYPE_DEVICE_MEMORY; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) { + return VK_OBJECT_TYPE_BUFFER; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT) { + return VK_OBJECT_TYPE_IMAGE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT) { + return VK_OBJECT_TYPE_EVENT; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT) { + return VK_OBJECT_TYPE_QUERY_POOL; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT) { + return VK_OBJECT_TYPE_BUFFER_VIEW; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT) { + return VK_OBJECT_TYPE_IMAGE_VIEW; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT) { + return VK_OBJECT_TYPE_SHADER_MODULE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT) { + return VK_OBJECT_TYPE_PIPELINE_CACHE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT) { + return VK_OBJECT_TYPE_PIPELINE_LAYOUT; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT) { + return VK_OBJECT_TYPE_RENDER_PASS; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT) { + return VK_OBJECT_TYPE_PIPELINE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT) { + return VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT) { + return VK_OBJECT_TYPE_SAMPLER; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT) { + return VK_OBJECT_TYPE_DESCRIPTOR_POOL; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) { + return VK_OBJECT_TYPE_DESCRIPTOR_SET; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT) { + return VK_OBJECT_TYPE_FRAMEBUFFER; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT) { + return VK_OBJECT_TYPE_COMMAND_POOL; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT) { + return VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT) { + return VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT) { + return VK_OBJECT_TYPE_SURFACE_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT) { + return VK_OBJECT_TYPE_SWAPCHAIN_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT) { + return VK_OBJECT_TYPE_DISPLAY_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT) { + return VK_OBJECT_TYPE_DISPLAY_MODE_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT) { + return VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT) { + return VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT) { + return 
VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT) { + return VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT) { + return VK_OBJECT_TYPE_VALIDATION_CACHE_EXT; + } else if (debug_report_obj == VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT) { + return VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV; + } + return VK_OBJECT_TYPE_UNKNOWN; +} + +// Helper function to convert from VkDebugReportObjectTypeEXT to VkObjectType +static inline VkDebugReportObjectTypeEXT convertCoreObjectToDebugReportObject(VkObjectType core_report_obj){ + if (core_report_obj == VK_OBJECT_TYPE_UNKNOWN) { + return VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_UNKNOWN) { + return VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_INSTANCE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_PHYSICAL_DEVICE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DEVICE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_QUEUE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SEMAPHORE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_COMMAND_BUFFER) { + return VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_FENCE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DEVICE_MEMORY) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_BUFFER) { + return VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_IMAGE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_EVENT) { + return VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_QUERY_POOL) { + return VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_BUFFER_VIEW) { + return VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_IMAGE_VIEW) { + return VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SHADER_MODULE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_PIPELINE_CACHE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_PIPELINE_LAYOUT) { + return VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_RENDER_PASS) { + return VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_PIPELINE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SAMPLER) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DESCRIPTOR_POOL) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DESCRIPTOR_SET) { + return 
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_FRAMEBUFFER) { + return VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_COMMAND_POOL) { + return VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SURFACE_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SWAPCHAIN_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DISPLAY_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DISPLAY_MODE_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR) { + return VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_VALIDATION_CACHE_EXT) { + return VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT; + } else if (core_report_obj == VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV) { + return VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT; + } + return VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT; +} diff --git a/src/third_party/vulkan/loader/wsi.c b/src/third_party/vulkan/loader/wsi.c new file mode 100644 index 0000000..d8b1413 --- /dev/null +++ b/src/third_party/vulkan/loader/wsi.c @@ -0,0 +1,2526 @@ +/* + * Copyright (c) 2015-2016, 2019 The Khronos Group Inc. + * Copyright (c) 2015-2016, 2019 Valve Corporation + * Copyright (c) 2015-2016, 2019 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Ian Elliott + * Author: Jon Ashburn + * Author: Ian Elliott + * Author: Mark Lobodzinski + * Author: Lenny Komow + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include "vk_loader_platform.h" +#include "loader.h" +#include "wsi.h" +#include "../vk_icd.h" + +// The first ICD/Loader interface that support querying the SurfaceKHR from +// the ICDs. 
+#define ICD_VER_SUPPORTS_ICD_SURFACE_KHR 3 + +void wsi_create_instance(struct loader_instance *ptr_instance, const VkInstanceCreateInfo *pCreateInfo) { + ptr_instance->wsi_surface_enabled = false; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + ptr_instance->wsi_win32_surface_enabled = false; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + ptr_instance->wsi_wayland_surface_enabled = false; +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + ptr_instance->wsi_xcb_surface_enabled = false; +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + ptr_instance->wsi_xlib_surface_enabled = false; +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + ptr_instance->wsi_directfb_surface_enabled = false; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_ANDROID_KHR + ptr_instance->wsi_android_surface_enabled = false; +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_MACOS_MVK + ptr_instance->wsi_macos_surface_enabled = false; +#endif // VK_USE_PLATFORM_MACOS_MVK +#ifdef VK_USE_PLATFORM_IOS_MVK + ptr_instance->wsi_ios_surface_enabled = false; +#endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + ptr_instance->wsi_ggp_surface_enabled = false; +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + ptr_instance->wsi_imagepipe_surface_enabled = false; +#endif // VK_USE_PLATFORM_FUCHSIA + ptr_instance->wsi_display_enabled = false; + ptr_instance->wsi_display_props2_enabled = false; +#ifdef VK_USE_PLATFORM_METAL_EXT + ptr_instance->wsi_metal_surface_enabled = false; +#endif // VK_USE_PLATFORM_METAL_EXT + + for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_surface_enabled = true; + continue; + } +#ifdef VK_USE_PLATFORM_WIN32_KHR + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_win32_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_wayland_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_xcb_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_xlib_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_DIRECTFB_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_directfb_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_ANDROID_KHR + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_android_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_ANDROID_KHR +#ifdef VK_USE_PLATFORM_MACOS_MVK + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_MVK_MACOS_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_macos_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_MACOS_MVK +#ifdef 
VK_USE_PLATFORM_IOS_MVK + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_MVK_IOS_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_ios_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_ggp_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_imagepipe_surface_enabled = true; + continue; + } +#endif // VK_USE_PLATFORM_FUCHSIA + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_headless_surface_enabled = true; + continue; + } +#if defined(VK_USE_PLATFORM_METAL_EXT) + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_EXT_METAL_SURFACE_EXTENSION_NAME) == 0) { + ptr_instance->wsi_metal_surface_enabled = true; + continue; + } +#endif + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME) == 0) { + ptr_instance->wsi_display_enabled = true; + continue; + } + if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME) == 0) { + ptr_instance->wsi_display_props2_enabled = true; + continue; + } + } +} + +// Linux WSI surface extensions are not always compiled into the loader. (Assume +// for Windows the KHR_win32_surface is always compiled into loader). A given +// Linux build environment might not have the headers required for building one +// of the three extensions (Xlib, Xcb, Wayland). Thus, need to check if +// the built loader actually supports the particular Linux surface extension. +// If not supported by the built loader it will not be included in the list of +// enumerated instance extensions. This solves the issue where an ICD or layer +// advertises support for a given Linux surface extension but the loader was not +// built to support the extension. +bool wsi_unsupported_instance_extension(const VkExtensionProperties *ext_prop) { +#ifndef VK_USE_PLATFORM_WAYLAND_KHR + if (!strcmp(ext_prop->extensionName, "VK_KHR_wayland_surface")) return true; +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifndef VK_USE_PLATFORM_XCB_KHR + if (!strcmp(ext_prop->extensionName, "VK_KHR_xcb_surface")) return true; +#endif // VK_USE_PLATFORM_XCB_KHR +#ifndef VK_USE_PLATFORM_XLIB_KHR + if (!strcmp(ext_prop->extensionName, "VK_KHR_xlib_surface")) return true; +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifndef VK_USE_PLATFORM_DIRECTFB_EXT + if (!strcmp(ext_prop->extensionName, "VK_EXT_directfb_surface")) return true; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT + + return false; +} + +// Functions for the VK_KHR_surface extension: + +// This is the trampoline entrypoint for DestroySurfaceKHR +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, + const VkAllocationCallbacks *pAllocator) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + disp->DestroySurfaceKHR(instance, surface, pAllocator); +} + +// TODO probably need to lock around all the loader_get_instance() calls. 
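
For context, the flags that wsi_create_instance() sets above are driven entirely by the extension names the application passes at instance creation time; every trampoline/terminator pair in the rest of this file first checks the matching wsi_*_enabled flag and logs an "extension not enabled" error instead of dispatching when it is false. The snippet below is an illustrative application-side sketch, not part of the patched files: it shows an instance created with VK_KHR_surface and VK_KHR_xcb_surface enabled (the same name strings the checks above compare against), which is what allows the surface entrypoints that follow to reach the ICDs. The extension choice and error handling are assumptions for illustration only.

/* Illustrative only (not part of this patch): application code enabling the
 * WSI instance extensions that wsi_create_instance() scans for. */
#include <vulkan/vulkan.h>
#include <stdio.h>

int main(void) {
    const char *const extensions[] = {
        "VK_KHR_surface",      /* sets ptr_instance->wsi_surface_enabled */
        "VK_KHR_xcb_surface",  /* sets ptr_instance->wsi_xcb_surface_enabled,
                                  assuming the loader was built with XCB support */
    };
    VkInstanceCreateInfo create_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .enabledExtensionCount = 2,
        .ppEnabledExtensionNames = extensions,
    };
    VkInstance instance;
    if (vkCreateInstance(&create_info, NULL, &instance) != VK_SUCCESS) {
        fprintf(stderr, "vkCreateInstance failed\n");
        return 1;
    }
    /* With these extensions enabled, vkDestroySurfaceKHR and the other
     * WSI entrypoints in this file dispatch through the loader's
     * trampoline -> terminator chain rather than logging errors. */
    vkDestroyInstance(instance, NULL);
    return 0;
}
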
+ +// This is the instance chain terminator function for DestroySurfaceKHR +VKAPI_ATTR void VKAPI_CALL terminator_DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, + const VkAllocationCallbacks *pAllocator) { + struct loader_instance *ptr_instance = loader_get_instance(instance); + + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)surface; + if (NULL != icd_surface) { + if (NULL != icd_surface->real_icd_surfaces) { + uint32_t i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.DestroySurfaceKHR && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[i]) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, icd_surface->real_icd_surfaces[i], pAllocator); + icd_surface->real_icd_surfaces[i] = (VkSurfaceKHR)NULL; + } + } else { + // The real_icd_surface for any ICD not supporting the + // proper interface version should be NULL. If not, then + // we have a problem. + assert((VkSurfaceKHR)NULL == icd_surface->real_icd_surfaces[i]); + } + } + loader_instance_heap_free(ptr_instance, icd_surface->real_icd_surfaces); + } + + loader_instance_heap_free(ptr_instance, (void *)(uintptr_t)surface); + } +} + +// This is the trampoline entrypoint for GetPhysicalDeviceSurfaceSupportKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, VkSurfaceKHR surface, + VkBool32 *pSupported) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetPhysicalDeviceSurfaceSupportKHR(unwrapped_phys_dev, queueFamilyIndex, surface, pSupported); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceSurfaceSupportKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, VkSurfaceKHR surface, + VkBool32 *pSupported) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. 
vkGetPhysicalDeviceSurfaceSupportKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == pSupported) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "NULL pointer passed into vkGetPhysicalDeviceSurfaceSupportKHR for pSupported!\n"); + assert(false && "GetPhysicalDeviceSurfaceSupportKHR: Error, null pSupported"); + } + *pSupported = false; + + if (NULL == icd_term->dispatch.GetPhysicalDeviceSurfaceSupportKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceSurfaceSupportKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceSurfaceSupportKHR ICD pointer"); + } + + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)surface; + if (NULL != icd_surface->real_icd_surfaces && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[phys_dev_term->icd_index]) { + return icd_term->dispatch.GetPhysicalDeviceSurfaceSupportKHR( + phys_dev_term->phys_dev, queueFamilyIndex, icd_surface->real_icd_surfaces[phys_dev_term->icd_index], pSupported); + } + + return icd_term->dispatch.GetPhysicalDeviceSurfaceSupportKHR(phys_dev_term->phys_dev, queueFamilyIndex, surface, pSupported); +} + +// This is the trampoline entrypoint for GetPhysicalDeviceSurfaceCapabilitiesKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetPhysicalDeviceSurfaceCapabilitiesKHR(unwrapped_phys_dev, surface, pSurfaceCapabilities); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceSurfaceCapabilitiesKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. 
vkGetPhysicalDeviceSurfaceCapabilitiesKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == pSurfaceCapabilities) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "NULL pointer passed into vkGetPhysicalDeviceSurfaceCapabilitiesKHR for pSurfaceCapabilities!\n"); + assert(false && "GetPhysicalDeviceSurfaceCapabilitiesKHR: Error, null pSurfaceCapabilities"); + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilitiesKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceSurfaceCapabilitiesKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceSurfaceCapabilitiesKHR ICD pointer"); + } + + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)surface; + if (NULL != icd_surface->real_icd_surfaces && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[phys_dev_term->icd_index]) { + return icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilitiesKHR( + phys_dev_term->phys_dev, icd_surface->real_icd_surfaces[phys_dev_term->icd_index], pSurfaceCapabilities); + } + + return icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev_term->phys_dev, surface, pSurfaceCapabilities); +} + +// This is the trampoline entrypoint for GetPhysicalDeviceSurfaceFormatsKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t *pSurfaceFormatCount, + VkSurfaceFormatKHR *pSurfaceFormats) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetPhysicalDeviceSurfaceFormatsKHR(unwrapped_phys_dev, surface, pSurfaceFormatCount, pSurfaceFormats); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceSurfaceFormatsKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, + uint32_t *pSurfaceFormatCount, + VkSurfaceFormatKHR *pSurfaceFormats) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. 
vkGetPhysicalDeviceSurfaceFormatsKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == pSurfaceFormatCount) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "NULL pointer passed into vkGetPhysicalDeviceSurfaceFormatsKHR for pSurfaceFormatCount!\n"); + assert(false && "GetPhysicalDeviceSurfaceFormatsKHR(: Error, null pSurfaceFormatCount"); + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceSurfaceFormatsKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceSurfaceCapabilitiesKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceSurfaceFormatsKHR ICD pointer"); + } + + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)surface; + if (NULL != icd_surface->real_icd_surfaces && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[phys_dev_term->icd_index]) { + return icd_term->dispatch.GetPhysicalDeviceSurfaceFormatsKHR(phys_dev_term->phys_dev, + icd_surface->real_icd_surfaces[phys_dev_term->icd_index], + pSurfaceFormatCount, pSurfaceFormats); + } + + return icd_term->dispatch.GetPhysicalDeviceSurfaceFormatsKHR(phys_dev_term->phys_dev, surface, pSurfaceFormatCount, + pSurfaceFormats); +} + +// This is the trampoline entrypoint for GetPhysicalDeviceSurfacePresentModesKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t *pPresentModeCount, + VkPresentModeKHR *pPresentModes) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetPhysicalDeviceSurfacePresentModesKHR(unwrapped_phys_dev, surface, pPresentModeCount, pPresentModes); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceSurfacePresentModesKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, uint32_t *pPresentModeCount, + VkPresentModeKHR *pPresentModes) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. 
vkGetPhysicalDeviceSurfacePresentModesKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == pPresentModeCount) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "NULL pointer passed into vkGetPhysicalDeviceSurfacePresentModesKHR for pPresentModeCount!\n"); + assert(false && "GetPhysicalDeviceSurfacePresentModesKHR(: Error, null pPresentModeCount"); + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceSurfacePresentModesKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceSurfacePresentModesKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceSurfacePresentModesKHR ICD pointer"); + } + + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)surface; + if (NULL != icd_surface->real_icd_surfaces && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[phys_dev_term->icd_index]) { + return icd_term->dispatch.GetPhysicalDeviceSurfacePresentModesKHR( + phys_dev_term->phys_dev, icd_surface->real_icd_surfaces[phys_dev_term->icd_index], pPresentModeCount, pPresentModes); + } + + return icd_term->dispatch.GetPhysicalDeviceSurfacePresentModesKHR(phys_dev_term->phys_dev, surface, pPresentModeCount, + pPresentModes); +} + +// Functions for the VK_KHR_swapchain extension: + +// This is the trampoline entrypoint for CreateSwapchainKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSwapchainKHR *pSwapchain) { + const VkLayerDispatchTable *disp; + disp = loader_get_dispatch(device); + return disp->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.CreateSwapchainKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pCreateInfo->surface; + if (NULL != icd_surface->real_icd_surfaces) { + if ((VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[icd_index]) { + // We found the ICD, and there is an ICD KHR surface + // associated with it, so copy the CreateInfo struct + // and point it at the ICD's surface. 
+ VkSwapchainCreateInfoKHR *pCreateCopy = loader_stack_alloc(sizeof(VkSwapchainCreateInfoKHR)); + if (NULL == pCreateCopy) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(pCreateCopy, pCreateInfo, sizeof(VkSwapchainCreateInfoKHR)); + pCreateCopy->surface = icd_surface->real_icd_surfaces[icd_index]; + return icd_term->dispatch.CreateSwapchainKHR(device, pCreateCopy, pAllocator, pSwapchain); + } + } + return icd_term->dispatch.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain); + } + return VK_SUCCESS; +} + +// This is the trampoline entrypoint for DestroySwapchainKHR +LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, + const VkAllocationCallbacks *pAllocator) { + const VkLayerDispatchTable *disp; + disp = loader_get_dispatch(device); + disp->DestroySwapchainKHR(device, swapchain, pAllocator); +} + +// This is the trampoline entrypoint for GetSwapchainImagesKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, + uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) { + const VkLayerDispatchTable *disp; + disp = loader_get_dispatch(device); + return disp->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); +} + +// This is the trampoline entrypoint for AcquireNextImageKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, + VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) { + const VkLayerDispatchTable *disp; + disp = loader_get_dispatch(device); + return disp->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex); +} + +// This is the trampoline entrypoint for QueuePresentKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { + const VkLayerDispatchTable *disp; + disp = loader_get_dispatch(queue); + return disp->QueuePresentKHR(queue, pPresentInfo); +} + +static VkIcdSurface *AllocateIcdSurfaceStruct(struct loader_instance *instance, size_t base_size, size_t platform_size) { + // Next, if so, proceed with the implementation of this function: + VkIcdSurface *pIcdSurface = loader_instance_heap_alloc(instance, sizeof(VkIcdSurface), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (pIcdSurface != NULL) { + // Setup the new sizes and offsets so we can grow the structures in the + // future without having problems + pIcdSurface->base_size = (uint32_t)base_size; + pIcdSurface->platform_size = (uint32_t)platform_size; + pIcdSurface->non_platform_offset = (uint32_t)((uint8_t *)(&pIcdSurface->base_size) - (uint8_t *)pIcdSurface); + pIcdSurface->entire_size = sizeof(VkIcdSurface); + + pIcdSurface->real_icd_surfaces = loader_instance_heap_alloc(instance, sizeof(VkSurfaceKHR) * instance->total_icd_count, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (pIcdSurface->real_icd_surfaces == NULL) { + loader_instance_heap_free(instance, pIcdSurface); + pIcdSurface = NULL; + } else { + memset(pIcdSurface->real_icd_surfaces, 0, sizeof(VkSurfaceKHR) * instance->total_icd_count); + } + } + return pIcdSurface; +} + +#ifdef VK_USE_PLATFORM_WIN32_KHR + +// Functions for the VK_KHR_win32_surface extension: + +// This is the trampoline entrypoint for CreateWin32SurfaceKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(VkInstance instance, + const VkWin32SurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + 
const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateWin32SurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // Initialize pSurface to NULL just to be safe. + *pSurface = VK_NULL_HANDLE; + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_win32_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_win32_surface extension not enabled. vkCreateWin32SurfaceKHR not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->win_surf.base), sizeof(pIcdSurface->win_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->win_surf.base.platform = VK_ICD_WSI_PLATFORM_WIN32; + pIcdSurface->win_surf.hinstance = pCreateInfo->hinstance; + pIcdSurface->win_surf.hwnd = pCreateInfo->hwnd; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateWin32SurfaceKHR) { + vkRes = icd_term->dispatch.CreateWin32SurfaceKHR(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)(pIcdSurface); + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} + +// This is the trampoline entrypoint for +// GetPhysicalDeviceWin32PresentationSupportKHR +LOADER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkBool32 res = disp->GetPhysicalDeviceWin32PresentationSupportKHR(unwrapped_phys_dev, queueFamilyIndex); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceWin32PresentationSupportKHR +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex) { + // First, 
check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_win32_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_win32_surface extension not enabled. vkGetPhysicalDeviceWin32PresentationSupportKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceWin32PresentationSupportKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceWin32PresentationSupportKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceWin32PresentationSupportKHR ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceWin32PresentationSupportKHR(phys_dev_term->phys_dev, queueFamilyIndex); +} +#endif // VK_USE_PLATFORM_WIN32_KHR + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + +// This is the trampoline entrypoint for CreateWaylandSurfaceKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateWaylandSurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateWaylandSurfaceKHR(VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_wayland_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_wayland_surface extension not enabled. 
vkCreateWaylandSurfaceKHR not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->wayland_surf.base), sizeof(pIcdSurface->wayland_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->wayland_surf.base.platform = VK_ICD_WSI_PLATFORM_WAYLAND; + pIcdSurface->wayland_surf.display = pCreateInfo->display; + pIcdSurface->wayland_surf.surface = pCreateInfo->surface; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateWaylandSurfaceKHR) { + vkRes = icd_term->dispatch.CreateWaylandSurfaceKHR(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} + +// This is the trampoline entrypoint for +// GetPhysicalDeviceWaylandPresentationSupportKHR +LOADER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display *display) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkBool32 res = disp->GetPhysicalDeviceWaylandPresentationSupportKHR(unwrapped_phys_dev, queueFamilyIndex, display); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceWaylandPresentationSupportKHR +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display *display) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_wayland_surface_enabled) { + loader_log( + ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_wayland_surface extension not enabled. 
vkGetPhysicalDeviceWaylandPresentationSupportKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceWaylandPresentationSupportKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceWaylandPresentationSupportKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceWaylandPresentationSupportKHR ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceWaylandPresentationSupportKHR(phys_dev_term->phys_dev, queueFamilyIndex, display); +} +#endif // VK_USE_PLATFORM_WAYLAND_KHR + +#ifdef VK_USE_PLATFORM_XCB_KHR + +// Functions for the VK_KHR_xcb_surface extension: + +// This is the trampoline entrypoint for CreateXcbSurfaceKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(VkInstance instance, + const VkXcbSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateXcbSurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_xcb_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_xcb_surface extension not enabled. 
vkCreateXcbSurfaceKHR not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->xcb_surf.base), sizeof(pIcdSurface->xcb_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->xcb_surf.base.platform = VK_ICD_WSI_PLATFORM_XCB; + pIcdSurface->xcb_surf.connection = pCreateInfo->connection; + pIcdSurface->xcb_surf.window = pCreateInfo->window; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateXcbSurfaceKHR) { + vkRes = icd_term->dispatch.CreateXcbSurfaceKHR(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} + +// This is the trampoline entrypoint for +// GetPhysicalDeviceXcbPresentationSupportKHR +LOADER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t *connection, + xcb_visualid_t visual_id) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkBool32 res = disp->GetPhysicalDeviceXcbPresentationSupportKHR(unwrapped_phys_dev, queueFamilyIndex, connection, visual_id); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceXcbPresentationSupportKHR +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t *connection, + xcb_visualid_t visual_id) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_xcb_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_xcb_surface extension not enabled. 
vkGetPhysicalDeviceXcbPresentationSupportKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceXcbPresentationSupportKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceXcbPresentationSupportKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceXcbPresentationSupportKHR ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceXcbPresentationSupportKHR(phys_dev_term->phys_dev, queueFamilyIndex, connection, + visual_id); +} +#endif // VK_USE_PLATFORM_XCB_KHR + +#ifdef VK_USE_PLATFORM_XLIB_KHR + +// Functions for the VK_KHR_xlib_surface extension: + +// This is the trampoline entrypoint for CreateXlibSurfaceKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(VkInstance instance, + const VkXlibSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateXlibSurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_xlib_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_xlib_surface extension not enabled. 
vkCreateXlibSurfaceKHR not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->xlib_surf.base), sizeof(pIcdSurface->xlib_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->xlib_surf.base.platform = VK_ICD_WSI_PLATFORM_XLIB; + pIcdSurface->xlib_surf.dpy = pCreateInfo->dpy; + pIcdSurface->xlib_surf.window = pCreateInfo->window; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateXlibSurfaceKHR) { + vkRes = icd_term->dispatch.CreateXlibSurfaceKHR(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} + +// This is the trampoline entrypoint for +// GetPhysicalDeviceXlibPresentationSupportKHR +LOADER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, Display *dpy, + VisualID visualID) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkBool32 res = disp->GetPhysicalDeviceXlibPresentationSupportKHR(unwrapped_phys_dev, queueFamilyIndex, dpy, visualID); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceXlibPresentationSupportKHR +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, Display *dpy, + VisualID visualID) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_xlib_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_xlib_surface extension not enabled. 
vkGetPhysicalDeviceXlibPresentationSupportKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceXlibPresentationSupportKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceXlibPresentationSupportKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceXlibPresentationSupportKHR ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceXlibPresentationSupportKHR(phys_dev_term->phys_dev, queueFamilyIndex, dpy, visualID); +} +#endif // VK_USE_PLATFORM_XLIB_KHR + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + +// Functions for the VK_EXT_directfb_surface extension: + +// This is the trampoline entrypoint for CreateDirectFBSurfaceEXT +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDirectFBSurfaceEXT(VkInstance instance, + const VkDirectFBSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateDirectFBSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateDirectFBSurfaceEXT +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDirectFBSurfaceEXT(VkInstance instance, + const VkDirectFBSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_directfb_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_EXT_directfb_surface extension not enabled. 
vkCreateDirectFBSurfaceEXT not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = + AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->directfb_surf.base), sizeof(pIcdSurface->directfb_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->directfb_surf.base.platform = VK_ICD_WSI_PLATFORM_DIRECTFB; + pIcdSurface->directfb_surf.dfb = pCreateInfo->dfb; + pIcdSurface->directfb_surf.surface = pCreateInfo->surface; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateDirectFBSurfaceEXT) { + vkRes = icd_term->dispatch.CreateDirectFBSurfaceEXT(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} + +// This is the trampoline entrypoint for +// GetPhysicalDeviceDirectFBPresentationSupportEXT +LOADER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceDirectFBPresentationSupportEXT(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + IDirectFB *dfb) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkBool32 res = disp->GetPhysicalDeviceDirectFBPresentationSupportEXT(unwrapped_phys_dev, queueFamilyIndex, dfb); + return res; +} + +// This is the instance chain terminator function for +// GetPhysicalDeviceDirectFBPresentationSupportEXT +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceDirectFBPresentationSupportEXT(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + IDirectFB *dfb) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_directfb_surface_enabled) { + loader_log( + ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_EXT_directfb_surface extension not enabled. 
vkGetPhysicalDeviceDirectFBPresentationSupportEXT not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceDirectFBPresentationSupportEXT) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceDirectFBPresentationSupportEXT!\n"); + assert(false && "loader: null GetPhysicalDeviceDirectFBPresentationSupportEXT ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceDirectFBPresentationSupportEXT(phys_dev_term->phys_dev, queueFamilyIndex, dfb); +} +#endif // VK_USE_PLATFORM_DIRECTFB_EXT + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + +// Functions for the VK_KHR_android_surface extension: + +// This is the trampoline entrypoint for CreateAndroidSurfaceKHR +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(VkInstance instance, ANativeWindow *window, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateAndroidSurfaceKHR(instance, window, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateAndroidSurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateAndroidSurfaceKHR(VkInstance instance, Window window, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. vkCreateAndroidSurfaceKHR not executed!\n"); + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + + // Next, if so, proceed with the implementation of this function: + VkIcdSurfaceAndroid *pIcdSurface = + loader_instance_heap_alloc(ptr_instance, sizeof(VkIcdSurfaceAndroid), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (pIcdSurface == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + pIcdSurface->base.platform = VK_ICD_WSI_PLATFORM_ANDROID; + pIcdSurface->window = window; + + *pSurface = (VkSurfaceKHR)pIcdSurface; + + return VK_SUCCESS; +} + +#endif // VK_USE_PLATFORM_ANDROID_KHR + +// Functions for the VK_EXT_headless_surface extension: + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateHeadlessSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateHeadlessSurfaceEXT(VkInstance instance, + const VkHeadlessSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + struct loader_instance *inst = loader_get_instance(instance); + VkIcdSurface *pIcdSurface = NULL; + VkResult vkRes = VK_SUCCESS; + uint32_t i = 0; + + if (!inst->wsi_headless_surface_enabled) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_EXT_headless_surface extension not enabled. 
" + "vkCreateHeadlessSurfaceEXT not executed!\n"); + return VK_SUCCESS; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(inst, sizeof(pIcdSurface->headless_surf.base), sizeof(pIcdSurface->headless_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->headless_surf.base.platform = VK_ICD_WSI_PLATFORM_HEADLESS; + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateHeadlessSurfaceEXT) { + vkRes = icd_term->dispatch.CreateHeadlessSurfaceEXT(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(inst, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(inst, pIcdSurface); + } + + return vkRes; +} + +#ifdef VK_USE_PLATFORM_MACOS_MVK + +// Functions for the VK_MVK_macos_surface extension: + +// This is the trampoline entrypoint for CreateMacOSSurfaceMVK +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(VkInstance instance, + const VkMacOSSurfaceCreateInfoMVK *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateMacOSSurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_macos_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_MVK_macos_surface extension not enabled. 
vkCreateMacOSSurfaceMVK not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->macos_surf.base), sizeof(pIcdSurface->macos_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->macos_surf.base.platform = VK_ICD_WSI_PLATFORM_MACOS; + pIcdSurface->macos_surf.pView = pCreateInfo->pView; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateMacOSSurfaceMVK) { + vkRes = icd_term->dispatch.CreateMacOSSurfaceMVK(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} + +#endif // VK_USE_PLATFORM_MACOS_MVK + +#ifdef VK_USE_PLATFORM_IOS_MVK + +// Functions for the VK_MVK_ios_surface extension: + +// This is the trampoline entrypoint for CreateIOSSurfaceMVK +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(VkInstance instance, + const VkIOSSurfaceCreateInfoMVK *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateIOSSurfaceKHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_ios_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_MVK_ios_surface extension not enabled. 
vkCreateIOSSurfaceMVK not executed!\n"); + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + + // Next, if so, proceed with the implementation of this function: + VkIcdSurfaceIOS *pIcdSurface = + loader_instance_heap_alloc(ptr_instance, sizeof(VkIcdSurfaceIOS), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + if (pIcdSurface == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + pIcdSurface->base.platform = VK_ICD_WSI_PLATFORM_IOS; + pIcdSurface->pView = pCreateInfo->pView; + + *pSurface = (VkSurfaceKHR)pIcdSurface; + + return VK_SUCCESS; +} + +#endif // VK_USE_PLATFORM_IOS_MVK + +#ifdef VK_USE_PLATFORM_GGP + +// Functions for the VK_GGP_stream_descriptor_surface extension: + +// This is the trampoline entrypoint for CreateStreamDescriptorSurfaceGGP +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL +vkCreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateStreamDescriptorSurfaceGGP(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateStreamDescriptorSurfaceGGP +VKAPI_ATTR VkResult VKAPI_CALL +terminator_CreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_ggp_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_GGP_stream_descriptor_surface extension not enabled. 
vkCreateStreamDescriptorSurfaceGGP not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->ggp_surf.base), sizeof(pIcdSurface->ggp_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->ggp_surf.base.platform = VK_ICD_WSI_PLATFORM_GGP; + pIcdSurface->ggp_surf.streamDescriptor = pCreateInfo->streamDescriptor; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateStreamDescriptorSurfaceGGP) { + vkRes = icd_term->dispatch.CreateStreamDescriptorSurfaceGGP(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + return vkRes; +} + +#endif // VK_USE_PLATFORM_GGP + +#if defined(VK_USE_PLATFORM_METAL_EXT) + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(VkInstance instance, + const VkMetalSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp = loader_get_instance_layer_dispatch(instance); + return disp->CreateMetalSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMetalSurfaceEXT(VkInstance instance, const VkMetalSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) { + VkResult result = VK_SUCCESS; + VkIcdSurface *icd_surface = NULL; + uint32_t i; + + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_metal_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_EXT_metal_surface extension not enabled. 
vkCreateMetalSurfaceEXT will not be executed.\n"); + } + + // Next, if so, proceed with the implementation of this function: + icd_surface = AllocateIcdSurfaceStruct(ptr_instance, sizeof(icd_surface->metal_surf.base), sizeof(icd_surface->metal_surf)); + if (icd_surface == NULL) { + result = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + icd_surface->metal_surf.base.platform = VK_ICD_WSI_PLATFORM_METAL; + icd_surface->metal_surf.pLayer = pCreateInfo->pLayer; + + // Loop through each ICD and determine if they need to create a surface + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, ++i) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (icd_term->dispatch.CreateMetalSurfaceEXT != NULL) { + result = icd_term->dispatch.CreateMetalSurfaceEXT(icd_term->instance, pCreateInfo, pAllocator, + &icd_surface->real_icd_surfaces[i]); + if (result != VK_SUCCESS) { + goto out; + } + } + } + } + *pSurface = (VkSurfaceKHR)icd_surface; + +out: + if (result != VK_SUCCESS && icd_surface != NULL) { + if (icd_surface->real_icd_surfaces != NULL) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, ++i) { + if (icd_surface->real_icd_surfaces[i] != VK_NULL_HANDLE && icd_term->dispatch.DestroySurfaceKHR != NULL) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, icd_surface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, icd_surface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, icd_surface); + } + return result; +} + +#endif + +// Functions for the VK_KHR_display instance extension: +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPropertiesKHR *pProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetPhysicalDeviceDisplayPropertiesKHR(unwrapped_phys_dev, pPropertyCount, pProperties); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPropertiesKHR *pProperties) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. 
vkGetPhysicalDeviceDisplayPropertiesKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceDisplayPropertiesKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceDisplayPropertiesKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceDisplayPropertiesKHR ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceDisplayPropertiesKHR(phys_dev_term->phys_dev, pPropertyCount, pProperties); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPlanePropertiesKHR *pProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetPhysicalDeviceDisplayPlanePropertiesKHR(unwrapped_phys_dev, pPropertyCount, pProperties); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPlanePropertiesKHR *pProperties) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. vkGetPhysicalDeviceDisplayPlanePropertiesKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetPhysicalDeviceDisplayPlanePropertiesKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetPhysicalDeviceDisplayPlanePropertiesKHR!\n"); + assert(false && "loader: null GetPhysicalDeviceDisplayPlanePropertiesKHR ICD pointer"); + } + + return icd_term->dispatch.GetPhysicalDeviceDisplayPlanePropertiesKHR(phys_dev_term->phys_dev, pPropertyCount, pProperties); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, + uint32_t planeIndex, uint32_t *pDisplayCount, + VkDisplayKHR *pDisplays) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetDisplayPlaneSupportedDisplaysKHR(unwrapped_phys_dev, planeIndex, pDisplayCount, pDisplays); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, + uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. 
vkGetDisplayPlaneSupportedDisplaysKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetDisplayPlaneSupportedDisplaysKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetDisplayPlaneSupportedDisplaysKHR!\n"); + assert(false && "loader: null GetDisplayPlaneSupportedDisplaysKHR ICD pointer"); + } + + return icd_term->dispatch.GetDisplayPlaneSupportedDisplaysKHR(phys_dev_term->phys_dev, planeIndex, pDisplayCount, pDisplays); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + uint32_t *pPropertyCount, + VkDisplayModePropertiesKHR *pProperties) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetDisplayModePropertiesKHR(unwrapped_phys_dev, display, pPropertyCount, pProperties); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + uint32_t *pPropertyCount, + VkDisplayModePropertiesKHR *pProperties) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. vkGetDisplayModePropertiesKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetDisplayModePropertiesKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetDisplayModePropertiesKHR!\n"); + assert(false && "loader: null GetDisplayModePropertiesKHR ICD pointer"); + } + + return icd_term->dispatch.GetDisplayModePropertiesKHR(phys_dev_term->phys_dev, display, pPropertyCount, pProperties); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkDisplayModeKHR *pMode) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->CreateDisplayModeKHR(unwrapped_phys_dev, display, pCreateInfo, pAllocator, pMode); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDisplayModeKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDisplayModeKHR *pMode) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. 
vkCreateDisplayModeKHR not executed!\n"); + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + + if (NULL == icd_term->dispatch.CreateDisplayModeKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkCreateDisplayModeKHR!\n"); + assert(false && "loader: null CreateDisplayModeKHR ICD pointer"); + } + + return icd_term->dispatch.CreateDisplayModeKHR(phys_dev_term->phys_dev, display, pCreateInfo, pAllocator, pMode); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR *pCapabilities) { + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(physicalDevice); + VkResult res = disp->GetDisplayPlaneCapabilitiesKHR(unwrapped_phys_dev, mode, planeIndex, pCapabilities); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR *pCapabilities) { + // First, check to ensure the appropriate extension was enabled: + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + if (!ptr_instance->wsi_display_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_display extension not enabled. vkGetDisplayPlaneCapabilitiesKHR not executed!\n"); + return VK_SUCCESS; + } + + if (NULL == icd_term->dispatch.GetDisplayPlaneCapabilitiesKHR) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD for selected physical device is not exporting vkGetDisplayPlaneCapabilitiesKHR!\n"); + assert(false && "loader: null GetDisplayPlaneCapabilitiesKHR ICD pointer"); + } + + return icd_term->dispatch.GetDisplayPlaneCapabilitiesKHR(phys_dev_term->phys_dev, mode, planeIndex, pCapabilities); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR(VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDisplayPlaneSurfaceKHR(VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + struct loader_instance *inst = loader_get_instance(instance); + VkIcdSurface *pIcdSurface = NULL; + VkResult vkRes = VK_SUCCESS; + uint32_t i = 0; + + if (!inst->wsi_display_enabled) { + loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. 
vkCreateDisplayPlaneSurfaceKHR not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = AllocateIcdSurfaceStruct(inst, sizeof(pIcdSurface->display_surf.base), sizeof(pIcdSurface->display_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->display_surf.base.platform = VK_ICD_WSI_PLATFORM_DISPLAY; + pIcdSurface->display_surf.displayMode = pCreateInfo->displayMode; + pIcdSurface->display_surf.planeIndex = pCreateInfo->planeIndex; + pIcdSurface->display_surf.planeStackIndex = pCreateInfo->planeStackIndex; + pIcdSurface->display_surf.transform = pCreateInfo->transform; + pIcdSurface->display_surf.globalAlpha = pCreateInfo->globalAlpha; + pIcdSurface->display_surf.alphaMode = pCreateInfo->alphaMode; + pIcdSurface->display_surf.imageExtent = pCreateInfo->imageExtent; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateDisplayPlaneSurfaceKHR) { + vkRes = icd_term->dispatch.CreateDisplayPlaneSurfaceKHR(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)pIcdSurface; + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(inst, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(inst, pIcdSurface); + } + + return vkRes; +} + +// EXT_display_swapchain Extension command + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR *pCreateInfos, + const VkAllocationCallbacks *pAllocator, + VkSwapchainKHR *pSwapchains) { + const VkLayerDispatchTable *disp; + disp = loader_get_dispatch(device); + return disp->CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR *pCreateInfos, + const VkAllocationCallbacks *pAllocator, + VkSwapchainKHR *pSwapchains) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.CreateSharedSwapchainsKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)pCreateInfos->surface; + if (NULL != icd_surface->real_icd_surfaces) { + if ((VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[icd_index]) { + // We found the ICD, and there is an ICD KHR surface + // associated with it, so copy the CreateInfo struct + // and point it at the ICD's surface. 
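+                // Note: the unwrap below is keyed off pCreateInfos[0].surface; every entry in the copied
+                // create-info array receives that ICD's surface handle before the call is forwarded.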
+ VkSwapchainCreateInfoKHR *pCreateCopy = loader_stack_alloc(sizeof(VkSwapchainCreateInfoKHR) * swapchainCount); + if (NULL == pCreateCopy) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + memcpy(pCreateCopy, pCreateInfos, sizeof(VkSwapchainCreateInfoKHR) * swapchainCount); + for (uint32_t sc = 0; sc < swapchainCount; sc++) { + pCreateCopy[sc].surface = icd_surface->real_icd_surfaces[icd_index]; + } + return icd_term->dispatch.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateCopy, pAllocator, pSwapchains); + } + } + return icd_term->dispatch.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains); + } + return VK_SUCCESS; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeviceGroupPresentCapabilitiesKHR(device, pDeviceGroupPresentCapabilities); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->GetDeviceGroupSurfacePresentModesKHR(device, surface, pModes); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes) { + uint32_t icd_index = 0; + struct loader_device *dev; + struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index); + if (NULL != icd_term && NULL != icd_term->dispatch.GetDeviceGroupSurfacePresentModesKHR) { + VkIcdSurface *icd_surface = (VkIcdSurface *)(uintptr_t)surface; + if (NULL != icd_surface->real_icd_surfaces && (VkSurfaceKHR)NULL != icd_surface->real_icd_surfaces[icd_index]) { + return icd_term->dispatch.GetDeviceGroupSurfacePresentModesKHR(device, icd_surface->real_icd_surfaces[icd_index], pModes); + } + return icd_term->dispatch.GetDeviceGroupSurfacePresentModesKHR(device, surface, pModes); + } + return VK_SUCCESS; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDevicePresentRectanglesKHR(unwrapped_phys_dev, surface, pRectCount, pRects); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + if (NULL == icd_term->dispatch.GetPhysicalDevicePresentRectanglesKHR) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "ICD associated with VkPhysicalDevice does not support GetPhysicalDevicePresentRectanglesKHX"); + } + VkIcdSurface *icd_surface = (VkIcdSurface *)(surface); + uint8_t icd_index = phys_dev_term->icd_index; + if (NULL != icd_surface->real_icd_surfaces && NULL != (void *)icd_surface->real_icd_surfaces[icd_index]) { + return 
icd_term->dispatch.GetPhysicalDevicePresentRectanglesKHR(phys_dev_term->phys_dev, icd_surface->real_icd_surfaces[icd_index], pRectCount, pRects); + } + return icd_term->dispatch.GetPhysicalDevicePresentRectanglesKHR(phys_dev_term->phys_dev, surface, pRectCount, pRects); +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex) { + const VkLayerDispatchTable *disp = loader_get_dispatch(device); + return disp->AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex); +} + +// ---- VK_KHR_get_display_properties2 extension trampoline/terminators + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayProperties2KHR *pProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceDisplayProperties2KHR(unwrapped_phys_dev, pPropertyCount, pProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayProperties2KHR *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + // If the function is available in the driver, just call into it + if (icd_term->dispatch.GetPhysicalDeviceDisplayProperties2KHR != NULL) { + return icd_term->dispatch.GetPhysicalDeviceDisplayProperties2KHR(phys_dev_term->phys_dev, pPropertyCount, pProperties); + } + + // We have to emulate the function. 
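+    // Emulation strategy (also used by the display-plane and display-mode property queries below): call the
+    // VK_KHR_display entrypoint into a temporary array, then copy each element into the corresponding
+    // *2KHR struct's inner member, leaving any caller-provided pNext chains untouched.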
+ loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceDisplayProperties2KHR: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name); + + // If the icd doesn't support VK_KHR_display, then no properties are available + if (icd_term->dispatch.GetPhysicalDeviceDisplayPropertiesKHR == NULL) { + *pPropertyCount = 0; + return VK_SUCCESS; + } + + // If we aren't writing to pProperties, then emulation is straightforward + if (pProperties == NULL || *pPropertyCount == 0) { + return icd_term->dispatch.GetPhysicalDeviceDisplayPropertiesKHR(phys_dev_term->phys_dev, pPropertyCount, NULL); + } + + // If we do have to write to pProperties, then we need to write to a temporary array of VkDisplayPropertiesKHR and copy it + VkDisplayPropertiesKHR *properties = loader_stack_alloc(*pPropertyCount * sizeof(VkDisplayPropertiesKHR)); + if (properties == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + VkResult res = icd_term->dispatch.GetPhysicalDeviceDisplayPropertiesKHR(phys_dev_term->phys_dev, pPropertyCount, properties); + if (res < 0) { + return res; + } + for (uint32_t i = 0; i < *pPropertyCount; ++i) { + memcpy(&pProperties[i].displayProperties, &properties[i], sizeof(VkDisplayPropertiesKHR)); + } + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, VkDisplayPlaneProperties2KHR *pProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceDisplayPlaneProperties2KHR(unwrapped_phys_dev, pPropertyCount, pProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPlaneProperties2KHR *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + // If the function is available in the driver, just call into it + if (icd_term->dispatch.GetPhysicalDeviceDisplayPlaneProperties2KHR != NULL) { + return icd_term->dispatch.GetPhysicalDeviceDisplayPlaneProperties2KHR(phys_dev_term->phys_dev, pPropertyCount, pProperties); + } + + // We have to emulate the function. 
+ loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceDisplayPlaneProperties2KHR: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name); + + // If the icd doesn't support VK_KHR_display, then no properties are available + if (icd_term->dispatch.GetPhysicalDeviceDisplayPlanePropertiesKHR == NULL) { + *pPropertyCount = 0; + return VK_SUCCESS; + } + + // If we aren't writing to pProperties, then emulation is straightforward + if (pProperties == NULL || *pPropertyCount == 0) { + return icd_term->dispatch.GetPhysicalDeviceDisplayPlanePropertiesKHR(phys_dev_term->phys_dev, pPropertyCount, NULL); + } + + // If we do have to write to pProperties, then we need to write to a temporary array of VkDisplayPlanePropertiesKHR and copy it + VkDisplayPlanePropertiesKHR *properties = loader_stack_alloc(*pPropertyCount * sizeof(VkDisplayPlanePropertiesKHR)); + if (properties == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + VkResult res = + icd_term->dispatch.GetPhysicalDeviceDisplayPlanePropertiesKHR(phys_dev_term->phys_dev, pPropertyCount, properties); + if (res < 0) { + return res; + } + for (uint32_t i = 0; i < *pPropertyCount; ++i) { + memcpy(&pProperties[i].displayPlaneProperties, &properties[i], sizeof(VkDisplayPlanePropertiesKHR)); + } + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + uint32_t *pPropertyCount, + VkDisplayModeProperties2KHR *pProperties) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetDisplayModeProperties2KHR(unwrapped_phys_dev, display, pPropertyCount, pProperties); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + uint32_t *pPropertyCount, + VkDisplayModeProperties2KHR *pProperties) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + // If the function is available in the driver, just call into it + if (icd_term->dispatch.GetDisplayModeProperties2KHR != NULL) { + return icd_term->dispatch.GetDisplayModeProperties2KHR(phys_dev_term->phys_dev, display, pPropertyCount, pProperties); + } + + // We have to emulate the function. 
+ loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetDisplayModeProperties2KHR: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name); + + // If the icd doesn't support VK_KHR_display, then no properties are available + if (icd_term->dispatch.GetDisplayModePropertiesKHR == NULL) { + *pPropertyCount = 0; + return VK_SUCCESS; + } + + // If we aren't writing to pProperties, then emulation is straightforward + if (pProperties == NULL || *pPropertyCount == 0) { + return icd_term->dispatch.GetDisplayModePropertiesKHR(phys_dev_term->phys_dev, display, pPropertyCount, NULL); + } + + // If we do have to write to pProperties, then we need to write to a temporary array of VkDisplayModePropertiesKHR and copy it + VkDisplayModePropertiesKHR *properties = loader_stack_alloc(*pPropertyCount * sizeof(VkDisplayModePropertiesKHR)); + if (properties == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + VkResult res = icd_term->dispatch.GetDisplayModePropertiesKHR(phys_dev_term->phys_dev, display, pPropertyCount, properties); + if (res < 0) { + return res; + } + for (uint32_t i = 0; i < *pPropertyCount; ++i) { + memcpy(&pProperties[i].displayModeProperties, &properties[i], sizeof(VkDisplayModePropertiesKHR)); + } + return res; +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR *pCapabilities) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetDisplayPlaneCapabilities2KHR(unwrapped_phys_dev, pDisplayPlaneInfo, pCapabilities); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR *pCapabilities) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + + // If the function is available in the driver, just call into it + if (icd_term->dispatch.GetDisplayPlaneCapabilities2KHR != NULL) { + return icd_term->dispatch.GetDisplayPlaneCapabilities2KHR(phys_dev_term->phys_dev, pDisplayPlaneInfo, pCapabilities); + } + + // We have to emulate the function. + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetDisplayPlaneCapabilities2KHR: Emulating call in ICD \"%s\"", icd_term->scanned_icd->lib_name); + + // Just call into the old version of the function. 
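+    // Only pDisplayPlaneInfo->mode and planeIndex are consumed in this emulated path; pNext data on the
+    // info and capabilities structs is ignored.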
+ // If the icd doesn't support VK_KHR_display, there are zero planes and this call is invalid (and will crash) + return icd_term->dispatch.GetDisplayPlaneCapabilitiesKHR(phys_dev_term->phys_dev, pDisplayPlaneInfo->mode, + pDisplayPlaneInfo->planeIndex, &pCapabilities->capabilities); +} + +#ifdef VK_USE_PLATFORM_FUCHSIA + +// This is the trampoline entrypoint for CreateImagePipeSurfaceFUCHSIA +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA(VkInstance instance, + const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + const VkLayerInstanceDispatchTable *disp; + disp = loader_get_instance_layer_dispatch(instance); + VkResult res; + + res = disp->CreateImagePipeSurfaceFUCHSIA(instance, pCreateInfo, pAllocator, pSurface); + return res; +} + +// This is the instance chain terminator function for CreateImagePipeSurfaceFUCHSIA +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA(VkInstance instance, + const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface) { + VkResult vkRes = VK_SUCCESS; + VkIcdSurface *pIcdSurface = NULL; + uint32_t i = 0; + + // Initialize pSurface to NULL just to be safe. + *pSurface = VK_NULL_HANDLE; + // First, check to ensure the appropriate extension was enabled: + struct loader_instance *ptr_instance = loader_get_instance(instance); + if (!ptr_instance->wsi_imagepipe_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_FUCHSIA_imagepipe_surface extension not enabled. " + "vkCreateImagePipeSurfaceFUCHSIA not executed!\n"); + vkRes = VK_ERROR_EXTENSION_NOT_PRESENT; + goto out; + } + + // Next, if so, proceed with the implementation of this function: + pIcdSurface = + AllocateIcdSurfaceStruct(ptr_instance, sizeof(pIcdSurface->imagepipe_surf.base), sizeof(pIcdSurface->imagepipe_surf)); + if (pIcdSurface == NULL) { + vkRes = VK_ERROR_OUT_OF_HOST_MEMORY; + goto out; + } + + pIcdSurface->imagepipe_surf.base.platform = VK_ICD_WSI_PLATFORM_FUCHSIA; + + // Loop through each ICD and determine if they need to create a surface + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if (icd_term->scanned_icd->interface_version >= ICD_VER_SUPPORTS_ICD_SURFACE_KHR) { + if (NULL != icd_term->dispatch.CreateImagePipeSurfaceFUCHSIA) { + vkRes = icd_term->dispatch.CreateImagePipeSurfaceFUCHSIA(icd_term->instance, pCreateInfo, pAllocator, + &pIcdSurface->real_icd_surfaces[i]); + if (VK_SUCCESS != vkRes) { + goto out; + } + } + } + } + + *pSurface = (VkSurfaceKHR)(pIcdSurface); + +out: + + if (VK_SUCCESS != vkRes && NULL != pIcdSurface) { + if (NULL != pIcdSurface->real_icd_surfaces) { + i = 0; + for (struct loader_icd_term *icd_term = ptr_instance->icd_terms; icd_term != NULL; icd_term = icd_term->next, i++) { + if ((VkSurfaceKHR)NULL != pIcdSurface->real_icd_surfaces[i] && NULL != icd_term->dispatch.DestroySurfaceKHR) { + icd_term->dispatch.DestroySurfaceKHR(icd_term->instance, pIcdSurface->real_icd_surfaces[i], pAllocator); + } + } + loader_instance_heap_free(ptr_instance, pIcdSurface->real_icd_surfaces); + } + loader_instance_heap_free(ptr_instance, pIcdSurface); + } + + return vkRes; +} +#endif // VK_USE_PLATFORM_FUCHSIA + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL +vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, + 
VkSurfaceCapabilities2KHR *pSurfaceCapabilities) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceSurfaceCapabilities2KHR(unwrapped_phys_dev, pSurfaceInfo, pSurfaceCapabilities); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, + VkSurfaceCapabilities2KHR *pSurfaceCapabilities) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + + if (!ptr_instance->wsi_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. vkGetPhysicalDeviceSurfaceCapabilities2KHR not executed!\n"); + return VK_SUCCESS; + } + + VkIcdSurface *icd_surface = (VkIcdSurface *)(pSurfaceInfo->surface); + uint8_t icd_index = phys_dev_term->icd_index; + + if (icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilities2KHR != NULL) { + VkBaseOutStructure *pNext = (VkBaseOutStructure *)pSurfaceCapabilities->pNext; + while (pNext != NULL) { + if ((int)pNext->sType == VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR) { + // Not all ICDs may be supporting VK_KHR_surface_protected_capabilities + // Initialize VkSurfaceProtectedCapabilitiesKHR.supportsProtected to false and + // if an ICD supports protected surfaces, it will reset it to true accordingly. + ((VkSurfaceProtectedCapabilitiesKHR *)pNext)->supportsProtected = VK_FALSE; + } + pNext = (VkBaseOutStructure *)pNext->pNext; + } + + // Pass the call to the driver, possibly unwrapping the ICD surface + if (icd_surface->real_icd_surfaces != NULL && (void *)icd_surface->real_icd_surfaces[icd_index] != NULL) { + VkPhysicalDeviceSurfaceInfo2KHR info_copy = *pSurfaceInfo; + info_copy.surface = icd_surface->real_icd_surfaces[icd_index]; + return icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilities2KHR(phys_dev_term->phys_dev, &info_copy, + pSurfaceCapabilities); + } else { + return icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilities2KHR(phys_dev_term->phys_dev, pSurfaceInfo, + pSurfaceCapabilities); + } + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceCapabilities2KHR: Emulating call in ICD \"%s\" using " + "vkGetPhysicalDeviceSurfaceCapabilitiesKHR", + icd_term->scanned_icd->lib_name); + + if (pSurfaceInfo->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceCapabilities2KHR: Emulation found unrecognized structure type in " + "pSurfaceInfo->pNext - this struct will be ignored"); + } + + // Write to the VkSurfaceCapabilities2KHR struct + VkSurfaceKHR surface = pSurfaceInfo->surface; + if (icd_surface->real_icd_surfaces != NULL && (void *)icd_surface->real_icd_surfaces[icd_index] != NULL) { + surface = icd_surface->real_icd_surfaces[icd_index]; + } + VkResult res = icd_term->dispatch.GetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev_term->phys_dev, surface, + &pSurfaceCapabilities->surfaceCapabilities); + + if (pSurfaceCapabilities->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + 
"vkGetPhysicalDeviceSurfaceCapabilities2KHR: Emulation found unrecognized structure type in " + "pSurfaceCapabilities->pNext - this struct will be ignored"); + } + return res; + } +} + +LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL +vkGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, + uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) { + const VkLayerInstanceDispatchTable *disp; + VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice); + disp = loader_get_instance_layer_dispatch(physicalDevice); + return disp->GetPhysicalDeviceSurfaceFormats2KHR(unwrapped_phys_dev, pSurfaceInfo, pSurfaceFormatCount, pSurfaceFormats); +} + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, + uint32_t *pSurfaceFormatCount, + VkSurfaceFormat2KHR *pSurfaceFormats) { + struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice; + struct loader_icd_term *icd_term = phys_dev_term->this_icd_term; + struct loader_instance *ptr_instance = (struct loader_instance *)icd_term->this_instance; + + if (!ptr_instance->wsi_surface_enabled) { + loader_log(ptr_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0, + "VK_KHR_surface extension not enabled. vkGetPhysicalDeviceSurfaceFormats2KHR not executed!\n"); + return VK_SUCCESS; + } + + VkIcdSurface *icd_surface = (VkIcdSurface *)(pSurfaceInfo->surface); + uint8_t icd_index = phys_dev_term->icd_index; + + if (icd_term->dispatch.GetPhysicalDeviceSurfaceFormats2KHR != NULL) { + // Pass the call to the driver, possibly unwrapping the ICD surface + if (icd_surface->real_icd_surfaces != NULL && (void *)icd_surface->real_icd_surfaces[icd_index] != NULL) { + VkPhysicalDeviceSurfaceInfo2KHR info_copy = *pSurfaceInfo; + info_copy.surface = icd_surface->real_icd_surfaces[icd_index]; + return icd_term->dispatch.GetPhysicalDeviceSurfaceFormats2KHR(phys_dev_term->phys_dev, &info_copy, pSurfaceFormatCount, + pSurfaceFormats); + } else { + return icd_term->dispatch.GetPhysicalDeviceSurfaceFormats2KHR(phys_dev_term->phys_dev, pSurfaceInfo, + pSurfaceFormatCount, pSurfaceFormats); + } + } else { + // Emulate the call + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceFormats2KHR: Emulating call in ICD \"%s\" using vkGetPhysicalDeviceSurfaceFormatsKHR", + icd_term->scanned_icd->lib_name); + + if (pSurfaceInfo->pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceFormats2KHR: Emulation found unrecognized structure type in pSurfaceInfo->pNext " + "- this struct will be ignored"); + } + + VkSurfaceKHR surface = pSurfaceInfo->surface; + if (icd_surface->real_icd_surfaces != NULL && (void *)icd_surface->real_icd_surfaces[icd_index] != NULL) { + surface = icd_surface->real_icd_surfaces[icd_index]; + } + + if (*pSurfaceFormatCount == 0 || pSurfaceFormats == NULL) { + // Write to pSurfaceFormatCount + return icd_term->dispatch.GetPhysicalDeviceSurfaceFormatsKHR(phys_dev_term->phys_dev, surface, pSurfaceFormatCount, + NULL); + } else { + // Allocate a temporary array for the output of the old function + VkSurfaceFormatKHR *formats = loader_stack_alloc(*pSurfaceFormatCount * sizeof(VkSurfaceFormatKHR)); + if (formats == NULL) { + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + + VkResult res = 
icd_term->dispatch.GetPhysicalDeviceSurfaceFormatsKHR(phys_dev_term->phys_dev, surface, + pSurfaceFormatCount, formats); + for (uint32_t i = 0; i < *pSurfaceFormatCount; ++i) { + pSurfaceFormats[i].surfaceFormat = formats[i]; + if (pSurfaceFormats[i].pNext != NULL) { + loader_log(icd_term->this_instance, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0, + "vkGetPhysicalDeviceSurfaceFormats2KHR: Emulation found unrecognized structure type in " + "pSurfaceFormats[%d].pNext - this struct will be ignored", + i); + } + } + return res; + } + } +} + +bool wsi_swapchain_instance_gpa(struct loader_instance *ptr_instance, const char *name, void **addr) { + *addr = NULL; + + // Functions for the VK_KHR_surface extension: + if (!strcmp("vkDestroySurfaceKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkDestroySurfaceKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceSurfaceSupportKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDeviceSurfaceSupportKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceSurfaceCapabilitiesKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDeviceSurfaceCapabilitiesKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceSurfaceFormatsKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDeviceSurfaceFormatsKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceSurfacePresentModesKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDeviceSurfacePresentModesKHR : NULL; + return true; + } + + if (!strcmp("vkGetDeviceGroupPresentCapabilitiesKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetDeviceGroupPresentCapabilitiesKHR : NULL; + return true; + } + + if (!strcmp("vkGetDeviceGroupSurfacePresentModesKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetDeviceGroupSurfacePresentModesKHR : NULL; + return true; + } + + if (!strcmp("vkGetPhysicalDevicePresentRectanglesKHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDevicePresentRectanglesKHR : NULL; + return true; + } + + // Functions for VK_KHR_get_surface_capabilities2 extension: + if (!strcmp("vkGetPhysicalDeviceSurfaceCapabilities2KHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDeviceSurfaceCapabilities2KHR : NULL; + return true; + } + + if (!strcmp("vkGetPhysicalDeviceSurfaceFormats2KHR", name)) { + *addr = ptr_instance->wsi_surface_enabled ? (void *)vkGetPhysicalDeviceSurfaceFormats2KHR : NULL; + return true; + } + + // Functions for the VK_KHR_swapchain extension: + + // Note: This is a device extension, and its functions are statically + // exported from the loader. Per Khronos decisions, the loader's GIPA + // function will return the trampoline function for such device-extension + // functions, regardless of whether the extension has been enabled. 
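+    // For example, querying vkCreateSwapchainKHR below through vkGetInstanceProcAddr returns the
+    // loader trampoline even when VK_KHR_swapchain has not been enabled, per the note above.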
+ if (!strcmp("vkCreateSwapchainKHR", name)) { + *addr = (void *)vkCreateSwapchainKHR; + return true; + } + if (!strcmp("vkDestroySwapchainKHR", name)) { + *addr = (void *)vkDestroySwapchainKHR; + return true; + } + if (!strcmp("vkGetSwapchainImagesKHR", name)) { + *addr = (void *)vkGetSwapchainImagesKHR; + return true; + } + if (!strcmp("vkAcquireNextImageKHR", name)) { + *addr = (void *)vkAcquireNextImageKHR; + return true; + } + if (!strcmp("vkQueuePresentKHR", name)) { + *addr = (void *)vkQueuePresentKHR; + return true; + } + if (!strcmp("vkAcquireNextImage2KHR", name)) { + *addr = (void *)vkAcquireNextImage2KHR; + return true; + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + + // Functions for the VK_KHR_win32_surface extension: + if (!strcmp("vkCreateWin32SurfaceKHR", name)) { + *addr = ptr_instance->wsi_win32_surface_enabled ? (void *)vkCreateWin32SurfaceKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceWin32PresentationSupportKHR", name)) { + *addr = ptr_instance->wsi_win32_surface_enabled ? (void *)vkGetPhysicalDeviceWin32PresentationSupportKHR : NULL; + return true; + } +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + + // Functions for the VK_KHR_wayland_surface extension: + if (!strcmp("vkCreateWaylandSurfaceKHR", name)) { + *addr = ptr_instance->wsi_wayland_surface_enabled ? (void *)vkCreateWaylandSurfaceKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceWaylandPresentationSupportKHR", name)) { + *addr = ptr_instance->wsi_wayland_surface_enabled ? (void *)vkGetPhysicalDeviceWaylandPresentationSupportKHR : NULL; + return true; + } +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + + // Functions for the VK_KHR_xcb_surface extension: + if (!strcmp("vkCreateXcbSurfaceKHR", name)) { + *addr = ptr_instance->wsi_xcb_surface_enabled ? (void *)vkCreateXcbSurfaceKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceXcbPresentationSupportKHR", name)) { + *addr = ptr_instance->wsi_xcb_surface_enabled ? (void *)vkGetPhysicalDeviceXcbPresentationSupportKHR : NULL; + return true; + } +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + + // Functions for the VK_KHR_xlib_surface extension: + if (!strcmp("vkCreateXlibSurfaceKHR", name)) { + *addr = ptr_instance->wsi_xlib_surface_enabled ? (void *)vkCreateXlibSurfaceKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceXlibPresentationSupportKHR", name)) { + *addr = ptr_instance->wsi_xlib_surface_enabled ? (void *)vkGetPhysicalDeviceXlibPresentationSupportKHR : NULL; + return true; + } +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + + // Functions for the VK_EXT_directfb_surface extension: + if (!strcmp("vkCreateDirectFBSurfaceEXT", name)) { + *addr = ptr_instance->wsi_directfb_surface_enabled ? (void *)vkCreateDirectFBSurfaceEXT : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceDirectFBPresentationSupportEXT", name)) { + *addr = ptr_instance->wsi_directfb_surface_enabled ? (void *)vkGetPhysicalDeviceDirectFBPresentationSupportEXT : NULL; + return true; + } +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_ANDROID_KHR + + // Functions for the VK_KHR_android_surface extension: + if (!strcmp("vkCreateAndroidSurfaceKHR", name)) { + *addr = ptr_instance->wsi_android_surface_enabled ? 
(void *)vkCreateAndroidSurfaceKHR : NULL; + return true; + } +#endif // VK_USE_PLATFORM_ANDROID_KHR + +#ifdef VK_USE_PLATFORM_MACOS_MVK + + // Functions for the VK_MVK_macos_surface extension: + if (!strcmp("vkCreateMacOSSurfaceMVK", name)) { + *addr = ptr_instance->wsi_macos_surface_enabled ? (void *)vkCreateMacOSSurfaceMVK : NULL; + return true; + } +#endif // VK_USE_PLATFORM_MACOS_MVK +#ifdef VK_USE_PLATFORM_IOS_MVK + + // Functions for the VK_MVK_ios_surface extension: + if (!strcmp("vkCreateIOSSurfaceMVK", name)) { + *addr = ptr_instance->wsi_ios_surface_enabled ? (void *)vkCreateIOSSurfaceMVK : NULL; + return true; + } +#endif // VK_USE_PLATFORM_IOS_MVK +#ifdef VK_USE_PLATFORM_GGP + + // Functions for the VK_GGP_stream_descriptor_surface extension: + if (!strcmp("vkCreateStreamDescriptorSurfaceGGP", name)) { + *addr = ptr_instance->wsi_ggp_surface_enabled ? (void *)vkCreateStreamDescriptorSurfaceGGP : NULL; + return true; + } +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + + // Functions for the VK_FUCHSIA_imagepipe_surface extension: + if (!strcmp("vkCreateImagePipeSurfaceFUCHSIA", name)) { + *addr = ptr_instance->wsi_imagepipe_surface_enabled ? (void *)vkCreateImagePipeSurfaceFUCHSIA : NULL; + return true; + } + +#endif // VK_USE_PLATFORM_FUCHSIA + + // Functions for the VK_EXT_headless_surface extension: + if (!strcmp("vkCreateHeadlessSurfaceEXT", name)) { + *addr = ptr_instance->wsi_headless_surface_enabled ? (void *)vkCreateHeadlessSurfaceEXT : NULL; + return true; + } + +#if defined(VK_USE_PLATFORM_METAL_EXT) + // Functions for the VK_MVK_macos_surface extension: + if (!strcmp("vkCreateMetalSurfaceEXT", name)) { + *addr = ptr_instance->wsi_metal_surface_enabled ? (void *)vkCreateMetalSurfaceEXT : NULL; + return true; + } +#endif // VK_USE_PLATFORM_METAL_EXT + + // Functions for VK_KHR_display extension: + if (!strcmp("vkGetPhysicalDeviceDisplayPropertiesKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkGetPhysicalDeviceDisplayPropertiesKHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceDisplayPlanePropertiesKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkGetPhysicalDeviceDisplayPlanePropertiesKHR : NULL; + return true; + } + if (!strcmp("vkGetDisplayPlaneSupportedDisplaysKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkGetDisplayPlaneSupportedDisplaysKHR : NULL; + return true; + } + if (!strcmp("vkGetDisplayModePropertiesKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkGetDisplayModePropertiesKHR : NULL; + return true; + } + if (!strcmp("vkCreateDisplayModeKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkCreateDisplayModeKHR : NULL; + return true; + } + if (!strcmp("vkGetDisplayPlaneCapabilitiesKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkGetDisplayPlaneCapabilitiesKHR : NULL; + return true; + } + if (!strcmp("vkCreateDisplayPlaneSurfaceKHR", name)) { + *addr = ptr_instance->wsi_display_enabled ? (void *)vkCreateDisplayPlaneSurfaceKHR : NULL; + return true; + } + + // Functions for KHR_display_swapchain extension: + if (!strcmp("vkCreateSharedSwapchainsKHR", name)) { + *addr = (void *)vkCreateSharedSwapchainsKHR; + return true; + } + + // Functions for KHR_get_display_properties2 + if (!strcmp("vkGetPhysicalDeviceDisplayProperties2KHR", name)) { + *addr = ptr_instance->wsi_display_props2_enabled ? 
(void *)vkGetPhysicalDeviceDisplayProperties2KHR : NULL; + return true; + } + if (!strcmp("vkGetPhysicalDeviceDisplayPlaneProperties2KHR", name)) { + *addr = ptr_instance->wsi_display_props2_enabled ? (void *)vkGetPhysicalDeviceDisplayPlaneProperties2KHR : NULL; + return true; + } + if (!strcmp("vkGetDisplayModeProperties2KHR", name)) { + *addr = ptr_instance->wsi_display_props2_enabled ? (void *)vkGetDisplayModeProperties2KHR : NULL; + return true; + } + if (!strcmp("vkGetDisplayPlaneCapabilities2KHR", name)) { + *addr = ptr_instance->wsi_display_props2_enabled ? (void *)vkGetDisplayPlaneCapabilities2KHR : NULL; + return true; + } + + return false; +} diff --git a/src/third_party/vulkan/loader/wsi.h b/src/third_party/vulkan/loader/wsi.h new file mode 100644 index 0000000..c9f4654 --- /dev/null +++ b/src/third_party/vulkan/loader/wsi.h @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Author: Ian Elliott + * + */ + +#ifndef WSI_H +#define WSI_H + +#include "vk_loader_platform.h" +#include "loader.h" + +typedef struct { + union { +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VkIcdSurfaceWayland wayland_surf; +#endif // VK_USE_PLATFORM_WAYLAND_KHR +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkIcdSurfaceWin32 win_surf; +#endif // VK_USE_PLATFORM_WIN32_KHR +#ifdef VK_USE_PLATFORM_XCB_KHR + VkIcdSurfaceXcb xcb_surf; +#endif // VK_USE_PLATFORM_XCB_KHR +#ifdef VK_USE_PLATFORM_XLIB_KHR + VkIcdSurfaceXlib xlib_surf; +#endif // VK_USE_PLATFORM_XLIB_KHR +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + VkIcdSurfaceDirectFB directfb_surf; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT +#ifdef VK_USE_PLATFORM_MACOS_MVK + VkIcdSurfaceMacOS macos_surf; +#endif // VK_USE_PLATFORM_MACOS_MVK +#ifdef VK_USE_PLATFORM_GGP + VkIcdSurfaceGgp ggp_surf; +#endif // VK_USE_PLATFORM_GGP +#ifdef VK_USE_PLATFORM_FUCHSIA + VkIcdSurfaceImagePipe imagepipe_surf; +#endif // VK_USE_PLATFORM_FUCHSIA +#ifdef VK_USE_PLATFORM_METAL_EXT + VkIcdSurfaceMetal metal_surf; +#endif // VK_USE_PLATFORM_METAL_EXT + VkIcdSurfaceDisplay display_surf; + VkIcdSurfaceHeadless headless_surf; + }; + uint32_t base_size; // Size of VkIcdSurfaceBase + uint32_t platform_size; // Size of corresponding VkIcdSurfaceXXX + uint32_t non_platform_offset; // Start offset to base_size + uint32_t entire_size; // Size of entire VkIcdSurface + VkSurfaceKHR *real_icd_surfaces; +} VkIcdSurface; + +bool wsi_swapchain_instance_gpa(struct loader_instance *ptr_instance, const char *name, void **addr); + +void wsi_create_instance(struct loader_instance *ptr_instance, const VkInstanceCreateInfo *pCreateInfo); +bool wsi_unsupported_instance_extension(const VkExtensionProperties *ext_prop); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateHeadlessSurfaceEXT(VkInstance instance, + const VkHeadlessSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); + +VKAPI_ATTR VkResult VKAPI_CALL 
terminator_CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain); + +VKAPI_ATTR void VKAPI_CALL terminator_DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, + const VkAllocationCallbacks *pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, VkSurfaceKHR surface, + VkBool32 *pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR *pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, + uint32_t *pSurfaceFormatCount, + VkSurfaceFormatKHR *pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, uint32_t *pPresentModeCount, + VkPresentModeKHR *pPresentModes); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +#ifdef VK_USE_PLATFORM_WIN32_KHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex); +#endif +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateWaylandSurfaceKHR(VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display *display); +#endif +#ifdef VK_USE_PLATFORM_XCB_KHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t *connection, + xcb_visualid_t visual_id); +#endif +#ifdef VK_USE_PLATFORM_XLIB_KHR +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, Display *dpy, + VisualID visualID); +#endif +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDirectFBSurfaceEXT(VkInstance instance, + const VkDirectFBSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +VKAPI_ATTR VkBool32 VKAPI_CALL terminator_GetPhysicalDeviceDirectFBPresentationSupportEXT(VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + IDirectFB *dfb); +#endif +#ifdef VK_USE_PLATFORM_MACOS_MVK +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMacOSSurfaceMVK(VkInstance instance, const 
VkMacOSSurfaceCreateInfoMVK *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif +#ifdef VK_USE_PLATFORM_IOS_MVK +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif +#ifdef VK_USE_PLATFORM_GGP +VKAPI_ATTR VkResult VKAPI_CALL +terminator_CreateStreamDescriptorSurfaceGGP(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif +#if defined(VK_USE_PLATFORM_METAL_EXT) +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMetalSurfaceEXT(VkInstance instance, const VkMetalSurfaceCreateInfoEXT *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPropertiesKHR *pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPlanePropertiesKHR *pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, + uint32_t *pDisplayCount, VkDisplayKHR *pDisplays); +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + uint32_t *pPropertyCount, + VkDisplayModePropertiesKHR *pProperties); +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDisplayModeKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDisplayModeKHR *pMode); +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR *pCapabilities); +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDisplayPlaneSurfaceKHR(VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR *pCreateInfo, + const VkAllocationCallbacks *pAllocator, + VkSurfaceKHR *pSurface); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR *pCreateInfos, + const VkAllocationCallbacks *pAllocator, + VkSwapchainKHR *pSwapchains); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayProperties2KHR *pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, + uint32_t *pPropertyCount, + VkDisplayPlaneProperties2KHR *pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, + uint32_t *pPropertyCount, + VkDisplayModeProperties2KHR *pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR *pCapabilities); +#ifdef VK_USE_PLATFORM_FUCHSIA +VKAPI_ATTR VkResult 
VKAPI_CALL terminator_CreateImagePipeSurfaceFUCHSIA(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface); +#endif + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, + VkSurfaceCapabilities2KHR *pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo, + uint32_t *pSurfaceFormatCount, + VkSurfaceFormat2KHR *pSurfaceFormats); + +#endif // WSI_H diff --git a/src/third_party/vulkan/vk_enum_string_helper.h b/src/third_party/vulkan/vk_enum_string_helper.h new file mode 100644 index 0000000..3cb9225 --- /dev/null +++ b/src/third_party/vulkan/vk_enum_string_helper.h @@ -0,0 +1,7062 @@ +// *** THIS FILE IS GENERATED - DO NOT EDIT *** +// See helper_file_generator.py for modifications + + +/*************************************************************************** + * + * Copyright (c) 2015-2020 The Khronos Group Inc. + * Copyright (c) 2015-2020 Valve Corporation + * Copyright (c) 2015-2020 LunarG, Inc. + * Copyright (c) 2015-2020 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Author: Mark Lobodzinski + * Author: Courtney Goeltzenleuchter + * Author: Tobin Ehlis + * Author: Chris Forbes + * Author: John Zulauf + * + ****************************************************************************/ + + +#pragma once +#ifdef _MSC_VER +#pragma warning( disable : 4065 ) +#endif + +#include +#include "vulkan.h" + + +static inline const char* string_VkResult(VkResult input_value) +{ + switch ((VkResult)input_value) + { + case VK_ERROR_DEVICE_LOST: + return "VK_ERROR_DEVICE_LOST"; + case VK_ERROR_EXTENSION_NOT_PRESENT: + return "VK_ERROR_EXTENSION_NOT_PRESENT"; + case VK_ERROR_FEATURE_NOT_PRESENT: + return "VK_ERROR_FEATURE_NOT_PRESENT"; + case VK_ERROR_FORMAT_NOT_SUPPORTED: + return "VK_ERROR_FORMAT_NOT_SUPPORTED"; + case VK_ERROR_FRAGMENTATION: + return "VK_ERROR_FRAGMENTATION"; + case VK_ERROR_FRAGMENTED_POOL: + return "VK_ERROR_FRAGMENTED_POOL"; + case VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT: + return "VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT"; + case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: + return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR"; + case VK_ERROR_INCOMPATIBLE_DRIVER: + return "VK_ERROR_INCOMPATIBLE_DRIVER"; + case VK_ERROR_INITIALIZATION_FAILED: + return "VK_ERROR_INITIALIZATION_FAILED"; + case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT: + return "VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT"; + case VK_ERROR_INVALID_EXTERNAL_HANDLE: + return "VK_ERROR_INVALID_EXTERNAL_HANDLE"; + case VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS: + return "VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS"; + case VK_ERROR_INVALID_SHADER_NV: + return "VK_ERROR_INVALID_SHADER_NV"; + case VK_ERROR_LAYER_NOT_PRESENT: + return "VK_ERROR_LAYER_NOT_PRESENT"; + case VK_ERROR_MEMORY_MAP_FAILED: + return "VK_ERROR_MEMORY_MAP_FAILED"; + case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: + return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR"; + case VK_ERROR_NOT_PERMITTED_EXT: + return "VK_ERROR_NOT_PERMITTED_EXT"; + case VK_ERROR_OUT_OF_DATE_KHR: + return "VK_ERROR_OUT_OF_DATE_KHR"; + case VK_ERROR_OUT_OF_DEVICE_MEMORY: + return "VK_ERROR_OUT_OF_DEVICE_MEMORY"; + case VK_ERROR_OUT_OF_HOST_MEMORY: + return "VK_ERROR_OUT_OF_HOST_MEMORY"; + case VK_ERROR_OUT_OF_POOL_MEMORY: + return "VK_ERROR_OUT_OF_POOL_MEMORY"; + case VK_ERROR_SURFACE_LOST_KHR: + return "VK_ERROR_SURFACE_LOST_KHR"; + case VK_ERROR_TOO_MANY_OBJECTS: + return "VK_ERROR_TOO_MANY_OBJECTS"; + case VK_ERROR_UNKNOWN: + return "VK_ERROR_UNKNOWN"; + case VK_ERROR_VALIDATION_FAILED_EXT: + return "VK_ERROR_VALIDATION_FAILED_EXT"; + case VK_EVENT_RESET: + return "VK_EVENT_RESET"; + case VK_EVENT_SET: + return "VK_EVENT_SET"; + case VK_INCOMPLETE: + return "VK_INCOMPLETE"; + case VK_NOT_READY: + return "VK_NOT_READY"; + case VK_OPERATION_DEFERRED_KHR: + return "VK_OPERATION_DEFERRED_KHR"; + case VK_OPERATION_NOT_DEFERRED_KHR: + return "VK_OPERATION_NOT_DEFERRED_KHR"; + case VK_PIPELINE_COMPILE_REQUIRED_EXT: + return "VK_PIPELINE_COMPILE_REQUIRED_EXT"; + case VK_SUBOPTIMAL_KHR: + return "VK_SUBOPTIMAL_KHR"; + case VK_SUCCESS: + return "VK_SUCCESS"; + case VK_THREAD_DONE_KHR: + return "VK_THREAD_DONE_KHR"; + case VK_THREAD_IDLE_KHR: + return "VK_THREAD_IDLE_KHR"; + case VK_TIMEOUT: + return "VK_TIMEOUT"; + default: + return "Unhandled VkResult"; + } +} + +static inline const char* string_VkStructureType(VkStructureType input_value) +{ + switch ((VkStructureType)input_value) + { + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR"; + case 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV"; + case VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR"; + case VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR"; + case VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR: + return "VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR"; + case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID: + return "VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID"; + case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID: + return "VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID"; + case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID: + return "VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID"; + case VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_APPLICATION_INFO: + return "VK_STRUCTURE_TYPE_APPLICATION_INFO"; + case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2: + return "VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2"; + case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT: + return "VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT"; + case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2: + return "VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2"; + case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT: + return "VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT"; + case VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV: + return "VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV"; + case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO: + return "VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO"; + case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO: + return "VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO"; + case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO: + return "VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO"; + case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO: + return "VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO"; + case 
VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR: + return "VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR"; + case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO: + return "VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO"; + case VK_STRUCTURE_TYPE_BIND_SPARSE_INFO: + return "VK_STRUCTURE_TYPE_BIND_SPARSE_INFO"; + case VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR: + return "VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR"; + case VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO: + return "VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO"; + case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO: + return "VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO"; + case VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR: + return "VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR"; + case VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER: + return "VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER"; + case VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2: + return "VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2"; + case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: + return "VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO"; + case VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO: + return "VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO"; + case VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT: + return "VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT"; + case VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV: + return "VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV"; + case VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO: + return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO"; + case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT: + return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT"; + case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO: + return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO"; + case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM: + return "VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM"; + case VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO: + return "VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO"; + case VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT: + return "VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT"; + case VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR: + return "VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR"; + case VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR: + return "VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR"; + case VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM: + return "VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM"; + case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET: + return "VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET"; + case 
VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR: + return "VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR"; + case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR: + return "VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR"; + case VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV: + return "VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT"; + case VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT"; + case 
VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD: + return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD"; + case VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT"; + case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2: + return "VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2"; + case VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT: + return "VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT"; + case VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD: + return "VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD"; + case VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT: + return "VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT"; + case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR"; + case VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR"; + case 
VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT: + return "VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT"; + case VK_STRUCTURE_TYPE_EVENT_CREATE_INFO: + return "VK_STRUCTURE_TYPE_EVENT_CREATE_INFO"; + case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV: + return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV"; + case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV: + return "VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV"; + case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES: + return "VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES"; + case VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES: + return "VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES"; + case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID: + return "VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID"; + case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES: + return "VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES"; + case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO: + return "VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO"; + case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES: + return "VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES"; + case VK_STRUCTURE_TYPE_FENCE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_FENCE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR: + return "VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR"; + case VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2: + return "VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR: + return "VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR"; + case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO: + return "VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO"; + case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO: + return "VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO"; + case VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO: + return "VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO"; + case VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV: + return "VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV"; + case VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV: + return "VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV"; + case VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV: + return 
"VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV"; + case VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV: + return "VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV"; + case VK_STRUCTURE_TYPE_GEOMETRY_NV: + return "VK_STRUCTURE_TYPE_GEOMETRY_NV"; + case VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV: + return "VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV"; + case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_HDR_METADATA_EXT: + return "VK_STRUCTURE_TYPE_HDR_METADATA_EXT"; + case VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA: + return "VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA"; + case VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR: + return "VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR"; + case VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR: + return "VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR"; + case VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: + return "VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO"; + case VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2: + return "VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER: + return "VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER"; + case VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2: + return "VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2"; + case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: + return "VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO"; + case VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR: + return "VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR"; + case VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2: + return "VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2"; + case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX: + return "VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX"; + case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT: + return "VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT"; + case VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO: + return "VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO"; + case VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX: + return "VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX"; + case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: + return 
"VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID"; + case VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR"; + case VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR"; + case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT: + return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT"; + case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV: + return "VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV"; + case VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR"; + case VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV: + return "VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV"; + case VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL: + return "VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL"; + case VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK: + return "VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK"; + case VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK: + return "VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK"; + case VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE: + return "VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE"; + case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO: + return "VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO"; + case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_MEMORY_BARRIER: + return "VK_STRUCTURE_TYPE_MEMORY_BARRIER"; + case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: + return "VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS"; + case VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID: + return "VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID"; + case VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR: + return "VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR"; + case VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO: + return "VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO"; + case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2: + return 
"VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2"; + case VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL: + return "VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL"; + case VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR: + return "VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR"; + case VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR: + return "VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR"; + case VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL: + return "VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL"; + case VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL: + return "VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL"; + case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR: + return "VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR"; + case VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL: + return "VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV: + return 
"VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO"; + 
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT: + return 
"VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: + return 
"VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL: + return 
"VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: + return 
"VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES"; + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD: + return "VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD"; + case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO"; + case 
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD: + return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD"; + case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP: + return 
"VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP"; + case VK_STRUCTURE_TYPE_PRESENT_INFO_KHR: + return "VK_STRUCTURE_TYPE_PRESENT_INFO_KHR"; + case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR: + return "VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR"; + case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE: + return "VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE"; + case VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO: + return "VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO"; + case VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO: + return "VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO"; + case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL: + return "VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL"; + case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV: + return "VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV"; + case VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2: + return "VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV: + return "VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV"; + case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO: + return "VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO"; + case VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO: + return "VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO"; + case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO: + return "VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO"; + case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2: + return "VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2"; + case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO: + return "VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO"; + case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO: + return "VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO"; + case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT: + return "VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT"; + case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM: + return "VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM"; + case VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR: + return "VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR"; + case VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO: + return "VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO"; + case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO: + 
return "VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO"; + case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES: + return "VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES"; + case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO: + return "VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO"; + case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT: + return "VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT"; + case VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR: + return "VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR"; + case VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR: + return "VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR"; + case VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO: + return "VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO"; + case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO: + return "VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO"; + case VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO: + return "VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO"; + case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR: + return "VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR"; + case VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2: + return "VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2"; + case VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2: + return "VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2"; + case VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP: + return "VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP"; + case VK_STRUCTURE_TYPE_SUBMIT_INFO: + return "VK_STRUCTURE_TYPE_SUBMIT_INFO"; + case VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO: + return "VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO"; + case VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2: + return "VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2"; + case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2: + return "VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2"; + case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE: + return "VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE"; + case VK_STRUCTURE_TYPE_SUBPASS_END_INFO: + return "VK_STRUCTURE_TYPE_SUBPASS_END_INFO"; + case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT: + return "VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT"; + case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR: + return "VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR"; + case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT: + return "VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT"; + case VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR: + return "VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR"; + case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT: + return "VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT"; + case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT: + return "VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT"; + case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: + return "VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR"; + case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR: + return 
"VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD: + return "VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD"; + case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD: + return "VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD"; + case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO: + return "VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO"; + case VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT: + return "VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT"; + case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT: + return "VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT"; + case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT: + return "VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT"; + case VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN: + return "VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN"; + case VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR: + return "VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR"; + case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV: + return "VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV"; + case VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET: + return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET"; + case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR: + return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR"; + case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV: + return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV"; + case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT: + return "VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT"; + case VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR"; + case VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR: + return "VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR"; + default: + return "Unhandled VkStructureType"; + } +} + +static inline const char* string_VkAccessFlagBits(VkAccessFlagBits input_value) +{ + switch ((VkAccessFlagBits)input_value) + { + case VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR: + return "VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR"; + case VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR: + return "VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR"; + case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: + return "VK_ACCESS_COLOR_ATTACHMENT_READ_BIT"; + case VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT: + return "VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT"; + case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: + return "VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT"; + case VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV: + return "VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV"; + case VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV: + return "VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV"; + case VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT: + return "VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT"; + case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: + return "VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT"; + case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: + return "VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT"; + case VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT: + return "VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT"; + case 
VK_ACCESS_HOST_READ_BIT: + return "VK_ACCESS_HOST_READ_BIT"; + case VK_ACCESS_HOST_WRITE_BIT: + return "VK_ACCESS_HOST_WRITE_BIT"; + case VK_ACCESS_INDEX_READ_BIT: + return "VK_ACCESS_INDEX_READ_BIT"; + case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: + return "VK_ACCESS_INDIRECT_COMMAND_READ_BIT"; + case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: + return "VK_ACCESS_INPUT_ATTACHMENT_READ_BIT"; + case VK_ACCESS_MEMORY_READ_BIT: + return "VK_ACCESS_MEMORY_READ_BIT"; + case VK_ACCESS_MEMORY_WRITE_BIT: + return "VK_ACCESS_MEMORY_WRITE_BIT"; + case VK_ACCESS_SHADER_READ_BIT: + return "VK_ACCESS_SHADER_READ_BIT"; + case VK_ACCESS_SHADER_WRITE_BIT: + return "VK_ACCESS_SHADER_WRITE_BIT"; + case VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV: + return "VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV"; + case VK_ACCESS_TRANSFER_READ_BIT: + return "VK_ACCESS_TRANSFER_READ_BIT"; + case VK_ACCESS_TRANSFER_WRITE_BIT: + return "VK_ACCESS_TRANSFER_WRITE_BIT"; + case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT: + return "VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT"; + case VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT: + return "VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT"; + case VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT: + return "VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT"; + case VK_ACCESS_UNIFORM_READ_BIT: + return "VK_ACCESS_UNIFORM_READ_BIT"; + case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: + return "VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT"; + default: + return "Unhandled VkAccessFlagBits"; + } +} + +static inline std::string string_VkAccessFlags(VkAccessFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkAccessFlagBits(static_cast<VkAccessFlagBits>(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkAccessFlagBits(static_cast<VkAccessFlagBits>(0))); + return ret; +} + +static inline const char* string_VkImageLayout(VkImageLayout input_value) +{ + switch ((VkImageLayout)input_value) + { + case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: + return "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: + return "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: + return "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: + return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: + return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: + return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: + return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT: + return "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT"; + case VK_IMAGE_LAYOUT_GENERAL: + return "VK_IMAGE_LAYOUT_GENERAL"; + case VK_IMAGE_LAYOUT_PREINITIALIZED: + return "VK_IMAGE_LAYOUT_PREINITIALIZED"; + case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: + return "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR"; + case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: + return "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV: + return "VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV"; + case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: + return "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR"; + case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: + return 
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL"; + case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: + return "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL"; + case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: + return "VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL"; + case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: + return "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL"; + case VK_IMAGE_LAYOUT_UNDEFINED: + return "VK_IMAGE_LAYOUT_UNDEFINED"; + default: + return "Unhandled VkImageLayout"; + } +} + +static inline const char* string_VkImageAspectFlagBits(VkImageAspectFlagBits input_value) +{ + switch ((VkImageAspectFlagBits)input_value) + { + case VK_IMAGE_ASPECT_COLOR_BIT: + return "VK_IMAGE_ASPECT_COLOR_BIT"; + case VK_IMAGE_ASPECT_DEPTH_BIT: + return "VK_IMAGE_ASPECT_DEPTH_BIT"; + case VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT: + return "VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT"; + case VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT: + return "VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT"; + case VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT: + return "VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT"; + case VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT: + return "VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT"; + case VK_IMAGE_ASPECT_METADATA_BIT: + return "VK_IMAGE_ASPECT_METADATA_BIT"; + case VK_IMAGE_ASPECT_PLANE_0_BIT: + return "VK_IMAGE_ASPECT_PLANE_0_BIT"; + case VK_IMAGE_ASPECT_PLANE_1_BIT: + return "VK_IMAGE_ASPECT_PLANE_1_BIT"; + case VK_IMAGE_ASPECT_PLANE_2_BIT: + return "VK_IMAGE_ASPECT_PLANE_2_BIT"; + case VK_IMAGE_ASPECT_STENCIL_BIT: + return "VK_IMAGE_ASPECT_STENCIL_BIT"; + default: + return "Unhandled VkImageAspectFlagBits"; + } +} + +static inline std::string string_VkImageAspectFlags(VkImageAspectFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkImageAspectFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkImageAspectFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkObjectType(VkObjectType input_value) +{ + switch ((VkObjectType)input_value) + { + case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR: + return "VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR"; + case VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV: + return "VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV"; + case VK_OBJECT_TYPE_BUFFER: + return "VK_OBJECT_TYPE_BUFFER"; + case VK_OBJECT_TYPE_BUFFER_VIEW: + return "VK_OBJECT_TYPE_BUFFER_VIEW"; + case VK_OBJECT_TYPE_COMMAND_BUFFER: + return "VK_OBJECT_TYPE_COMMAND_BUFFER"; + case VK_OBJECT_TYPE_COMMAND_POOL: + return "VK_OBJECT_TYPE_COMMAND_POOL"; + case VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT: + return "VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT"; + case VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT: + return "VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT"; + case VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR: + return "VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR"; + case VK_OBJECT_TYPE_DESCRIPTOR_POOL: + return "VK_OBJECT_TYPE_DESCRIPTOR_POOL"; + case VK_OBJECT_TYPE_DESCRIPTOR_SET: + return "VK_OBJECT_TYPE_DESCRIPTOR_SET"; + case VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT: + return "VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT"; + case VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE: + return "VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE"; + case VK_OBJECT_TYPE_DEVICE: + return "VK_OBJECT_TYPE_DEVICE"; + case VK_OBJECT_TYPE_DEVICE_MEMORY: + return "VK_OBJECT_TYPE_DEVICE_MEMORY"; + case VK_OBJECT_TYPE_DISPLAY_KHR: + return "VK_OBJECT_TYPE_DISPLAY_KHR"; + case VK_OBJECT_TYPE_DISPLAY_MODE_KHR: + return 
"VK_OBJECT_TYPE_DISPLAY_MODE_KHR"; + case VK_OBJECT_TYPE_EVENT: + return "VK_OBJECT_TYPE_EVENT"; + case VK_OBJECT_TYPE_FENCE: + return "VK_OBJECT_TYPE_FENCE"; + case VK_OBJECT_TYPE_FRAMEBUFFER: + return "VK_OBJECT_TYPE_FRAMEBUFFER"; + case VK_OBJECT_TYPE_IMAGE: + return "VK_OBJECT_TYPE_IMAGE"; + case VK_OBJECT_TYPE_IMAGE_VIEW: + return "VK_OBJECT_TYPE_IMAGE_VIEW"; + case VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV: + return "VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV"; + case VK_OBJECT_TYPE_INSTANCE: + return "VK_OBJECT_TYPE_INSTANCE"; + case VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL: + return "VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL"; + case VK_OBJECT_TYPE_PHYSICAL_DEVICE: + return "VK_OBJECT_TYPE_PHYSICAL_DEVICE"; + case VK_OBJECT_TYPE_PIPELINE: + return "VK_OBJECT_TYPE_PIPELINE"; + case VK_OBJECT_TYPE_PIPELINE_CACHE: + return "VK_OBJECT_TYPE_PIPELINE_CACHE"; + case VK_OBJECT_TYPE_PIPELINE_LAYOUT: + return "VK_OBJECT_TYPE_PIPELINE_LAYOUT"; + case VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT: + return "VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT"; + case VK_OBJECT_TYPE_QUERY_POOL: + return "VK_OBJECT_TYPE_QUERY_POOL"; + case VK_OBJECT_TYPE_QUEUE: + return "VK_OBJECT_TYPE_QUEUE"; + case VK_OBJECT_TYPE_RENDER_PASS: + return "VK_OBJECT_TYPE_RENDER_PASS"; + case VK_OBJECT_TYPE_SAMPLER: + return "VK_OBJECT_TYPE_SAMPLER"; + case VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION: + return "VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION"; + case VK_OBJECT_TYPE_SEMAPHORE: + return "VK_OBJECT_TYPE_SEMAPHORE"; + case VK_OBJECT_TYPE_SHADER_MODULE: + return "VK_OBJECT_TYPE_SHADER_MODULE"; + case VK_OBJECT_TYPE_SURFACE_KHR: + return "VK_OBJECT_TYPE_SURFACE_KHR"; + case VK_OBJECT_TYPE_SWAPCHAIN_KHR: + return "VK_OBJECT_TYPE_SWAPCHAIN_KHR"; + case VK_OBJECT_TYPE_UNKNOWN: + return "VK_OBJECT_TYPE_UNKNOWN"; + case VK_OBJECT_TYPE_VALIDATION_CACHE_EXT: + return "VK_OBJECT_TYPE_VALIDATION_CACHE_EXT"; + default: + return "Unhandled VkObjectType"; + } +} + +static inline const char* string_VkVendorId(VkVendorId input_value) +{ + switch ((VkVendorId)input_value) + { + case VK_VENDOR_ID_CODEPLAY: + return "VK_VENDOR_ID_CODEPLAY"; + case VK_VENDOR_ID_KAZAN: + return "VK_VENDOR_ID_KAZAN"; + case VK_VENDOR_ID_MESA: + return "VK_VENDOR_ID_MESA"; + case VK_VENDOR_ID_VIV: + return "VK_VENDOR_ID_VIV"; + case VK_VENDOR_ID_VSI: + return "VK_VENDOR_ID_VSI"; + default: + return "Unhandled VkVendorId"; + } +} + +static inline const char* string_VkPipelineCacheHeaderVersion(VkPipelineCacheHeaderVersion input_value) +{ + switch ((VkPipelineCacheHeaderVersion)input_value) + { + case VK_PIPELINE_CACHE_HEADER_VERSION_ONE: + return "VK_PIPELINE_CACHE_HEADER_VERSION_ONE"; + default: + return "Unhandled VkPipelineCacheHeaderVersion"; + } +} + +static inline const char* string_VkSystemAllocationScope(VkSystemAllocationScope input_value) +{ + switch ((VkSystemAllocationScope)input_value) + { + case VK_SYSTEM_ALLOCATION_SCOPE_CACHE: + return "VK_SYSTEM_ALLOCATION_SCOPE_CACHE"; + case VK_SYSTEM_ALLOCATION_SCOPE_COMMAND: + return "VK_SYSTEM_ALLOCATION_SCOPE_COMMAND"; + case VK_SYSTEM_ALLOCATION_SCOPE_DEVICE: + return "VK_SYSTEM_ALLOCATION_SCOPE_DEVICE"; + case VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE: + return "VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE"; + case VK_SYSTEM_ALLOCATION_SCOPE_OBJECT: + return "VK_SYSTEM_ALLOCATION_SCOPE_OBJECT"; + default: + return "Unhandled VkSystemAllocationScope"; + } +} + +static inline const char* string_VkInternalAllocationType(VkInternalAllocationType input_value) +{ + switch ((VkInternalAllocationType)input_value) + { + case 
VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE: + return "VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE"; + default: + return "Unhandled VkInternalAllocationType"; + } +} + +static inline const char* string_VkFormat(VkFormat input_value) +{ + switch ((VkFormat)input_value) + { + case VK_FORMAT_A1R5G5B5_UNORM_PACK16: + return "VK_FORMAT_A1R5G5B5_UNORM_PACK16"; + case VK_FORMAT_A2B10G10R10_SINT_PACK32: + return "VK_FORMAT_A2B10G10R10_SINT_PACK32"; + case VK_FORMAT_A2B10G10R10_SNORM_PACK32: + return "VK_FORMAT_A2B10G10R10_SNORM_PACK32"; + case VK_FORMAT_A2B10G10R10_SSCALED_PACK32: + return "VK_FORMAT_A2B10G10R10_SSCALED_PACK32"; + case VK_FORMAT_A2B10G10R10_UINT_PACK32: + return "VK_FORMAT_A2B10G10R10_UINT_PACK32"; + case VK_FORMAT_A2B10G10R10_UNORM_PACK32: + return "VK_FORMAT_A2B10G10R10_UNORM_PACK32"; + case VK_FORMAT_A2B10G10R10_USCALED_PACK32: + return "VK_FORMAT_A2B10G10R10_USCALED_PACK32"; + case VK_FORMAT_A2R10G10B10_SINT_PACK32: + return "VK_FORMAT_A2R10G10B10_SINT_PACK32"; + case VK_FORMAT_A2R10G10B10_SNORM_PACK32: + return "VK_FORMAT_A2R10G10B10_SNORM_PACK32"; + case VK_FORMAT_A2R10G10B10_SSCALED_PACK32: + return "VK_FORMAT_A2R10G10B10_SSCALED_PACK32"; + case VK_FORMAT_A2R10G10B10_UINT_PACK32: + return "VK_FORMAT_A2R10G10B10_UINT_PACK32"; + case VK_FORMAT_A2R10G10B10_UNORM_PACK32: + return "VK_FORMAT_A2R10G10B10_UNORM_PACK32"; + case VK_FORMAT_A2R10G10B10_USCALED_PACK32: + return "VK_FORMAT_A2R10G10B10_USCALED_PACK32"; + case VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT: + return "VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT"; + case VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT: + return "VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT"; + case VK_FORMAT_A8B8G8R8_SINT_PACK32: + return "VK_FORMAT_A8B8G8R8_SINT_PACK32"; + case VK_FORMAT_A8B8G8R8_SNORM_PACK32: + return "VK_FORMAT_A8B8G8R8_SNORM_PACK32"; + case VK_FORMAT_A8B8G8R8_SRGB_PACK32: + return "VK_FORMAT_A8B8G8R8_SRGB_PACK32"; + case VK_FORMAT_A8B8G8R8_SSCALED_PACK32: + return "VK_FORMAT_A8B8G8R8_SSCALED_PACK32"; + case VK_FORMAT_A8B8G8R8_UINT_PACK32: + return "VK_FORMAT_A8B8G8R8_UINT_PACK32"; + case VK_FORMAT_A8B8G8R8_UNORM_PACK32: + return "VK_FORMAT_A8B8G8R8_UNORM_PACK32"; + case VK_FORMAT_A8B8G8R8_USCALED_PACK32: + return "VK_FORMAT_A8B8G8R8_USCALED_PACK32"; + case VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_10x10_SRGB_BLOCK: + return "VK_FORMAT_ASTC_10x10_SRGB_BLOCK"; + case VK_FORMAT_ASTC_10x10_UNORM_BLOCK: + return "VK_FORMAT_ASTC_10x10_UNORM_BLOCK"; + case VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_10x5_SRGB_BLOCK: + return "VK_FORMAT_ASTC_10x5_SRGB_BLOCK"; + case VK_FORMAT_ASTC_10x5_UNORM_BLOCK: + return "VK_FORMAT_ASTC_10x5_UNORM_BLOCK"; + case VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_10x6_SRGB_BLOCK: + return "VK_FORMAT_ASTC_10x6_SRGB_BLOCK"; + case VK_FORMAT_ASTC_10x6_UNORM_BLOCK: + return "VK_FORMAT_ASTC_10x6_UNORM_BLOCK"; + case VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_10x8_SRGB_BLOCK: + return "VK_FORMAT_ASTC_10x8_SRGB_BLOCK"; + case VK_FORMAT_ASTC_10x8_UNORM_BLOCK: + return "VK_FORMAT_ASTC_10x8_UNORM_BLOCK"; + case VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_12x10_SRGB_BLOCK: + return "VK_FORMAT_ASTC_12x10_SRGB_BLOCK"; + case VK_FORMAT_ASTC_12x10_UNORM_BLOCK: + return "VK_FORMAT_ASTC_12x10_UNORM_BLOCK"; + case 
VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_12x12_SRGB_BLOCK: + return "VK_FORMAT_ASTC_12x12_SRGB_BLOCK"; + case VK_FORMAT_ASTC_12x12_UNORM_BLOCK: + return "VK_FORMAT_ASTC_12x12_UNORM_BLOCK"; + case VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: + return "VK_FORMAT_ASTC_4x4_SRGB_BLOCK"; + case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: + return "VK_FORMAT_ASTC_4x4_UNORM_BLOCK"; + case VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_5x4_SRGB_BLOCK: + return "VK_FORMAT_ASTC_5x4_SRGB_BLOCK"; + case VK_FORMAT_ASTC_5x4_UNORM_BLOCK: + return "VK_FORMAT_ASTC_5x4_UNORM_BLOCK"; + case VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_5x5_SRGB_BLOCK: + return "VK_FORMAT_ASTC_5x5_SRGB_BLOCK"; + case VK_FORMAT_ASTC_5x5_UNORM_BLOCK: + return "VK_FORMAT_ASTC_5x5_UNORM_BLOCK"; + case VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_6x5_SRGB_BLOCK: + return "VK_FORMAT_ASTC_6x5_SRGB_BLOCK"; + case VK_FORMAT_ASTC_6x5_UNORM_BLOCK: + return "VK_FORMAT_ASTC_6x5_UNORM_BLOCK"; + case VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_6x6_SRGB_BLOCK: + return "VK_FORMAT_ASTC_6x6_SRGB_BLOCK"; + case VK_FORMAT_ASTC_6x6_UNORM_BLOCK: + return "VK_FORMAT_ASTC_6x6_UNORM_BLOCK"; + case VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_8x5_SRGB_BLOCK: + return "VK_FORMAT_ASTC_8x5_SRGB_BLOCK"; + case VK_FORMAT_ASTC_8x5_UNORM_BLOCK: + return "VK_FORMAT_ASTC_8x5_UNORM_BLOCK"; + case VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_8x6_SRGB_BLOCK: + return "VK_FORMAT_ASTC_8x6_SRGB_BLOCK"; + case VK_FORMAT_ASTC_8x6_UNORM_BLOCK: + return "VK_FORMAT_ASTC_8x6_UNORM_BLOCK"; + case VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT: + return "VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT"; + case VK_FORMAT_ASTC_8x8_SRGB_BLOCK: + return "VK_FORMAT_ASTC_8x8_SRGB_BLOCK"; + case VK_FORMAT_ASTC_8x8_UNORM_BLOCK: + return "VK_FORMAT_ASTC_8x8_UNORM_BLOCK"; + case VK_FORMAT_B10G11R11_UFLOAT_PACK32: + return "VK_FORMAT_B10G11R11_UFLOAT_PACK32"; + case VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16: + return "VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16"; + case VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16: + return "VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16"; + case VK_FORMAT_B16G16R16G16_422_UNORM: + return "VK_FORMAT_B16G16R16G16_422_UNORM"; + case VK_FORMAT_B4G4R4A4_UNORM_PACK16: + return "VK_FORMAT_B4G4R4A4_UNORM_PACK16"; + case VK_FORMAT_B5G5R5A1_UNORM_PACK16: + return "VK_FORMAT_B5G5R5A1_UNORM_PACK16"; + case VK_FORMAT_B5G6R5_UNORM_PACK16: + return "VK_FORMAT_B5G6R5_UNORM_PACK16"; + case VK_FORMAT_B8G8R8A8_SINT: + return "VK_FORMAT_B8G8R8A8_SINT"; + case VK_FORMAT_B8G8R8A8_SNORM: + return "VK_FORMAT_B8G8R8A8_SNORM"; + case VK_FORMAT_B8G8R8A8_SRGB: + return "VK_FORMAT_B8G8R8A8_SRGB"; + case VK_FORMAT_B8G8R8A8_SSCALED: + return "VK_FORMAT_B8G8R8A8_SSCALED"; + case VK_FORMAT_B8G8R8A8_UINT: + return "VK_FORMAT_B8G8R8A8_UINT"; + case VK_FORMAT_B8G8R8A8_UNORM: + return "VK_FORMAT_B8G8R8A8_UNORM"; + case VK_FORMAT_B8G8R8A8_USCALED: + return "VK_FORMAT_B8G8R8A8_USCALED"; + case VK_FORMAT_B8G8R8G8_422_UNORM: + return "VK_FORMAT_B8G8R8G8_422_UNORM"; + case VK_FORMAT_B8G8R8_SINT: + 
return "VK_FORMAT_B8G8R8_SINT"; + case VK_FORMAT_B8G8R8_SNORM: + return "VK_FORMAT_B8G8R8_SNORM"; + case VK_FORMAT_B8G8R8_SRGB: + return "VK_FORMAT_B8G8R8_SRGB"; + case VK_FORMAT_B8G8R8_SSCALED: + return "VK_FORMAT_B8G8R8_SSCALED"; + case VK_FORMAT_B8G8R8_UINT: + return "VK_FORMAT_B8G8R8_UINT"; + case VK_FORMAT_B8G8R8_UNORM: + return "VK_FORMAT_B8G8R8_UNORM"; + case VK_FORMAT_B8G8R8_USCALED: + return "VK_FORMAT_B8G8R8_USCALED"; + case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: + return "VK_FORMAT_BC1_RGBA_SRGB_BLOCK"; + case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: + return "VK_FORMAT_BC1_RGBA_UNORM_BLOCK"; + case VK_FORMAT_BC1_RGB_SRGB_BLOCK: + return "VK_FORMAT_BC1_RGB_SRGB_BLOCK"; + case VK_FORMAT_BC1_RGB_UNORM_BLOCK: + return "VK_FORMAT_BC1_RGB_UNORM_BLOCK"; + case VK_FORMAT_BC2_SRGB_BLOCK: + return "VK_FORMAT_BC2_SRGB_BLOCK"; + case VK_FORMAT_BC2_UNORM_BLOCK: + return "VK_FORMAT_BC2_UNORM_BLOCK"; + case VK_FORMAT_BC3_SRGB_BLOCK: + return "VK_FORMAT_BC3_SRGB_BLOCK"; + case VK_FORMAT_BC3_UNORM_BLOCK: + return "VK_FORMAT_BC3_UNORM_BLOCK"; + case VK_FORMAT_BC4_SNORM_BLOCK: + return "VK_FORMAT_BC4_SNORM_BLOCK"; + case VK_FORMAT_BC4_UNORM_BLOCK: + return "VK_FORMAT_BC4_UNORM_BLOCK"; + case VK_FORMAT_BC5_SNORM_BLOCK: + return "VK_FORMAT_BC5_SNORM_BLOCK"; + case VK_FORMAT_BC5_UNORM_BLOCK: + return "VK_FORMAT_BC5_UNORM_BLOCK"; + case VK_FORMAT_BC6H_SFLOAT_BLOCK: + return "VK_FORMAT_BC6H_SFLOAT_BLOCK"; + case VK_FORMAT_BC6H_UFLOAT_BLOCK: + return "VK_FORMAT_BC6H_UFLOAT_BLOCK"; + case VK_FORMAT_BC7_SRGB_BLOCK: + return "VK_FORMAT_BC7_SRGB_BLOCK"; + case VK_FORMAT_BC7_UNORM_BLOCK: + return "VK_FORMAT_BC7_UNORM_BLOCK"; + case VK_FORMAT_D16_UNORM: + return "VK_FORMAT_D16_UNORM"; + case VK_FORMAT_D16_UNORM_S8_UINT: + return "VK_FORMAT_D16_UNORM_S8_UINT"; + case VK_FORMAT_D24_UNORM_S8_UINT: + return "VK_FORMAT_D24_UNORM_S8_UINT"; + case VK_FORMAT_D32_SFLOAT: + return "VK_FORMAT_D32_SFLOAT"; + case VK_FORMAT_D32_SFLOAT_S8_UINT: + return "VK_FORMAT_D32_SFLOAT_S8_UINT"; + case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: + return "VK_FORMAT_E5B9G9R9_UFLOAT_PACK32"; + case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: + return "VK_FORMAT_EAC_R11G11_SNORM_BLOCK"; + case VK_FORMAT_EAC_R11G11_UNORM_BLOCK: + return "VK_FORMAT_EAC_R11G11_UNORM_BLOCK"; + case VK_FORMAT_EAC_R11_SNORM_BLOCK: + return "VK_FORMAT_EAC_R11_SNORM_BLOCK"; + case VK_FORMAT_EAC_R11_UNORM_BLOCK: + return "VK_FORMAT_EAC_R11_UNORM_BLOCK"; + case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: + return "VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK"; + case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: + return "VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK"; + case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: + return "VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK"; + case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK: + return "VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK"; + case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: + return "VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK"; + case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: + return "VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK"; + case VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16: + return "VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16"; + case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16: + return "VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16"; + case VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16: + return "VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16"; + case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16: + return "VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16"; + case VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16: + return "VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16"; + case 
VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16: + return "VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16"; + case VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16: + return "VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16"; + case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16: + return "VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16"; + case VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16: + return "VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16"; + case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16: + return "VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16"; + case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16: + return "VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16"; + case VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16: + return "VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16"; + case VK_FORMAT_G16B16G16R16_422_UNORM: + return "VK_FORMAT_G16B16G16R16_422_UNORM"; + case VK_FORMAT_G16_B16R16_2PLANE_420_UNORM: + return "VK_FORMAT_G16_B16R16_2PLANE_420_UNORM"; + case VK_FORMAT_G16_B16R16_2PLANE_422_UNORM: + return "VK_FORMAT_G16_B16R16_2PLANE_422_UNORM"; + case VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM: + return "VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM"; + case VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM: + return "VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM"; + case VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM: + return "VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM"; + case VK_FORMAT_G8B8G8R8_422_UNORM: + return "VK_FORMAT_G8B8G8R8_422_UNORM"; + case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM: + return "VK_FORMAT_G8_B8R8_2PLANE_420_UNORM"; + case VK_FORMAT_G8_B8R8_2PLANE_422_UNORM: + return "VK_FORMAT_G8_B8R8_2PLANE_422_UNORM"; + case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM: + return "VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM"; + case VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM: + return "VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM"; + case VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM: + return "VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM"; + case VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG: + return "VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG"; + case VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG: + return "VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG"; + case VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG: + return "VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG"; + case VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG: + return "VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG"; + case VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG: + return "VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG"; + case VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG: + return "VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG"; + case VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG: + return "VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG"; + case VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG: + return "VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG"; + case VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16: + return "VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16"; + case VK_FORMAT_R10X6G10X6_UNORM_2PACK16: + return "VK_FORMAT_R10X6G10X6_UNORM_2PACK16"; + case VK_FORMAT_R10X6_UNORM_PACK16: + return "VK_FORMAT_R10X6_UNORM_PACK16"; + case VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16: + return "VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16"; + case VK_FORMAT_R12X4G12X4_UNORM_2PACK16: + return "VK_FORMAT_R12X4G12X4_UNORM_2PACK16"; + case VK_FORMAT_R12X4_UNORM_PACK16: + return "VK_FORMAT_R12X4_UNORM_PACK16"; + case VK_FORMAT_R16G16B16A16_SFLOAT: + return "VK_FORMAT_R16G16B16A16_SFLOAT"; + case VK_FORMAT_R16G16B16A16_SINT: + return "VK_FORMAT_R16G16B16A16_SINT"; + case VK_FORMAT_R16G16B16A16_SNORM: + return "VK_FORMAT_R16G16B16A16_SNORM"; + case 
VK_FORMAT_R16G16B16A16_SSCALED: + return "VK_FORMAT_R16G16B16A16_SSCALED"; + case VK_FORMAT_R16G16B16A16_UINT: + return "VK_FORMAT_R16G16B16A16_UINT"; + case VK_FORMAT_R16G16B16A16_UNORM: + return "VK_FORMAT_R16G16B16A16_UNORM"; + case VK_FORMAT_R16G16B16A16_USCALED: + return "VK_FORMAT_R16G16B16A16_USCALED"; + case VK_FORMAT_R16G16B16_SFLOAT: + return "VK_FORMAT_R16G16B16_SFLOAT"; + case VK_FORMAT_R16G16B16_SINT: + return "VK_FORMAT_R16G16B16_SINT"; + case VK_FORMAT_R16G16B16_SNORM: + return "VK_FORMAT_R16G16B16_SNORM"; + case VK_FORMAT_R16G16B16_SSCALED: + return "VK_FORMAT_R16G16B16_SSCALED"; + case VK_FORMAT_R16G16B16_UINT: + return "VK_FORMAT_R16G16B16_UINT"; + case VK_FORMAT_R16G16B16_UNORM: + return "VK_FORMAT_R16G16B16_UNORM"; + case VK_FORMAT_R16G16B16_USCALED: + return "VK_FORMAT_R16G16B16_USCALED"; + case VK_FORMAT_R16G16_SFLOAT: + return "VK_FORMAT_R16G16_SFLOAT"; + case VK_FORMAT_R16G16_SINT: + return "VK_FORMAT_R16G16_SINT"; + case VK_FORMAT_R16G16_SNORM: + return "VK_FORMAT_R16G16_SNORM"; + case VK_FORMAT_R16G16_SSCALED: + return "VK_FORMAT_R16G16_SSCALED"; + case VK_FORMAT_R16G16_UINT: + return "VK_FORMAT_R16G16_UINT"; + case VK_FORMAT_R16G16_UNORM: + return "VK_FORMAT_R16G16_UNORM"; + case VK_FORMAT_R16G16_USCALED: + return "VK_FORMAT_R16G16_USCALED"; + case VK_FORMAT_R16_SFLOAT: + return "VK_FORMAT_R16_SFLOAT"; + case VK_FORMAT_R16_SINT: + return "VK_FORMAT_R16_SINT"; + case VK_FORMAT_R16_SNORM: + return "VK_FORMAT_R16_SNORM"; + case VK_FORMAT_R16_SSCALED: + return "VK_FORMAT_R16_SSCALED"; + case VK_FORMAT_R16_UINT: + return "VK_FORMAT_R16_UINT"; + case VK_FORMAT_R16_UNORM: + return "VK_FORMAT_R16_UNORM"; + case VK_FORMAT_R16_USCALED: + return "VK_FORMAT_R16_USCALED"; + case VK_FORMAT_R32G32B32A32_SFLOAT: + return "VK_FORMAT_R32G32B32A32_SFLOAT"; + case VK_FORMAT_R32G32B32A32_SINT: + return "VK_FORMAT_R32G32B32A32_SINT"; + case VK_FORMAT_R32G32B32A32_UINT: + return "VK_FORMAT_R32G32B32A32_UINT"; + case VK_FORMAT_R32G32B32_SFLOAT: + return "VK_FORMAT_R32G32B32_SFLOAT"; + case VK_FORMAT_R32G32B32_SINT: + return "VK_FORMAT_R32G32B32_SINT"; + case VK_FORMAT_R32G32B32_UINT: + return "VK_FORMAT_R32G32B32_UINT"; + case VK_FORMAT_R32G32_SFLOAT: + return "VK_FORMAT_R32G32_SFLOAT"; + case VK_FORMAT_R32G32_SINT: + return "VK_FORMAT_R32G32_SINT"; + case VK_FORMAT_R32G32_UINT: + return "VK_FORMAT_R32G32_UINT"; + case VK_FORMAT_R32_SFLOAT: + return "VK_FORMAT_R32_SFLOAT"; + case VK_FORMAT_R32_SINT: + return "VK_FORMAT_R32_SINT"; + case VK_FORMAT_R32_UINT: + return "VK_FORMAT_R32_UINT"; + case VK_FORMAT_R4G4B4A4_UNORM_PACK16: + return "VK_FORMAT_R4G4B4A4_UNORM_PACK16"; + case VK_FORMAT_R4G4_UNORM_PACK8: + return "VK_FORMAT_R4G4_UNORM_PACK8"; + case VK_FORMAT_R5G5B5A1_UNORM_PACK16: + return "VK_FORMAT_R5G5B5A1_UNORM_PACK16"; + case VK_FORMAT_R5G6B5_UNORM_PACK16: + return "VK_FORMAT_R5G6B5_UNORM_PACK16"; + case VK_FORMAT_R64G64B64A64_SFLOAT: + return "VK_FORMAT_R64G64B64A64_SFLOAT"; + case VK_FORMAT_R64G64B64A64_SINT: + return "VK_FORMAT_R64G64B64A64_SINT"; + case VK_FORMAT_R64G64B64A64_UINT: + return "VK_FORMAT_R64G64B64A64_UINT"; + case VK_FORMAT_R64G64B64_SFLOAT: + return "VK_FORMAT_R64G64B64_SFLOAT"; + case VK_FORMAT_R64G64B64_SINT: + return "VK_FORMAT_R64G64B64_SINT"; + case VK_FORMAT_R64G64B64_UINT: + return "VK_FORMAT_R64G64B64_UINT"; + case VK_FORMAT_R64G64_SFLOAT: + return "VK_FORMAT_R64G64_SFLOAT"; + case VK_FORMAT_R64G64_SINT: + return "VK_FORMAT_R64G64_SINT"; + case VK_FORMAT_R64G64_UINT: + return "VK_FORMAT_R64G64_UINT"; + case VK_FORMAT_R64_SFLOAT: + return 
"VK_FORMAT_R64_SFLOAT"; + case VK_FORMAT_R64_SINT: + return "VK_FORMAT_R64_SINT"; + case VK_FORMAT_R64_UINT: + return "VK_FORMAT_R64_UINT"; + case VK_FORMAT_R8G8B8A8_SINT: + return "VK_FORMAT_R8G8B8A8_SINT"; + case VK_FORMAT_R8G8B8A8_SNORM: + return "VK_FORMAT_R8G8B8A8_SNORM"; + case VK_FORMAT_R8G8B8A8_SRGB: + return "VK_FORMAT_R8G8B8A8_SRGB"; + case VK_FORMAT_R8G8B8A8_SSCALED: + return "VK_FORMAT_R8G8B8A8_SSCALED"; + case VK_FORMAT_R8G8B8A8_UINT: + return "VK_FORMAT_R8G8B8A8_UINT"; + case VK_FORMAT_R8G8B8A8_UNORM: + return "VK_FORMAT_R8G8B8A8_UNORM"; + case VK_FORMAT_R8G8B8A8_USCALED: + return "VK_FORMAT_R8G8B8A8_USCALED"; + case VK_FORMAT_R8G8B8_SINT: + return "VK_FORMAT_R8G8B8_SINT"; + case VK_FORMAT_R8G8B8_SNORM: + return "VK_FORMAT_R8G8B8_SNORM"; + case VK_FORMAT_R8G8B8_SRGB: + return "VK_FORMAT_R8G8B8_SRGB"; + case VK_FORMAT_R8G8B8_SSCALED: + return "VK_FORMAT_R8G8B8_SSCALED"; + case VK_FORMAT_R8G8B8_UINT: + return "VK_FORMAT_R8G8B8_UINT"; + case VK_FORMAT_R8G8B8_UNORM: + return "VK_FORMAT_R8G8B8_UNORM"; + case VK_FORMAT_R8G8B8_USCALED: + return "VK_FORMAT_R8G8B8_USCALED"; + case VK_FORMAT_R8G8_SINT: + return "VK_FORMAT_R8G8_SINT"; + case VK_FORMAT_R8G8_SNORM: + return "VK_FORMAT_R8G8_SNORM"; + case VK_FORMAT_R8G8_SRGB: + return "VK_FORMAT_R8G8_SRGB"; + case VK_FORMAT_R8G8_SSCALED: + return "VK_FORMAT_R8G8_SSCALED"; + case VK_FORMAT_R8G8_UINT: + return "VK_FORMAT_R8G8_UINT"; + case VK_FORMAT_R8G8_UNORM: + return "VK_FORMAT_R8G8_UNORM"; + case VK_FORMAT_R8G8_USCALED: + return "VK_FORMAT_R8G8_USCALED"; + case VK_FORMAT_R8_SINT: + return "VK_FORMAT_R8_SINT"; + case VK_FORMAT_R8_SNORM: + return "VK_FORMAT_R8_SNORM"; + case VK_FORMAT_R8_SRGB: + return "VK_FORMAT_R8_SRGB"; + case VK_FORMAT_R8_SSCALED: + return "VK_FORMAT_R8_SSCALED"; + case VK_FORMAT_R8_UINT: + return "VK_FORMAT_R8_UINT"; + case VK_FORMAT_R8_UNORM: + return "VK_FORMAT_R8_UNORM"; + case VK_FORMAT_R8_USCALED: + return "VK_FORMAT_R8_USCALED"; + case VK_FORMAT_S8_UINT: + return "VK_FORMAT_S8_UINT"; + case VK_FORMAT_UNDEFINED: + return "VK_FORMAT_UNDEFINED"; + case VK_FORMAT_X8_D24_UNORM_PACK32: + return "VK_FORMAT_X8_D24_UNORM_PACK32"; + default: + return "Unhandled VkFormat"; + } +} + +static inline const char* string_VkFormatFeatureFlagBits(VkFormatFeatureFlagBits input_value) +{ + switch ((VkFormatFeatureFlagBits)input_value) + { + case VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR: + return "VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR"; + case VK_FORMAT_FEATURE_BLIT_DST_BIT: + return "VK_FORMAT_FEATURE_BLIT_DST_BIT"; + case VK_FORMAT_FEATURE_BLIT_SRC_BIT: + return "VK_FORMAT_FEATURE_BLIT_SRC_BIT"; + case VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT: + return "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT"; + case VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT: + return "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT"; + case VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT: + return "VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT"; + case VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT: + return "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT"; + case VK_FORMAT_FEATURE_DISJOINT_BIT: + return "VK_FORMAT_FEATURE_DISJOINT_BIT"; + case VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT: + return "VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT"; + case VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR: + return "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR"; + case VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT: + return "VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT"; + case 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT"; + case VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT: + return "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT"; + case VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT: + return "VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT"; + case VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT: + return "VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT"; + case VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT: + return "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT"; + case VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT: + return "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT"; + case VK_FORMAT_FEATURE_TRANSFER_DST_BIT: + return "VK_FORMAT_FEATURE_TRANSFER_DST_BIT"; + case VK_FORMAT_FEATURE_TRANSFER_SRC_BIT: + return "VK_FORMAT_FEATURE_TRANSFER_SRC_BIT"; + case VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT: + return "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT"; + case VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT: + return "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT"; + default: + return "Unhandled VkFormatFeatureFlagBits"; + } +} + +static inline std::string string_VkFormatFeatureFlags(VkFormatFeatureFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkFormatFeatureFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkFormatFeatureFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkImageCreateFlagBits(VkImageCreateFlagBits input_value) +{ + switch ((VkImageCreateFlagBits)input_value) + { + case VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT: + return "VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT"; + case VK_IMAGE_CREATE_ALIAS_BIT: + return "VK_IMAGE_CREATE_ALIAS_BIT"; + case VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT: + return "VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT"; + case VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV: + return "VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV"; + case VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT: + return "VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT"; + case VK_IMAGE_CREATE_DISJOINT_BIT: + return "VK_IMAGE_CREATE_DISJOINT_BIT"; + case VK_IMAGE_CREATE_EXTENDED_USAGE_BIT: + return "VK_IMAGE_CREATE_EXTENDED_USAGE_BIT"; + case VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT: + return "VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT"; + case VK_IMAGE_CREATE_PROTECTED_BIT: + return "VK_IMAGE_CREATE_PROTECTED_BIT"; + case VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT: + return 
"VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT"; + case VK_IMAGE_CREATE_SPARSE_ALIASED_BIT: + return "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT"; + case VK_IMAGE_CREATE_SPARSE_BINDING_BIT: + return "VK_IMAGE_CREATE_SPARSE_BINDING_BIT"; + case VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT: + return "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT"; + case VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT: + return "VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT"; + case VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT: + return "VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT"; + default: + return "Unhandled VkImageCreateFlagBits"; + } +} + +static inline std::string string_VkImageCreateFlags(VkImageCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkImageCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkImageCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSampleCountFlagBits(VkSampleCountFlagBits input_value) +{ + switch ((VkSampleCountFlagBits)input_value) + { + case VK_SAMPLE_COUNT_16_BIT: + return "VK_SAMPLE_COUNT_16_BIT"; + case VK_SAMPLE_COUNT_1_BIT: + return "VK_SAMPLE_COUNT_1_BIT"; + case VK_SAMPLE_COUNT_2_BIT: + return "VK_SAMPLE_COUNT_2_BIT"; + case VK_SAMPLE_COUNT_32_BIT: + return "VK_SAMPLE_COUNT_32_BIT"; + case VK_SAMPLE_COUNT_4_BIT: + return "VK_SAMPLE_COUNT_4_BIT"; + case VK_SAMPLE_COUNT_64_BIT: + return "VK_SAMPLE_COUNT_64_BIT"; + case VK_SAMPLE_COUNT_8_BIT: + return "VK_SAMPLE_COUNT_8_BIT"; + default: + return "Unhandled VkSampleCountFlagBits"; + } +} + +static inline std::string string_VkSampleCountFlags(VkSampleCountFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSampleCountFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSampleCountFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkImageTiling(VkImageTiling input_value) +{ + switch ((VkImageTiling)input_value) + { + case VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT: + return "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT"; + case VK_IMAGE_TILING_LINEAR: + return "VK_IMAGE_TILING_LINEAR"; + case VK_IMAGE_TILING_OPTIMAL: + return "VK_IMAGE_TILING_OPTIMAL"; + default: + return "Unhandled VkImageTiling"; + } +} + +static inline const char* string_VkImageType(VkImageType input_value) +{ + switch ((VkImageType)input_value) + { + case VK_IMAGE_TYPE_1D: + return "VK_IMAGE_TYPE_1D"; + case VK_IMAGE_TYPE_2D: + return "VK_IMAGE_TYPE_2D"; + case VK_IMAGE_TYPE_3D: + return "VK_IMAGE_TYPE_3D"; + default: + return "Unhandled VkImageType"; + } +} + +static inline const char* string_VkImageUsageFlagBits(VkImageUsageFlagBits input_value) +{ + switch ((VkImageUsageFlagBits)input_value) + { + case VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT: + return "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT"; + case VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT: + return "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT"; + case VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT: + return "VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT"; + case VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT: + return "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT"; + case VK_IMAGE_USAGE_SAMPLED_BIT: + return "VK_IMAGE_USAGE_SAMPLED_BIT"; + case VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV: + return "VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV"; + 
case VK_IMAGE_USAGE_STORAGE_BIT: + return "VK_IMAGE_USAGE_STORAGE_BIT"; + case VK_IMAGE_USAGE_TRANSFER_DST_BIT: + return "VK_IMAGE_USAGE_TRANSFER_DST_BIT"; + case VK_IMAGE_USAGE_TRANSFER_SRC_BIT: + return "VK_IMAGE_USAGE_TRANSFER_SRC_BIT"; + case VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT: + return "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT"; + default: + return "Unhandled VkImageUsageFlagBits"; + } +} + +static inline std::string string_VkImageUsageFlags(VkImageUsageFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkImageUsageFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkImageUsageFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkMemoryHeapFlagBits(VkMemoryHeapFlagBits input_value) +{ + switch ((VkMemoryHeapFlagBits)input_value) + { + case VK_MEMORY_HEAP_DEVICE_LOCAL_BIT: + return "VK_MEMORY_HEAP_DEVICE_LOCAL_BIT"; + case VK_MEMORY_HEAP_MULTI_INSTANCE_BIT: + return "VK_MEMORY_HEAP_MULTI_INSTANCE_BIT"; + default: + return "Unhandled VkMemoryHeapFlagBits"; + } +} + +static inline std::string string_VkMemoryHeapFlags(VkMemoryHeapFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkMemoryHeapFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkMemoryHeapFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkMemoryPropertyFlagBits(VkMemoryPropertyFlagBits input_value) +{ + switch ((VkMemoryPropertyFlagBits)input_value) + { + case VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD: + return "VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD"; + case VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT: + return "VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT"; + case VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD: + return "VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD"; + case VK_MEMORY_PROPERTY_HOST_CACHED_BIT: + return "VK_MEMORY_PROPERTY_HOST_CACHED_BIT"; + case VK_MEMORY_PROPERTY_HOST_COHERENT_BIT: + return "VK_MEMORY_PROPERTY_HOST_COHERENT_BIT"; + case VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT: + return "VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT"; + case VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT: + return "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT"; + case VK_MEMORY_PROPERTY_PROTECTED_BIT: + return "VK_MEMORY_PROPERTY_PROTECTED_BIT"; + default: + return "Unhandled VkMemoryPropertyFlagBits"; + } +} + +static inline std::string string_VkMemoryPropertyFlags(VkMemoryPropertyFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkMemoryPropertyFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkMemoryPropertyFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkPhysicalDeviceType(VkPhysicalDeviceType input_value) +{ + switch ((VkPhysicalDeviceType)input_value) + { + case VK_PHYSICAL_DEVICE_TYPE_CPU: + return "VK_PHYSICAL_DEVICE_TYPE_CPU"; + case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: + return "VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU"; + case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: + return "VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU"; + case VK_PHYSICAL_DEVICE_TYPE_OTHER: + return "VK_PHYSICAL_DEVICE_TYPE_OTHER"; + case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: + return 
"VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU"; + default: + return "Unhandled VkPhysicalDeviceType"; + } +} + +static inline const char* string_VkQueueFlagBits(VkQueueFlagBits input_value) +{ + switch ((VkQueueFlagBits)input_value) + { + case VK_QUEUE_COMPUTE_BIT: + return "VK_QUEUE_COMPUTE_BIT"; + case VK_QUEUE_GRAPHICS_BIT: + return "VK_QUEUE_GRAPHICS_BIT"; + case VK_QUEUE_PROTECTED_BIT: + return "VK_QUEUE_PROTECTED_BIT"; + case VK_QUEUE_SPARSE_BINDING_BIT: + return "VK_QUEUE_SPARSE_BINDING_BIT"; + case VK_QUEUE_TRANSFER_BIT: + return "VK_QUEUE_TRANSFER_BIT"; + default: + return "Unhandled VkQueueFlagBits"; + } +} + +static inline std::string string_VkQueueFlags(VkQueueFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkQueueFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkQueueFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkDeviceQueueCreateFlagBits(VkDeviceQueueCreateFlagBits input_value) +{ + switch ((VkDeviceQueueCreateFlagBits)input_value) + { + case VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT: + return "VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT"; + default: + return "Unhandled VkDeviceQueueCreateFlagBits"; + } +} + +static inline std::string string_VkDeviceQueueCreateFlags(VkDeviceQueueCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDeviceQueueCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDeviceQueueCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkPipelineStageFlagBits(VkPipelineStageFlagBits input_value) +{ + switch ((VkPipelineStageFlagBits)input_value) + { + case VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR: + return "VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR"; + case VK_PIPELINE_STAGE_ALL_COMMANDS_BIT: + return "VK_PIPELINE_STAGE_ALL_COMMANDS_BIT"; + case VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT: + return "VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT"; + case VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT: + return "VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT"; + case VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: + return "VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT"; + case VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV: + return "VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV"; + case VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: + return "VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT"; + case VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT: + return "VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT"; + case VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT: + return "VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT"; + case VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: + return "VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT"; + case VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT: + return "VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT"; + case VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: + return "VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT"; + case VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT: + return "VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT"; + case VK_PIPELINE_STAGE_HOST_BIT: + return "VK_PIPELINE_STAGE_HOST_BIT"; + case VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: + return "VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT"; + case VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV: + return "VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV"; + case 
VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR: + return "VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR"; + case VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV: + return "VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV"; + case VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV: + return "VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV"; + case VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT: + return "VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT"; + case VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT: + return "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT"; + case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT: + return "VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT"; + case VK_PIPELINE_STAGE_TRANSFER_BIT: + return "VK_PIPELINE_STAGE_TRANSFER_BIT"; + case VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT: + return "VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT"; + case VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: + return "VK_PIPELINE_STAGE_VERTEX_INPUT_BIT"; + case VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: + return "VK_PIPELINE_STAGE_VERTEX_SHADER_BIT"; + default: + return "Unhandled VkPipelineStageFlagBits"; + } +} + +static inline std::string string_VkPipelineStageFlags(VkPipelineStageFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkPipelineStageFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkPipelineStageFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSparseMemoryBindFlagBits(VkSparseMemoryBindFlagBits input_value) +{ + switch ((VkSparseMemoryBindFlagBits)input_value) + { + case VK_SPARSE_MEMORY_BIND_METADATA_BIT: + return "VK_SPARSE_MEMORY_BIND_METADATA_BIT"; + default: + return "Unhandled VkSparseMemoryBindFlagBits"; + } +} + +static inline std::string string_VkSparseMemoryBindFlags(VkSparseMemoryBindFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSparseMemoryBindFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSparseMemoryBindFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSparseImageFormatFlagBits(VkSparseImageFormatFlagBits input_value) +{ + switch ((VkSparseImageFormatFlagBits)input_value) + { + case VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT: + return "VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT"; + case VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT: + return "VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT"; + case VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT: + return "VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT"; + default: + return "Unhandled VkSparseImageFormatFlagBits"; + } +} + +static inline std::string string_VkSparseImageFormatFlags(VkSparseImageFormatFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSparseImageFormatFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSparseImageFormatFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkFenceCreateFlagBits(VkFenceCreateFlagBits input_value) +{ + switch ((VkFenceCreateFlagBits)input_value) + { + case VK_FENCE_CREATE_SIGNALED_BIT: + return "VK_FENCE_CREATE_SIGNALED_BIT"; + default: + return "Unhandled VkFenceCreateFlagBits"; + } +} + +static 
inline std::string string_VkFenceCreateFlags(VkFenceCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkFenceCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkFenceCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkQueryPipelineStatisticFlagBits(VkQueryPipelineStatisticFlagBits input_value) +{ + switch ((VkQueryPipelineStatisticFlagBits)input_value) + { + case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT"; + case VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT: + return "VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT"; + default: + return "Unhandled VkQueryPipelineStatisticFlagBits"; + } +} + +static inline std::string string_VkQueryPipelineStatisticFlags(VkQueryPipelineStatisticFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkQueryPipelineStatisticFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkQueryPipelineStatisticFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkQueryType(VkQueryType input_value) +{ + switch ((VkQueryType)input_value) + { + case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: + return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR"; + case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV: + return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV"; + case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: + return "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR"; + case VK_QUERY_TYPE_OCCLUSION: + return "VK_QUERY_TYPE_OCCLUSION"; + case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: + return "VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL"; + case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: + return "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR"; + case VK_QUERY_TYPE_PIPELINE_STATISTICS: + return "VK_QUERY_TYPE_PIPELINE_STATISTICS"; + case VK_QUERY_TYPE_TIMESTAMP: + return 
"VK_QUERY_TYPE_TIMESTAMP"; + case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: + return "VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT"; + default: + return "Unhandled VkQueryType"; + } +} + +static inline const char* string_VkQueryResultFlagBits(VkQueryResultFlagBits input_value) +{ + switch ((VkQueryResultFlagBits)input_value) + { + case VK_QUERY_RESULT_64_BIT: + return "VK_QUERY_RESULT_64_BIT"; + case VK_QUERY_RESULT_PARTIAL_BIT: + return "VK_QUERY_RESULT_PARTIAL_BIT"; + case VK_QUERY_RESULT_WAIT_BIT: + return "VK_QUERY_RESULT_WAIT_BIT"; + case VK_QUERY_RESULT_WITH_AVAILABILITY_BIT: + return "VK_QUERY_RESULT_WITH_AVAILABILITY_BIT"; + default: + return "Unhandled VkQueryResultFlagBits"; + } +} + +static inline std::string string_VkQueryResultFlags(VkQueryResultFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkQueryResultFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkQueryResultFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkBufferCreateFlagBits(VkBufferCreateFlagBits input_value) +{ + switch ((VkBufferCreateFlagBits)input_value) + { + case VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT: + return "VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT"; + case VK_BUFFER_CREATE_PROTECTED_BIT: + return "VK_BUFFER_CREATE_PROTECTED_BIT"; + case VK_BUFFER_CREATE_SPARSE_ALIASED_BIT: + return "VK_BUFFER_CREATE_SPARSE_ALIASED_BIT"; + case VK_BUFFER_CREATE_SPARSE_BINDING_BIT: + return "VK_BUFFER_CREATE_SPARSE_BINDING_BIT"; + case VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT: + return "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT"; + default: + return "Unhandled VkBufferCreateFlagBits"; + } +} + +static inline std::string string_VkBufferCreateFlags(VkBufferCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkBufferCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkBufferCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkBufferUsageFlagBits(VkBufferUsageFlagBits input_value) +{ + switch ((VkBufferUsageFlagBits)input_value) + { + case VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR: + return "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR"; + case VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR: + return "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR"; + case VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT: + return "VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT"; + case VK_BUFFER_USAGE_INDEX_BUFFER_BIT: + return "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"; + case VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT: + return "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT"; + case VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR: + return "VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR"; + case VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT: + return "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT"; + case VK_BUFFER_USAGE_STORAGE_BUFFER_BIT: + return "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT"; + case VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT: + return "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT"; + case VK_BUFFER_USAGE_TRANSFER_DST_BIT: + return "VK_BUFFER_USAGE_TRANSFER_DST_BIT"; + case VK_BUFFER_USAGE_TRANSFER_SRC_BIT: + return "VK_BUFFER_USAGE_TRANSFER_SRC_BIT"; + case 
VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT: + return "VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT"; + case VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT: + return "VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT"; + case VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT: + return "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT"; + case VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT: + return "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT"; + case VK_BUFFER_USAGE_VERTEX_BUFFER_BIT: + return "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"; + default: + return "Unhandled VkBufferUsageFlagBits"; + } +} + +static inline std::string string_VkBufferUsageFlags(VkBufferUsageFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkBufferUsageFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkBufferUsageFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSharingMode(VkSharingMode input_value) +{ + switch ((VkSharingMode)input_value) + { + case VK_SHARING_MODE_CONCURRENT: + return "VK_SHARING_MODE_CONCURRENT"; + case VK_SHARING_MODE_EXCLUSIVE: + return "VK_SHARING_MODE_EXCLUSIVE"; + default: + return "Unhandled VkSharingMode"; + } +} + +static inline const char* string_VkComponentSwizzle(VkComponentSwizzle input_value) +{ + switch ((VkComponentSwizzle)input_value) + { + case VK_COMPONENT_SWIZZLE_A: + return "VK_COMPONENT_SWIZZLE_A"; + case VK_COMPONENT_SWIZZLE_B: + return "VK_COMPONENT_SWIZZLE_B"; + case VK_COMPONENT_SWIZZLE_G: + return "VK_COMPONENT_SWIZZLE_G"; + case VK_COMPONENT_SWIZZLE_IDENTITY: + return "VK_COMPONENT_SWIZZLE_IDENTITY"; + case VK_COMPONENT_SWIZZLE_ONE: + return "VK_COMPONENT_SWIZZLE_ONE"; + case VK_COMPONENT_SWIZZLE_R: + return "VK_COMPONENT_SWIZZLE_R"; + case VK_COMPONENT_SWIZZLE_ZERO: + return "VK_COMPONENT_SWIZZLE_ZERO"; + default: + return "Unhandled VkComponentSwizzle"; + } +} + +static inline const char* string_VkImageViewCreateFlagBits(VkImageViewCreateFlagBits input_value) +{ + switch ((VkImageViewCreateFlagBits)input_value) + { + case VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT: + return "VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT"; + case VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT: + return "VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT"; + default: + return "Unhandled VkImageViewCreateFlagBits"; + } +} + +static inline std::string string_VkImageViewCreateFlags(VkImageViewCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkImageViewCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkImageViewCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkImageViewType(VkImageViewType input_value) +{ + switch ((VkImageViewType)input_value) + { + case VK_IMAGE_VIEW_TYPE_1D: + return "VK_IMAGE_VIEW_TYPE_1D"; + case VK_IMAGE_VIEW_TYPE_1D_ARRAY: + return "VK_IMAGE_VIEW_TYPE_1D_ARRAY"; + case VK_IMAGE_VIEW_TYPE_2D: + return "VK_IMAGE_VIEW_TYPE_2D"; + case VK_IMAGE_VIEW_TYPE_2D_ARRAY: + return "VK_IMAGE_VIEW_TYPE_2D_ARRAY"; + case VK_IMAGE_VIEW_TYPE_3D: + return "VK_IMAGE_VIEW_TYPE_3D"; + case VK_IMAGE_VIEW_TYPE_CUBE: + return "VK_IMAGE_VIEW_TYPE_CUBE"; + case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: + return 
"VK_IMAGE_VIEW_TYPE_CUBE_ARRAY"; + default: + return "Unhandled VkImageViewType"; + } +} + +static inline const char* string_VkPipelineCacheCreateFlagBits(VkPipelineCacheCreateFlagBits input_value) +{ + switch ((VkPipelineCacheCreateFlagBits)input_value) + { + case VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT: + return "VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT"; + default: + return "Unhandled VkPipelineCacheCreateFlagBits"; + } +} + +static inline std::string string_VkPipelineCacheCreateFlags(VkPipelineCacheCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkPipelineCacheCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkPipelineCacheCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkBlendFactor(VkBlendFactor input_value) +{ + switch ((VkBlendFactor)input_value) + { + case VK_BLEND_FACTOR_CONSTANT_ALPHA: + return "VK_BLEND_FACTOR_CONSTANT_ALPHA"; + case VK_BLEND_FACTOR_CONSTANT_COLOR: + return "VK_BLEND_FACTOR_CONSTANT_COLOR"; + case VK_BLEND_FACTOR_DST_ALPHA: + return "VK_BLEND_FACTOR_DST_ALPHA"; + case VK_BLEND_FACTOR_DST_COLOR: + return "VK_BLEND_FACTOR_DST_COLOR"; + case VK_BLEND_FACTOR_ONE: + return "VK_BLEND_FACTOR_ONE"; + case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA: + return "VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA"; + case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR: + return "VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR"; + case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA: + return "VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA"; + case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR: + return "VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR"; + case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA: + return "VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA"; + case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR: + return "VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR"; + case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA: + return "VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA"; + case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR: + return "VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR"; + case VK_BLEND_FACTOR_SRC1_ALPHA: + return "VK_BLEND_FACTOR_SRC1_ALPHA"; + case VK_BLEND_FACTOR_SRC1_COLOR: + return "VK_BLEND_FACTOR_SRC1_COLOR"; + case VK_BLEND_FACTOR_SRC_ALPHA: + return "VK_BLEND_FACTOR_SRC_ALPHA"; + case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE: + return "VK_BLEND_FACTOR_SRC_ALPHA_SATURATE"; + case VK_BLEND_FACTOR_SRC_COLOR: + return "VK_BLEND_FACTOR_SRC_COLOR"; + case VK_BLEND_FACTOR_ZERO: + return "VK_BLEND_FACTOR_ZERO"; + default: + return "Unhandled VkBlendFactor"; + } +} + +static inline const char* string_VkBlendOp(VkBlendOp input_value) +{ + switch ((VkBlendOp)input_value) + { + case VK_BLEND_OP_ADD: + return "VK_BLEND_OP_ADD"; + case VK_BLEND_OP_BLUE_EXT: + return "VK_BLEND_OP_BLUE_EXT"; + case VK_BLEND_OP_COLORBURN_EXT: + return "VK_BLEND_OP_COLORBURN_EXT"; + case VK_BLEND_OP_COLORDODGE_EXT: + return "VK_BLEND_OP_COLORDODGE_EXT"; + case VK_BLEND_OP_CONTRAST_EXT: + return "VK_BLEND_OP_CONTRAST_EXT"; + case VK_BLEND_OP_DARKEN_EXT: + return "VK_BLEND_OP_DARKEN_EXT"; + case VK_BLEND_OP_DIFFERENCE_EXT: + return "VK_BLEND_OP_DIFFERENCE_EXT"; + case VK_BLEND_OP_DST_ATOP_EXT: + return "VK_BLEND_OP_DST_ATOP_EXT"; + case VK_BLEND_OP_DST_EXT: + return "VK_BLEND_OP_DST_EXT"; + case VK_BLEND_OP_DST_IN_EXT: + return "VK_BLEND_OP_DST_IN_EXT"; + case VK_BLEND_OP_DST_OUT_EXT: + return "VK_BLEND_OP_DST_OUT_EXT"; + case VK_BLEND_OP_DST_OVER_EXT: + return 
"VK_BLEND_OP_DST_OVER_EXT"; + case VK_BLEND_OP_EXCLUSION_EXT: + return "VK_BLEND_OP_EXCLUSION_EXT"; + case VK_BLEND_OP_GREEN_EXT: + return "VK_BLEND_OP_GREEN_EXT"; + case VK_BLEND_OP_HARDLIGHT_EXT: + return "VK_BLEND_OP_HARDLIGHT_EXT"; + case VK_BLEND_OP_HARDMIX_EXT: + return "VK_BLEND_OP_HARDMIX_EXT"; + case VK_BLEND_OP_HSL_COLOR_EXT: + return "VK_BLEND_OP_HSL_COLOR_EXT"; + case VK_BLEND_OP_HSL_HUE_EXT: + return "VK_BLEND_OP_HSL_HUE_EXT"; + case VK_BLEND_OP_HSL_LUMINOSITY_EXT: + return "VK_BLEND_OP_HSL_LUMINOSITY_EXT"; + case VK_BLEND_OP_HSL_SATURATION_EXT: + return "VK_BLEND_OP_HSL_SATURATION_EXT"; + case VK_BLEND_OP_INVERT_EXT: + return "VK_BLEND_OP_INVERT_EXT"; + case VK_BLEND_OP_INVERT_OVG_EXT: + return "VK_BLEND_OP_INVERT_OVG_EXT"; + case VK_BLEND_OP_INVERT_RGB_EXT: + return "VK_BLEND_OP_INVERT_RGB_EXT"; + case VK_BLEND_OP_LIGHTEN_EXT: + return "VK_BLEND_OP_LIGHTEN_EXT"; + case VK_BLEND_OP_LINEARBURN_EXT: + return "VK_BLEND_OP_LINEARBURN_EXT"; + case VK_BLEND_OP_LINEARDODGE_EXT: + return "VK_BLEND_OP_LINEARDODGE_EXT"; + case VK_BLEND_OP_LINEARLIGHT_EXT: + return "VK_BLEND_OP_LINEARLIGHT_EXT"; + case VK_BLEND_OP_MAX: + return "VK_BLEND_OP_MAX"; + case VK_BLEND_OP_MIN: + return "VK_BLEND_OP_MIN"; + case VK_BLEND_OP_MINUS_CLAMPED_EXT: + return "VK_BLEND_OP_MINUS_CLAMPED_EXT"; + case VK_BLEND_OP_MINUS_EXT: + return "VK_BLEND_OP_MINUS_EXT"; + case VK_BLEND_OP_MULTIPLY_EXT: + return "VK_BLEND_OP_MULTIPLY_EXT"; + case VK_BLEND_OP_OVERLAY_EXT: + return "VK_BLEND_OP_OVERLAY_EXT"; + case VK_BLEND_OP_PINLIGHT_EXT: + return "VK_BLEND_OP_PINLIGHT_EXT"; + case VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT: + return "VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT"; + case VK_BLEND_OP_PLUS_CLAMPED_EXT: + return "VK_BLEND_OP_PLUS_CLAMPED_EXT"; + case VK_BLEND_OP_PLUS_DARKER_EXT: + return "VK_BLEND_OP_PLUS_DARKER_EXT"; + case VK_BLEND_OP_PLUS_EXT: + return "VK_BLEND_OP_PLUS_EXT"; + case VK_BLEND_OP_RED_EXT: + return "VK_BLEND_OP_RED_EXT"; + case VK_BLEND_OP_REVERSE_SUBTRACT: + return "VK_BLEND_OP_REVERSE_SUBTRACT"; + case VK_BLEND_OP_SCREEN_EXT: + return "VK_BLEND_OP_SCREEN_EXT"; + case VK_BLEND_OP_SOFTLIGHT_EXT: + return "VK_BLEND_OP_SOFTLIGHT_EXT"; + case VK_BLEND_OP_SRC_ATOP_EXT: + return "VK_BLEND_OP_SRC_ATOP_EXT"; + case VK_BLEND_OP_SRC_EXT: + return "VK_BLEND_OP_SRC_EXT"; + case VK_BLEND_OP_SRC_IN_EXT: + return "VK_BLEND_OP_SRC_IN_EXT"; + case VK_BLEND_OP_SRC_OUT_EXT: + return "VK_BLEND_OP_SRC_OUT_EXT"; + case VK_BLEND_OP_SRC_OVER_EXT: + return "VK_BLEND_OP_SRC_OVER_EXT"; + case VK_BLEND_OP_SUBTRACT: + return "VK_BLEND_OP_SUBTRACT"; + case VK_BLEND_OP_VIVIDLIGHT_EXT: + return "VK_BLEND_OP_VIVIDLIGHT_EXT"; + case VK_BLEND_OP_XOR_EXT: + return "VK_BLEND_OP_XOR_EXT"; + case VK_BLEND_OP_ZERO_EXT: + return "VK_BLEND_OP_ZERO_EXT"; + default: + return "Unhandled VkBlendOp"; + } +} + +static inline const char* string_VkColorComponentFlagBits(VkColorComponentFlagBits input_value) +{ + switch ((VkColorComponentFlagBits)input_value) + { + case VK_COLOR_COMPONENT_A_BIT: + return "VK_COLOR_COMPONENT_A_BIT"; + case VK_COLOR_COMPONENT_B_BIT: + return "VK_COLOR_COMPONENT_B_BIT"; + case VK_COLOR_COMPONENT_G_BIT: + return "VK_COLOR_COMPONENT_G_BIT"; + case VK_COLOR_COMPONENT_R_BIT: + return "VK_COLOR_COMPONENT_R_BIT"; + default: + return "Unhandled VkColorComponentFlagBits"; + } +} + +static inline std::string string_VkColorComponentFlags(VkColorComponentFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + 
ret.append(string_VkColorComponentFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkColorComponentFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkCompareOp(VkCompareOp input_value) +{ + switch ((VkCompareOp)input_value) + { + case VK_COMPARE_OP_ALWAYS: + return "VK_COMPARE_OP_ALWAYS"; + case VK_COMPARE_OP_EQUAL: + return "VK_COMPARE_OP_EQUAL"; + case VK_COMPARE_OP_GREATER: + return "VK_COMPARE_OP_GREATER"; + case VK_COMPARE_OP_GREATER_OR_EQUAL: + return "VK_COMPARE_OP_GREATER_OR_EQUAL"; + case VK_COMPARE_OP_LESS: + return "VK_COMPARE_OP_LESS"; + case VK_COMPARE_OP_LESS_OR_EQUAL: + return "VK_COMPARE_OP_LESS_OR_EQUAL"; + case VK_COMPARE_OP_NEVER: + return "VK_COMPARE_OP_NEVER"; + case VK_COMPARE_OP_NOT_EQUAL: + return "VK_COMPARE_OP_NOT_EQUAL"; + default: + return "Unhandled VkCompareOp"; + } +} + +static inline const char* string_VkPipelineCreateFlagBits(VkPipelineCreateFlagBits input_value) +{ + switch ((VkPipelineCreateFlagBits)input_value) + { + case VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT: + return "VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT"; + case VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR: + return "VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR"; + case VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR: + return "VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR"; + case VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV: + return "VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV"; + case VK_PIPELINE_CREATE_DERIVATIVE_BIT: + return "VK_PIPELINE_CREATE_DERIVATIVE_BIT"; + case VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT: + return "VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT"; + case VK_PIPELINE_CREATE_DISPATCH_BASE_BIT: + return "VK_PIPELINE_CREATE_DISPATCH_BASE_BIT"; + case VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT: + return "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT"; + case VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT: + return "VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT"; + case VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV: + return "VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV"; + case VK_PIPELINE_CREATE_LIBRARY_BIT_KHR: + return "VK_PIPELINE_CREATE_LIBRARY_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR"; + case VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR: + return "VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR"; + case VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT: + return "VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT"; + default: + return "Unhandled VkPipelineCreateFlagBits"; + } +} + +static inline std::string string_VkPipelineCreateFlags(VkPipelineCreateFlags 
input_value)
+{
+    std::string ret;
+    int index = 0;
+    while(input_value) {
+        if (input_value & 1) {
+            if( !ret.empty()) ret.append("|");
+            ret.append(string_VkPipelineCreateFlagBits(static_cast<VkPipelineCreateFlagBits>(1 << index)));
+        }
+        ++index;
+        input_value >>= 1;
+    }
+    if( ret.empty()) ret.append(string_VkPipelineCreateFlagBits(static_cast<VkPipelineCreateFlagBits>(0)));
+    return ret;
+}
+
+static inline const char* string_VkPipelineShaderStageCreateFlagBits(VkPipelineShaderStageCreateFlagBits input_value)
+{
+    switch ((VkPipelineShaderStageCreateFlagBits)input_value)
+    {
+        case VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT:
+            return "VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT";
+        case VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT:
+            return "VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT";
+        default:
+            return "Unhandled VkPipelineShaderStageCreateFlagBits";
+    }
+}
+
+static inline std::string string_VkPipelineShaderStageCreateFlags(VkPipelineShaderStageCreateFlags input_value)
+{
+    std::string ret;
+    int index = 0;
+    while(input_value) {
+        if (input_value & 1) {
+            if( !ret.empty()) ret.append("|");
+            ret.append(string_VkPipelineShaderStageCreateFlagBits(static_cast<VkPipelineShaderStageCreateFlagBits>(1 << index)));
+        }
+        ++index;
+        input_value >>= 1;
+    }
+    if( ret.empty()) ret.append(string_VkPipelineShaderStageCreateFlagBits(static_cast<VkPipelineShaderStageCreateFlagBits>(0)));
+    return ret;
+}
+
+static inline const char* string_VkShaderStageFlagBits(VkShaderStageFlagBits input_value)
+{
+    switch ((VkShaderStageFlagBits)input_value)
+    {
+        case VK_SHADER_STAGE_ALL:
+            return "VK_SHADER_STAGE_ALL";
+        case VK_SHADER_STAGE_ALL_GRAPHICS:
+            return "VK_SHADER_STAGE_ALL_GRAPHICS";
+        case VK_SHADER_STAGE_ANY_HIT_BIT_KHR:
+            return "VK_SHADER_STAGE_ANY_HIT_BIT_KHR";
+        case VK_SHADER_STAGE_CALLABLE_BIT_KHR:
+            return "VK_SHADER_STAGE_CALLABLE_BIT_KHR";
+        case VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR:
+            return "VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR";
+        case VK_SHADER_STAGE_COMPUTE_BIT:
+            return "VK_SHADER_STAGE_COMPUTE_BIT";
+        case VK_SHADER_STAGE_FRAGMENT_BIT:
+            return "VK_SHADER_STAGE_FRAGMENT_BIT";
+        case VK_SHADER_STAGE_GEOMETRY_BIT:
+            return "VK_SHADER_STAGE_GEOMETRY_BIT";
+        case VK_SHADER_STAGE_INTERSECTION_BIT_KHR:
+            return "VK_SHADER_STAGE_INTERSECTION_BIT_KHR";
+        case VK_SHADER_STAGE_MESH_BIT_NV:
+            return "VK_SHADER_STAGE_MESH_BIT_NV";
+        case VK_SHADER_STAGE_MISS_BIT_KHR:
+            return "VK_SHADER_STAGE_MISS_BIT_KHR";
+        case VK_SHADER_STAGE_RAYGEN_BIT_KHR:
+            return "VK_SHADER_STAGE_RAYGEN_BIT_KHR";
+        case VK_SHADER_STAGE_TASK_BIT_NV:
+            return "VK_SHADER_STAGE_TASK_BIT_NV";
+        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+            return "VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT";
+        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+            return "VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT";
+        case VK_SHADER_STAGE_VERTEX_BIT:
+            return "VK_SHADER_STAGE_VERTEX_BIT";
+        default:
+            return "Unhandled VkShaderStageFlagBits";
+    }
+}
+
+static inline std::string string_VkShaderStageFlags(VkShaderStageFlags input_value)
+{
+    std::string ret;
+    int index = 0;
+    while(input_value) {
+        if (input_value & 1) {
+            if( !ret.empty()) ret.append("|");
+            ret.append(string_VkShaderStageFlagBits(static_cast<VkShaderStageFlagBits>(1 << index)));
+        }
+        ++index;
+        input_value >>= 1;
+    }
+    if( ret.empty()) ret.append(string_VkShaderStageFlagBits(static_cast<VkShaderStageFlagBits>(0)));
+    return ret;
+}
+
+static inline const char* string_VkCullModeFlagBits(VkCullModeFlagBits input_value)
+{
+    switch ((VkCullModeFlagBits)input_value)
+    {
+        case VK_CULL_MODE_BACK_BIT:
+            return "VK_CULL_MODE_BACK_BIT";
+        case
VK_CULL_MODE_FRONT_AND_BACK: + return "VK_CULL_MODE_FRONT_AND_BACK"; + case VK_CULL_MODE_FRONT_BIT: + return "VK_CULL_MODE_FRONT_BIT"; + case VK_CULL_MODE_NONE: + return "VK_CULL_MODE_NONE"; + default: + return "Unhandled VkCullModeFlagBits"; + } +} + +static inline std::string string_VkCullModeFlags(VkCullModeFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkCullModeFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkCullModeFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkDynamicState(VkDynamicState input_value) +{ + switch ((VkDynamicState)input_value) + { + case VK_DYNAMIC_STATE_BLEND_CONSTANTS: + return "VK_DYNAMIC_STATE_BLEND_CONSTANTS"; + case VK_DYNAMIC_STATE_CULL_MODE_EXT: + return "VK_DYNAMIC_STATE_CULL_MODE_EXT"; + case VK_DYNAMIC_STATE_DEPTH_BIAS: + return "VK_DYNAMIC_STATE_DEPTH_BIAS"; + case VK_DYNAMIC_STATE_DEPTH_BOUNDS: + return "VK_DYNAMIC_STATE_DEPTH_BOUNDS"; + case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT: + return "VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT"; + case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT: + return "VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT"; + case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT: + return "VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT"; + case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT: + return "VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT"; + case VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT: + return "VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT"; + case VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV: + return "VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV"; + case VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR: + return "VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR"; + case VK_DYNAMIC_STATE_FRONT_FACE_EXT: + return "VK_DYNAMIC_STATE_FRONT_FACE_EXT"; + case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT: + return "VK_DYNAMIC_STATE_LINE_STIPPLE_EXT"; + case VK_DYNAMIC_STATE_LINE_WIDTH: + return "VK_DYNAMIC_STATE_LINE_WIDTH"; + case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT: + return "VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT"; + case VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR: + return "VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR"; + case VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT: + return "VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT"; + case VK_DYNAMIC_STATE_SCISSOR: + return "VK_DYNAMIC_STATE_SCISSOR"; + case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT: + return "VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT"; + case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK: + return "VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK"; + case VK_DYNAMIC_STATE_STENCIL_OP_EXT: + return "VK_DYNAMIC_STATE_STENCIL_OP_EXT"; + case VK_DYNAMIC_STATE_STENCIL_REFERENCE: + return "VK_DYNAMIC_STATE_STENCIL_REFERENCE"; + case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT: + return "VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT"; + case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK: + return "VK_DYNAMIC_STATE_STENCIL_WRITE_MASK"; + case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT: + return "VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT"; + case VK_DYNAMIC_STATE_VIEWPORT: + return "VK_DYNAMIC_STATE_VIEWPORT"; + case VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV: + return "VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV"; + case VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV: + return "VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV"; + case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT: + return "VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT"; + case 
VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV: + return "VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV"; + default: + return "Unhandled VkDynamicState"; + } +} + +static inline const char* string_VkFrontFace(VkFrontFace input_value) +{ + switch ((VkFrontFace)input_value) + { + case VK_FRONT_FACE_CLOCKWISE: + return "VK_FRONT_FACE_CLOCKWISE"; + case VK_FRONT_FACE_COUNTER_CLOCKWISE: + return "VK_FRONT_FACE_COUNTER_CLOCKWISE"; + default: + return "Unhandled VkFrontFace"; + } +} + +static inline const char* string_VkVertexInputRate(VkVertexInputRate input_value) +{ + switch ((VkVertexInputRate)input_value) + { + case VK_VERTEX_INPUT_RATE_INSTANCE: + return "VK_VERTEX_INPUT_RATE_INSTANCE"; + case VK_VERTEX_INPUT_RATE_VERTEX: + return "VK_VERTEX_INPUT_RATE_VERTEX"; + default: + return "Unhandled VkVertexInputRate"; + } +} + +static inline const char* string_VkPrimitiveTopology(VkPrimitiveTopology input_value) +{ + switch ((VkPrimitiveTopology)input_value) + { + case VK_PRIMITIVE_TOPOLOGY_LINE_LIST: + return "VK_PRIMITIVE_TOPOLOGY_LINE_LIST"; + case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY: + return "VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY"; + case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP: + return "VK_PRIMITIVE_TOPOLOGY_LINE_STRIP"; + case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY: + return "VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY"; + case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST: + return "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST"; + case VK_PRIMITIVE_TOPOLOGY_POINT_LIST: + return "VK_PRIMITIVE_TOPOLOGY_POINT_LIST"; + case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN: + return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN"; + case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST: + return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST"; + case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY: + return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY"; + case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: + return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP"; + case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY: + return "VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY"; + default: + return "Unhandled VkPrimitiveTopology"; + } +} + +static inline const char* string_VkPolygonMode(VkPolygonMode input_value) +{ + switch ((VkPolygonMode)input_value) + { + case VK_POLYGON_MODE_FILL: + return "VK_POLYGON_MODE_FILL"; + case VK_POLYGON_MODE_FILL_RECTANGLE_NV: + return "VK_POLYGON_MODE_FILL_RECTANGLE_NV"; + case VK_POLYGON_MODE_LINE: + return "VK_POLYGON_MODE_LINE"; + case VK_POLYGON_MODE_POINT: + return "VK_POLYGON_MODE_POINT"; + default: + return "Unhandled VkPolygonMode"; + } +} + +static inline const char* string_VkStencilOp(VkStencilOp input_value) +{ + switch ((VkStencilOp)input_value) + { + case VK_STENCIL_OP_DECREMENT_AND_CLAMP: + return "VK_STENCIL_OP_DECREMENT_AND_CLAMP"; + case VK_STENCIL_OP_DECREMENT_AND_WRAP: + return "VK_STENCIL_OP_DECREMENT_AND_WRAP"; + case VK_STENCIL_OP_INCREMENT_AND_CLAMP: + return "VK_STENCIL_OP_INCREMENT_AND_CLAMP"; + case VK_STENCIL_OP_INCREMENT_AND_WRAP: + return "VK_STENCIL_OP_INCREMENT_AND_WRAP"; + case VK_STENCIL_OP_INVERT: + return "VK_STENCIL_OP_INVERT"; + case VK_STENCIL_OP_KEEP: + return "VK_STENCIL_OP_KEEP"; + case VK_STENCIL_OP_REPLACE: + return "VK_STENCIL_OP_REPLACE"; + case VK_STENCIL_OP_ZERO: + return "VK_STENCIL_OP_ZERO"; + default: + return "Unhandled VkStencilOp"; + } +} + +static inline const char* string_VkLogicOp(VkLogicOp input_value) +{ + switch ((VkLogicOp)input_value) + { + case VK_LOGIC_OP_AND: + return "VK_LOGIC_OP_AND"; + case VK_LOGIC_OP_AND_INVERTED: + return "VK_LOGIC_OP_AND_INVERTED"; + 
case VK_LOGIC_OP_AND_REVERSE: + return "VK_LOGIC_OP_AND_REVERSE"; + case VK_LOGIC_OP_CLEAR: + return "VK_LOGIC_OP_CLEAR"; + case VK_LOGIC_OP_COPY: + return "VK_LOGIC_OP_COPY"; + case VK_LOGIC_OP_COPY_INVERTED: + return "VK_LOGIC_OP_COPY_INVERTED"; + case VK_LOGIC_OP_EQUIVALENT: + return "VK_LOGIC_OP_EQUIVALENT"; + case VK_LOGIC_OP_INVERT: + return "VK_LOGIC_OP_INVERT"; + case VK_LOGIC_OP_NAND: + return "VK_LOGIC_OP_NAND"; + case VK_LOGIC_OP_NOR: + return "VK_LOGIC_OP_NOR"; + case VK_LOGIC_OP_NO_OP: + return "VK_LOGIC_OP_NO_OP"; + case VK_LOGIC_OP_OR: + return "VK_LOGIC_OP_OR"; + case VK_LOGIC_OP_OR_INVERTED: + return "VK_LOGIC_OP_OR_INVERTED"; + case VK_LOGIC_OP_OR_REVERSE: + return "VK_LOGIC_OP_OR_REVERSE"; + case VK_LOGIC_OP_SET: + return "VK_LOGIC_OP_SET"; + case VK_LOGIC_OP_XOR: + return "VK_LOGIC_OP_XOR"; + default: + return "Unhandled VkLogicOp"; + } +} + +static inline const char* string_VkBorderColor(VkBorderColor input_value) +{ + switch ((VkBorderColor)input_value) + { + case VK_BORDER_COLOR_FLOAT_CUSTOM_EXT: + return "VK_BORDER_COLOR_FLOAT_CUSTOM_EXT"; + case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK: + return "VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK"; + case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE: + return "VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE"; + case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK: + return "VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK"; + case VK_BORDER_COLOR_INT_CUSTOM_EXT: + return "VK_BORDER_COLOR_INT_CUSTOM_EXT"; + case VK_BORDER_COLOR_INT_OPAQUE_BLACK: + return "VK_BORDER_COLOR_INT_OPAQUE_BLACK"; + case VK_BORDER_COLOR_INT_OPAQUE_WHITE: + return "VK_BORDER_COLOR_INT_OPAQUE_WHITE"; + case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK: + return "VK_BORDER_COLOR_INT_TRANSPARENT_BLACK"; + default: + return "Unhandled VkBorderColor"; + } +} + +static inline const char* string_VkFilter(VkFilter input_value) +{ + switch ((VkFilter)input_value) + { + case VK_FILTER_CUBIC_IMG: + return "VK_FILTER_CUBIC_IMG"; + case VK_FILTER_LINEAR: + return "VK_FILTER_LINEAR"; + case VK_FILTER_NEAREST: + return "VK_FILTER_NEAREST"; + default: + return "Unhandled VkFilter"; + } +} + +static inline const char* string_VkSamplerAddressMode(VkSamplerAddressMode input_value) +{ + switch ((VkSamplerAddressMode)input_value) + { + case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER: + return "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER"; + case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE: + return "VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE"; + case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT: + return "VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT"; + case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE: + return "VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE"; + case VK_SAMPLER_ADDRESS_MODE_REPEAT: + return "VK_SAMPLER_ADDRESS_MODE_REPEAT"; + default: + return "Unhandled VkSamplerAddressMode"; + } +} + +static inline const char* string_VkSamplerCreateFlagBits(VkSamplerCreateFlagBits input_value) +{ + switch ((VkSamplerCreateFlagBits)input_value) + { + case VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT: + return "VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT"; + case VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT: + return "VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT"; + default: + return "Unhandled VkSamplerCreateFlagBits"; + } +} + +static inline std::string string_VkSamplerCreateFlags(VkSamplerCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSamplerCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + 
} + if( ret.empty()) ret.append(string_VkSamplerCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSamplerMipmapMode(VkSamplerMipmapMode input_value) +{ + switch ((VkSamplerMipmapMode)input_value) + { + case VK_SAMPLER_MIPMAP_MODE_LINEAR: + return "VK_SAMPLER_MIPMAP_MODE_LINEAR"; + case VK_SAMPLER_MIPMAP_MODE_NEAREST: + return "VK_SAMPLER_MIPMAP_MODE_NEAREST"; + default: + return "Unhandled VkSamplerMipmapMode"; + } +} + +static inline const char* string_VkDescriptorPoolCreateFlagBits(VkDescriptorPoolCreateFlagBits input_value) +{ + switch ((VkDescriptorPoolCreateFlagBits)input_value) + { + case VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT: + return "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT"; + case VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT: + return "VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT"; + default: + return "Unhandled VkDescriptorPoolCreateFlagBits"; + } +} + +static inline std::string string_VkDescriptorPoolCreateFlags(VkDescriptorPoolCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDescriptorPoolCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDescriptorPoolCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkDescriptorType(VkDescriptorType input_value) +{ + switch ((VkDescriptorType)input_value) + { + case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: + return "VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR"; + case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: + return "VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV"; + case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: + return "VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER"; + case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: + return "VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT"; + case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: + return "VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT"; + case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: + return "VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE"; + case VK_DESCRIPTOR_TYPE_SAMPLER: + return "VK_DESCRIPTOR_TYPE_SAMPLER"; + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: + return "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER"; + case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: + return "VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC"; + case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: + return "VK_DESCRIPTOR_TYPE_STORAGE_IMAGE"; + case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: + return "VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER"; + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: + return "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER"; + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: + return "VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC"; + case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: + return "VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER"; + default: + return "Unhandled VkDescriptorType"; + } +} + +static inline const char* string_VkDescriptorSetLayoutCreateFlagBits(VkDescriptorSetLayoutCreateFlagBits input_value) +{ + switch ((VkDescriptorSetLayoutCreateFlagBits)input_value) + { + case VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR: + return "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR"; + case VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT: + return "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT"; + default: + return "Unhandled VkDescriptorSetLayoutCreateFlagBits"; + } +} + +static inline std::string 
string_VkDescriptorSetLayoutCreateFlags(VkDescriptorSetLayoutCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDescriptorSetLayoutCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDescriptorSetLayoutCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkAttachmentDescriptionFlagBits(VkAttachmentDescriptionFlagBits input_value) +{ + switch ((VkAttachmentDescriptionFlagBits)input_value) + { + case VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT: + return "VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT"; + default: + return "Unhandled VkAttachmentDescriptionFlagBits"; + } +} + +static inline std::string string_VkAttachmentDescriptionFlags(VkAttachmentDescriptionFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkAttachmentDescriptionFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkAttachmentDescriptionFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkAttachmentLoadOp(VkAttachmentLoadOp input_value) +{ + switch ((VkAttachmentLoadOp)input_value) + { + case VK_ATTACHMENT_LOAD_OP_CLEAR: + return "VK_ATTACHMENT_LOAD_OP_CLEAR"; + case VK_ATTACHMENT_LOAD_OP_DONT_CARE: + return "VK_ATTACHMENT_LOAD_OP_DONT_CARE"; + case VK_ATTACHMENT_LOAD_OP_LOAD: + return "VK_ATTACHMENT_LOAD_OP_LOAD"; + default: + return "Unhandled VkAttachmentLoadOp"; + } +} + +static inline const char* string_VkAttachmentStoreOp(VkAttachmentStoreOp input_value) +{ + switch ((VkAttachmentStoreOp)input_value) + { + case VK_ATTACHMENT_STORE_OP_DONT_CARE: + return "VK_ATTACHMENT_STORE_OP_DONT_CARE"; + case VK_ATTACHMENT_STORE_OP_NONE_QCOM: + return "VK_ATTACHMENT_STORE_OP_NONE_QCOM"; + case VK_ATTACHMENT_STORE_OP_STORE: + return "VK_ATTACHMENT_STORE_OP_STORE"; + default: + return "Unhandled VkAttachmentStoreOp"; + } +} + +static inline const char* string_VkDependencyFlagBits(VkDependencyFlagBits input_value) +{ + switch ((VkDependencyFlagBits)input_value) + { + case VK_DEPENDENCY_BY_REGION_BIT: + return "VK_DEPENDENCY_BY_REGION_BIT"; + case VK_DEPENDENCY_DEVICE_GROUP_BIT: + return "VK_DEPENDENCY_DEVICE_GROUP_BIT"; + case VK_DEPENDENCY_VIEW_LOCAL_BIT: + return "VK_DEPENDENCY_VIEW_LOCAL_BIT"; + default: + return "Unhandled VkDependencyFlagBits"; + } +} + +static inline std::string string_VkDependencyFlags(VkDependencyFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDependencyFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDependencyFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkFramebufferCreateFlagBits(VkFramebufferCreateFlagBits input_value) +{ + switch ((VkFramebufferCreateFlagBits)input_value) + { + case VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT: + return "VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT"; + default: + return "Unhandled VkFramebufferCreateFlagBits"; + } +} + +static inline std::string string_VkFramebufferCreateFlags(VkFramebufferCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + 
ret.append(string_VkFramebufferCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkFramebufferCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkPipelineBindPoint(VkPipelineBindPoint input_value) +{ + switch ((VkPipelineBindPoint)input_value) + { + case VK_PIPELINE_BIND_POINT_COMPUTE: + return "VK_PIPELINE_BIND_POINT_COMPUTE"; + case VK_PIPELINE_BIND_POINT_GRAPHICS: + return "VK_PIPELINE_BIND_POINT_GRAPHICS"; + case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR: + return "VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR"; + default: + return "Unhandled VkPipelineBindPoint"; + } +} + +static inline const char* string_VkRenderPassCreateFlagBits(VkRenderPassCreateFlagBits input_value) +{ + switch ((VkRenderPassCreateFlagBits)input_value) + { + case VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM: + return "VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM"; + default: + return "Unhandled VkRenderPassCreateFlagBits"; + } +} + +static inline std::string string_VkRenderPassCreateFlags(VkRenderPassCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkRenderPassCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkRenderPassCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSubpassDescriptionFlagBits(VkSubpassDescriptionFlagBits input_value) +{ + switch ((VkSubpassDescriptionFlagBits)input_value) + { + case VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM: + return "VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM"; + case VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX: + return "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX"; + case VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX: + return "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX"; + case VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM: + return "VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM"; + default: + return "Unhandled VkSubpassDescriptionFlagBits"; + } +} + +static inline std::string string_VkSubpassDescriptionFlags(VkSubpassDescriptionFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSubpassDescriptionFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSubpassDescriptionFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkCommandPoolCreateFlagBits(VkCommandPoolCreateFlagBits input_value) +{ + switch ((VkCommandPoolCreateFlagBits)input_value) + { + case VK_COMMAND_POOL_CREATE_PROTECTED_BIT: + return "VK_COMMAND_POOL_CREATE_PROTECTED_BIT"; + case VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT: + return "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT"; + case VK_COMMAND_POOL_CREATE_TRANSIENT_BIT: + return "VK_COMMAND_POOL_CREATE_TRANSIENT_BIT"; + default: + return "Unhandled VkCommandPoolCreateFlagBits"; + } +} + +static inline std::string string_VkCommandPoolCreateFlags(VkCommandPoolCreateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkCommandPoolCreateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) 
ret.append(string_VkCommandPoolCreateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkCommandPoolResetFlagBits(VkCommandPoolResetFlagBits input_value) +{ + switch ((VkCommandPoolResetFlagBits)input_value) + { + case VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT: + return "VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT"; + default: + return "Unhandled VkCommandPoolResetFlagBits"; + } +} + +static inline std::string string_VkCommandPoolResetFlags(VkCommandPoolResetFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkCommandPoolResetFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkCommandPoolResetFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkCommandBufferLevel(VkCommandBufferLevel input_value) +{ + switch ((VkCommandBufferLevel)input_value) + { + case VK_COMMAND_BUFFER_LEVEL_PRIMARY: + return "VK_COMMAND_BUFFER_LEVEL_PRIMARY"; + case VK_COMMAND_BUFFER_LEVEL_SECONDARY: + return "VK_COMMAND_BUFFER_LEVEL_SECONDARY"; + default: + return "Unhandled VkCommandBufferLevel"; + } +} + +static inline const char* string_VkCommandBufferUsageFlagBits(VkCommandBufferUsageFlagBits input_value) +{ + switch ((VkCommandBufferUsageFlagBits)input_value) + { + case VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT: + return "VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT"; + case VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT: + return "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT"; + case VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT: + return "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT"; + default: + return "Unhandled VkCommandBufferUsageFlagBits"; + } +} + +static inline std::string string_VkCommandBufferUsageFlags(VkCommandBufferUsageFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkCommandBufferUsageFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkCommandBufferUsageFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkQueryControlFlagBits(VkQueryControlFlagBits input_value) +{ + switch ((VkQueryControlFlagBits)input_value) + { + case VK_QUERY_CONTROL_PRECISE_BIT: + return "VK_QUERY_CONTROL_PRECISE_BIT"; + default: + return "Unhandled VkQueryControlFlagBits"; + } +} + +static inline std::string string_VkQueryControlFlags(VkQueryControlFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkQueryControlFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkQueryControlFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkCommandBufferResetFlagBits(VkCommandBufferResetFlagBits input_value) +{ + switch ((VkCommandBufferResetFlagBits)input_value) + { + case VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT: + return "VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT"; + default: + return "Unhandled VkCommandBufferResetFlagBits"; + } +} + +static inline std::string string_VkCommandBufferResetFlags(VkCommandBufferResetFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + 
ret.append(string_VkCommandBufferResetFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkCommandBufferResetFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkIndexType(VkIndexType input_value) +{ + switch ((VkIndexType)input_value) + { + case VK_INDEX_TYPE_NONE_KHR: + return "VK_INDEX_TYPE_NONE_KHR"; + case VK_INDEX_TYPE_UINT16: + return "VK_INDEX_TYPE_UINT16"; + case VK_INDEX_TYPE_UINT32: + return "VK_INDEX_TYPE_UINT32"; + case VK_INDEX_TYPE_UINT8_EXT: + return "VK_INDEX_TYPE_UINT8_EXT"; + default: + return "Unhandled VkIndexType"; + } +} + +static inline const char* string_VkStencilFaceFlagBits(VkStencilFaceFlagBits input_value) +{ + switch ((VkStencilFaceFlagBits)input_value) + { + case VK_STENCIL_FACE_BACK_BIT: + return "VK_STENCIL_FACE_BACK_BIT"; + case VK_STENCIL_FACE_FRONT_AND_BACK: + return "VK_STENCIL_FACE_FRONT_AND_BACK"; + case VK_STENCIL_FACE_FRONT_BIT: + return "VK_STENCIL_FACE_FRONT_BIT"; + default: + return "Unhandled VkStencilFaceFlagBits"; + } +} + +static inline std::string string_VkStencilFaceFlags(VkStencilFaceFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkStencilFaceFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkStencilFaceFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSubpassContents(VkSubpassContents input_value) +{ + switch ((VkSubpassContents)input_value) + { + case VK_SUBPASS_CONTENTS_INLINE: + return "VK_SUBPASS_CONTENTS_INLINE"; + case VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS: + return "VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS"; + default: + return "Unhandled VkSubpassContents"; + } +} + +static inline const char* string_VkSubgroupFeatureFlagBits(VkSubgroupFeatureFlagBits input_value) +{ + switch ((VkSubgroupFeatureFlagBits)input_value) + { + case VK_SUBGROUP_FEATURE_ARITHMETIC_BIT: + return "VK_SUBGROUP_FEATURE_ARITHMETIC_BIT"; + case VK_SUBGROUP_FEATURE_BALLOT_BIT: + return "VK_SUBGROUP_FEATURE_BALLOT_BIT"; + case VK_SUBGROUP_FEATURE_BASIC_BIT: + return "VK_SUBGROUP_FEATURE_BASIC_BIT"; + case VK_SUBGROUP_FEATURE_CLUSTERED_BIT: + return "VK_SUBGROUP_FEATURE_CLUSTERED_BIT"; + case VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV: + return "VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV"; + case VK_SUBGROUP_FEATURE_QUAD_BIT: + return "VK_SUBGROUP_FEATURE_QUAD_BIT"; + case VK_SUBGROUP_FEATURE_SHUFFLE_BIT: + return "VK_SUBGROUP_FEATURE_SHUFFLE_BIT"; + case VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT: + return "VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT"; + case VK_SUBGROUP_FEATURE_VOTE_BIT: + return "VK_SUBGROUP_FEATURE_VOTE_BIT"; + default: + return "Unhandled VkSubgroupFeatureFlagBits"; + } +} + +static inline std::string string_VkSubgroupFeatureFlags(VkSubgroupFeatureFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSubgroupFeatureFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSubgroupFeatureFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkPeerMemoryFeatureFlagBits(VkPeerMemoryFeatureFlagBits input_value) +{ + switch ((VkPeerMemoryFeatureFlagBits)input_value) + { + case VK_PEER_MEMORY_FEATURE_COPY_DST_BIT: + return "VK_PEER_MEMORY_FEATURE_COPY_DST_BIT"; 
+ case VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT: + return "VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT"; + case VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT: + return "VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT"; + case VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT: + return "VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT"; + default: + return "Unhandled VkPeerMemoryFeatureFlagBits"; + } +} + +static inline std::string string_VkPeerMemoryFeatureFlags(VkPeerMemoryFeatureFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkPeerMemoryFeatureFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkPeerMemoryFeatureFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkMemoryAllocateFlagBits(VkMemoryAllocateFlagBits input_value) +{ + switch ((VkMemoryAllocateFlagBits)input_value) + { + case VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT: + return "VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT"; + case VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT: + return "VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT"; + case VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT: + return "VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT"; + default: + return "Unhandled VkMemoryAllocateFlagBits"; + } +} + +static inline std::string string_VkMemoryAllocateFlags(VkMemoryAllocateFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkMemoryAllocateFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkMemoryAllocateFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkPointClippingBehavior(VkPointClippingBehavior input_value) +{ + switch ((VkPointClippingBehavior)input_value) + { + case VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES: + return "VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES"; + case VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY: + return "VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY"; + default: + return "Unhandled VkPointClippingBehavior"; + } +} + +static inline const char* string_VkTessellationDomainOrigin(VkTessellationDomainOrigin input_value) +{ + switch ((VkTessellationDomainOrigin)input_value) + { + case VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT: + return "VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT"; + case VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT: + return "VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT"; + default: + return "Unhandled VkTessellationDomainOrigin"; + } +} + +static inline const char* string_VkSamplerYcbcrModelConversion(VkSamplerYcbcrModelConversion input_value) +{ + switch ((VkSamplerYcbcrModelConversion)input_value) + { + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY"; + default: + return "Unhandled VkSamplerYcbcrModelConversion"; + } +} + +static inline const char* string_VkSamplerYcbcrRange(VkSamplerYcbcrRange input_value) +{ + switch 
((VkSamplerYcbcrRange)input_value) + { + case VK_SAMPLER_YCBCR_RANGE_ITU_FULL: + return "VK_SAMPLER_YCBCR_RANGE_ITU_FULL"; + case VK_SAMPLER_YCBCR_RANGE_ITU_NARROW: + return "VK_SAMPLER_YCBCR_RANGE_ITU_NARROW"; + default: + return "Unhandled VkSamplerYcbcrRange"; + } +} + +static inline const char* string_VkChromaLocation(VkChromaLocation input_value) +{ + switch ((VkChromaLocation)input_value) + { + case VK_CHROMA_LOCATION_COSITED_EVEN: + return "VK_CHROMA_LOCATION_COSITED_EVEN"; + case VK_CHROMA_LOCATION_MIDPOINT: + return "VK_CHROMA_LOCATION_MIDPOINT"; + default: + return "Unhandled VkChromaLocation"; + } +} + +static inline const char* string_VkDescriptorUpdateTemplateType(VkDescriptorUpdateTemplateType input_value) +{ + switch ((VkDescriptorUpdateTemplateType)input_value) + { + case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET: + return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET"; + case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR: + return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR"; + default: + return "Unhandled VkDescriptorUpdateTemplateType"; + } +} + +static inline const char* string_VkExternalMemoryHandleTypeFlagBits(VkExternalMemoryHandleTypeFlagBits input_value) +{ + switch ((VkExternalMemoryHandleTypeFlagBits)input_value) + { + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT"; + default: + return "Unhandled VkExternalMemoryHandleTypeFlagBits"; + } +} + +static inline std::string string_VkExternalMemoryHandleTypeFlags(VkExternalMemoryHandleTypeFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalMemoryHandleTypeFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalMemoryHandleTypeFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalMemoryFeatureFlagBits(VkExternalMemoryFeatureFlagBits input_value) +{ + switch ((VkExternalMemoryFeatureFlagBits)input_value) + { + case VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT: + return "VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT"; + case VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT: 
+ return "VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT"; + case VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT: + return "VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT"; + default: + return "Unhandled VkExternalMemoryFeatureFlagBits"; + } +} + +static inline std::string string_VkExternalMemoryFeatureFlags(VkExternalMemoryFeatureFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalMemoryFeatureFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalMemoryFeatureFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalFenceHandleTypeFlagBits(VkExternalFenceHandleTypeFlagBits input_value) +{ + switch ((VkExternalFenceHandleTypeFlagBits)input_value) + { + case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT"; + case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT"; + case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT"; + case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT"; + default: + return "Unhandled VkExternalFenceHandleTypeFlagBits"; + } +} + +static inline std::string string_VkExternalFenceHandleTypeFlags(VkExternalFenceHandleTypeFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalFenceHandleTypeFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalFenceHandleTypeFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalFenceFeatureFlagBits(VkExternalFenceFeatureFlagBits input_value) +{ + switch ((VkExternalFenceFeatureFlagBits)input_value) + { + case VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT: + return "VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT"; + case VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT: + return "VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT"; + default: + return "Unhandled VkExternalFenceFeatureFlagBits"; + } +} + +static inline std::string string_VkExternalFenceFeatureFlags(VkExternalFenceFeatureFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalFenceFeatureFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalFenceFeatureFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkFenceImportFlagBits(VkFenceImportFlagBits input_value) +{ + switch ((VkFenceImportFlagBits)input_value) + { + case VK_FENCE_IMPORT_TEMPORARY_BIT: + return "VK_FENCE_IMPORT_TEMPORARY_BIT"; + default: + return "Unhandled VkFenceImportFlagBits"; + } +} + +static inline std::string string_VkFenceImportFlags(VkFenceImportFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkFenceImportFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkFenceImportFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSemaphoreImportFlagBits(VkSemaphoreImportFlagBits input_value) +{ 
+ switch ((VkSemaphoreImportFlagBits)input_value) + { + case VK_SEMAPHORE_IMPORT_TEMPORARY_BIT: + return "VK_SEMAPHORE_IMPORT_TEMPORARY_BIT"; + default: + return "Unhandled VkSemaphoreImportFlagBits"; + } +} + +static inline std::string string_VkSemaphoreImportFlags(VkSemaphoreImportFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSemaphoreImportFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSemaphoreImportFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalSemaphoreHandleTypeFlagBits(VkExternalSemaphoreHandleTypeFlagBits input_value) +{ + switch ((VkExternalSemaphoreHandleTypeFlagBits)input_value) + { + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT"; + default: + return "Unhandled VkExternalSemaphoreHandleTypeFlagBits"; + } +} + +static inline std::string string_VkExternalSemaphoreHandleTypeFlags(VkExternalSemaphoreHandleTypeFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalSemaphoreHandleTypeFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalSemaphoreHandleTypeFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalSemaphoreFeatureFlagBits(VkExternalSemaphoreFeatureFlagBits input_value) +{ + switch ((VkExternalSemaphoreFeatureFlagBits)input_value) + { + case VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT: + return "VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT"; + case VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT: + return "VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT"; + default: + return "Unhandled VkExternalSemaphoreFeatureFlagBits"; + } +} + +static inline std::string string_VkExternalSemaphoreFeatureFlags(VkExternalSemaphoreFeatureFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalSemaphoreFeatureFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalSemaphoreFeatureFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkDriverId(VkDriverId input_value) +{ + switch ((VkDriverId)input_value) + { + case VK_DRIVER_ID_AMD_OPEN_SOURCE: + return "VK_DRIVER_ID_AMD_OPEN_SOURCE"; + case VK_DRIVER_ID_AMD_PROPRIETARY: + return "VK_DRIVER_ID_AMD_PROPRIETARY"; + case VK_DRIVER_ID_ARM_PROPRIETARY: + return "VK_DRIVER_ID_ARM_PROPRIETARY"; + case VK_DRIVER_ID_BROADCOM_PROPRIETARY: + return "VK_DRIVER_ID_BROADCOM_PROPRIETARY"; + case VK_DRIVER_ID_GGP_PROPRIETARY: + return "VK_DRIVER_ID_GGP_PROPRIETARY"; + case VK_DRIVER_ID_GOOGLE_SWIFTSHADER: + return "VK_DRIVER_ID_GOOGLE_SWIFTSHADER"; + case 
VK_DRIVER_ID_IMAGINATION_PROPRIETARY: + return "VK_DRIVER_ID_IMAGINATION_PROPRIETARY"; + case VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA: + return "VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA"; + case VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS: + return "VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS"; + case VK_DRIVER_ID_MESA_LLVMPIPE: + return "VK_DRIVER_ID_MESA_LLVMPIPE"; + case VK_DRIVER_ID_MESA_RADV: + return "VK_DRIVER_ID_MESA_RADV"; + case VK_DRIVER_ID_MOLTENVK: + return "VK_DRIVER_ID_MOLTENVK"; + case VK_DRIVER_ID_NVIDIA_PROPRIETARY: + return "VK_DRIVER_ID_NVIDIA_PROPRIETARY"; + case VK_DRIVER_ID_QUALCOMM_PROPRIETARY: + return "VK_DRIVER_ID_QUALCOMM_PROPRIETARY"; + default: + return "Unhandled VkDriverId"; + } +} + +static inline const char* string_VkShaderFloatControlsIndependence(VkShaderFloatControlsIndependence input_value) +{ + switch ((VkShaderFloatControlsIndependence)input_value) + { + case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY: + return "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY"; + case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL: + return "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL"; + case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE: + return "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE"; + default: + return "Unhandled VkShaderFloatControlsIndependence"; + } +} + +static inline const char* string_VkResolveModeFlagBits(VkResolveModeFlagBits input_value) +{ + switch ((VkResolveModeFlagBits)input_value) + { + case VK_RESOLVE_MODE_AVERAGE_BIT: + return "VK_RESOLVE_MODE_AVERAGE_BIT"; + case VK_RESOLVE_MODE_MAX_BIT: + return "VK_RESOLVE_MODE_MAX_BIT"; + case VK_RESOLVE_MODE_MIN_BIT: + return "VK_RESOLVE_MODE_MIN_BIT"; + case VK_RESOLVE_MODE_NONE: + return "VK_RESOLVE_MODE_NONE"; + case VK_RESOLVE_MODE_SAMPLE_ZERO_BIT: + return "VK_RESOLVE_MODE_SAMPLE_ZERO_BIT"; + default: + return "Unhandled VkResolveModeFlagBits"; + } +} + +static inline std::string string_VkResolveModeFlags(VkResolveModeFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkResolveModeFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkResolveModeFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkDescriptorBindingFlagBits(VkDescriptorBindingFlagBits input_value) +{ + switch ((VkDescriptorBindingFlagBits)input_value) + { + case VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT: + return "VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT"; + case VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT: + return "VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT"; + case VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT: + return "VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT"; + case VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT: + return "VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT"; + default: + return "Unhandled VkDescriptorBindingFlagBits"; + } +} + +static inline std::string string_VkDescriptorBindingFlags(VkDescriptorBindingFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDescriptorBindingFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDescriptorBindingFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSamplerReductionMode(VkSamplerReductionMode input_value) +{ + switch ((VkSamplerReductionMode)input_value) + { + 
case VK_SAMPLER_REDUCTION_MODE_MAX: + return "VK_SAMPLER_REDUCTION_MODE_MAX"; + case VK_SAMPLER_REDUCTION_MODE_MIN: + return "VK_SAMPLER_REDUCTION_MODE_MIN"; + case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE: + return "VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE"; + default: + return "Unhandled VkSamplerReductionMode"; + } +} + +static inline const char* string_VkSemaphoreType(VkSemaphoreType input_value) +{ + switch ((VkSemaphoreType)input_value) + { + case VK_SEMAPHORE_TYPE_BINARY: + return "VK_SEMAPHORE_TYPE_BINARY"; + case VK_SEMAPHORE_TYPE_TIMELINE: + return "VK_SEMAPHORE_TYPE_TIMELINE"; + default: + return "Unhandled VkSemaphoreType"; + } +} + +static inline const char* string_VkSemaphoreWaitFlagBits(VkSemaphoreWaitFlagBits input_value) +{ + switch ((VkSemaphoreWaitFlagBits)input_value) + { + case VK_SEMAPHORE_WAIT_ANY_BIT: + return "VK_SEMAPHORE_WAIT_ANY_BIT"; + default: + return "Unhandled VkSemaphoreWaitFlagBits"; + } +} + +static inline std::string string_VkSemaphoreWaitFlags(VkSemaphoreWaitFlags input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSemaphoreWaitFlagBits(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSemaphoreWaitFlagBits(static_cast(0))); + return ret; +} + +static inline const char* string_VkSurfaceTransformFlagBitsKHR(VkSurfaceTransformFlagBitsKHR input_value) +{ + switch ((VkSurfaceTransformFlagBitsKHR)input_value) + { + case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR: + return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR"; + case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR: + return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR"; + case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR: + return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR"; + case VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR: + return "VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR"; + case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR: + return "VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR"; + case VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR: + return "VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR"; + case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR: + return "VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR"; + case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR: + return "VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR"; + case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR: + return "VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR"; + default: + return "Unhandled VkSurfaceTransformFlagBitsKHR"; + } +} + +static inline std::string string_VkSurfaceTransformFlagsKHR(VkSurfaceTransformFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSurfaceTransformFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSurfaceTransformFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkPresentModeKHR(VkPresentModeKHR input_value) +{ + switch ((VkPresentModeKHR)input_value) + { + case VK_PRESENT_MODE_FIFO_KHR: + return "VK_PRESENT_MODE_FIFO_KHR"; + case VK_PRESENT_MODE_FIFO_RELAXED_KHR: + return "VK_PRESENT_MODE_FIFO_RELAXED_KHR"; + case VK_PRESENT_MODE_IMMEDIATE_KHR: + return "VK_PRESENT_MODE_IMMEDIATE_KHR"; + case VK_PRESENT_MODE_MAILBOX_KHR: + return "VK_PRESENT_MODE_MAILBOX_KHR"; + case 
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR: + return "VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR"; + case VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR: + return "VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR"; + default: + return "Unhandled VkPresentModeKHR"; + } +} + +static inline const char* string_VkColorSpaceKHR(VkColorSpaceKHR input_value) +{ + switch ((VkColorSpaceKHR)input_value) + { + case VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT: + return "VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT"; + case VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT: + return "VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT"; + case VK_COLOR_SPACE_BT2020_LINEAR_EXT: + return "VK_COLOR_SPACE_BT2020_LINEAR_EXT"; + case VK_COLOR_SPACE_BT709_LINEAR_EXT: + return "VK_COLOR_SPACE_BT709_LINEAR_EXT"; + case VK_COLOR_SPACE_BT709_NONLINEAR_EXT: + return "VK_COLOR_SPACE_BT709_NONLINEAR_EXT"; + case VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT: + return "VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT"; + case VK_COLOR_SPACE_DISPLAY_NATIVE_AMD: + return "VK_COLOR_SPACE_DISPLAY_NATIVE_AMD"; + case VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT: + return "VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT"; + case VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT: + return "VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT"; + case VK_COLOR_SPACE_DOLBYVISION_EXT: + return "VK_COLOR_SPACE_DOLBYVISION_EXT"; + case VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT: + return "VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT"; + case VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT: + return "VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT"; + case VK_COLOR_SPACE_HDR10_HLG_EXT: + return "VK_COLOR_SPACE_HDR10_HLG_EXT"; + case VK_COLOR_SPACE_HDR10_ST2084_EXT: + return "VK_COLOR_SPACE_HDR10_ST2084_EXT"; + case VK_COLOR_SPACE_PASS_THROUGH_EXT: + return "VK_COLOR_SPACE_PASS_THROUGH_EXT"; + case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR: + return "VK_COLOR_SPACE_SRGB_NONLINEAR_KHR"; + default: + return "Unhandled VkColorSpaceKHR"; + } +} + +static inline const char* string_VkCompositeAlphaFlagBitsKHR(VkCompositeAlphaFlagBitsKHR input_value) +{ + switch ((VkCompositeAlphaFlagBitsKHR)input_value) + { + case VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR: + return "VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR"; + case VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR: + return "VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR"; + case VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR: + return "VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR"; + case VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR: + return "VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR"; + default: + return "Unhandled VkCompositeAlphaFlagBitsKHR"; + } +} + +static inline std::string string_VkCompositeAlphaFlagsKHR(VkCompositeAlphaFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkCompositeAlphaFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkCompositeAlphaFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkSwapchainCreateFlagBitsKHR(VkSwapchainCreateFlagBitsKHR input_value) +{ + switch ((VkSwapchainCreateFlagBitsKHR)input_value) + { + case VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR: + return "VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR"; + case VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR: + return "VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR"; + case VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR: + return "VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR"; + default: + return "Unhandled VkSwapchainCreateFlagBitsKHR"; + } +} + +static inline std::string 
string_VkSwapchainCreateFlagsKHR(VkSwapchainCreateFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSwapchainCreateFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSwapchainCreateFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkDeviceGroupPresentModeFlagBitsKHR(VkDeviceGroupPresentModeFlagBitsKHR input_value) +{ + switch ((VkDeviceGroupPresentModeFlagBitsKHR)input_value) + { + case VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR: + return "VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR"; + case VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR: + return "VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR"; + case VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR: + return "VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR"; + case VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR: + return "VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR"; + default: + return "Unhandled VkDeviceGroupPresentModeFlagBitsKHR"; + } +} + +static inline std::string string_VkDeviceGroupPresentModeFlagsKHR(VkDeviceGroupPresentModeFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDeviceGroupPresentModeFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDeviceGroupPresentModeFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkDisplayPlaneAlphaFlagBitsKHR(VkDisplayPlaneAlphaFlagBitsKHR input_value) +{ + switch ((VkDisplayPlaneAlphaFlagBitsKHR)input_value) + { + case VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR: + return "VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR"; + case VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR: + return "VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR"; + case VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR: + return "VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR"; + case VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR: + return "VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR"; + default: + return "Unhandled VkDisplayPlaneAlphaFlagBitsKHR"; + } +} + +static inline std::string string_VkDisplayPlaneAlphaFlagsKHR(VkDisplayPlaneAlphaFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDisplayPlaneAlphaFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDisplayPlaneAlphaFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkPeerMemoryFeatureFlagBitsKHR(VkPeerMemoryFeatureFlagBitsKHR input_value) +{ + switch ((VkPeerMemoryFeatureFlagBitsKHR)input_value) + { + case VK_PEER_MEMORY_FEATURE_COPY_DST_BIT: + return "VK_PEER_MEMORY_FEATURE_COPY_DST_BIT"; + case VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT: + return "VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT"; + case VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT: + return "VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT"; + case VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT: + return "VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT"; + default: + return "Unhandled VkPeerMemoryFeatureFlagBitsKHR"; + } +} + +static inline std::string string_VkPeerMemoryFeatureFlagsKHR(VkPeerMemoryFeatureFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) 
ret.append("|"); + ret.append(string_VkPeerMemoryFeatureFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkPeerMemoryFeatureFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkMemoryAllocateFlagBitsKHR(VkMemoryAllocateFlagBitsKHR input_value) +{ + switch ((VkMemoryAllocateFlagBitsKHR)input_value) + { + case VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT: + return "VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT"; + case VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT: + return "VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT"; + case VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT: + return "VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT"; + default: + return "Unhandled VkMemoryAllocateFlagBitsKHR"; + } +} + +static inline std::string string_VkMemoryAllocateFlagsKHR(VkMemoryAllocateFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkMemoryAllocateFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkMemoryAllocateFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalMemoryHandleTypeFlagBitsKHR(VkExternalMemoryHandleTypeFlagBitsKHR input_value) +{ + switch ((VkExternalMemoryHandleTypeFlagBitsKHR)input_value) + { + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT"; + default: + return "Unhandled VkExternalMemoryHandleTypeFlagBitsKHR"; + } +} + +static inline std::string string_VkExternalMemoryHandleTypeFlagsKHR(VkExternalMemoryHandleTypeFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalMemoryHandleTypeFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalMemoryHandleTypeFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalMemoryFeatureFlagBitsKHR(VkExternalMemoryFeatureFlagBitsKHR input_value) +{ + switch ((VkExternalMemoryFeatureFlagBitsKHR)input_value) + { + case 
VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT: + return "VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT"; + case VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT: + return "VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT"; + case VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT: + return "VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT"; + default: + return "Unhandled VkExternalMemoryFeatureFlagBitsKHR"; + } +} + +static inline std::string string_VkExternalMemoryFeatureFlagsKHR(VkExternalMemoryFeatureFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalMemoryFeatureFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalMemoryFeatureFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalSemaphoreHandleTypeFlagBitsKHR(VkExternalSemaphoreHandleTypeFlagBitsKHR input_value) +{ + switch ((VkExternalSemaphoreHandleTypeFlagBitsKHR)input_value) + { + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT"; + case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: + return "VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT"; + default: + return "Unhandled VkExternalSemaphoreHandleTypeFlagBitsKHR"; + } +} + +static inline std::string string_VkExternalSemaphoreHandleTypeFlagsKHR(VkExternalSemaphoreHandleTypeFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalSemaphoreHandleTypeFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalSemaphoreHandleTypeFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalSemaphoreFeatureFlagBitsKHR(VkExternalSemaphoreFeatureFlagBitsKHR input_value) +{ + switch ((VkExternalSemaphoreFeatureFlagBitsKHR)input_value) + { + case VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT: + return "VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT"; + case VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT: + return "VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT"; + default: + return "Unhandled VkExternalSemaphoreFeatureFlagBitsKHR"; + } +} + +static inline std::string string_VkExternalSemaphoreFeatureFlagsKHR(VkExternalSemaphoreFeatureFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalSemaphoreFeatureFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalSemaphoreFeatureFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkSemaphoreImportFlagBitsKHR(VkSemaphoreImportFlagBitsKHR input_value) +{ + switch ((VkSemaphoreImportFlagBitsKHR)input_value) + { + case VK_SEMAPHORE_IMPORT_TEMPORARY_BIT: + return "VK_SEMAPHORE_IMPORT_TEMPORARY_BIT"; + default: + return "Unhandled VkSemaphoreImportFlagBitsKHR"; + } +} + +static inline std::string 
string_VkSemaphoreImportFlagsKHR(VkSemaphoreImportFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSemaphoreImportFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSemaphoreImportFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkDescriptorUpdateTemplateTypeKHR(VkDescriptorUpdateTemplateTypeKHR input_value) +{ + switch ((VkDescriptorUpdateTemplateTypeKHR)input_value) + { + case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET: + return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET"; + case VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR: + return "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR"; + default: + return "Unhandled VkDescriptorUpdateTemplateTypeKHR"; + } +} + +static inline const char* string_VkExternalFenceHandleTypeFlagBitsKHR(VkExternalFenceHandleTypeFlagBitsKHR input_value) +{ + switch ((VkExternalFenceHandleTypeFlagBitsKHR)input_value) + { + case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT"; + case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT"; + case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT"; + case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT: + return "VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT"; + default: + return "Unhandled VkExternalFenceHandleTypeFlagBitsKHR"; + } +} + +static inline std::string string_VkExternalFenceHandleTypeFlagsKHR(VkExternalFenceHandleTypeFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalFenceHandleTypeFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalFenceHandleTypeFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalFenceFeatureFlagBitsKHR(VkExternalFenceFeatureFlagBitsKHR input_value) +{ + switch ((VkExternalFenceFeatureFlagBitsKHR)input_value) + { + case VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT: + return "VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT"; + case VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT: + return "VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT"; + default: + return "Unhandled VkExternalFenceFeatureFlagBitsKHR"; + } +} + +static inline std::string string_VkExternalFenceFeatureFlagsKHR(VkExternalFenceFeatureFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalFenceFeatureFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalFenceFeatureFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkFenceImportFlagBitsKHR(VkFenceImportFlagBitsKHR input_value) +{ + switch ((VkFenceImportFlagBitsKHR)input_value) + { + case VK_FENCE_IMPORT_TEMPORARY_BIT: + return "VK_FENCE_IMPORT_TEMPORARY_BIT"; + default: + return "Unhandled VkFenceImportFlagBitsKHR"; + } +} + +static inline std::string string_VkFenceImportFlagsKHR(VkFenceImportFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + 
ret.append(string_VkFenceImportFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkFenceImportFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkPerformanceCounterUnitKHR(VkPerformanceCounterUnitKHR input_value) +{ + switch ((VkPerformanceCounterUnitKHR)input_value) + { + case VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR"; + case VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR: + return "VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR"; + default: + return "Unhandled VkPerformanceCounterUnitKHR"; + } +} + +static inline const char* string_VkPerformanceCounterScopeKHR(VkPerformanceCounterScopeKHR input_value) +{ + switch ((VkPerformanceCounterScopeKHR)input_value) + { + case VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR: + return "VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR"; + case VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR: + return "VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR"; + case VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR: + return "VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR"; + default: + return "Unhandled VkPerformanceCounterScopeKHR"; + } +} + +static inline const char* string_VkPerformanceCounterStorageKHR(VkPerformanceCounterStorageKHR input_value) +{ + switch ((VkPerformanceCounterStorageKHR)input_value) + { + case VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR: + return "VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR"; + case VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR: + return "VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR"; + case VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR: + return "VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR"; + case VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR: + return "VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR"; + case VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR: + return "VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR"; + case VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR: + return "VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR"; + default: + return "Unhandled VkPerformanceCounterStorageKHR"; + } +} + +static inline const char* string_VkPerformanceCounterDescriptionFlagBitsKHR(VkPerformanceCounterDescriptionFlagBitsKHR input_value) +{ + switch ((VkPerformanceCounterDescriptionFlagBitsKHR)input_value) + { + case VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR: + return "VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR"; + case VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR: + return "VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR"; + default: + return "Unhandled 
VkPerformanceCounterDescriptionFlagBitsKHR"; + } +} + +static inline std::string string_VkPerformanceCounterDescriptionFlagsKHR(VkPerformanceCounterDescriptionFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkPerformanceCounterDescriptionFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkPerformanceCounterDescriptionFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkPointClippingBehaviorKHR(VkPointClippingBehaviorKHR input_value) +{ + switch ((VkPointClippingBehaviorKHR)input_value) + { + case VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES: + return "VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES"; + case VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY: + return "VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY"; + default: + return "Unhandled VkPointClippingBehaviorKHR"; + } +} + +static inline const char* string_VkTessellationDomainOriginKHR(VkTessellationDomainOriginKHR input_value) +{ + switch ((VkTessellationDomainOriginKHR)input_value) + { + case VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT: + return "VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT"; + case VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT: + return "VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT"; + default: + return "Unhandled VkTessellationDomainOriginKHR"; + } +} + +static inline const char* string_VkSamplerYcbcrModelConversionKHR(VkSamplerYcbcrModelConversionKHR input_value) +{ + switch ((VkSamplerYcbcrModelConversionKHR)input_value) + { + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709"; + case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY: + return "VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY"; + default: + return "Unhandled VkSamplerYcbcrModelConversionKHR"; + } +} + +static inline const char* string_VkSamplerYcbcrRangeKHR(VkSamplerYcbcrRangeKHR input_value) +{ + switch ((VkSamplerYcbcrRangeKHR)input_value) + { + case VK_SAMPLER_YCBCR_RANGE_ITU_FULL: + return "VK_SAMPLER_YCBCR_RANGE_ITU_FULL"; + case VK_SAMPLER_YCBCR_RANGE_ITU_NARROW: + return "VK_SAMPLER_YCBCR_RANGE_ITU_NARROW"; + default: + return "Unhandled VkSamplerYcbcrRangeKHR"; + } +} + +static inline const char* string_VkChromaLocationKHR(VkChromaLocationKHR input_value) +{ + switch ((VkChromaLocationKHR)input_value) + { + case VK_CHROMA_LOCATION_COSITED_EVEN: + return "VK_CHROMA_LOCATION_COSITED_EVEN"; + case VK_CHROMA_LOCATION_MIDPOINT: + return "VK_CHROMA_LOCATION_MIDPOINT"; + default: + return "Unhandled VkChromaLocationKHR"; + } +} + +static inline const char* string_VkDriverIdKHR(VkDriverIdKHR input_value) +{ + switch ((VkDriverIdKHR)input_value) + { + case VK_DRIVER_ID_AMD_OPEN_SOURCE: + return "VK_DRIVER_ID_AMD_OPEN_SOURCE"; + case VK_DRIVER_ID_AMD_PROPRIETARY: + return "VK_DRIVER_ID_AMD_PROPRIETARY"; + case VK_DRIVER_ID_ARM_PROPRIETARY: + return "VK_DRIVER_ID_ARM_PROPRIETARY"; + case VK_DRIVER_ID_BROADCOM_PROPRIETARY: + return "VK_DRIVER_ID_BROADCOM_PROPRIETARY"; + case VK_DRIVER_ID_GGP_PROPRIETARY: + return "VK_DRIVER_ID_GGP_PROPRIETARY"; + case 
VK_DRIVER_ID_GOOGLE_SWIFTSHADER:
+            return "VK_DRIVER_ID_GOOGLE_SWIFTSHADER";
+        case VK_DRIVER_ID_IMAGINATION_PROPRIETARY:
+            return "VK_DRIVER_ID_IMAGINATION_PROPRIETARY";
+        case VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA:
+            return "VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA";
+        case VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS:
+            return "VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS";
+        case VK_DRIVER_ID_MESA_LLVMPIPE:
+            return "VK_DRIVER_ID_MESA_LLVMPIPE";
+        case VK_DRIVER_ID_MESA_RADV:
+            return "VK_DRIVER_ID_MESA_RADV";
+        case VK_DRIVER_ID_MOLTENVK:
+            return "VK_DRIVER_ID_MOLTENVK";
+        case VK_DRIVER_ID_NVIDIA_PROPRIETARY:
+            return "VK_DRIVER_ID_NVIDIA_PROPRIETARY";
+        case VK_DRIVER_ID_QUALCOMM_PROPRIETARY:
+            return "VK_DRIVER_ID_QUALCOMM_PROPRIETARY";
+        default:
+            return "Unhandled VkDriverIdKHR";
+    }
+}
+
+static inline const char* string_VkShaderFloatControlsIndependenceKHR(VkShaderFloatControlsIndependenceKHR input_value)
+{
+    switch ((VkShaderFloatControlsIndependenceKHR)input_value)
+    {
+        case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY:
+            return "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY";
+        case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL:
+            return "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL";
+        case VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE:
+            return "VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE";
+        default:
+            return "Unhandled VkShaderFloatControlsIndependenceKHR";
+    }
+}
+
+static inline const char* string_VkResolveModeFlagBitsKHR(VkResolveModeFlagBitsKHR input_value)
+{
+    switch ((VkResolveModeFlagBitsKHR)input_value)
+    {
+        case VK_RESOLVE_MODE_AVERAGE_BIT:
+            return "VK_RESOLVE_MODE_AVERAGE_BIT";
+        case VK_RESOLVE_MODE_MAX_BIT:
+            return "VK_RESOLVE_MODE_MAX_BIT";
+        case VK_RESOLVE_MODE_MIN_BIT:
+            return "VK_RESOLVE_MODE_MIN_BIT";
+        case VK_RESOLVE_MODE_NONE:
+            return "VK_RESOLVE_MODE_NONE";
+        case VK_RESOLVE_MODE_SAMPLE_ZERO_BIT:
+            return "VK_RESOLVE_MODE_SAMPLE_ZERO_BIT";
+        default:
+            return "Unhandled VkResolveModeFlagBitsKHR";
+    }
+}
+
+static inline std::string string_VkResolveModeFlagsKHR(VkResolveModeFlagsKHR input_value)
+{
+    std::string ret;
+    int index = 0;
+    while(input_value) {
+        if (input_value & 1) {
+            if( !ret.empty()) ret.append("|");
+            ret.append(string_VkResolveModeFlagBitsKHR(static_cast<VkResolveModeFlagBitsKHR>(1 << index)));
+        }
+        ++index;
+        input_value >>= 1;
+    }
+    if( ret.empty()) ret.append(string_VkResolveModeFlagBitsKHR(static_cast<VkResolveModeFlagBitsKHR>(0)));
+    return ret;
+}
+
+static inline const char* string_VkSemaphoreTypeKHR(VkSemaphoreTypeKHR input_value)
+{
+    switch ((VkSemaphoreTypeKHR)input_value)
+    {
+        case VK_SEMAPHORE_TYPE_BINARY:
+            return "VK_SEMAPHORE_TYPE_BINARY";
+        case VK_SEMAPHORE_TYPE_TIMELINE:
+            return "VK_SEMAPHORE_TYPE_TIMELINE";
+        default:
+            return "Unhandled VkSemaphoreTypeKHR";
+    }
+}
+
+static inline const char* string_VkSemaphoreWaitFlagBitsKHR(VkSemaphoreWaitFlagBitsKHR input_value)
+{
+    switch ((VkSemaphoreWaitFlagBitsKHR)input_value)
+    {
+        case VK_SEMAPHORE_WAIT_ANY_BIT:
+            return "VK_SEMAPHORE_WAIT_ANY_BIT";
+        default:
+            return "Unhandled VkSemaphoreWaitFlagBitsKHR";
+    }
+}
+
+static inline std::string string_VkSemaphoreWaitFlagsKHR(VkSemaphoreWaitFlagsKHR input_value)
+{
+    std::string ret;
+    int index = 0;
+    while(input_value) {
+        if (input_value & 1) {
+            if( !ret.empty()) ret.append("|");
+            ret.append(string_VkSemaphoreWaitFlagBitsKHR(static_cast<VkSemaphoreWaitFlagBitsKHR>(1 << index)));
+        }
+        ++index;
+        input_value >>= 1;
+    }
+    if( ret.empty()) ret.append(string_VkSemaphoreWaitFlagBitsKHR(static_cast<VkSemaphoreWaitFlagBitsKHR>(0)));
+    return ret;
+}
+
+static inline const char*
string_VkFragmentShadingRateCombinerOpKHR(VkFragmentShadingRateCombinerOpKHR input_value) +{ + switch ((VkFragmentShadingRateCombinerOpKHR)input_value) + { + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR"; + case VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR: + return "VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR"; + default: + return "Unhandled VkFragmentShadingRateCombinerOpKHR"; + } +} + +static inline const char* string_VkPipelineExecutableStatisticFormatKHR(VkPipelineExecutableStatisticFormatKHR input_value) +{ + switch ((VkPipelineExecutableStatisticFormatKHR)input_value) + { + case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR: + return "VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR"; + case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR: + return "VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR"; + case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR: + return "VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR"; + case VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR: + return "VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR"; + default: + return "Unhandled VkPipelineExecutableStatisticFormatKHR"; + } +} + +static inline const char* string_VkDebugReportFlagBitsEXT(VkDebugReportFlagBitsEXT input_value) +{ + switch ((VkDebugReportFlagBitsEXT)input_value) + { + case VK_DEBUG_REPORT_DEBUG_BIT_EXT: + return "VK_DEBUG_REPORT_DEBUG_BIT_EXT"; + case VK_DEBUG_REPORT_ERROR_BIT_EXT: + return "VK_DEBUG_REPORT_ERROR_BIT_EXT"; + case VK_DEBUG_REPORT_INFORMATION_BIT_EXT: + return "VK_DEBUG_REPORT_INFORMATION_BIT_EXT"; + case VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT: + return "VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT"; + case VK_DEBUG_REPORT_WARNING_BIT_EXT: + return "VK_DEBUG_REPORT_WARNING_BIT_EXT"; + default: + return "Unhandled VkDebugReportFlagBitsEXT"; + } +} + +static inline std::string string_VkDebugReportFlagsEXT(VkDebugReportFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDebugReportFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDebugReportFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkDebugReportObjectTypeEXT(VkDebugReportObjectTypeEXT input_value) +{ + switch ((VkDebugReportObjectTypeEXT)input_value) + { + case VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT"; + case 
VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT"; + case VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT: + return "VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT"; + default: + return "Unhandled VkDebugReportObjectTypeEXT"; + } +} + +static inline const char* string_VkRasterizationOrderAMD(VkRasterizationOrderAMD input_value) +{ + switch ((VkRasterizationOrderAMD)input_value) + { + case 
VK_RASTERIZATION_ORDER_RELAXED_AMD: + return "VK_RASTERIZATION_ORDER_RELAXED_AMD"; + case VK_RASTERIZATION_ORDER_STRICT_AMD: + return "VK_RASTERIZATION_ORDER_STRICT_AMD"; + default: + return "Unhandled VkRasterizationOrderAMD"; + } +} + +static inline const char* string_VkShaderInfoTypeAMD(VkShaderInfoTypeAMD input_value) +{ + switch ((VkShaderInfoTypeAMD)input_value) + { + case VK_SHADER_INFO_TYPE_BINARY_AMD: + return "VK_SHADER_INFO_TYPE_BINARY_AMD"; + case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD: + return "VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD"; + case VK_SHADER_INFO_TYPE_STATISTICS_AMD: + return "VK_SHADER_INFO_TYPE_STATISTICS_AMD"; + default: + return "Unhandled VkShaderInfoTypeAMD"; + } +} + +static inline const char* string_VkExternalMemoryHandleTypeFlagBitsNV(VkExternalMemoryHandleTypeFlagBitsNV input_value) +{ + switch ((VkExternalMemoryHandleTypeFlagBitsNV)input_value) + { + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV"; + case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV: + return "VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV"; + default: + return "Unhandled VkExternalMemoryHandleTypeFlagBitsNV"; + } +} + +static inline std::string string_VkExternalMemoryHandleTypeFlagsNV(VkExternalMemoryHandleTypeFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalMemoryHandleTypeFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalMemoryHandleTypeFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkExternalMemoryFeatureFlagBitsNV(VkExternalMemoryFeatureFlagBitsNV input_value) +{ + switch ((VkExternalMemoryFeatureFlagBitsNV)input_value) + { + case VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV: + return "VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV"; + case VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV: + return "VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV"; + case VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV: + return "VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV"; + default: + return "Unhandled VkExternalMemoryFeatureFlagBitsNV"; + } +} + +static inline std::string string_VkExternalMemoryFeatureFlagsNV(VkExternalMemoryFeatureFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkExternalMemoryFeatureFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkExternalMemoryFeatureFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkValidationCheckEXT(VkValidationCheckEXT input_value) +{ + switch ((VkValidationCheckEXT)input_value) + { + case VK_VALIDATION_CHECK_ALL_EXT: + return "VK_VALIDATION_CHECK_ALL_EXT"; + case VK_VALIDATION_CHECK_SHADERS_EXT: + return "VK_VALIDATION_CHECK_SHADERS_EXT"; + default: + return "Unhandled VkValidationCheckEXT"; + } +} + +static inline const char* string_VkConditionalRenderingFlagBitsEXT(VkConditionalRenderingFlagBitsEXT input_value) +{ + switch ((VkConditionalRenderingFlagBitsEXT)input_value) + { + case 
VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT: + return "VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT"; + default: + return "Unhandled VkConditionalRenderingFlagBitsEXT"; + } +} + +static inline std::string string_VkConditionalRenderingFlagsEXT(VkConditionalRenderingFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkConditionalRenderingFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkConditionalRenderingFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkSurfaceCounterFlagBitsEXT(VkSurfaceCounterFlagBitsEXT input_value) +{ + switch ((VkSurfaceCounterFlagBitsEXT)input_value) + { + case VK_SURFACE_COUNTER_VBLANK_BIT_EXT: + return "VK_SURFACE_COUNTER_VBLANK_BIT_EXT"; + default: + return "Unhandled VkSurfaceCounterFlagBitsEXT"; + } +} + +static inline std::string string_VkSurfaceCounterFlagsEXT(VkSurfaceCounterFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkSurfaceCounterFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkSurfaceCounterFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkDisplayPowerStateEXT(VkDisplayPowerStateEXT input_value) +{ + switch ((VkDisplayPowerStateEXT)input_value) + { + case VK_DISPLAY_POWER_STATE_OFF_EXT: + return "VK_DISPLAY_POWER_STATE_OFF_EXT"; + case VK_DISPLAY_POWER_STATE_ON_EXT: + return "VK_DISPLAY_POWER_STATE_ON_EXT"; + case VK_DISPLAY_POWER_STATE_SUSPEND_EXT: + return "VK_DISPLAY_POWER_STATE_SUSPEND_EXT"; + default: + return "Unhandled VkDisplayPowerStateEXT"; + } +} + +static inline const char* string_VkDeviceEventTypeEXT(VkDeviceEventTypeEXT input_value) +{ + switch ((VkDeviceEventTypeEXT)input_value) + { + case VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT: + return "VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT"; + default: + return "Unhandled VkDeviceEventTypeEXT"; + } +} + +static inline const char* string_VkDisplayEventTypeEXT(VkDisplayEventTypeEXT input_value) +{ + switch ((VkDisplayEventTypeEXT)input_value) + { + case VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT: + return "VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT"; + default: + return "Unhandled VkDisplayEventTypeEXT"; + } +} + +static inline const char* string_VkViewportCoordinateSwizzleNV(VkViewportCoordinateSwizzleNV input_value) +{ + switch ((VkViewportCoordinateSwizzleNV)input_value) + { + case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV"; + case VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV: + return "VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV"; + default: + return "Unhandled 
VkViewportCoordinateSwizzleNV"; + } +} + +static inline const char* string_VkDiscardRectangleModeEXT(VkDiscardRectangleModeEXT input_value) +{ + switch ((VkDiscardRectangleModeEXT)input_value) + { + case VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT: + return "VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT"; + case VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT: + return "VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT"; + default: + return "Unhandled VkDiscardRectangleModeEXT"; + } +} + +static inline const char* string_VkConservativeRasterizationModeEXT(VkConservativeRasterizationModeEXT input_value) +{ + switch ((VkConservativeRasterizationModeEXT)input_value) + { + case VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT: + return "VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT"; + case VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT: + return "VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT"; + case VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT: + return "VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT"; + default: + return "Unhandled VkConservativeRasterizationModeEXT"; + } +} + +static inline const char* string_VkDebugUtilsMessageSeverityFlagBitsEXT(VkDebugUtilsMessageSeverityFlagBitsEXT input_value) +{ + switch ((VkDebugUtilsMessageSeverityFlagBitsEXT)input_value) + { + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT"; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT"; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT"; + case VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT"; + default: + return "Unhandled VkDebugUtilsMessageSeverityFlagBitsEXT"; + } +} + +static inline std::string string_VkDebugUtilsMessageSeverityFlagsEXT(VkDebugUtilsMessageSeverityFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDebugUtilsMessageSeverityFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDebugUtilsMessageSeverityFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkDebugUtilsMessageTypeFlagBitsEXT(VkDebugUtilsMessageTypeFlagBitsEXT input_value) +{ + switch ((VkDebugUtilsMessageTypeFlagBitsEXT)input_value) + { + case VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT"; + case VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT"; + case VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT: + return "VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT"; + default: + return "Unhandled VkDebugUtilsMessageTypeFlagBitsEXT"; + } +} + +static inline std::string string_VkDebugUtilsMessageTypeFlagsEXT(VkDebugUtilsMessageTypeFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDebugUtilsMessageTypeFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDebugUtilsMessageTypeFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkSamplerReductionModeEXT(VkSamplerReductionModeEXT input_value) +{ + switch ((VkSamplerReductionModeEXT)input_value) + { + case 
VK_SAMPLER_REDUCTION_MODE_MAX: + return "VK_SAMPLER_REDUCTION_MODE_MAX"; + case VK_SAMPLER_REDUCTION_MODE_MIN: + return "VK_SAMPLER_REDUCTION_MODE_MIN"; + case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE: + return "VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE"; + default: + return "Unhandled VkSamplerReductionModeEXT"; + } +} + +static inline const char* string_VkBlendOverlapEXT(VkBlendOverlapEXT input_value) +{ + switch ((VkBlendOverlapEXT)input_value) + { + case VK_BLEND_OVERLAP_CONJOINT_EXT: + return "VK_BLEND_OVERLAP_CONJOINT_EXT"; + case VK_BLEND_OVERLAP_DISJOINT_EXT: + return "VK_BLEND_OVERLAP_DISJOINT_EXT"; + case VK_BLEND_OVERLAP_UNCORRELATED_EXT: + return "VK_BLEND_OVERLAP_UNCORRELATED_EXT"; + default: + return "Unhandled VkBlendOverlapEXT"; + } +} + +static inline const char* string_VkCoverageModulationModeNV(VkCoverageModulationModeNV input_value) +{ + switch ((VkCoverageModulationModeNV)input_value) + { + case VK_COVERAGE_MODULATION_MODE_ALPHA_NV: + return "VK_COVERAGE_MODULATION_MODE_ALPHA_NV"; + case VK_COVERAGE_MODULATION_MODE_NONE_NV: + return "VK_COVERAGE_MODULATION_MODE_NONE_NV"; + case VK_COVERAGE_MODULATION_MODE_RGBA_NV: + return "VK_COVERAGE_MODULATION_MODE_RGBA_NV"; + case VK_COVERAGE_MODULATION_MODE_RGB_NV: + return "VK_COVERAGE_MODULATION_MODE_RGB_NV"; + default: + return "Unhandled VkCoverageModulationModeNV"; + } +} + +static inline const char* string_VkValidationCacheHeaderVersionEXT(VkValidationCacheHeaderVersionEXT input_value) +{ + switch ((VkValidationCacheHeaderVersionEXT)input_value) + { + case VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT: + return "VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT"; + default: + return "Unhandled VkValidationCacheHeaderVersionEXT"; + } +} + +static inline const char* string_VkDescriptorBindingFlagBitsEXT(VkDescriptorBindingFlagBitsEXT input_value) +{ + switch ((VkDescriptorBindingFlagBitsEXT)input_value) + { + case VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT: + return "VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT"; + case VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT: + return "VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT"; + case VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT: + return "VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT"; + case VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT: + return "VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT"; + default: + return "Unhandled VkDescriptorBindingFlagBitsEXT"; + } +} + +static inline std::string string_VkDescriptorBindingFlagsEXT(VkDescriptorBindingFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDescriptorBindingFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDescriptorBindingFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkShadingRatePaletteEntryNV(VkShadingRatePaletteEntryNV input_value) +{ + switch ((VkShadingRatePaletteEntryNV)input_value) + { + case VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV: + return 
"VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV"; + case VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV: + return "VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV"; + default: + return "Unhandled VkShadingRatePaletteEntryNV"; + } +} + +static inline const char* string_VkCoarseSampleOrderTypeNV(VkCoarseSampleOrderTypeNV input_value) +{ + switch ((VkCoarseSampleOrderTypeNV)input_value) + { + case VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV: + return "VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV"; + case VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV: + return "VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV"; + case VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV: + return "VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV"; + case VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV: + return "VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV"; + default: + return "Unhandled VkCoarseSampleOrderTypeNV"; + } +} + +static inline const char* string_VkRayTracingShaderGroupTypeKHR(VkRayTracingShaderGroupTypeKHR input_value) +{ + switch ((VkRayTracingShaderGroupTypeKHR)input_value) + { + case VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR: + return "VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR"; + case VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR: + return "VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR"; + case VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR: + return "VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR"; + default: + return "Unhandled VkRayTracingShaderGroupTypeKHR"; + } +} + +static inline const char* string_VkRayTracingShaderGroupTypeNV(VkRayTracingShaderGroupTypeNV input_value) +{ + switch ((VkRayTracingShaderGroupTypeNV)input_value) + { + case VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR: + return "VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR"; + case VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR: + return "VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR"; + case VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR: + return "VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR"; + default: + return "Unhandled VkRayTracingShaderGroupTypeNV"; + } +} + +static inline const char* string_VkGeometryTypeKHR(VkGeometryTypeKHR input_value) +{ + switch ((VkGeometryTypeKHR)input_value) + { + case VK_GEOMETRY_TYPE_AABBS_KHR: + return "VK_GEOMETRY_TYPE_AABBS_KHR"; + case VK_GEOMETRY_TYPE_INSTANCES_KHR: + return "VK_GEOMETRY_TYPE_INSTANCES_KHR"; + case VK_GEOMETRY_TYPE_TRIANGLES_KHR: + return "VK_GEOMETRY_TYPE_TRIANGLES_KHR"; + default: + return "Unhandled VkGeometryTypeKHR"; + } +} + +static inline const char* 
string_VkGeometryTypeNV(VkGeometryTypeNV input_value) +{ + switch ((VkGeometryTypeNV)input_value) + { + case VK_GEOMETRY_TYPE_AABBS_KHR: + return "VK_GEOMETRY_TYPE_AABBS_KHR"; + case VK_GEOMETRY_TYPE_INSTANCES_KHR: + return "VK_GEOMETRY_TYPE_INSTANCES_KHR"; + case VK_GEOMETRY_TYPE_TRIANGLES_KHR: + return "VK_GEOMETRY_TYPE_TRIANGLES_KHR"; + default: + return "Unhandled VkGeometryTypeNV"; + } +} + +static inline const char* string_VkAccelerationStructureTypeKHR(VkAccelerationStructureTypeKHR input_value) +{ + switch ((VkAccelerationStructureTypeKHR)input_value) + { + case VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR"; + case VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR"; + case VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR"; + default: + return "Unhandled VkAccelerationStructureTypeKHR"; + } +} + +static inline const char* string_VkAccelerationStructureTypeNV(VkAccelerationStructureTypeNV input_value) +{ + switch ((VkAccelerationStructureTypeNV)input_value) + { + case VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR"; + case VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR"; + case VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR: + return "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR"; + default: + return "Unhandled VkAccelerationStructureTypeNV"; + } +} + +static inline const char* string_VkGeometryFlagBitsKHR(VkGeometryFlagBitsKHR input_value) +{ + switch ((VkGeometryFlagBitsKHR)input_value) + { + case VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR: + return "VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR"; + case VK_GEOMETRY_OPAQUE_BIT_KHR: + return "VK_GEOMETRY_OPAQUE_BIT_KHR"; + default: + return "Unhandled VkGeometryFlagBitsKHR"; + } +} + +static inline std::string string_VkGeometryFlagsKHR(VkGeometryFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkGeometryFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkGeometryFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkGeometryFlagBitsNV(VkGeometryFlagBitsNV input_value) +{ + switch ((VkGeometryFlagBitsNV)input_value) + { + case VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR: + return "VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR"; + case VK_GEOMETRY_OPAQUE_BIT_KHR: + return "VK_GEOMETRY_OPAQUE_BIT_KHR"; + default: + return "Unhandled VkGeometryFlagBitsNV"; + } +} + +static inline std::string string_VkGeometryFlagsNV(VkGeometryFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkGeometryFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkGeometryFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkGeometryInstanceFlagBitsKHR(VkGeometryInstanceFlagBitsKHR input_value) +{ + switch ((VkGeometryInstanceFlagBitsKHR)input_value) + { + case VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR"; + case VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR: + return 
"VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR"; + case VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR"; + case VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR"; + default: + return "Unhandled VkGeometryInstanceFlagBitsKHR"; + } +} + +static inline std::string string_VkGeometryInstanceFlagsKHR(VkGeometryInstanceFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkGeometryInstanceFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkGeometryInstanceFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkGeometryInstanceFlagBitsNV(VkGeometryInstanceFlagBitsNV input_value) +{ + switch ((VkGeometryInstanceFlagBitsNV)input_value) + { + case VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR"; + case VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR"; + case VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR"; + case VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR: + return "VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR"; + default: + return "Unhandled VkGeometryInstanceFlagBitsNV"; + } +} + +static inline std::string string_VkGeometryInstanceFlagsNV(VkGeometryInstanceFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkGeometryInstanceFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkGeometryInstanceFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkBuildAccelerationStructureFlagBitsKHR(VkBuildAccelerationStructureFlagBitsKHR input_value) +{ + switch ((VkBuildAccelerationStructureFlagBitsKHR)input_value) + { + case VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR"; + default: + return "Unhandled VkBuildAccelerationStructureFlagBitsKHR"; + } +} + +static inline std::string string_VkBuildAccelerationStructureFlagsKHR(VkBuildAccelerationStructureFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkBuildAccelerationStructureFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkBuildAccelerationStructureFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkBuildAccelerationStructureFlagBitsNV(VkBuildAccelerationStructureFlagBitsNV 
input_value) +{ + switch ((VkBuildAccelerationStructureFlagBitsNV)input_value) + { + case VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR"; + default: + return "Unhandled VkBuildAccelerationStructureFlagBitsNV"; + } +} + +static inline std::string string_VkBuildAccelerationStructureFlagsNV(VkBuildAccelerationStructureFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkBuildAccelerationStructureFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkBuildAccelerationStructureFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkCopyAccelerationStructureModeKHR(VkCopyAccelerationStructureModeKHR input_value) +{ + switch ((VkCopyAccelerationStructureModeKHR)input_value) + { + case VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR"; + case VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR"; + case VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR"; + case VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR"; + default: + return "Unhandled VkCopyAccelerationStructureModeKHR"; + } +} + +static inline const char* string_VkCopyAccelerationStructureModeNV(VkCopyAccelerationStructureModeNV input_value) +{ + switch ((VkCopyAccelerationStructureModeNV)input_value) + { + case VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR"; + case VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR"; + case VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR"; + case VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR: + return "VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR"; + default: + return "Unhandled VkCopyAccelerationStructureModeNV"; + } +} + +static inline const char* string_VkAccelerationStructureMemoryRequirementsTypeNV(VkAccelerationStructureMemoryRequirementsTypeNV input_value) +{ + switch ((VkAccelerationStructureMemoryRequirementsTypeNV)input_value) + { + case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV: + return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV"; + case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV: + return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV"; + case VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV: + return "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV"; + default: + return "Unhandled VkAccelerationStructureMemoryRequirementsTypeNV"; + } +} + +static inline const char* 
string_VkQueueGlobalPriorityEXT(VkQueueGlobalPriorityEXT input_value) +{ + switch ((VkQueueGlobalPriorityEXT)input_value) + { + case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT: + return "VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT"; + case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT: + return "VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT"; + case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT: + return "VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT"; + case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT: + return "VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT"; + default: + return "Unhandled VkQueueGlobalPriorityEXT"; + } +} + +static inline const char* string_VkTimeDomainEXT(VkTimeDomainEXT input_value) +{ + switch ((VkTimeDomainEXT)input_value) + { + case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT: + return "VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT"; + case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT: + return "VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT"; + case VK_TIME_DOMAIN_DEVICE_EXT: + return "VK_TIME_DOMAIN_DEVICE_EXT"; + case VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT: + return "VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT"; + default: + return "Unhandled VkTimeDomainEXT"; + } +} + +static inline const char* string_VkMemoryOverallocationBehaviorAMD(VkMemoryOverallocationBehaviorAMD input_value) +{ + switch ((VkMemoryOverallocationBehaviorAMD)input_value) + { + case VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD: + return "VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD"; + case VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD: + return "VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD"; + case VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD: + return "VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD"; + default: + return "Unhandled VkMemoryOverallocationBehaviorAMD"; + } +} + +static inline const char* string_VkPipelineCreationFeedbackFlagBitsEXT(VkPipelineCreationFeedbackFlagBitsEXT input_value) +{ + switch ((VkPipelineCreationFeedbackFlagBitsEXT)input_value) + { + case VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT: + return "VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT"; + case VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT: + return "VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT"; + case VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT: + return "VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT"; + default: + return "Unhandled VkPipelineCreationFeedbackFlagBitsEXT"; + } +} + +static inline std::string string_VkPipelineCreationFeedbackFlagsEXT(VkPipelineCreationFeedbackFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkPipelineCreationFeedbackFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkPipelineCreationFeedbackFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkPerformanceConfigurationTypeINTEL(VkPerformanceConfigurationTypeINTEL input_value) +{ + switch ((VkPerformanceConfigurationTypeINTEL)input_value) + { + case VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL: + return "VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL"; + default: + return "Unhandled VkPerformanceConfigurationTypeINTEL"; + } +} + +static inline const char* string_VkQueryPoolSamplingModeINTEL(VkQueryPoolSamplingModeINTEL input_value) +{ + switch ((VkQueryPoolSamplingModeINTEL)input_value) + { + case VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL: + return 
"VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL"; + default: + return "Unhandled VkQueryPoolSamplingModeINTEL"; + } +} + +static inline const char* string_VkPerformanceOverrideTypeINTEL(VkPerformanceOverrideTypeINTEL input_value) +{ + switch ((VkPerformanceOverrideTypeINTEL)input_value) + { + case VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL: + return "VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL"; + case VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL: + return "VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL"; + default: + return "Unhandled VkPerformanceOverrideTypeINTEL"; + } +} + +static inline const char* string_VkPerformanceParameterTypeINTEL(VkPerformanceParameterTypeINTEL input_value) +{ + switch ((VkPerformanceParameterTypeINTEL)input_value) + { + case VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL: + return "VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL"; + case VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL: + return "VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL"; + default: + return "Unhandled VkPerformanceParameterTypeINTEL"; + } +} + +static inline const char* string_VkPerformanceValueTypeINTEL(VkPerformanceValueTypeINTEL input_value) +{ + switch ((VkPerformanceValueTypeINTEL)input_value) + { + case VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL: + return "VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL"; + case VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL: + return "VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL"; + case VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL: + return "VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL"; + case VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL: + return "VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL"; + case VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL: + return "VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL"; + default: + return "Unhandled VkPerformanceValueTypeINTEL"; + } +} + +static inline const char* string_VkToolPurposeFlagBitsEXT(VkToolPurposeFlagBitsEXT input_value) +{ + switch ((VkToolPurposeFlagBitsEXT)input_value) + { + case VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT: + return "VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT"; + case VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT: + return "VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT"; + case VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT: + return "VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT"; + case VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT: + return "VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT"; + case VK_TOOL_PURPOSE_PROFILING_BIT_EXT: + return "VK_TOOL_PURPOSE_PROFILING_BIT_EXT"; + case VK_TOOL_PURPOSE_TRACING_BIT_EXT: + return "VK_TOOL_PURPOSE_TRACING_BIT_EXT"; + case VK_TOOL_PURPOSE_VALIDATION_BIT_EXT: + return "VK_TOOL_PURPOSE_VALIDATION_BIT_EXT"; + default: + return "Unhandled VkToolPurposeFlagBitsEXT"; + } +} + +static inline std::string string_VkToolPurposeFlagsEXT(VkToolPurposeFlagsEXT input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkToolPurposeFlagBitsEXT(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkToolPurposeFlagBitsEXT(static_cast(0))); + return ret; +} + +static inline const char* string_VkValidationFeatureEnableEXT(VkValidationFeatureEnableEXT input_value) +{ + switch ((VkValidationFeatureEnableEXT)input_value) + { + case VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT: + return "VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT"; + case VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT: + return "VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT"; 
+ case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT: + return "VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT"; + case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT: + return "VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT"; + case VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT: + return "VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT"; + default: + return "Unhandled VkValidationFeatureEnableEXT"; + } +} + +static inline const char* string_VkValidationFeatureDisableEXT(VkValidationFeatureDisableEXT input_value) +{ + switch ((VkValidationFeatureDisableEXT)input_value) + { + case VK_VALIDATION_FEATURE_DISABLE_ALL_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_ALL_EXT"; + case VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT"; + case VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT"; + case VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT"; + case VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT"; + case VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT"; + case VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT: + return "VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT"; + default: + return "Unhandled VkValidationFeatureDisableEXT"; + } +} + +static inline const char* string_VkComponentTypeNV(VkComponentTypeNV input_value) +{ + switch ((VkComponentTypeNV)input_value) + { + case VK_COMPONENT_TYPE_FLOAT16_NV: + return "VK_COMPONENT_TYPE_FLOAT16_NV"; + case VK_COMPONENT_TYPE_FLOAT32_NV: + return "VK_COMPONENT_TYPE_FLOAT32_NV"; + case VK_COMPONENT_TYPE_FLOAT64_NV: + return "VK_COMPONENT_TYPE_FLOAT64_NV"; + case VK_COMPONENT_TYPE_SINT16_NV: + return "VK_COMPONENT_TYPE_SINT16_NV"; + case VK_COMPONENT_TYPE_SINT32_NV: + return "VK_COMPONENT_TYPE_SINT32_NV"; + case VK_COMPONENT_TYPE_SINT64_NV: + return "VK_COMPONENT_TYPE_SINT64_NV"; + case VK_COMPONENT_TYPE_SINT8_NV: + return "VK_COMPONENT_TYPE_SINT8_NV"; + case VK_COMPONENT_TYPE_UINT16_NV: + return "VK_COMPONENT_TYPE_UINT16_NV"; + case VK_COMPONENT_TYPE_UINT32_NV: + return "VK_COMPONENT_TYPE_UINT32_NV"; + case VK_COMPONENT_TYPE_UINT64_NV: + return "VK_COMPONENT_TYPE_UINT64_NV"; + case VK_COMPONENT_TYPE_UINT8_NV: + return "VK_COMPONENT_TYPE_UINT8_NV"; + default: + return "Unhandled VkComponentTypeNV"; + } +} + +static inline const char* string_VkScopeNV(VkScopeNV input_value) +{ + switch ((VkScopeNV)input_value) + { + case VK_SCOPE_DEVICE_NV: + return "VK_SCOPE_DEVICE_NV"; + case VK_SCOPE_QUEUE_FAMILY_NV: + return "VK_SCOPE_QUEUE_FAMILY_NV"; + case VK_SCOPE_SUBGROUP_NV: + return "VK_SCOPE_SUBGROUP_NV"; + case VK_SCOPE_WORKGROUP_NV: + return "VK_SCOPE_WORKGROUP_NV"; + default: + return "Unhandled VkScopeNV"; + } +} + +static inline const char* string_VkCoverageReductionModeNV(VkCoverageReductionModeNV input_value) +{ + switch ((VkCoverageReductionModeNV)input_value) + { + case VK_COVERAGE_REDUCTION_MODE_MERGE_NV: + return "VK_COVERAGE_REDUCTION_MODE_MERGE_NV"; + case VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV: + return "VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV"; + default: + return "Unhandled VkCoverageReductionModeNV"; + } +} + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + +static inline const char* string_VkFullScreenExclusiveEXT(VkFullScreenExclusiveEXT input_value) +{ + switch ((VkFullScreenExclusiveEXT)input_value) + { + 
case VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT: + return "VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT"; + case VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT: + return "VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT"; + case VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT: + return "VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT"; + case VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT: + return "VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT"; + default: + return "Unhandled VkFullScreenExclusiveEXT"; + } +} +#endif // VK_USE_PLATFORM_WIN32_KHR + +static inline const char* string_VkLineRasterizationModeEXT(VkLineRasterizationModeEXT input_value) +{ + switch ((VkLineRasterizationModeEXT)input_value) + { + case VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT: + return "VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT"; + case VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT: + return "VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT"; + case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT: + return "VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT"; + case VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT: + return "VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT"; + default: + return "Unhandled VkLineRasterizationModeEXT"; + } +} + +static inline const char* string_VkIndirectStateFlagBitsNV(VkIndirectStateFlagBitsNV input_value) +{ + switch ((VkIndirectStateFlagBitsNV)input_value) + { + case VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV: + return "VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV"; + default: + return "Unhandled VkIndirectStateFlagBitsNV"; + } +} + +static inline std::string string_VkIndirectStateFlagsNV(VkIndirectStateFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkIndirectStateFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkIndirectStateFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkIndirectCommandsTokenTypeNV(VkIndirectCommandsTokenTypeNV input_value) +{ + switch ((VkIndirectCommandsTokenTypeNV)input_value) + { + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV"; + case VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV: + return "VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV"; + default: + return "Unhandled VkIndirectCommandsTokenTypeNV"; + } +} + +static inline const char* string_VkIndirectCommandsLayoutUsageFlagBitsNV(VkIndirectCommandsLayoutUsageFlagBitsNV input_value) +{ + switch ((VkIndirectCommandsLayoutUsageFlagBitsNV)input_value) + { + case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV: + return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV"; + case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV: + return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV"; + 
case VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV: + return "VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV"; + default: + return "Unhandled VkIndirectCommandsLayoutUsageFlagBitsNV"; + } +} + +static inline std::string string_VkIndirectCommandsLayoutUsageFlagsNV(VkIndirectCommandsLayoutUsageFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkIndirectCommandsLayoutUsageFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkIndirectCommandsLayoutUsageFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkDeviceMemoryReportEventTypeEXT(VkDeviceMemoryReportEventTypeEXT input_value) +{ + switch ((VkDeviceMemoryReportEventTypeEXT)input_value) + { + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT"; + case VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT: + return "VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT"; + default: + return "Unhandled VkDeviceMemoryReportEventTypeEXT"; + } +} + +static inline const char* string_VkDeviceDiagnosticsConfigFlagBitsNV(VkDeviceDiagnosticsConfigFlagBitsNV input_value) +{ + switch ((VkDeviceDiagnosticsConfigFlagBitsNV)input_value) + { + case VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV: + return "VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV"; + case VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV: + return "VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV"; + case VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV: + return "VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV"; + default: + return "Unhandled VkDeviceDiagnosticsConfigFlagBitsNV"; + } +} + +static inline std::string string_VkDeviceDiagnosticsConfigFlagsNV(VkDeviceDiagnosticsConfigFlagsNV input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkDeviceDiagnosticsConfigFlagBitsNV(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkDeviceDiagnosticsConfigFlagBitsNV(static_cast(0))); + return ret; +} + +static inline const char* string_VkFragmentShadingRateTypeNV(VkFragmentShadingRateTypeNV input_value) +{ + switch ((VkFragmentShadingRateTypeNV)input_value) + { + case VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV: + return "VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV"; + case VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV: + return "VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV"; + default: + return "Unhandled VkFragmentShadingRateTypeNV"; + } +} + +static inline const char* string_VkFragmentShadingRateNV(VkFragmentShadingRateNV input_value) +{ + switch ((VkFragmentShadingRateNV)input_value) + { + case VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV: + return 
"VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV"; + case VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV: + return "VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV"; + case VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV: + return "VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV"; + default: + return "Unhandled VkFragmentShadingRateNV"; + } +} + +static inline const char* string_VkBuildAccelerationStructureModeKHR(VkBuildAccelerationStructureModeKHR input_value) +{ + switch ((VkBuildAccelerationStructureModeKHR)input_value) + { + case VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR"; + case VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR: + return "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR"; + default: + return "Unhandled VkBuildAccelerationStructureModeKHR"; + } +} + +static inline const char* string_VkAccelerationStructureBuildTypeKHR(VkAccelerationStructureBuildTypeKHR input_value) +{ + switch ((VkAccelerationStructureBuildTypeKHR)input_value) + { + case VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR: + return "VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR"; + case VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR: + return "VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR"; + case VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR: + return "VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR"; + default: + return "Unhandled VkAccelerationStructureBuildTypeKHR"; + } +} + +static inline const char* string_VkAccelerationStructureCreateFlagBitsKHR(VkAccelerationStructureCreateFlagBitsKHR input_value) +{ + switch ((VkAccelerationStructureCreateFlagBitsKHR)input_value) + { + case VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR: + return "VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR"; + default: + return "Unhandled VkAccelerationStructureCreateFlagBitsKHR"; + } +} + +static inline std::string string_VkAccelerationStructureCreateFlagsKHR(VkAccelerationStructureCreateFlagsKHR input_value) +{ + std::string ret; + int index = 0; + while(input_value) { + if (input_value & 1) { + if( !ret.empty()) ret.append("|"); + ret.append(string_VkAccelerationStructureCreateFlagBitsKHR(static_cast(1 << index))); + } + ++index; + input_value >>= 1; + } + if( ret.empty()) ret.append(string_VkAccelerationStructureCreateFlagBitsKHR(static_cast(0))); + return ret; +} + +static inline const char* string_VkAccelerationStructureCompatibilityKHR(VkAccelerationStructureCompatibilityKHR 
input_value) +{ + switch ((VkAccelerationStructureCompatibilityKHR)input_value) + { + case VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR: + return "VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR"; + case VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR: + return "VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR"; + default: + return "Unhandled VkAccelerationStructureCompatibilityKHR"; + } +} + +static inline const char* string_VkShaderGroupShaderKHR(VkShaderGroupShaderKHR input_value) +{ + switch ((VkShaderGroupShaderKHR)input_value) + { + case VK_SHADER_GROUP_SHADER_ANY_HIT_KHR: + return "VK_SHADER_GROUP_SHADER_ANY_HIT_KHR"; + case VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR: + return "VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR"; + case VK_SHADER_GROUP_SHADER_GENERAL_KHR: + return "VK_SHADER_GROUP_SHADER_GENERAL_KHR"; + case VK_SHADER_GROUP_SHADER_INTERSECTION_KHR: + return "VK_SHADER_GROUP_SHADER_INTERSECTION_KHR"; + default: + return "Unhandled VkShaderGroupShaderKHR"; + } +} + +static inline const char * GetPhysDevFeatureString(uint32_t index) { + const char * IndexToPhysDevFeatureString[] = { + "robustBufferAccess", + "fullDrawIndexUint32", + "imageCubeArray", + "independentBlend", + "geometryShader", + "tessellationShader", + "sampleRateShading", + "dualSrcBlend", + "logicOp", + "multiDrawIndirect", + "drawIndirectFirstInstance", + "depthClamp", + "depthBiasClamp", + "fillModeNonSolid", + "depthBounds", + "wideLines", + "largePoints", + "alphaToOne", + "multiViewport", + "samplerAnisotropy", + "textureCompressionETC2", + "textureCompressionASTC_LDR", + "textureCompressionBC", + "occlusionQueryPrecise", + "pipelineStatisticsQuery", + "vertexPipelineStoresAndAtomics", + "fragmentStoresAndAtomics", + "shaderTessellationAndGeometryPointSize", + "shaderImageGatherExtended", + "shaderStorageImageExtendedFormats", + "shaderStorageImageMultisample", + "shaderStorageImageReadWithoutFormat", + "shaderStorageImageWriteWithoutFormat", + "shaderUniformBufferArrayDynamicIndexing", + "shaderSampledImageArrayDynamicIndexing", + "shaderStorageBufferArrayDynamicIndexing", + "shaderStorageImageArrayDynamicIndexing", + "shaderClipDistance", + "shaderCullDistance", + "shaderFloat64", + "shaderInt64", + "shaderInt16", + "shaderResourceResidency", + "shaderResourceMinLod", + "sparseBinding", + "sparseResidencyBuffer", + "sparseResidencyImage2D", + "sparseResidencyImage3D", + "sparseResidency2Samples", + "sparseResidency4Samples", + "sparseResidency8Samples", + "sparseResidency16Samples", + "sparseResidencyAliased", + "variableMultisampleRate", + "inheritedQueries", + }; + + return IndexToPhysDevFeatureString[index]; +} diff --git a/src/third_party/vulkan/vk_icd.h b/src/third_party/vulkan/vk_icd.h new file mode 100644 index 0000000..5e29ef5 --- /dev/null +++ b/src/third_party/vulkan/vk_icd.h @@ -0,0 +1,236 @@ +// +// File: vk_icd.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
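An illustrative sketch (not part of the patch) of how the GetPhysDevFeatureString helper added just above is typically used: VkPhysicalDeviceFeatures is a flat sequence of VkBool32 members, so the queried features can be walked as an array and each enabled flag printed under the member name the helper returns. The function name and the phys_dev parameter are assumptions for the example.

    #include <cstdio>

    static void PrintSupportedFeatures(VkPhysicalDevice phys_dev) {
        VkPhysicalDeviceFeatures features{};
        vkGetPhysicalDeviceFeatures(phys_dev, &features);
        // The struct is a run of consecutive VkBool32 fields, matching the helper's name table.
        const VkBool32* flags = reinterpret_cast<const VkBool32*>(&features);
        const uint32_t count = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
        for (uint32_t i = 0; i < count; ++i) {
            if (flags[i] == VK_TRUE)
                printf("%s\n", GetPhysDevFeatureString(i));  // e.g. "samplerAnisotropy"
        }
    }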
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef VKICD_H +#define VKICD_H + +#include "vulkan.h" +#include + +// Loader-ICD version negotiation API. Versions add the following features: +// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr +// or vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 1 - Add support for vk_icdGetInstanceProcAddr. +// Version 2 - Add Loader/ICD Interface version negotiation +// via vk_icdNegotiateLoaderICDInterfaceVersion. +// Version 3 - Add ICD creation/destruction of KHR_surface objects. +// Version 4 - Add unknown physical device extension qyering via +// vk_icdGetPhysicalDeviceProcAddr. +// Version 5 - Tells ICDs that the loader is now paying attention to the +// application version of Vulkan passed into the ApplicationInfo +// structure during vkCreateInstance. This will tell the ICD +// that if the loader is older, it should automatically fail a +// call for any API version > 1.0. Otherwise, the loader will +// manually determine if it can support the expected version. +// Version 6 - Add support for vk_icdEnumerateAdapterPhysicalDevices. +#define CURRENT_LOADER_ICD_INTERFACE_VERSION 6 +#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0 +#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4 + +// Old typedefs that don't follow a proper naming convention but are preserved for compatibility +typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion); +// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this +// file directly, it won't be found. +#ifndef PFN_GetPhysicalDeviceProcAddr +typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName); +#endif + +// Typedefs for loader/ICD interface +typedef VkResult (VKAPI_PTR *PFN_vk_icdNegotiateLoaderICDInterfaceVersion)(uint32_t* pVersion); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vk_icdGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vk_icdGetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName); +#if defined(VK_USE_PLATFORM_WIN32_KHR) +typedef VkResult (VKAPI_PTR *PFN_vk_icdEnumerateAdapterPhysicalDevices)(VkInstance instance, LUID adapterLUID, + uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +#endif + +// Prototypes for loader/ICD interface +#if !defined(VK_NO_PROTOTYPES) +#ifdef __cplusplus +extern "C" { +#endif + VKAPI_ATTR VkResult VKAPI_CALL vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pVersion); + VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(VkInstance instance, const char* pName); + VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(VkInstance isntance, const char* pName); +#if defined(VK_USE_PLATFORM_WIN32_KHR) + VKAPI_ATTR VkResult VKAPI_CALL vk_icdEnumerateAdapterPhysicalDevices(VkInstance instance, LUID adapterLUID, + uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +#endif +#ifdef __cplusplus +} +#endif +#endif + +/* + * The ICD must reserve space for a pointer for the loader's dispatch + * table, at the start of . + * The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro. 
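A minimal sketch (not part of the patch) of the ICD side of the negotiation entry point declared above: the loader passes the highest interface version it supports in *pVersion, and the driver lowers it to the highest version both sides can use. The version number 5 is a hypothetical value for the example.

    VKAPI_ATTR VkResult VKAPI_CALL
    vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t* pVersion) {
        const uint32_t icd_version = 5;  // hypothetical: this driver implements version 5
        if (*pVersion > icd_version)
            *pVersion = icd_version;     // lower to a mutually supported version
        return VK_SUCCESS;
    }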
+ */ + +#define ICD_LOADER_MAGIC 0x01CDC0DE + +typedef union { + uintptr_t loaderMagic; + void *loaderData; +} VK_LOADER_DATA; + +static inline void set_loader_magic_value(void *pNewObject) { + VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + loader_info->loaderMagic = ICD_LOADER_MAGIC; +} + +static inline bool valid_loader_magic_value(void *pNewObject) { + const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject; + return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC; +} + +/* + * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that + * contains the platform-specific connection and surface information. + */ +typedef enum { + VK_ICD_WSI_PLATFORM_MIR, + VK_ICD_WSI_PLATFORM_WAYLAND, + VK_ICD_WSI_PLATFORM_WIN32, + VK_ICD_WSI_PLATFORM_XCB, + VK_ICD_WSI_PLATFORM_XLIB, + VK_ICD_WSI_PLATFORM_ANDROID, + VK_ICD_WSI_PLATFORM_MACOS, + VK_ICD_WSI_PLATFORM_IOS, + VK_ICD_WSI_PLATFORM_DISPLAY, + VK_ICD_WSI_PLATFORM_HEADLESS, + VK_ICD_WSI_PLATFORM_METAL, + VK_ICD_WSI_PLATFORM_DIRECTFB, + VK_ICD_WSI_PLATFORM_VI, + VK_ICD_WSI_PLATFORM_GGP, +} VkIcdWsiPlatform; + +typedef struct { + VkIcdWsiPlatform platform; +} VkIcdSurfaceBase; + +#ifdef VK_USE_PLATFORM_MIR_KHR +typedef struct { + VkIcdSurfaceBase base; + MirConnection *connection; + MirSurface *mirSurface; +} VkIcdSurfaceMir; +#endif // VK_USE_PLATFORM_MIR_KHR + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +typedef struct { + VkIcdSurfaceBase base; + struct wl_display *display; + struct wl_surface *surface; +} VkIcdSurfaceWayland; +#endif // VK_USE_PLATFORM_WAYLAND_KHR + +#ifdef VK_USE_PLATFORM_WIN32_KHR +typedef struct { + VkIcdSurfaceBase base; + HINSTANCE hinstance; + HWND hwnd; +} VkIcdSurfaceWin32; +#endif // VK_USE_PLATFORM_WIN32_KHR + +#ifdef VK_USE_PLATFORM_XCB_KHR +typedef struct { + VkIcdSurfaceBase base; + xcb_connection_t *connection; + xcb_window_t window; +} VkIcdSurfaceXcb; +#endif // VK_USE_PLATFORM_XCB_KHR + +#ifdef VK_USE_PLATFORM_XLIB_KHR +typedef struct { + VkIcdSurfaceBase base; + Display *dpy; + Window window; +} VkIcdSurfaceXlib; +#endif // VK_USE_PLATFORM_XLIB_KHR + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT +typedef struct { + VkIcdSurfaceBase base; + IDirectFB *dfb; + IDirectFBSurface *surface; +} VkIcdSurfaceDirectFB; +#endif // VK_USE_PLATFORM_DIRECTFB_EXT + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +typedef struct { + VkIcdSurfaceBase base; + struct ANativeWindow *window; +} VkIcdSurfaceAndroid; +#endif // VK_USE_PLATFORM_ANDROID_KHR + +#ifdef VK_USE_PLATFORM_MACOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceMacOS; +#endif // VK_USE_PLATFORM_MACOS_MVK + +#ifdef VK_USE_PLATFORM_IOS_MVK +typedef struct { + VkIcdSurfaceBase base; + const void *pView; +} VkIcdSurfaceIOS; +#endif // VK_USE_PLATFORM_IOS_MVK + +#ifdef VK_USE_PLATFORM_GGP +typedef struct { + VkIcdSurfaceBase base; + GgpStreamDescriptor streamDescriptor; +} VkIcdSurfaceGgp; +#endif // VK_USE_PLATFORM_GGP + +typedef struct { + VkIcdSurfaceBase base; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkIcdSurfaceDisplay; + +typedef struct { + VkIcdSurfaceBase base; +} VkIcdSurfaceHeadless; + +#ifdef VK_USE_PLATFORM_METAL_EXT +typedef struct { + VkIcdSurfaceBase base; + const CAMetalLayer *pLayer; +} VkIcdSurfaceMetal; +#endif // VK_USE_PLATFORM_METAL_EXT + +#ifdef VK_USE_PLATFORM_VI_NN +typedef struct { + VkIcdSurfaceBase base; + 
void *window; +} VkIcdSurfaceVi; +#endif // VK_USE_PLATFORM_VI_NN + +#endif // VKICD_H diff --git a/src/third_party/vulkan/vk_layer.h b/src/third_party/vulkan/vk_layer.h new file mode 100644 index 0000000..0651870 --- /dev/null +++ b/src/third_party/vulkan/vk_layer.h @@ -0,0 +1,210 @@ +// +// File: vk_layer.h +// +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Need to define dispatch table + * Core struct can then have ptr to dispatch table at the top + * Along with object ptrs for current and next OBJ + */ +#pragma once + +#include "vulkan.h" +#if defined(__GNUC__) && __GNUC__ >= 4 +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#else +#define VK_LAYER_EXPORT +#endif + +#define MAX_NUM_UNKNOWN_EXTS 250 + + // Loader-Layer version negotiation API. Versions add the following features: + // Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr + // or vk_icdNegotiateLoaderLayerInterfaceVersion. + // Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and + // vk_icdNegotiateLoaderLayerInterfaceVersion. +#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2 +#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1 + +#define VK_CURRENT_CHAIN_VERSION 1 + +// Typedef for use in the interfaces below +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName); + +// Version negotiation values +typedef enum VkNegotiateLayerStructType { + LAYER_NEGOTIATE_UNINTIALIZED = 0, + LAYER_NEGOTIATE_INTERFACE_STRUCT = 1, +} VkNegotiateLayerStructType; + +// Version negotiation structures +typedef struct VkNegotiateLayerInterface { + VkNegotiateLayerStructType sType; + void *pNext; + uint32_t loaderLayerInterfaceVersion; + PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr; +} VkNegotiateLayerInterface; + +// Version negotiation functions +typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct); + +// Function prototype for unknown physical device extension command +typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device); + +// ------------------------------------------------------------------------------------------------ +// CreateInstance and CreateDevice support structures + +/* Sub type of structure for instance and device loader ext of CreateInfo. 
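A minimal sketch (not part of the patch) of how a layer consumes the create-info chain described in this comment and defined just below: during vkCreateInstance the layer locates the VK_LAYER_LINK_INFO node in pCreateInfo->pNext, saves the next GetInstanceProcAddr, and advances pLayerInfo so the next layer down sees its own link. The helper and variable names are illustrative.

    static VkLayerInstanceCreateInfo* FindInstanceLinkInfo(const VkInstanceCreateInfo* pCreateInfo) {
        VkLayerInstanceCreateInfo* chain = (VkLayerInstanceCreateInfo*)pCreateInfo->pNext;
        while (chain != NULL &&
               !(chain->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO &&
                 chain->function == VK_LAYER_LINK_INFO)) {
            chain = (VkLayerInstanceCreateInfo*)chain->pNext;
        }
        return chain;
    }

    // Inside the layer's vkCreateInstance:
    //   VkLayerInstanceCreateInfo* link = FindInstanceLinkInfo(pCreateInfo);
    //   PFN_vkGetInstanceProcAddr next_gipa = link->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    //   link->u.pLayerInfo = link->u.pLayerInfo->pNext;  // advance the chain for the next layer
    //   PFN_vkCreateInstance next_create = (PFN_vkCreateInstance)next_gipa(NULL, "vkCreateInstance");
    //   VkResult result = next_create(pCreateInfo, pAllocator, pInstance);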
+ * When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + * or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + * then VkLayerFunction indicates struct type pointed to by pNext + */ +typedef enum VkLayerFunction_ { + VK_LAYER_LINK_INFO = 0, + VK_LOADER_DATA_CALLBACK = 1, + VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK = 2, + VK_LOADER_FEATURES = 3, +} VkLayerFunction; + +typedef struct VkLayerInstanceLink_ { + struct VkLayerInstanceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr; +} VkLayerInstanceLink; + +/* + * When creating the device chain the loader needs to pass + * down information about it's device structure needed at + * the end of the chain. Passing the data via the + * VkLayerDeviceInfo avoids issues with finding the + * exact instance being used. + */ +typedef struct VkLayerDeviceInfo_ { + void *device_info; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; +} VkLayerDeviceInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkLayerCreateDevice)(VkInstance instance, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA); +typedef void (VKAPI_PTR *PFN_vkLayerDestroyDevice)(VkDevice physicalDevice, const VkAllocationCallbacks *pAllocator, PFN_vkDestroyDevice destroyFunction); + +typedef enum VkLoaderFeastureFlagBits { + VK_LOADER_FEATURE_PHYSICAL_DEVICE_SORTING = 0x00000001, +} VkLoaderFlagBits; +typedef VkFlags VkLoaderFeatureFlags; + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerInstanceLink *pLayerInfo; + PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData; + struct { + PFN_vkLayerCreateDevice pfnLayerCreateDevice; + PFN_vkLayerDestroyDevice pfnLayerDestroyDevice; + } layerDevice; + VkLoaderFeatureFlags loaderFeatures; + } u; +} VkLayerInstanceCreateInfo; + +typedef struct VkLayerDeviceLink_ { + struct VkLayerDeviceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr; +} VkLayerDeviceLink; + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerDeviceLink *pLayerInfo; + PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData; + } u; +} VkLayerDeviceCreateInfo; + +#ifdef __cplusplus +extern "C" { +#endif + +VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct); + +typedef enum VkChainType { + VK_CHAIN_TYPE_UNKNOWN = 0, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3, +} VkChainType; + +typedef struct VkChainHeader { + VkChainType type; + uint32_t version; + uint32_t size; +} VkChainHeader; + +typedef struct VkEnumerateInstanceExtensionPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *, + VkExtensionProperties *); + const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink; + +#if 
defined(__cplusplus) + inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const { + return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceExtensionPropertiesChain; + +typedef struct VkEnumerateInstanceLayerPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *); + const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const { + return pfnNextLayer(pNextLink, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceLayerPropertiesChain; + +typedef struct VkEnumerateInstanceVersionChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *); + const struct VkEnumerateInstanceVersionChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pApiVersion) const { + return pfnNextLayer(pNextLink, pApiVersion); + } +#endif +} VkEnumerateInstanceVersionChain; + +#ifdef __cplusplus +} +#endif diff --git a/src/third_party/vulkan/vk_platform.h b/src/third_party/vulkan/vk_platform.h new file mode 100644 index 0000000..048322d --- /dev/null +++ b/src/third_party/vulkan/vk_platform.h @@ -0,0 +1,82 @@ +// +// File: vk_platform.h +// +/* +** Copyright (c) 2014-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. 
+ #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#include + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif diff --git a/src/third_party/vulkan/vk_sdk_platform.h b/src/third_party/vulkan/vk_sdk_platform.h new file mode 100644 index 0000000..96d8676 --- /dev/null +++ b/src/third_party/vulkan/vk_sdk_platform.h @@ -0,0 +1,69 @@ +// +// File: vk_sdk_platform.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VK_SDK_PLATFORM_H +#define VK_SDK_PLATFORM_H + +#if defined(_WIN32) +#define NOMINMAX +#ifndef __cplusplus +#undef inline +#define inline __inline +#endif // __cplusplus + +#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) +// C99: +// Microsoft didn't implement C99 in Visual Studio; but started adding it with +// VS2013. However, VS2013 still didn't have snprintf(). The following is a +// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the +// "CMakeLists.txt" file). +// NOTE: This is fixed in Visual Studio 2015. +#define snprintf _snprintf +#endif + +#define strdup _strdup + +#endif // _WIN32 + +// Check for noexcept support using clang, with fallback to Windows or GCC version numbers +#ifndef NOEXCEPT +#if defined(__clang__) +#if __has_feature(cxx_noexcept) +#define HAS_NOEXCEPT +#endif +#else +#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 +#define HAS_NOEXCEPT +#else +#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS +#define HAS_NOEXCEPT +#endif +#endif +#endif + +#ifdef HAS_NOEXCEPT +#define NOEXCEPT noexcept +#else +#define NOEXCEPT +#endif +#endif + +#endif // VK_SDK_PLATFORM_H diff --git a/src/third_party/vulkan/vulkan.h b/src/third_party/vulkan/vulkan.h new file mode 100644 index 0000000..b7716ec --- /dev/null +++ b/src/third_party/vulkan/vulkan.h @@ -0,0 +1,87 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. 
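An illustrative note (not part of the patch): vulkan.h only declares a platform's WSI entry points when the matching VK_USE_PLATFORM_* macro is defined before inclusion, as the block of conditional includes just below shows. For example, an xcb-based application would build with:

    #define VK_USE_PLATFORM_XCB_KHR
    #include "vulkan.h"   // now also pulls in vulkan_xcb.h, declaring vkCreateXcbSurfaceKHR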
+** +** SPDX-License-Identifier: Apache-2.0 +*/ + +#include "vk_platform.h" +#include "vulkan_core.h" + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "vulkan_android.h" +#endif + +#ifdef VK_USE_PLATFORM_FUCHSIA +#include +#include "vulkan_fuchsia.h" +#endif + +#ifdef VK_USE_PLATFORM_IOS_MVK +#include "vulkan_ios.h" +#endif + + +#ifdef VK_USE_PLATFORM_MACOS_MVK +#include "vulkan_macos.h" +#endif + +#ifdef VK_USE_PLATFORM_METAL_EXT +#include "vulkan_metal.h" +#endif + +#ifdef VK_USE_PLATFORM_VI_NN +#include "vulkan_vi.h" +#endif + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include +#include "vulkan_wayland.h" +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include +#include "vulkan_win32.h" +#endif + + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include +#include "vulkan_xcb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#include +#include "vulkan_xlib.h" +#endif + + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT +#include +#include "vulkan_directfb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +#include +#include +#include "vulkan_xlib_xrandr.h" +#endif + + +#ifdef VK_USE_PLATFORM_GGP +#include +#include "vulkan_ggp.h" +#endif + + +#ifdef VK_ENABLE_BETA_EXTENSIONS +#include "vulkan_beta.h" +#endif + +#endif // VULKAN_H_ diff --git a/src/third_party/vulkan/vulkan.hpp b/src/third_party/vulkan/vulkan.hpp new file mode 100644 index 0000000..3b5e7c0 --- /dev/null +++ b/src/third_party/vulkan/vulkan.hpp @@ -0,0 +1,94292 @@ +// Copyright (c) 2015-2020 The Khronos Group Inc. +// +// SPDX-License-Identifier: Apache-2.0 OR MIT +// + +// This header is generated from the Khronos Vulkan XML API Registry. + +#ifndef VULKAN_HPP +#define VULKAN_HPP + +#if defined( _MSVC_LANG ) +# define VULKAN_HPP_CPLUSPLUS _MSVC_LANG +#else +# define VULKAN_HPP_CPLUSPLUS __cplusplus +#endif + +#if 201703L < VULKAN_HPP_CPLUSPLUS +# define VULKAN_HPP_CPP_VERSION 20 +#elif 201402L < VULKAN_HPP_CPLUSPLUS +# define VULKAN_HPP_CPP_VERSION 17 +#elif 201103L < VULKAN_HPP_CPLUSPLUS +# define VULKAN_HPP_CPP_VERSION 14 +#elif 199711L < VULKAN_HPP_CPLUSPLUS +# define VULKAN_HPP_CPP_VERSION 11 +#else +# error "vulkan.hpp needs at least c++ standard version 11" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../vulkan.h" + +#if 17 <= VULKAN_HPP_CPP_VERSION +#include +#endif + +#if defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +# if !defined(VULKAN_HPP_NO_SMART_HANDLE) +# define VULKAN_HPP_NO_SMART_HANDLE +# endif +#else +# include +# include +#endif + +#if !defined(VULKAN_HPP_ASSERT) +# include +# define VULKAN_HPP_ASSERT assert +#endif + +#if !defined(VULKAN_HPP_ASSERT_ON_RESULT) +# define VULKAN_HPP_ASSERT_ON_RESULT VULKAN_HPP_ASSERT +#endif + +#if !defined(VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL) +# define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 1 +#endif + +#if VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL == 1 +# if defined( __linux__ ) || defined( __APPLE__ ) +# include +# elif defined( _WIN32 ) +typedef struct HINSTANCE__ * HINSTANCE; +# if defined( _WIN64 ) +typedef int64_t( __stdcall * FARPROC )(); +# else +typedef int( __stdcall * FARPROC )(); +# endif +extern "C" __declspec( dllimport ) HINSTANCE __stdcall LoadLibraryA( char const * lpLibFileName ); +extern "C" __declspec( dllimport ) int __stdcall FreeLibrary( HINSTANCE hLibModule ); +extern "C" __declspec( dllimport ) FARPROC __stdcall GetProcAddress( HINSTANCE hModule, const char * lpProcName ); +# endif +#endif + +#if !defined(__has_include) +# define __has_include(x) false +#endif + +#if ( 201711 <= 
__cpp_impl_three_way_comparison ) && __has_include( ) && !defined( VULKAN_HPP_NO_SPACESHIP_OPERATOR ) +# define VULKAN_HPP_HAS_SPACESHIP_OPERATOR +#endif +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) +# include +#endif + + +static_assert( VK_HEADER_VERSION == 162 , "Wrong VK_HEADER_VERSION!" ); + +// 32-bit vulkan is not typesafe for handles, so don't allow copy constructors on this platform by default. +// To enable this feature on 32-bit platforms please define VULKAN_HPP_TYPESAFE_CONVERSION +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) +# if !defined( VULKAN_HPP_TYPESAFE_CONVERSION ) +# define VULKAN_HPP_TYPESAFE_CONVERSION +# endif +#endif + +// includes through some other header +// this results in major(x) being resolved to gnu_dev_major(x) +// which is an expression in a constructor initializer list. +#if defined(major) + #undef major +#endif +#if defined(minor) + #undef minor +#endif + +// Windows defines MemoryBarrier which is deprecated and collides +// with the VULKAN_HPP_NAMESPACE::MemoryBarrier struct. +#if defined(MemoryBarrier) + #undef MemoryBarrier +#endif + +#if !defined(VULKAN_HPP_HAS_UNRESTRICTED_UNIONS) +# if defined(__clang__) +# if __has_feature(cxx_unrestricted_unions) +# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS +# endif +# elif defined(__GNUC__) +# define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +# if 40600 <= GCC_VERSION +# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS +# endif +# elif defined(_MSC_VER) +# if 1900 <= _MSC_VER +# define VULKAN_HPP_HAS_UNRESTRICTED_UNIONS +# endif +# endif +#endif + +#if !defined(VULKAN_HPP_INLINE) +# if defined(__clang__) +# if __has_attribute(always_inline) +# define VULKAN_HPP_INLINE __attribute__((always_inline)) __inline__ +# else +# define VULKAN_HPP_INLINE inline +# endif +# elif defined(__GNUC__) +# define VULKAN_HPP_INLINE __attribute__((always_inline)) __inline__ +# elif defined(_MSC_VER) +# define VULKAN_HPP_INLINE inline +# else +# define VULKAN_HPP_INLINE inline +# endif +#endif + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) +# define VULKAN_HPP_TYPESAFE_EXPLICIT +#else +# define VULKAN_HPP_TYPESAFE_EXPLICIT explicit +#endif + +#if defined(__cpp_constexpr) +# define VULKAN_HPP_CONSTEXPR constexpr +# if __cpp_constexpr >= 201304 +# define VULKAN_HPP_CONSTEXPR_14 constexpr +# else +# define VULKAN_HPP_CONSTEXPR_14 +# endif +# define VULKAN_HPP_CONST_OR_CONSTEXPR constexpr +#else +# define VULKAN_HPP_CONSTEXPR +# define VULKAN_HPP_CONSTEXPR_14 +# define VULKAN_HPP_CONST_OR_CONSTEXPR const +#endif + +#if !defined(VULKAN_HPP_NOEXCEPT) +# if defined(_MSC_VER) && (_MSC_VER <= 1800) +# define VULKAN_HPP_NOEXCEPT +# else +# define VULKAN_HPP_NOEXCEPT noexcept +# define VULKAN_HPP_HAS_NOEXCEPT 1 +# if defined(VULKAN_HPP_NO_EXCEPTIONS) +# define VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS noexcept +# else +# define VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS +# endif +# endif +#endif + +#if 14 <= VULKAN_HPP_CPP_VERSION +# define VULKAN_HPP_DEPRECATED( msg ) [[deprecated( msg )]] +#else +# define VULKAN_HPP_DEPRECATED( msg ) +#endif + +#if ( 17 <= VULKAN_HPP_CPP_VERSION ) && !defined( VULKAN_HPP_NO_NODISCARD_WARNINGS ) +# define VULKAN_HPP_NODISCARD [[nodiscard]] +# if defined(VULKAN_HPP_NO_EXCEPTIONS) +# define VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS [[nodiscard]] +# else +# define VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS +# endif +#else +# define 
VULKAN_HPP_NODISCARD +# define VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS +#endif + +#if !defined(VULKAN_HPP_NAMESPACE) +#define VULKAN_HPP_NAMESPACE vk +#endif + +#define VULKAN_HPP_STRINGIFY2(text) #text +#define VULKAN_HPP_STRINGIFY(text) VULKAN_HPP_STRINGIFY2(text) +#define VULKAN_HPP_NAMESPACE_STRING VULKAN_HPP_STRINGIFY(VULKAN_HPP_NAMESPACE) + +namespace VULKAN_HPP_NAMESPACE +{ + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + class ArrayProxy + { + public: + VULKAN_HPP_CONSTEXPR ArrayProxy() VULKAN_HPP_NOEXCEPT + : m_count( 0 ) + , m_ptr( nullptr ) + {} + + VULKAN_HPP_CONSTEXPR ArrayProxy( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_count( 0 ) + , m_ptr( nullptr ) + {} + + ArrayProxy( T & value ) VULKAN_HPP_NOEXCEPT + : m_count( 1 ) + , m_ptr( &value ) + {} + + template ::value, int>::type = 0> + ArrayProxy( typename std::remove_const::type & value ) VULKAN_HPP_NOEXCEPT + : m_count( 1 ) + , m_ptr( &value ) + {} + + ArrayProxy( uint32_t count, T * ptr ) VULKAN_HPP_NOEXCEPT + : m_count( count ) + , m_ptr( ptr ) + {} + + template ::value, int>::type = 0> + ArrayProxy( uint32_t count, typename std::remove_const::type * ptr ) VULKAN_HPP_NOEXCEPT + : m_count( count ) + , m_ptr( ptr ) + {} + + ArrayProxy( std::initializer_list const & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + template ::value, int>::type = 0> + ArrayProxy( std::initializer_list::type> const & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + ArrayProxy( std::initializer_list & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + template ::value, int>::type = 0> + ArrayProxy( std::initializer_list::type> & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + template + ArrayProxy( std::array const & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template ::value, int>::type = 0> + ArrayProxy( std::array::type, N> const & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template + ArrayProxy( std::array & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template ::value, int>::type = 0> + ArrayProxy( std::array::type, N> & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template ::type>> + ArrayProxy( std::vector const & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + template ::type>, + typename B = T, + typename std::enable_if::value, int>::type = 0> + ArrayProxy( std::vector::type, Allocator> const & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + template ::type>> + ArrayProxy( std::vector & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + template ::type>, + typename B = T, + typename std::enable_if::value, int>::type = 0> + ArrayProxy( std::vector::type, Allocator> & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + const T * begin() const VULKAN_HPP_NOEXCEPT + { + return m_ptr; + } + + const T * end() const VULKAN_HPP_NOEXCEPT + { + return m_ptr + m_count; + } + + const T & front() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_count && m_ptr ); + return *m_ptr; + } + + const T & back() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_count && m_ptr ); + 
return *( m_ptr + m_count - 1 ); + } + + bool empty() const VULKAN_HPP_NOEXCEPT + { + return ( m_count == 0 ); + } + + uint32_t size() const VULKAN_HPP_NOEXCEPT + { + return m_count; + } + + T * data() const VULKAN_HPP_NOEXCEPT + { + return m_ptr; + } + + private: + uint32_t m_count; + T * m_ptr; + }; + + template + class ArrayProxyNoTemporaries + { + public: + VULKAN_HPP_CONSTEXPR ArrayProxyNoTemporaries() VULKAN_HPP_NOEXCEPT + : m_count( 0 ) + , m_ptr( nullptr ) + {} + + VULKAN_HPP_CONSTEXPR ArrayProxyNoTemporaries( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_count( 0 ) + , m_ptr( nullptr ) + {} + + template ::value, int>::type = 0> + ArrayProxyNoTemporaries( typename std::remove_const::type & value ) VULKAN_HPP_NOEXCEPT + : m_count( 1 ) + , m_ptr( &value ) + {} + + ArrayProxyNoTemporaries( uint32_t count, T * ptr ) VULKAN_HPP_NOEXCEPT + : m_count( count ) + , m_ptr( ptr ) + {} + + template ::value, int>::type = 0> + ArrayProxyNoTemporaries( uint32_t count, typename std::remove_const::type * ptr ) VULKAN_HPP_NOEXCEPT + : m_count( count ) + , m_ptr( ptr ) + {} + + ArrayProxyNoTemporaries( std::initializer_list const & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + template ::value, int>::type = 0> + ArrayProxyNoTemporaries( std::initializer_list::type> const & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + ArrayProxyNoTemporaries( std::initializer_list & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + template ::value, int>::type = 0> + ArrayProxyNoTemporaries( std::initializer_list::type> & list ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( list.size() ) ) + , m_ptr( list.begin() ) + {} + + ArrayProxyNoTemporaries( std::initializer_list const && list ) VULKAN_HPP_NOEXCEPT = delete; + ArrayProxyNoTemporaries( std::initializer_list && list ) VULKAN_HPP_NOEXCEPT = delete; + + template + ArrayProxyNoTemporaries( std::array const & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template ::value, int>::type = 0> + ArrayProxyNoTemporaries( std::array::type, N> const & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template + ArrayProxyNoTemporaries( std::array & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template ::value, int>::type = 0> + ArrayProxyNoTemporaries( std::array::type, N> & data ) VULKAN_HPP_NOEXCEPT + : m_count( N ) + , m_ptr( data.data() ) + {} + + template + ArrayProxyNoTemporaries( std::array const && data ) VULKAN_HPP_NOEXCEPT = delete; + template + ArrayProxyNoTemporaries( std::array && data ) VULKAN_HPP_NOEXCEPT = delete; + + template ::type>> + ArrayProxyNoTemporaries( std::vector const & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + template ::type>, + typename B = T, + typename std::enable_if::value, int>::type = 0> + ArrayProxyNoTemporaries( std::vector::type, Allocator> const & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + template ::type>> + ArrayProxyNoTemporaries( std::vector & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , m_ptr( data.data() ) + {} + + template ::type>, + typename B = T, + typename std::enable_if::value, int>::type = 0> + ArrayProxyNoTemporaries( std::vector::type, Allocator> & data ) VULKAN_HPP_NOEXCEPT + : m_count( static_cast( data.size() ) ) + , 
m_ptr( data.data() ) + {} + + ArrayProxyNoTemporaries( std::vector const && data ) VULKAN_HPP_NOEXCEPT = delete; + ArrayProxyNoTemporaries( std::vector && data ) VULKAN_HPP_NOEXCEPT = delete; + + const T * begin() const VULKAN_HPP_NOEXCEPT + { + return m_ptr; + } + + const T * end() const VULKAN_HPP_NOEXCEPT + { + return m_ptr + m_count; + } + + const T & front() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_count && m_ptr ); + return *m_ptr; + } + + const T & back() const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_ASSERT( m_count && m_ptr ); + return *( m_ptr + m_count - 1 ); + } + + bool empty() const VULKAN_HPP_NOEXCEPT + { + return ( m_count == 0 ); + } + + uint32_t size() const VULKAN_HPP_NOEXCEPT + { + return m_count; + } + + T * data() const VULKAN_HPP_NOEXCEPT + { + return m_ptr; + } + + private: + uint32_t m_count; + T * m_ptr; + }; +#endif + + template + class ArrayWrapper1D : public std::array + { + public: + VULKAN_HPP_CONSTEXPR ArrayWrapper1D() VULKAN_HPP_NOEXCEPT + : std::array() + {} + + VULKAN_HPP_CONSTEXPR ArrayWrapper1D(std::array const& data) VULKAN_HPP_NOEXCEPT + : std::array(data) + {} + +#if defined(_WIN32) && !defined(_WIN64) + VULKAN_HPP_CONSTEXPR T const& operator[](int index) const VULKAN_HPP_NOEXCEPT + { + return std::array::operator[](index); + } + + T & operator[](int index) VULKAN_HPP_NOEXCEPT + { + return std::array::operator[](index); + } +#endif + + operator T const* () const VULKAN_HPP_NOEXCEPT + { + return this->data(); + } + + operator T * () VULKAN_HPP_NOEXCEPT + { + return this->data(); + } + + template ::value, int>::type = 0> + operator std::string() const + { + return std::string( this->data() ); + } + +#if 17 <= VULKAN_HPP_CPP_VERSION + template ::value, int>::type = 0> + operator std::string_view() const + { + return std::string_view( this->data() ); + } +#endif + + template ::value, int>::type = 0> + bool operator<( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return *static_cast const *>( this ) < *static_cast const *>( &rhs ); + } + + template ::value, int>::type = 0> + bool operator<=( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return *static_cast const *>( this ) <= *static_cast const *>( &rhs ); + } + + template ::value, int>::type = 0> + bool operator>( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return *static_cast const *>( this ) > *static_cast const *>( &rhs ); + } + + template ::value, int>::type = 0> + bool operator>=( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return *static_cast const *>( this ) >= *static_cast const *>( &rhs ); + } + + template ::value, int>::type = 0> + bool operator==( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return *static_cast const *>( this ) == *static_cast const *>( &rhs ); + } + + template ::value, int>::type = 0> + bool operator!=( ArrayWrapper1D const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return *static_cast const *>( this ) != *static_cast const *>( &rhs ); + } + }; + + // specialization of relational operators between std::string and arrays of chars + template + bool operator<(std::string const& lhs, ArrayWrapper1D const& rhs) VULKAN_HPP_NOEXCEPT + { + return lhs < rhs.data(); + } + + template + bool operator<=(std::string const& lhs, ArrayWrapper1D const& rhs) VULKAN_HPP_NOEXCEPT + { + return lhs <= rhs.data(); + } + + template + bool operator>(std::string const& lhs, ArrayWrapper1D const& rhs) VULKAN_HPP_NOEXCEPT + { + return lhs > rhs.data(); + } + + template + bool operator>=(std::string const& lhs, 
ArrayWrapper1D const& rhs) VULKAN_HPP_NOEXCEPT + { + return lhs >= rhs.data(); + } + + template + bool operator==(std::string const& lhs, ArrayWrapper1D const& rhs) VULKAN_HPP_NOEXCEPT + { + return lhs == rhs.data(); + } + + template + bool operator!=(std::string const& lhs, ArrayWrapper1D const& rhs) VULKAN_HPP_NOEXCEPT + { + return lhs != rhs.data(); + } + + template + class ArrayWrapper2D : public std::array,N> + { + public: + VULKAN_HPP_CONSTEXPR ArrayWrapper2D() VULKAN_HPP_NOEXCEPT + : std::array, N>() + {} + + VULKAN_HPP_CONSTEXPR ArrayWrapper2D(std::array,N> const& data) VULKAN_HPP_NOEXCEPT + : std::array, N>(*reinterpret_cast,N> const*>(&data)) + {} + }; + + template struct FlagTraits + { + enum { allFlags = 0 }; + }; + + template + class Flags + { + public: + using MaskType = typename std::underlying_type::type; + + // constructors + VULKAN_HPP_CONSTEXPR Flags() VULKAN_HPP_NOEXCEPT + : m_mask(0) + {} + + VULKAN_HPP_CONSTEXPR Flags(BitType bit) VULKAN_HPP_NOEXCEPT + : m_mask(static_cast(bit)) + {} + + VULKAN_HPP_CONSTEXPR Flags(Flags const& rhs) VULKAN_HPP_NOEXCEPT = default; + + VULKAN_HPP_CONSTEXPR explicit Flags(MaskType flags) VULKAN_HPP_NOEXCEPT + : m_mask(flags) + {} + + // relational operators +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>(Flags const&) const = default; +#else + VULKAN_HPP_CONSTEXPR bool operator<(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return m_mask < rhs.m_mask; + } + + VULKAN_HPP_CONSTEXPR bool operator<=(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return m_mask <= rhs.m_mask; + } + + VULKAN_HPP_CONSTEXPR bool operator>(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return m_mask > rhs.m_mask; + } + + VULKAN_HPP_CONSTEXPR bool operator>=(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return m_mask >= rhs.m_mask; + } + + VULKAN_HPP_CONSTEXPR bool operator==(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return m_mask == rhs.m_mask; + } + + VULKAN_HPP_CONSTEXPR bool operator!=(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return m_mask != rhs.m_mask; + } +#endif + + // logical operator + VULKAN_HPP_CONSTEXPR bool operator!() const VULKAN_HPP_NOEXCEPT + { + return !m_mask; + } + + // bitwise operators + VULKAN_HPP_CONSTEXPR Flags operator&(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return Flags(m_mask & rhs.m_mask); + } + + VULKAN_HPP_CONSTEXPR Flags operator|(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return Flags(m_mask | rhs.m_mask); + } + + VULKAN_HPP_CONSTEXPR Flags operator^(Flags const& rhs) const VULKAN_HPP_NOEXCEPT + { + return Flags(m_mask ^ rhs.m_mask); + } + + VULKAN_HPP_CONSTEXPR Flags operator~() const VULKAN_HPP_NOEXCEPT + { + return Flags(m_mask ^ FlagTraits::allFlags); + } + + // assignment operators + VULKAN_HPP_CONSTEXPR_14 Flags & operator=(Flags const& rhs) VULKAN_HPP_NOEXCEPT = default; + + VULKAN_HPP_CONSTEXPR_14 Flags & operator|=(Flags const& rhs) VULKAN_HPP_NOEXCEPT + { + m_mask |= rhs.m_mask; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Flags & operator&=(Flags const& rhs) VULKAN_HPP_NOEXCEPT + { + m_mask &= rhs.m_mask; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Flags & operator^=(Flags const& rhs) VULKAN_HPP_NOEXCEPT + { + m_mask ^= rhs.m_mask; + return *this; + } + + // cast operators + explicit VULKAN_HPP_CONSTEXPR operator bool() const VULKAN_HPP_NOEXCEPT + { + return !!m_mask; + } + + explicit VULKAN_HPP_CONSTEXPR operator MaskType() const VULKAN_HPP_NOEXCEPT + { + return m_mask; + } + + private: + MaskType m_mask; + }; + +#if 
!defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + // relational operators only needed for pre C++20 + template + VULKAN_HPP_CONSTEXPR bool operator<(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator>( bit ); + } + + template + VULKAN_HPP_CONSTEXPR bool operator<=(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator>=( bit ); + } + + template + VULKAN_HPP_CONSTEXPR bool operator>(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator<( bit ); + } + + template + VULKAN_HPP_CONSTEXPR bool operator>=(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator<=(bit); + } + + template + VULKAN_HPP_CONSTEXPR bool operator==(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator==( bit ); + } + + template + VULKAN_HPP_CONSTEXPR bool operator!=(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator!=( bit ); + } +#endif + + // bitwise operators + template + VULKAN_HPP_CONSTEXPR Flags operator&(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator&( bit ); + } + + template + VULKAN_HPP_CONSTEXPR Flags operator|(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator|( bit ); + } + + template + VULKAN_HPP_CONSTEXPR Flags operator^(BitType bit, Flags const& flags) VULKAN_HPP_NOEXCEPT + { + return flags.operator^( bit ); + } + + template + class Optional + { + public: + Optional(RefType & reference) VULKAN_HPP_NOEXCEPT { m_ptr = &reference; } + Optional(RefType * ptr) VULKAN_HPP_NOEXCEPT { m_ptr = ptr; } + Optional(std::nullptr_t) VULKAN_HPP_NOEXCEPT { m_ptr = nullptr; } + + operator RefType*() const VULKAN_HPP_NOEXCEPT { return m_ptr; } + RefType const* operator->() const VULKAN_HPP_NOEXCEPT { return m_ptr; } + explicit operator bool() const VULKAN_HPP_NOEXCEPT { return !!m_ptr; } + + private: + RefType *m_ptr; + }; + + template struct StructExtends { enum { value = false }; }; + + template + struct IsPartOfStructureChain + { + static const bool valid = false; + }; + + template + struct IsPartOfStructureChain + { + static const bool valid = std::is_same::value || IsPartOfStructureChain::valid; + }; + + template + struct StructureChainContains + { + static const bool value = std::is_same>::type>::value || + StructureChainContains::value; + }; + + template + struct StructureChainContains<0, T, ChainElements...> + { + static const bool value = std::is_same>::type>::value; + }; + + template + struct StructureChainValidation + { + using TestType = typename std::tuple_element>::type; + static const bool valid = + StructExtends>::type>::value && + ( TestType::allowDuplicate || !StructureChainContains::value ) && + StructureChainValidation::valid; + }; + + template + struct StructureChainValidation<0, ChainElements...> + { + static const bool valid = true; + }; + + template + class StructureChain : public std::tuple + { + public: + StructureChain() VULKAN_HPP_NOEXCEPT + { + static_assert( StructureChainValidation::valid, + "The structure chain is not valid!" ); + link(); + } + + StructureChain( StructureChain const & rhs ) VULKAN_HPP_NOEXCEPT : std::tuple( rhs ) + { + static_assert( StructureChainValidation::valid, + "The structure chain is not valid!" ); + link(); + } + + StructureChain( StructureChain && rhs ) VULKAN_HPP_NOEXCEPT + : std::tuple( std::forward>( rhs ) ) + { + static_assert( StructureChainValidation::valid, + "The structure chain is not valid!" 
); + link(); + } + + StructureChain( ChainElements const &... elems ) VULKAN_HPP_NOEXCEPT : std::tuple( elems... ) + { + static_assert( StructureChainValidation::valid, + "The structure chain is not valid!" ); + link(); + } + + StructureChain & operator=( StructureChain const & rhs ) VULKAN_HPP_NOEXCEPT + { + std::tuple::operator=( rhs ); + link(); + return *this; + } + + StructureChain & operator=( StructureChain && rhs ) = delete; + + template >::type, size_t Which = 0> + T & get() VULKAN_HPP_NOEXCEPT + { + return std::get::value>( static_cast&>( *this ) ); + } + + template >::type, size_t Which = 0> + T const & get() const VULKAN_HPP_NOEXCEPT + { + return std::get::value>( static_cast const &>( *this ) ); + } + + template + std::tuple get() VULKAN_HPP_NOEXCEPT + { + return std::tie( get(), get(), get()... ); + } + + template + std::tuple get() const VULKAN_HPP_NOEXCEPT + { + return std::tie( get(), get(), get()... ); + } + + template + void relink() VULKAN_HPP_NOEXCEPT + { + static_assert( IsPartOfStructureChain::valid, + "Can't relink Structure that's not part of this StructureChain!" ); + static_assert( + !std::is_same>::type>::value || (Which != 0), + "It's not allowed to have the first element unlinked!" ); + + auto pNext = reinterpret_cast( &get() ); + VULKAN_HPP_ASSERT( !isLinked( pNext ) ); + auto & headElement = std::get<0>( static_cast&>( *this ) ); + pNext->pNext = reinterpret_cast(headElement.pNext); + headElement.pNext = pNext; + } + + template + void unlink() VULKAN_HPP_NOEXCEPT + { + static_assert( IsPartOfStructureChain::valid, + "Can't unlink Structure that's not part of this StructureChain!" ); + static_assert( + !std::is_same>::type>::value || (Which != 0), + "It's not allowed to unlink the first element!" ); + + unlink( reinterpret_cast( &get() ) ); + } + + private: + template + struct ChainElementIndex : ChainElementIndex + {}; + + template + struct ChainElementIndex::value, void>::type, + First, + Types...> : ChainElementIndex + {}; + + template + struct ChainElementIndex::value, void>::type, + First, + Types...> : ChainElementIndex + {}; + + template + struct ChainElementIndex::value, void>::type, + First, + Types...> : std::integral_constant + {}; + + bool isLinked( VkBaseInStructure const * pNext ) + { + VkBaseInStructure const * elementPtr = reinterpret_cast(&std::get<0>( static_cast&>( *this ) ) ); + while ( elementPtr ) + { + if ( elementPtr->pNext == pNext ) + { + return true; + } + elementPtr = elementPtr->pNext; + } + return false; + } + + template + typename std::enable_if::type link() VULKAN_HPP_NOEXCEPT + { + auto & x = std::get( static_cast&>( *this ) ); + x.pNext = &std::get( static_cast&>( *this ) ); + link(); + } + + template + typename std::enable_if::type link() VULKAN_HPP_NOEXCEPT + {} + + template + typename std::enable_if::type unlink( VkBaseOutStructure const * pNext ) VULKAN_HPP_NOEXCEPT + { + auto & element = std::get( static_cast&>( *this ) ); + if ( element.pNext == pNext ) + { + element.pNext = pNext->pNext; + } + else + { + unlink( pNext ); + } + } + + template + typename std::enable_if::type unlink( VkBaseOutStructure const * pNext ) VULKAN_HPP_NOEXCEPT + { + auto & element = std::get<0>( static_cast&>( *this ) ); + if ( element.pNext == pNext ) + { + element.pNext = pNext->pNext; + } + else + { + VULKAN_HPP_ASSERT( false ); // fires, if the ClassType member has already been unlinked ! 
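A minimal usage sketch (not part of the patch) for the StructureChain class defined here: its constructor wires the pNext pointers of all elements, so a features query fills the whole chain in one call. Assumes a vk::PhysicalDevice named physicalDevice obtained elsewhere.

    vk::StructureChain<vk::PhysicalDeviceFeatures2, vk::PhysicalDeviceVulkan12Features> chain;
    // The elements are already linked: Vulkan12Features sits in PhysicalDeviceFeatures2::pNext.
    physicalDevice.getFeatures2(&chain.get<vk::PhysicalDeviceFeatures2>());
    bool hasTimelineSemaphore =
        chain.get<vk::PhysicalDeviceVulkan12Features>().timelineSemaphore;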
+ } + } + }; + +#if !defined(VULKAN_HPP_NO_SMART_HANDLE) + template class UniqueHandleTraits; + + template + class UniqueHandle : public UniqueHandleTraits::deleter + { + private: + using Deleter = typename UniqueHandleTraits::deleter; + + public: + using element_type = Type; + + UniqueHandle() + : Deleter() + , m_value() + {} + + explicit UniqueHandle( Type const& value, Deleter const& deleter = Deleter() ) VULKAN_HPP_NOEXCEPT + : Deleter( deleter) + , m_value( value ) + {} + + UniqueHandle( UniqueHandle const& ) = delete; + + UniqueHandle( UniqueHandle && other ) VULKAN_HPP_NOEXCEPT + : Deleter( std::move( static_cast( other ) ) ) + , m_value( other.release() ) + {} + + ~UniqueHandle() VULKAN_HPP_NOEXCEPT + { + if ( m_value ) this->destroy( m_value ); + } + + UniqueHandle & operator=( UniqueHandle const& ) = delete; + + UniqueHandle & operator=( UniqueHandle && other ) VULKAN_HPP_NOEXCEPT + { + reset( other.release() ); + *static_cast(this) = std::move( static_cast(other) ); + return *this; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_value.operator bool(); + } + + Type const* operator->() const VULKAN_HPP_NOEXCEPT + { + return &m_value; + } + + Type * operator->() VULKAN_HPP_NOEXCEPT + { + return &m_value; + } + + Type const& operator*() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + Type & operator*() VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + const Type & get() const VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + Type & get() VULKAN_HPP_NOEXCEPT + { + return m_value; + } + + void reset( Type const& value = Type() ) VULKAN_HPP_NOEXCEPT + { + if ( m_value != value ) + { + if ( m_value ) this->destroy( m_value ); + m_value = value; + } + } + + Type release() VULKAN_HPP_NOEXCEPT + { + Type value = m_value; + m_value = nullptr; + return value; + } + + void swap( UniqueHandle & rhs ) VULKAN_HPP_NOEXCEPT + { + std::swap(m_value, rhs.m_value); + std::swap(static_cast(*this), static_cast(rhs)); + } + + private: + Type m_value; + }; + + template + VULKAN_HPP_INLINE std::vector uniqueToRaw(std::vector const& handles) + { + std::vector newBuffer(handles.size()); + std::transform(handles.begin(), handles.end(), newBuffer.begin(), [](UniqueType const& handle) { return handle.get(); }); + return newBuffer; + } + + template + VULKAN_HPP_INLINE void swap( UniqueHandle & lhs, UniqueHandle & rhs ) VULKAN_HPP_NOEXCEPT + { + lhs.swap( rhs ); + } +#endif + +#if !defined(VK_NO_PROTOTYPES) + class DispatchLoaderStatic + { + public: +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkAcquireFullScreenExclusiveModeEXT( VkDevice device, VkSwapchainKHR swapchain ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAcquireFullScreenExclusiveModeEXT( device, swapchain ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkAcquireNextImage2KHR( VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAcquireNextImage2KHR( device, pAcquireInfo, pImageIndex ); + } + + VkResult vkAcquireNextImageKHR( VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAcquireNextImageKHR( device, swapchain, timeout, semaphore, fence, pImageIndex ); + } + + VkResult vkAcquirePerformanceConfigurationINTEL( VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration ) const VULKAN_HPP_NOEXCEPT + { + return 
::vkAcquirePerformanceConfigurationINTEL( device, pAcquireInfo, pConfiguration ); + } + + VkResult vkAcquireProfilingLockKHR( VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAcquireProfilingLockKHR( device, pInfo ); + } + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + VkResult vkAcquireXlibDisplayEXT( VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAcquireXlibDisplayEXT( physicalDevice, dpy, display ); + } +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + VkResult vkAllocateCommandBuffers( VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAllocateCommandBuffers( device, pAllocateInfo, pCommandBuffers ); + } + + VkResult vkAllocateDescriptorSets( VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAllocateDescriptorSets( device, pAllocateInfo, pDescriptorSets ); + } + + VkResult vkAllocateMemory( VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory ) const VULKAN_HPP_NOEXCEPT + { + return ::vkAllocateMemory( device, pAllocateInfo, pAllocator, pMemory ); + } + + VkResult vkBeginCommandBuffer( VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBeginCommandBuffer( commandBuffer, pBeginInfo ); + } + + VkResult vkBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindAccelerationStructureMemoryNV( device, bindInfoCount, pBindInfos ); + } + + VkResult vkBindBufferMemory( VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindBufferMemory( device, buffer, memory, memoryOffset ); + } + + VkResult vkBindBufferMemory2( VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindBufferMemory2( device, bindInfoCount, pBindInfos ); + } + + VkResult vkBindBufferMemory2KHR( VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindBufferMemory2KHR( device, bindInfoCount, pBindInfos ); + } + + VkResult vkBindImageMemory( VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindImageMemory( device, image, memory, memoryOffset ); + } + + VkResult vkBindImageMemory2( VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindImageMemory2( device, bindInfoCount, pBindInfos ); + } + + VkResult vkBindImageMemory2KHR( VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBindImageMemory2KHR( device, bindInfoCount, pBindInfos ); + } + + VkResult vkBuildAccelerationStructuresKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkBuildAccelerationStructuresKHR( device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos 
); + } + + void vkCmdBeginConditionalRenderingEXT( VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginConditionalRenderingEXT( commandBuffer, pConditionalRenderingBegin ); + } + + void vkCmdBeginDebugUtilsLabelEXT( VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginDebugUtilsLabelEXT( commandBuffer, pLabelInfo ); + } + + void vkCmdBeginQuery( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginQuery( commandBuffer, queryPool, query, flags ); + } + + void vkCmdBeginQueryIndexedEXT( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginQueryIndexedEXT( commandBuffer, queryPool, query, flags, index ); + } + + void vkCmdBeginRenderPass( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginRenderPass( commandBuffer, pRenderPassBegin, contents ); + } + + void vkCmdBeginRenderPass2( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginRenderPass2( commandBuffer, pRenderPassBegin, pSubpassBeginInfo ); + } + + void vkCmdBeginRenderPass2KHR( VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginRenderPass2KHR( commandBuffer, pRenderPassBegin, pSubpassBeginInfo ); + } + + void vkCmdBeginTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBeginTransformFeedbackEXT( commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets ); + } + + void vkCmdBindDescriptorSets( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindDescriptorSets( commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets ); + } + + void vkCmdBindIndexBuffer( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindIndexBuffer( commandBuffer, buffer, offset, indexType ); + } + + void vkCmdBindPipeline( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindPipeline( commandBuffer, pipelineBindPoint, pipeline ); + } + + void vkCmdBindPipelineShaderGroupNV( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindPipelineShaderGroupNV( commandBuffer, pipelineBindPoint, pipeline, groupIndex ); + } + + void vkCmdBindShadingRateImageNV( VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout ) const 
VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindShadingRateImageNV( commandBuffer, imageView, imageLayout ); + } + + void vkCmdBindTransformFeedbackBuffersEXT( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindTransformFeedbackBuffersEXT( commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes ); + } + + void vkCmdBindVertexBuffers( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindVertexBuffers( commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets ); + } + + void vkCmdBindVertexBuffers2EXT( VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBindVertexBuffers2EXT( commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes, pStrides ); + } + + void vkCmdBlitImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBlitImage( commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter ); + } + + void vkCmdBlitImage2KHR( VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR* pBlitImageInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBlitImage2KHR( commandBuffer, pBlitImageInfo ); + } + + void vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBuildAccelerationStructureNV( commandBuffer, pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset ); + } + + void vkCmdBuildAccelerationStructuresIndirectKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const * ppMaxPrimitiveCounts ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBuildAccelerationStructuresIndirectKHR( commandBuffer, infoCount, pInfos, pIndirectDeviceAddresses, pIndirectStrides, ppMaxPrimitiveCounts ); + } + + void vkCmdBuildAccelerationStructuresKHR( VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdBuildAccelerationStructuresKHR( commandBuffer, infoCount, pInfos, ppBuildRangeInfos ); + } + + void vkCmdClearAttachments( VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdClearAttachments( commandBuffer, attachmentCount, pAttachments, rectCount, pRects ); + } + + void vkCmdClearColorImage( VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const 
VkImageSubresourceRange* pRanges ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdClearColorImage( commandBuffer, image, imageLayout, pColor, rangeCount, pRanges ); + } + + void vkCmdClearDepthStencilImage( VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdClearDepthStencilImage( commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges ); + } + + void vkCmdCopyAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyAccelerationStructureKHR( commandBuffer, pInfo ); + } + + void vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyAccelerationStructureNV( commandBuffer, dst, src, mode ); + } + + void vkCmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyAccelerationStructureToMemoryKHR( commandBuffer, pInfo ); + } + + void vkCmdCopyBuffer( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyBuffer( commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions ); + } + + void vkCmdCopyBuffer2KHR( VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR* pCopyBufferInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyBuffer2KHR( commandBuffer, pCopyBufferInfo ); + } + + void vkCmdCopyBufferToImage( VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyBufferToImage( commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions ); + } + + void vkCmdCopyBufferToImage2KHR( VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyBufferToImage2KHR( commandBuffer, pCopyBufferToImageInfo ); + } + + void vkCmdCopyImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyImage( commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions ); + } + + void vkCmdCopyImage2KHR( VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR* pCopyImageInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyImage2KHR( commandBuffer, pCopyImageInfo ); + } + + void vkCmdCopyImageToBuffer( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyImageToBuffer( commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions ); + } + + void vkCmdCopyImageToBuffer2KHR( VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyImageToBuffer2KHR( commandBuffer, pCopyImageToBufferInfo ); + } + + void vkCmdCopyMemoryToAccelerationStructureKHR( 
VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyMemoryToAccelerationStructureKHR( commandBuffer, pInfo ); + } + + void vkCmdCopyQueryPoolResults( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdCopyQueryPoolResults( commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags ); + } + + void vkCmdDebugMarkerBeginEXT( VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDebugMarkerBeginEXT( commandBuffer, pMarkerInfo ); + } + + void vkCmdDebugMarkerEndEXT( VkCommandBuffer commandBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDebugMarkerEndEXT( commandBuffer ); + } + + void vkCmdDebugMarkerInsertEXT( VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDebugMarkerInsertEXT( commandBuffer, pMarkerInfo ); + } + + void vkCmdDispatch( VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDispatch( commandBuffer, groupCountX, groupCountY, groupCountZ ); + } + + void vkCmdDispatchBase( VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDispatchBase( commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } + + void vkCmdDispatchBaseKHR( VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDispatchBaseKHR( commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } + + void vkCmdDispatchIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDispatchIndirect( commandBuffer, buffer, offset ); + } + + void vkCmdDraw( VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDraw( commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance ); + } + + void vkCmdDrawIndexed( VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndexed( commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance ); + } + + void vkCmdDrawIndexedIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndexedIndirect( commandBuffer, buffer, offset, drawCount, stride ); + } + + void vkCmdDrawIndexedIndirectCount( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndexedIndirectCount( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void 
vkCmdDrawIndexedIndirectCountAMD( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndexedIndirectCountAMD( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void vkCmdDrawIndexedIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndexedIndirectCountKHR( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void vkCmdDrawIndirect( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndirect( commandBuffer, buffer, offset, drawCount, stride ); + } + + void vkCmdDrawIndirectByteCountEXT( VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndirectByteCountEXT( commandBuffer, instanceCount, firstInstance, counterBuffer, counterBufferOffset, counterOffset, vertexStride ); + } + + void vkCmdDrawIndirectCount( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndirectCount( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void vkCmdDrawIndirectCountAMD( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndirectCountAMD( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void vkCmdDrawIndirectCountKHR( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawIndirectCountKHR( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void vkCmdDrawMeshTasksIndirectCountNV( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawMeshTasksIndirectCountNV( commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride ); + } + + void vkCmdDrawMeshTasksIndirectNV( VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawMeshTasksIndirectNV( commandBuffer, buffer, offset, drawCount, stride ); + } + + void vkCmdDrawMeshTasksNV( VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdDrawMeshTasksNV( commandBuffer, taskCount, firstTask ); + } + + void vkCmdEndConditionalRenderingEXT( VkCommandBuffer commandBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndConditionalRenderingEXT( commandBuffer ); + } + + void vkCmdEndDebugUtilsLabelEXT( VkCommandBuffer 
commandBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndDebugUtilsLabelEXT( commandBuffer ); + } + + void vkCmdEndQuery( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndQuery( commandBuffer, queryPool, query ); + } + + void vkCmdEndQueryIndexedEXT( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndQueryIndexedEXT( commandBuffer, queryPool, query, index ); + } + + void vkCmdEndRenderPass( VkCommandBuffer commandBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndRenderPass( commandBuffer ); + } + + void vkCmdEndRenderPass2( VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndRenderPass2( commandBuffer, pSubpassEndInfo ); + } + + void vkCmdEndRenderPass2KHR( VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndRenderPass2KHR( commandBuffer, pSubpassEndInfo ); + } + + void vkCmdEndTransformFeedbackEXT( VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdEndTransformFeedbackEXT( commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets ); + } + + void vkCmdExecuteCommands( VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdExecuteCommands( commandBuffer, commandBufferCount, pCommandBuffers ); + } + + void vkCmdExecuteGeneratedCommandsNV( VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdExecuteGeneratedCommandsNV( commandBuffer, isPreprocessed, pGeneratedCommandsInfo ); + } + + void vkCmdFillBuffer( VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdFillBuffer( commandBuffer, dstBuffer, dstOffset, size, data ); + } + + void vkCmdInsertDebugUtilsLabelEXT( VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdInsertDebugUtilsLabelEXT( commandBuffer, pLabelInfo ); + } + + void vkCmdNextSubpass( VkCommandBuffer commandBuffer, VkSubpassContents contents ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdNextSubpass( commandBuffer, contents ); + } + + void vkCmdNextSubpass2( VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdNextSubpass2( commandBuffer, pSubpassBeginInfo, pSubpassEndInfo ); + } + + void vkCmdNextSubpass2KHR( VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdNextSubpass2KHR( commandBuffer, pSubpassBeginInfo, pSubpassEndInfo ); + } + + void vkCmdPipelineBarrier( VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const 
VkImageMemoryBarrier* pImageMemoryBarriers ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPipelineBarrier( commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers ); + } + + void vkCmdPreprocessGeneratedCommandsNV( VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPreprocessGeneratedCommandsNV( commandBuffer, pGeneratedCommandsInfo ); + } + + void vkCmdPushConstants( VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushConstants( commandBuffer, layout, stageFlags, offset, size, pValues ); + } + + void vkCmdPushDescriptorSetKHR( VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushDescriptorSetKHR( commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites ); + } + + void vkCmdPushDescriptorSetWithTemplateKHR( VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdPushDescriptorSetWithTemplateKHR( commandBuffer, descriptorUpdateTemplate, layout, set, pData ); + } + + void vkCmdResetEvent( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdResetEvent( commandBuffer, event, stageMask ); + } + + void vkCmdResetQueryPool( VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdResetQueryPool( commandBuffer, queryPool, firstQuery, queryCount ); + } + + void vkCmdResolveImage( VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdResolveImage( commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions ); + } + + void vkCmdResolveImage2KHR( VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdResolveImage2KHR( commandBuffer, pResolveImageInfo ); + } + + void vkCmdSetBlendConstants( VkCommandBuffer commandBuffer, const float blendConstants[4] ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetBlendConstants( commandBuffer, blendConstants ); + } + + void vkCmdSetCheckpointNV( VkCommandBuffer commandBuffer, const void* pCheckpointMarker ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetCheckpointNV( commandBuffer, pCheckpointMarker ); + } + + void vkCmdSetCoarseSampleOrderNV( VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetCoarseSampleOrderNV( commandBuffer, sampleOrderType, customSampleOrderCount, pCustomSampleOrders ); + } + + void vkCmdSetCullModeEXT( VkCommandBuffer commandBuffer, VkCullModeFlags cullMode ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetCullModeEXT( commandBuffer, cullMode ); + } + + void 
vkCmdSetDepthBias( VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDepthBias( commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor ); + } + + void vkCmdSetDepthBounds( VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDepthBounds( commandBuffer, minDepthBounds, maxDepthBounds ); + } + + void vkCmdSetDepthBoundsTestEnableEXT( VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDepthBoundsTestEnableEXT( commandBuffer, depthBoundsTestEnable ); + } + + void vkCmdSetDepthCompareOpEXT( VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDepthCompareOpEXT( commandBuffer, depthCompareOp ); + } + + void vkCmdSetDepthTestEnableEXT( VkCommandBuffer commandBuffer, VkBool32 depthTestEnable ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDepthTestEnableEXT( commandBuffer, depthTestEnable ); + } + + void vkCmdSetDepthWriteEnableEXT( VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDepthWriteEnableEXT( commandBuffer, depthWriteEnable ); + } + + void vkCmdSetDeviceMask( VkCommandBuffer commandBuffer, uint32_t deviceMask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDeviceMask( commandBuffer, deviceMask ); + } + + void vkCmdSetDeviceMaskKHR( VkCommandBuffer commandBuffer, uint32_t deviceMask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDeviceMaskKHR( commandBuffer, deviceMask ); + } + + void vkCmdSetDiscardRectangleEXT( VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetDiscardRectangleEXT( commandBuffer, firstDiscardRectangle, discardRectangleCount, pDiscardRectangles ); + } + + void vkCmdSetEvent( VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetEvent( commandBuffer, event, stageMask ); + } + + void vkCmdSetExclusiveScissorNV( VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetExclusiveScissorNV( commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissors ); + } + + void vkCmdSetFragmentShadingRateEnumNV( VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetFragmentShadingRateEnumNV( commandBuffer, shadingRate, combinerOps ); + } + + void vkCmdSetFragmentShadingRateKHR( VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2] ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetFragmentShadingRateKHR( commandBuffer, pFragmentSize, combinerOps ); + } + + void vkCmdSetFrontFaceEXT( VkCommandBuffer commandBuffer, VkFrontFace frontFace ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetFrontFaceEXT( commandBuffer, frontFace ); + } + + void vkCmdSetLineStippleEXT( VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetLineStippleEXT( commandBuffer, lineStippleFactor, lineStipplePattern ); + } + + void 
vkCmdSetLineWidth( VkCommandBuffer commandBuffer, float lineWidth ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetLineWidth( commandBuffer, lineWidth ); + } + + VkResult vkCmdSetPerformanceMarkerINTEL( VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetPerformanceMarkerINTEL( commandBuffer, pMarkerInfo ); + } + + VkResult vkCmdSetPerformanceOverrideINTEL( VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetPerformanceOverrideINTEL( commandBuffer, pOverrideInfo ); + } + + VkResult vkCmdSetPerformanceStreamMarkerINTEL( VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetPerformanceStreamMarkerINTEL( commandBuffer, pMarkerInfo ); + } + + void vkCmdSetPrimitiveTopologyEXT( VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetPrimitiveTopologyEXT( commandBuffer, primitiveTopology ); + } + + void vkCmdSetRayTracingPipelineStackSizeKHR( VkCommandBuffer commandBuffer, uint32_t pipelineStackSize ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetRayTracingPipelineStackSizeKHR( commandBuffer, pipelineStackSize ); + } + + void vkCmdSetSampleLocationsEXT( VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetSampleLocationsEXT( commandBuffer, pSampleLocationsInfo ); + } + + void vkCmdSetScissor( VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetScissor( commandBuffer, firstScissor, scissorCount, pScissors ); + } + + void vkCmdSetScissorWithCountEXT( VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetScissorWithCountEXT( commandBuffer, scissorCount, pScissors ); + } + + void vkCmdSetStencilCompareMask( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetStencilCompareMask( commandBuffer, faceMask, compareMask ); + } + + void vkCmdSetStencilOpEXT( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetStencilOpEXT( commandBuffer, faceMask, failOp, passOp, depthFailOp, compareOp ); + } + + void vkCmdSetStencilReference( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetStencilReference( commandBuffer, faceMask, reference ); + } + + void vkCmdSetStencilTestEnableEXT( VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetStencilTestEnableEXT( commandBuffer, stencilTestEnable ); + } + + void vkCmdSetStencilWriteMask( VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetStencilWriteMask( commandBuffer, faceMask, writeMask ); + } + + void vkCmdSetViewport( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetViewport( commandBuffer, firstViewport, viewportCount, pViewports ); + } + + void 
vkCmdSetViewportShadingRatePaletteNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetViewportShadingRatePaletteNV( commandBuffer, firstViewport, viewportCount, pShadingRatePalettes ); + } + + void vkCmdSetViewportWScalingNV( VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetViewportWScalingNV( commandBuffer, firstViewport, viewportCount, pViewportWScalings ); + } + + void vkCmdSetViewportWithCountEXT( VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdSetViewportWithCountEXT( commandBuffer, viewportCount, pViewports ); + } + + void vkCmdTraceRaysIndirectKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdTraceRaysIndirectKHR( commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, indirectDeviceAddress ); + } + + void vkCmdTraceRaysKHR( VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdTraceRaysKHR( commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth ); + } + + void vkCmdTraceRaysNV( VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdTraceRaysNV( commandBuffer, raygenShaderBindingTableBuffer, raygenShaderBindingOffset, missShaderBindingTableBuffer, missShaderBindingOffset, missShaderBindingStride, hitShaderBindingTableBuffer, hitShaderBindingOffset, hitShaderBindingStride, callableShaderBindingTableBuffer, callableShaderBindingOffset, callableShaderBindingStride, width, height, depth ); + } + + void vkCmdUpdateBuffer( VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdUpdateBuffer( commandBuffer, dstBuffer, dstOffset, dataSize, pData ); + } + + void vkCmdWaitEvents( VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const 
VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdWaitEvents( commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers ); + } + + void vkCmdWriteAccelerationStructuresPropertiesKHR( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdWriteAccelerationStructuresPropertiesKHR( commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery ); + } + + void vkCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdWriteAccelerationStructuresPropertiesNV( commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery ); + } + + void vkCmdWriteBufferMarkerAMD( VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdWriteBufferMarkerAMD( commandBuffer, pipelineStage, dstBuffer, dstOffset, marker ); + } + + void vkCmdWriteTimestamp( VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCmdWriteTimestamp( commandBuffer, pipelineStage, queryPool, query ); + } + + VkResult vkCompileDeferredNV( VkDevice device, VkPipeline pipeline, uint32_t shader ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCompileDeferredNV( device, pipeline, shader ); + } + + VkResult vkCopyAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCopyAccelerationStructureKHR( device, deferredOperation, pInfo ); + } + + VkResult vkCopyAccelerationStructureToMemoryKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCopyAccelerationStructureToMemoryKHR( device, deferredOperation, pInfo ); + } + + VkResult vkCopyMemoryToAccelerationStructureKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCopyMemoryToAccelerationStructureKHR( device, deferredOperation, pInfo ); + } + + VkResult vkCreateAccelerationStructureKHR( VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateAccelerationStructureKHR( device, pCreateInfo, pAllocator, pAccelerationStructure ); + } + + VkResult vkCreateAccelerationStructureNV( VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateAccelerationStructureNV( device, pCreateInfo, pAllocator, 
pAccelerationStructure ); + } + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + VkResult vkCreateAndroidSurfaceKHR( VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateAndroidSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VkResult vkCreateBuffer( VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateBuffer( device, pCreateInfo, pAllocator, pBuffer ); + } + + VkResult vkCreateBufferView( VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateBufferView( device, pCreateInfo, pAllocator, pView ); + } + + VkResult vkCreateCommandPool( VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateCommandPool( device, pCreateInfo, pAllocator, pCommandPool ); + } + + VkResult vkCreateComputePipelines( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateComputePipelines( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines ); + } + + VkResult vkCreateDebugReportCallbackEXT( VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDebugReportCallbackEXT( instance, pCreateInfo, pAllocator, pCallback ); + } + + VkResult vkCreateDebugUtilsMessengerEXT( VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDebugUtilsMessengerEXT( instance, pCreateInfo, pAllocator, pMessenger ); + } + + VkResult vkCreateDeferredOperationKHR( VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDeferredOperationKHR( device, pAllocator, pDeferredOperation ); + } + + VkResult vkCreateDescriptorPool( VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDescriptorPool( device, pCreateInfo, pAllocator, pDescriptorPool ); + } + + VkResult vkCreateDescriptorSetLayout( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDescriptorSetLayout( device, pCreateInfo, pAllocator, pSetLayout ); + } + + VkResult vkCreateDescriptorUpdateTemplate( VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDescriptorUpdateTemplate( device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate ); + } + + VkResult vkCreateDescriptorUpdateTemplateKHR( VkDevice device, const 
VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDescriptorUpdateTemplateKHR( device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate ); + } + + VkResult vkCreateDevice( VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDevice( physicalDevice, pCreateInfo, pAllocator, pDevice ); + } + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + VkResult vkCreateDirectFBSurfaceEXT( VkInstance instance, const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDirectFBSurfaceEXT( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + VkResult vkCreateDisplayModeKHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDisplayModeKHR( physicalDevice, display, pCreateInfo, pAllocator, pMode ); + } + + VkResult vkCreateDisplayPlaneSurfaceKHR( VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateDisplayPlaneSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface ); + } + + VkResult vkCreateEvent( VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateEvent( device, pCreateInfo, pAllocator, pEvent ); + } + + VkResult vkCreateFence( VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateFence( device, pCreateInfo, pAllocator, pFence ); + } + + VkResult vkCreateFramebuffer( VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateFramebuffer( device, pCreateInfo, pAllocator, pFramebuffer ); + } + + VkResult vkCreateGraphicsPipelines( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateGraphicsPipelines( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines ); + } + + VkResult vkCreateHeadlessSurfaceEXT( VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateHeadlessSurfaceEXT( instance, pCreateInfo, pAllocator, pSurface ); + } + +#ifdef VK_USE_PLATFORM_IOS_MVK + VkResult vkCreateIOSSurfaceMVK( VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateIOSSurfaceMVK( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + + VkResult vkCreateImage( VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage ) const 
VULKAN_HPP_NOEXCEPT + { + return ::vkCreateImage( device, pCreateInfo, pAllocator, pImage ); + } + +#ifdef VK_USE_PLATFORM_FUCHSIA + VkResult vkCreateImagePipeSurfaceFUCHSIA( VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateImagePipeSurfaceFUCHSIA( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + VkResult vkCreateImageView( VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateImageView( device, pCreateInfo, pAllocator, pView ); + } + + VkResult vkCreateIndirectCommandsLayoutNV( VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateIndirectCommandsLayoutNV( device, pCreateInfo, pAllocator, pIndirectCommandsLayout ); + } + + VkResult vkCreateInstance( const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateInstance( pCreateInfo, pAllocator, pInstance ); + } + +#ifdef VK_USE_PLATFORM_MACOS_MVK + VkResult vkCreateMacOSSurfaceMVK( VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateMacOSSurfaceMVK( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + +#ifdef VK_USE_PLATFORM_METAL_EXT + VkResult vkCreateMetalSurfaceEXT( VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateMetalSurfaceEXT( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + VkResult vkCreatePipelineCache( VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreatePipelineCache( device, pCreateInfo, pAllocator, pPipelineCache ); + } + + VkResult vkCreatePipelineLayout( VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreatePipelineLayout( device, pCreateInfo, pAllocator, pPipelineLayout ); + } + + VkResult vkCreatePrivateDataSlotEXT( VkDevice device, const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreatePrivateDataSlotEXT( device, pCreateInfo, pAllocator, pPrivateDataSlot ); + } + + VkResult vkCreateQueryPool( VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateQueryPool( device, pCreateInfo, pAllocator, pQueryPool ); + } + + VkResult vkCreateRayTracingPipelinesKHR( VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT + { + return 
::vkCreateRayTracingPipelinesKHR( device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines ); + } + + VkResult vkCreateRayTracingPipelinesNV( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateRayTracingPipelinesNV( device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines ); + } + + VkResult vkCreateRenderPass( VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateRenderPass( device, pCreateInfo, pAllocator, pRenderPass ); + } + + VkResult vkCreateRenderPass2( VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateRenderPass2( device, pCreateInfo, pAllocator, pRenderPass ); + } + + VkResult vkCreateRenderPass2KHR( VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateRenderPass2KHR( device, pCreateInfo, pAllocator, pRenderPass ); + } + + VkResult vkCreateSampler( VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateSampler( device, pCreateInfo, pAllocator, pSampler ); + } + + VkResult vkCreateSamplerYcbcrConversion( VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateSamplerYcbcrConversion( device, pCreateInfo, pAllocator, pYcbcrConversion ); + } + + VkResult vkCreateSamplerYcbcrConversionKHR( VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateSamplerYcbcrConversionKHR( device, pCreateInfo, pAllocator, pYcbcrConversion ); + } + + VkResult vkCreateSemaphore( VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateSemaphore( device, pCreateInfo, pAllocator, pSemaphore ); + } + + VkResult vkCreateShaderModule( VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateShaderModule( device, pCreateInfo, pAllocator, pShaderModule ); + } + + VkResult vkCreateSharedSwapchainsKHR( VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateSharedSwapchainsKHR( device, swapchainCount, pCreateInfos, pAllocator, pSwapchains ); + } + +#ifdef VK_USE_PLATFORM_GGP + VkResult vkCreateStreamDescriptorSurfaceGGP( VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateStreamDescriptorSurfaceGGP( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif 
/*VK_USE_PLATFORM_GGP*/ + + VkResult vkCreateSwapchainKHR( VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateSwapchainKHR( device, pCreateInfo, pAllocator, pSwapchain ); + } + + VkResult vkCreateValidationCacheEXT( VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateValidationCacheEXT( device, pCreateInfo, pAllocator, pValidationCache ); + } + +#ifdef VK_USE_PLATFORM_VI_NN + VkResult vkCreateViSurfaceNN( VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateViSurfaceNN( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VkResult vkCreateWaylandSurfaceKHR( VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateWaylandSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkCreateWin32SurfaceKHR( VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateWin32SurfaceKHR( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + VkResult vkCreateXcbSurfaceKHR( VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateXcbSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + VkResult vkCreateXlibSurfaceKHR( VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface ) const VULKAN_HPP_NOEXCEPT + { + return ::vkCreateXlibSurfaceKHR( instance, pCreateInfo, pAllocator, pSurface ); + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + VkResult vkDebugMarkerSetObjectNameEXT( VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDebugMarkerSetObjectNameEXT( device, pNameInfo ); + } + + VkResult vkDebugMarkerSetObjectTagEXT( VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDebugMarkerSetObjectTagEXT( device, pTagInfo ); + } + + void vkDebugReportMessageEXT( VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDebugReportMessageEXT( instance, flags, objectType, object, location, messageCode, pLayerPrefix, pMessage ); + } + + VkResult vkDeferredOperationJoinKHR( VkDevice device, VkDeferredOperationKHR operation ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDeferredOperationJoinKHR( device, operation ); + } + + void vkDestroyAccelerationStructureKHR( VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator ) const 
VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyAccelerationStructureKHR( device, accelerationStructure, pAllocator ); + } + + void vkDestroyAccelerationStructureNV( VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyAccelerationStructureNV( device, accelerationStructure, pAllocator ); + } + + void vkDestroyBuffer( VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyBuffer( device, buffer, pAllocator ); + } + + void vkDestroyBufferView( VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyBufferView( device, bufferView, pAllocator ); + } + + void vkDestroyCommandPool( VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyCommandPool( device, commandPool, pAllocator ); + } + + void vkDestroyDebugReportCallbackEXT( VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDebugReportCallbackEXT( instance, callback, pAllocator ); + } + + void vkDestroyDebugUtilsMessengerEXT( VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDebugUtilsMessengerEXT( instance, messenger, pAllocator ); + } + + void vkDestroyDeferredOperationKHR( VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDeferredOperationKHR( device, operation, pAllocator ); + } + + void vkDestroyDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDescriptorPool( device, descriptorPool, pAllocator ); + } + + void vkDestroyDescriptorSetLayout( VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDescriptorSetLayout( device, descriptorSetLayout, pAllocator ); + } + + void vkDestroyDescriptorUpdateTemplate( VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDescriptorUpdateTemplate( device, descriptorUpdateTemplate, pAllocator ); + } + + void vkDestroyDescriptorUpdateTemplateKHR( VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDescriptorUpdateTemplateKHR( device, descriptorUpdateTemplate, pAllocator ); + } + + void vkDestroyDevice( VkDevice device, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyDevice( device, pAllocator ); + } + + void vkDestroyEvent( VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyEvent( device, event, pAllocator ); + } + + void vkDestroyFence( VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyFence( device, fence, pAllocator ); + } + + void vkDestroyFramebuffer( VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return 
::vkDestroyFramebuffer( device, framebuffer, pAllocator ); + } + + void vkDestroyImage( VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyImage( device, image, pAllocator ); + } + + void vkDestroyImageView( VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyImageView( device, imageView, pAllocator ); + } + + void vkDestroyIndirectCommandsLayoutNV( VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyIndirectCommandsLayoutNV( device, indirectCommandsLayout, pAllocator ); + } + + void vkDestroyInstance( VkInstance instance, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyInstance( instance, pAllocator ); + } + + void vkDestroyPipeline( VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyPipeline( device, pipeline, pAllocator ); + } + + void vkDestroyPipelineCache( VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyPipelineCache( device, pipelineCache, pAllocator ); + } + + void vkDestroyPipelineLayout( VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyPipelineLayout( device, pipelineLayout, pAllocator ); + } + + void vkDestroyPrivateDataSlotEXT( VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyPrivateDataSlotEXT( device, privateDataSlot, pAllocator ); + } + + void vkDestroyQueryPool( VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyQueryPool( device, queryPool, pAllocator ); + } + + void vkDestroyRenderPass( VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyRenderPass( device, renderPass, pAllocator ); + } + + void vkDestroySampler( VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroySampler( device, sampler, pAllocator ); + } + + void vkDestroySamplerYcbcrConversion( VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroySamplerYcbcrConversion( device, ycbcrConversion, pAllocator ); + } + + void vkDestroySamplerYcbcrConversionKHR( VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroySamplerYcbcrConversionKHR( device, ycbcrConversion, pAllocator ); + } + + void vkDestroySemaphore( VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroySemaphore( device, semaphore, pAllocator ); + } + + void vkDestroyShaderModule( VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyShaderModule( device, shaderModule, pAllocator ); + } + + void vkDestroySurfaceKHR( VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator ) const 
VULKAN_HPP_NOEXCEPT + { + return ::vkDestroySurfaceKHR( instance, surface, pAllocator ); + } + + void vkDestroySwapchainKHR( VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroySwapchainKHR( device, swapchain, pAllocator ); + } + + void vkDestroyValidationCacheEXT( VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDestroyValidationCacheEXT( device, validationCache, pAllocator ); + } + + VkResult vkDeviceWaitIdle( VkDevice device ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDeviceWaitIdle( device ); + } + + VkResult vkDisplayPowerControlEXT( VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkDisplayPowerControlEXT( device, display, pDisplayPowerInfo ); + } + + VkResult vkEndCommandBuffer( VkCommandBuffer commandBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEndCommandBuffer( commandBuffer ); + } + + VkResult vkEnumerateDeviceExtensionProperties( VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumerateDeviceExtensionProperties( physicalDevice, pLayerName, pPropertyCount, pProperties ); + } + + VkResult vkEnumerateDeviceLayerProperties( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumerateDeviceLayerProperties( physicalDevice, pPropertyCount, pProperties ); + } + + VkResult vkEnumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, pProperties ); + } + + VkResult vkEnumerateInstanceLayerProperties( uint32_t* pPropertyCount, VkLayerProperties* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumerateInstanceLayerProperties( pPropertyCount, pProperties ); + } + + VkResult vkEnumerateInstanceVersion( uint32_t* pApiVersion ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumerateInstanceVersion( pApiVersion ); + } + + VkResult vkEnumeratePhysicalDeviceGroups( VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumeratePhysicalDeviceGroups( instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties ); + } + + VkResult vkEnumeratePhysicalDeviceGroupsKHR( VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumeratePhysicalDeviceGroupsKHR( instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties ); + } + + VkResult vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions ) const VULKAN_HPP_NOEXCEPT + { + return ::vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( physicalDevice, queueFamilyIndex, pCounterCount, pCounters, pCounterDescriptions ); + } + + VkResult vkEnumeratePhysicalDevices( VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices ) const VULKAN_HPP_NOEXCEPT + { + return 
::vkEnumeratePhysicalDevices( instance, pPhysicalDeviceCount, pPhysicalDevices ); + } + + VkResult vkFlushMappedMemoryRanges( VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges ) const VULKAN_HPP_NOEXCEPT + { + return ::vkFlushMappedMemoryRanges( device, memoryRangeCount, pMemoryRanges ); + } + + void vkFreeCommandBuffers( VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers ) const VULKAN_HPP_NOEXCEPT + { + return ::vkFreeCommandBuffers( device, commandPool, commandBufferCount, pCommandBuffers ); + } + + VkResult vkFreeDescriptorSets( VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets ) const VULKAN_HPP_NOEXCEPT + { + return ::vkFreeDescriptorSets( device, descriptorPool, descriptorSetCount, pDescriptorSets ); + } + + void vkFreeMemory( VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator ) const VULKAN_HPP_NOEXCEPT + { + return ::vkFreeMemory( device, memory, pAllocator ); + } + + void vkGetAccelerationStructureBuildSizesKHR( VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetAccelerationStructureBuildSizesKHR( device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo ); + } + + VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR( VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetAccelerationStructureDeviceAddressKHR( device, pInfo ); + } + + VkResult vkGetAccelerationStructureHandleNV( VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetAccelerationStructureHandleNV( device, accelerationStructure, dataSize, pData ); + } + + void vkGetAccelerationStructureMemoryRequirementsNV( VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetAccelerationStructureMemoryRequirementsNV( device, pInfo, pMemoryRequirements ); + } + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + VkResult vkGetAndroidHardwareBufferPropertiesANDROID( VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetAndroidHardwareBufferPropertiesANDROID( device, buffer, pProperties ); + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VkDeviceAddress vkGetBufferDeviceAddress( VkDevice device, const VkBufferDeviceAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferDeviceAddress( device, pInfo ); + } + + VkDeviceAddress vkGetBufferDeviceAddressEXT( VkDevice device, const VkBufferDeviceAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferDeviceAddressEXT( device, pInfo ); + } + + VkDeviceAddress vkGetBufferDeviceAddressKHR( VkDevice device, const VkBufferDeviceAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferDeviceAddressKHR( device, pInfo ); + } + + void vkGetBufferMemoryRequirements( VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferMemoryRequirements( device, buffer, pMemoryRequirements ); + } + + void 
vkGetBufferMemoryRequirements2( VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferMemoryRequirements2( device, pInfo, pMemoryRequirements ); + } + + void vkGetBufferMemoryRequirements2KHR( VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferMemoryRequirements2KHR( device, pInfo, pMemoryRequirements ); + } + + uint64_t vkGetBufferOpaqueCaptureAddress( VkDevice device, const VkBufferDeviceAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferOpaqueCaptureAddress( device, pInfo ); + } + + uint64_t vkGetBufferOpaqueCaptureAddressKHR( VkDevice device, const VkBufferDeviceAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetBufferOpaqueCaptureAddressKHR( device, pInfo ); + } + + VkResult vkGetCalibratedTimestampsEXT( VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetCalibratedTimestampsEXT( device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation ); + } + + uint32_t vkGetDeferredOperationMaxConcurrencyKHR( VkDevice device, VkDeferredOperationKHR operation ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeferredOperationMaxConcurrencyKHR( device, operation ); + } + + VkResult vkGetDeferredOperationResultKHR( VkDevice device, VkDeferredOperationKHR operation ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeferredOperationResultKHR( device, operation ); + } + + void vkGetDescriptorSetLayoutSupport( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDescriptorSetLayoutSupport( device, pCreateInfo, pSupport ); + } + + void vkGetDescriptorSetLayoutSupportKHR( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDescriptorSetLayoutSupportKHR( device, pCreateInfo, pSupport ); + } + + void vkGetDeviceAccelerationStructureCompatibilityKHR( VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceAccelerationStructureCompatibilityKHR( device, pVersionInfo, pCompatibility ); + } + + void vkGetDeviceGroupPeerMemoryFeatures( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceGroupPeerMemoryFeatures( device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures ); + } + + void vkGetDeviceGroupPeerMemoryFeaturesKHR( VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceGroupPeerMemoryFeaturesKHR( device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures ); + } + + VkResult vkGetDeviceGroupPresentCapabilitiesKHR( VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceGroupPresentCapabilitiesKHR( device, pDeviceGroupPresentCapabilities ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult 
vkGetDeviceGroupSurfacePresentModes2EXT( VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceGroupSurfacePresentModes2EXT( device, pSurfaceInfo, pModes ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkGetDeviceGroupSurfacePresentModesKHR( VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceGroupSurfacePresentModesKHR( device, surface, pModes ); + } + + void vkGetDeviceMemoryCommitment( VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceMemoryCommitment( device, memory, pCommittedMemoryInBytes ); + } + + uint64_t vkGetDeviceMemoryOpaqueCaptureAddress( VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceMemoryOpaqueCaptureAddress( device, pInfo ); + } + + uint64_t vkGetDeviceMemoryOpaqueCaptureAddressKHR( VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceMemoryOpaqueCaptureAddressKHR( device, pInfo ); + } + + PFN_vkVoidFunction vkGetDeviceProcAddr( VkDevice device, const char* pName ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceProcAddr( device, pName ); + } + + void vkGetDeviceQueue( VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceQueue( device, queueFamilyIndex, queueIndex, pQueue ); + } + + void vkGetDeviceQueue2( VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDeviceQueue2( device, pQueueInfo, pQueue ); + } + + VkResult vkGetDisplayModeProperties2KHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDisplayModeProperties2KHR( physicalDevice, display, pPropertyCount, pProperties ); + } + + VkResult vkGetDisplayModePropertiesKHR( VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDisplayModePropertiesKHR( physicalDevice, display, pPropertyCount, pProperties ); + } + + VkResult vkGetDisplayPlaneCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDisplayPlaneCapabilities2KHR( physicalDevice, pDisplayPlaneInfo, pCapabilities ); + } + + VkResult vkGetDisplayPlaneCapabilitiesKHR( VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDisplayPlaneCapabilitiesKHR( physicalDevice, mode, planeIndex, pCapabilities ); + } + + VkResult vkGetDisplayPlaneSupportedDisplaysKHR( VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetDisplayPlaneSupportedDisplaysKHR( physicalDevice, planeIndex, pDisplayCount, pDisplays ); + } + + VkResult vkGetEventStatus( VkDevice device, VkEvent event ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetEventStatus( device, event ); + } + + VkResult vkGetFenceFdKHR( VkDevice device, const VkFenceGetFdInfoKHR* 
pGetFdInfo, int* pFd ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetFenceFdKHR( device, pGetFdInfo, pFd ); + } + + VkResult vkGetFenceStatus( VkDevice device, VkFence fence ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetFenceStatus( device, fence ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetFenceWin32HandleKHR( VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetFenceWin32HandleKHR( device, pGetWin32HandleInfo, pHandle ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + void vkGetGeneratedCommandsMemoryRequirementsNV( VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetGeneratedCommandsMemoryRequirementsNV( device, pInfo, pMemoryRequirements ); + } + + VkResult vkGetImageDrmFormatModifierPropertiesEXT( VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageDrmFormatModifierPropertiesEXT( device, image, pProperties ); + } + + void vkGetImageMemoryRequirements( VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageMemoryRequirements( device, image, pMemoryRequirements ); + } + + void vkGetImageMemoryRequirements2( VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageMemoryRequirements2( device, pInfo, pMemoryRequirements ); + } + + void vkGetImageMemoryRequirements2KHR( VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageMemoryRequirements2KHR( device, pInfo, pMemoryRequirements ); + } + + void vkGetImageSparseMemoryRequirements( VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageSparseMemoryRequirements( device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements ); + } + + void vkGetImageSparseMemoryRequirements2( VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageSparseMemoryRequirements2( device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements ); + } + + void vkGetImageSparseMemoryRequirements2KHR( VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageSparseMemoryRequirements2KHR( device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements ); + } + + void vkGetImageSubresourceLayout( VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageSubresourceLayout( device, image, pSubresource, pLayout ); + } + + VkResult vkGetImageViewAddressNVX( VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageViewAddressNVX( device, imageView, pProperties ); + } + + uint32_t vkGetImageViewHandleNVX( VkDevice device, const VkImageViewHandleInfoNVX* pInfo ) 
const VULKAN_HPP_NOEXCEPT + { + return ::vkGetImageViewHandleNVX( device, pInfo ); + } + + PFN_vkVoidFunction vkGetInstanceProcAddr( VkInstance instance, const char* pName ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetInstanceProcAddr( instance, pName ); + } + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + VkResult vkGetMemoryAndroidHardwareBufferANDROID( VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryAndroidHardwareBufferANDROID( device, pInfo, pBuffer ); + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + VkResult vkGetMemoryFdKHR( VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryFdKHR( device, pGetFdInfo, pFd ); + } + + VkResult vkGetMemoryFdPropertiesKHR( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryFdPropertiesKHR( device, handleType, fd, pMemoryFdProperties ); + } + + VkResult vkGetMemoryHostPointerPropertiesEXT( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryHostPointerPropertiesEXT( device, handleType, pHostPointer, pMemoryHostPointerProperties ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetMemoryWin32HandleKHR( VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryWin32HandleKHR( device, pGetWin32HandleInfo, pHandle ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetMemoryWin32HandleNV( VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryWin32HandleNV( device, memory, handleType, pHandle ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetMemoryWin32HandlePropertiesKHR( VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetMemoryWin32HandlePropertiesKHR( device, handleType, handle, pMemoryWin32HandleProperties ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkGetPastPresentationTimingGOOGLE( VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPastPresentationTimingGOOGLE( device, swapchain, pPresentationTimingCount, pPresentationTimings ); + } + + VkResult vkGetPerformanceParameterINTEL( VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPerformanceParameterINTEL( device, parameter, pValue ); + } + + VkResult vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( physicalDevice, pTimeDomainCount, pTimeDomains ); + } + + VkResult vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties ) const VULKAN_HPP_NOEXCEPT + 
{ + return ::vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( physicalDevice, pPropertyCount, pProperties ); + } + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + VkBool32 vkGetPhysicalDeviceDirectFBPresentationSupportEXT( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, IDirectFB* dfb ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceDirectFBPresentationSupportEXT( physicalDevice, queueFamilyIndex, dfb ); + } +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceDisplayPlaneProperties2KHR( physicalDevice, pPropertyCount, pProperties ); + } + + VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceDisplayPlanePropertiesKHR( physicalDevice, pPropertyCount, pProperties ); + } + + VkResult vkGetPhysicalDeviceDisplayProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceDisplayProperties2KHR( physicalDevice, pPropertyCount, pProperties ); + } + + VkResult vkGetPhysicalDeviceDisplayPropertiesKHR( VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceDisplayPropertiesKHR( physicalDevice, pPropertyCount, pProperties ); + } + + void vkGetPhysicalDeviceExternalBufferProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalBufferProperties( physicalDevice, pExternalBufferInfo, pExternalBufferProperties ); + } + + void vkGetPhysicalDeviceExternalBufferPropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalBufferPropertiesKHR( physicalDevice, pExternalBufferInfo, pExternalBufferProperties ); + } + + void vkGetPhysicalDeviceExternalFenceProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalFenceProperties( physicalDevice, pExternalFenceInfo, pExternalFenceProperties ); + } + + void vkGetPhysicalDeviceExternalFencePropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalFencePropertiesKHR( physicalDevice, pExternalFenceInfo, pExternalFenceProperties ); + } + + VkResult vkGetPhysicalDeviceExternalImageFormatPropertiesNV( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalImageFormatPropertiesNV( physicalDevice, format, type, tiling, usage, 
flags, externalHandleType, pExternalImageFormatProperties ); + } + + void vkGetPhysicalDeviceExternalSemaphoreProperties( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalSemaphoreProperties( physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties ); + } + + void vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties ); + } + + void vkGetPhysicalDeviceFeatures( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFeatures( physicalDevice, pFeatures ); + } + + void vkGetPhysicalDeviceFeatures2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFeatures2( physicalDevice, pFeatures ); + } + + void vkGetPhysicalDeviceFeatures2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFeatures2KHR( physicalDevice, pFeatures ); + } + + void vkGetPhysicalDeviceFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFormatProperties( physicalDevice, format, pFormatProperties ); + } + + void vkGetPhysicalDeviceFormatProperties2( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFormatProperties2( physicalDevice, format, pFormatProperties ); + } + + void vkGetPhysicalDeviceFormatProperties2KHR( VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFormatProperties2KHR( physicalDevice, format, pFormatProperties ); + } + + VkResult vkGetPhysicalDeviceFragmentShadingRatesKHR( VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceFragmentShadingRatesKHR( physicalDevice, pFragmentShadingRateCount, pFragmentShadingRates ); + } + + VkResult vkGetPhysicalDeviceImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceImageFormatProperties( physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties ); + } + + VkResult vkGetPhysicalDeviceImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceImageFormatProperties2( physicalDevice, pImageFormatInfo, pImageFormatProperties ); + } + + VkResult vkGetPhysicalDeviceImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, 
VkImageFormatProperties2* pImageFormatProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceImageFormatProperties2KHR( physicalDevice, pImageFormatInfo, pImageFormatProperties ); + } + + void vkGetPhysicalDeviceMemoryProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceMemoryProperties( physicalDevice, pMemoryProperties ); + } + + void vkGetPhysicalDeviceMemoryProperties2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceMemoryProperties2( physicalDevice, pMemoryProperties ); + } + + void vkGetPhysicalDeviceMemoryProperties2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceMemoryProperties2KHR( physicalDevice, pMemoryProperties ); + } + + void vkGetPhysicalDeviceMultisamplePropertiesEXT( VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceMultisamplePropertiesEXT( physicalDevice, samples, pMultisampleProperties ); + } + + VkResult vkGetPhysicalDevicePresentRectanglesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDevicePresentRectanglesKHR( physicalDevice, surface, pRectCount, pRects ); + } + + void vkGetPhysicalDeviceProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceProperties( physicalDevice, pProperties ); + } + + void vkGetPhysicalDeviceProperties2( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceProperties2( physicalDevice, pProperties ); + } + + void vkGetPhysicalDeviceProperties2KHR( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceProperties2KHR( physicalDevice, pProperties ); + } + + void vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( physicalDevice, pPerformanceQueryCreateInfo, pNumPasses ); + } + + void vkGetPhysicalDeviceQueueFamilyProperties( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceQueueFamilyProperties( physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties ); + } + + void vkGetPhysicalDeviceQueueFamilyProperties2( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceQueueFamilyProperties2( physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties ); + } + + void vkGetPhysicalDeviceQueueFamilyProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceQueueFamilyProperties2KHR( physicalDevice, 
pQueueFamilyPropertyCount, pQueueFamilyProperties ); + } + + void vkGetPhysicalDeviceSparseImageFormatProperties( VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSparseImageFormatProperties( physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties ); + } + + void vkGetPhysicalDeviceSparseImageFormatProperties2( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSparseImageFormatProperties2( physicalDevice, pFormatInfo, pPropertyCount, pProperties ); + } + + void vkGetPhysicalDeviceSparseImageFormatProperties2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSparseImageFormatProperties2KHR( physicalDevice, pFormatInfo, pPropertyCount, pProperties ); + } + + VkResult vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( physicalDevice, pCombinationCount, pCombinations ); + } + + VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfaceCapabilities2EXT( physicalDevice, surface, pSurfaceCapabilities ); + } + + VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfaceCapabilities2KHR( physicalDevice, pSurfaceInfo, pSurfaceCapabilities ); + } + + VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfaceCapabilitiesKHR( physicalDevice, surface, pSurfaceCapabilities ); + } + + VkResult vkGetPhysicalDeviceSurfaceFormats2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfaceFormats2KHR( physicalDevice, pSurfaceInfo, pSurfaceFormatCount, pSurfaceFormats ); + } + + VkResult vkGetPhysicalDeviceSurfaceFormatsKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfaceFormatsKHR( physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetPhysicalDeviceSurfacePresentModes2EXT( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes ) const VULKAN_HPP_NOEXCEPT + { + return 
::vkGetPhysicalDeviceSurfacePresentModes2EXT( physicalDevice, pSurfaceInfo, pPresentModeCount, pPresentModes ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkGetPhysicalDeviceSurfacePresentModesKHR( VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfacePresentModesKHR( physicalDevice, surface, pPresentModeCount, pPresentModes ); + } + + VkResult vkGetPhysicalDeviceSurfaceSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceSurfaceSupportKHR( physicalDevice, queueFamilyIndex, surface, pSupported ); + } + + VkResult vkGetPhysicalDeviceToolPropertiesEXT( VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceToolPropertiesEXT( physicalDevice, pToolCount, pToolProperties ); + } + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceWaylandPresentationSupportKHR( physicalDevice, queueFamilyIndex, display ); + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceWin32PresentationSupportKHR( physicalDevice, queueFamilyIndex ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceXcbPresentationSupportKHR( physicalDevice, queueFamilyIndex, connection, visual_id ); + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR( VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPhysicalDeviceXlibPresentationSupportKHR( physicalDevice, queueFamilyIndex, dpy, visualID ); + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + VkResult vkGetPipelineCacheData( VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPipelineCacheData( device, pipelineCache, pDataSize, pData ); + } + + VkResult vkGetPipelineExecutableInternalRepresentationsKHR( VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPipelineExecutableInternalRepresentationsKHR( device, pExecutableInfo, pInternalRepresentationCount, pInternalRepresentations ); + } + + VkResult vkGetPipelineExecutablePropertiesKHR( VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPipelineExecutablePropertiesKHR( device, pPipelineInfo, pExecutableCount, pProperties ); + } + + VkResult vkGetPipelineExecutableStatisticsKHR( VkDevice 
device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPipelineExecutableStatisticsKHR( device, pExecutableInfo, pStatisticCount, pStatistics ); + } + + void vkGetPrivateDataEXT( VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetPrivateDataEXT( device, objectType, objectHandle, privateDataSlot, pData ); + } + + VkResult vkGetQueryPoolResults( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetQueryPoolResults( device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags ); + } + + void vkGetQueueCheckpointDataNV( VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetQueueCheckpointDataNV( queue, pCheckpointDataCount, pCheckpointData ); + } + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + VkResult vkGetRandROutputDisplayEXT( VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRandROutputDisplayEXT( physicalDevice, dpy, rrOutput, pDisplay ); + } +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( device, pipeline, firstGroup, groupCount, dataSize, pData ); + } + + VkResult vkGetRayTracingShaderGroupHandlesKHR( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRayTracingShaderGroupHandlesKHR( device, pipeline, firstGroup, groupCount, dataSize, pData ); + } + + VkResult vkGetRayTracingShaderGroupHandlesNV( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRayTracingShaderGroupHandlesNV( device, pipeline, firstGroup, groupCount, dataSize, pData ); + } + + VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR( VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRayTracingShaderGroupStackSizeKHR( device, pipeline, group, groupShader ); + } + + VkResult vkGetRefreshCycleDurationGOOGLE( VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRefreshCycleDurationGOOGLE( device, swapchain, pDisplayTimingProperties ); + } + + void vkGetRenderAreaGranularity( VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetRenderAreaGranularity( device, renderPass, pGranularity ); + } + + VkResult vkGetSemaphoreCounterValue( VkDevice device, VkSemaphore semaphore, uint64_t* pValue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSemaphoreCounterValue( device, semaphore, pValue ); + } + + VkResult vkGetSemaphoreCounterValueKHR( VkDevice device, VkSemaphore semaphore, uint64_t* pValue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSemaphoreCounterValueKHR( device, 
semaphore, pValue ); + } + + VkResult vkGetSemaphoreFdKHR( VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSemaphoreFdKHR( device, pGetFdInfo, pFd ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkGetSemaphoreWin32HandleKHR( VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSemaphoreWin32HandleKHR( device, pGetWin32HandleInfo, pHandle ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkGetShaderInfoAMD( VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetShaderInfoAMD( device, pipeline, shaderStage, infoType, pInfoSize, pInfo ); + } + + VkResult vkGetSwapchainCounterEXT( VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSwapchainCounterEXT( device, swapchain, counter, pCounterValue ); + } + + VkResult vkGetSwapchainImagesKHR( VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSwapchainImagesKHR( device, swapchain, pSwapchainImageCount, pSwapchainImages ); + } + + VkResult vkGetSwapchainStatusKHR( VkDevice device, VkSwapchainKHR swapchain ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetSwapchainStatusKHR( device, swapchain ); + } + + VkResult vkGetValidationCacheDataEXT( VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkGetValidationCacheDataEXT( device, validationCache, pDataSize, pData ); + } + + VkResult vkImportFenceFdKHR( VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkImportFenceFdKHR( device, pImportFenceFdInfo ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkImportFenceWin32HandleKHR( VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkImportFenceWin32HandleKHR( device, pImportFenceWin32HandleInfo ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkImportSemaphoreFdKHR( VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkImportSemaphoreFdKHR( device, pImportSemaphoreFdInfo ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkImportSemaphoreWin32HandleKHR( VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkImportSemaphoreWin32HandleKHR( device, pImportSemaphoreWin32HandleInfo ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkInitializePerformanceApiINTEL( VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkInitializePerformanceApiINTEL( device, pInitializeInfo ); + } + + VkResult vkInvalidateMappedMemoryRanges( VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges ) const VULKAN_HPP_NOEXCEPT + { + return ::vkInvalidateMappedMemoryRanges( device, memoryRangeCount, pMemoryRanges ); + } + + VkResult vkMapMemory( VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkMapMemory( device, memory, 
offset, size, flags, ppData ); + } + + VkResult vkMergePipelineCaches( VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches ) const VULKAN_HPP_NOEXCEPT + { + return ::vkMergePipelineCaches( device, dstCache, srcCacheCount, pSrcCaches ); + } + + VkResult vkMergeValidationCachesEXT( VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches ) const VULKAN_HPP_NOEXCEPT + { + return ::vkMergeValidationCachesEXT( device, dstCache, srcCacheCount, pSrcCaches ); + } + + void vkQueueBeginDebugUtilsLabelEXT( VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueBeginDebugUtilsLabelEXT( queue, pLabelInfo ); + } + + VkResult vkQueueBindSparse( VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueBindSparse( queue, bindInfoCount, pBindInfo, fence ); + } + + void vkQueueEndDebugUtilsLabelEXT( VkQueue queue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueEndDebugUtilsLabelEXT( queue ); + } + + void vkQueueInsertDebugUtilsLabelEXT( VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueInsertDebugUtilsLabelEXT( queue, pLabelInfo ); + } + + VkResult vkQueuePresentKHR( VkQueue queue, const VkPresentInfoKHR* pPresentInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueuePresentKHR( queue, pPresentInfo ); + } + + VkResult vkQueueSetPerformanceConfigurationINTEL( VkQueue queue, VkPerformanceConfigurationINTEL configuration ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueSetPerformanceConfigurationINTEL( queue, configuration ); + } + + VkResult vkQueueSubmit( VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueSubmit( queue, submitCount, pSubmits, fence ); + } + + VkResult vkQueueWaitIdle( VkQueue queue ) const VULKAN_HPP_NOEXCEPT + { + return ::vkQueueWaitIdle( queue ); + } + + VkResult vkRegisterDeviceEventEXT( VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence ) const VULKAN_HPP_NOEXCEPT + { + return ::vkRegisterDeviceEventEXT( device, pDeviceEventInfo, pAllocator, pFence ); + } + + VkResult vkRegisterDisplayEventEXT( VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence ) const VULKAN_HPP_NOEXCEPT + { + return ::vkRegisterDisplayEventEXT( device, display, pDisplayEventInfo, pAllocator, pFence ); + } + + VkResult vkReleaseDisplayEXT( VkPhysicalDevice physicalDevice, VkDisplayKHR display ) const VULKAN_HPP_NOEXCEPT + { + return ::vkReleaseDisplayEXT( physicalDevice, display ); + } + +#ifdef VK_USE_PLATFORM_WIN32_KHR + VkResult vkReleaseFullScreenExclusiveModeEXT( VkDevice device, VkSwapchainKHR swapchain ) const VULKAN_HPP_NOEXCEPT + { + return ::vkReleaseFullScreenExclusiveModeEXT( device, swapchain ); + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + VkResult vkReleasePerformanceConfigurationINTEL( VkDevice device, VkPerformanceConfigurationINTEL configuration ) const VULKAN_HPP_NOEXCEPT + { + return ::vkReleasePerformanceConfigurationINTEL( device, configuration ); + } + + void vkReleaseProfilingLockKHR( VkDevice device ) const VULKAN_HPP_NOEXCEPT + { + return ::vkReleaseProfilingLockKHR( device ); + } + + VkResult vkResetCommandBuffer( VkCommandBuffer commandBuffer, VkCommandBufferResetFlags 
flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetCommandBuffer( commandBuffer, flags ); + } + + VkResult vkResetCommandPool( VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetCommandPool( device, commandPool, flags ); + } + + VkResult vkResetDescriptorPool( VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetDescriptorPool( device, descriptorPool, flags ); + } + + VkResult vkResetEvent( VkDevice device, VkEvent event ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetEvent( device, event ); + } + + VkResult vkResetFences( VkDevice device, uint32_t fenceCount, const VkFence* pFences ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetFences( device, fenceCount, pFences ); + } + + void vkResetQueryPool( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetQueryPool( device, queryPool, firstQuery, queryCount ); + } + + void vkResetQueryPoolEXT( VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount ) const VULKAN_HPP_NOEXCEPT + { + return ::vkResetQueryPoolEXT( device, queryPool, firstQuery, queryCount ); + } + + VkResult vkSetDebugUtilsObjectNameEXT( VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSetDebugUtilsObjectNameEXT( device, pNameInfo ); + } + + VkResult vkSetDebugUtilsObjectTagEXT( VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSetDebugUtilsObjectTagEXT( device, pTagInfo ); + } + + VkResult vkSetEvent( VkDevice device, VkEvent event ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSetEvent( device, event ); + } + + void vkSetHdrMetadataEXT( VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSetHdrMetadataEXT( device, swapchainCount, pSwapchains, pMetadata ); + } + + void vkSetLocalDimmingAMD( VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSetLocalDimmingAMD( device, swapChain, localDimmingEnable ); + } + + VkResult vkSetPrivateDataEXT( VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSetPrivateDataEXT( device, objectType, objectHandle, privateDataSlot, data ); + } + + VkResult vkSignalSemaphore( VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSignalSemaphore( device, pSignalInfo ); + } + + VkResult vkSignalSemaphoreKHR( VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSignalSemaphoreKHR( device, pSignalInfo ); + } + + void vkSubmitDebugUtilsMessageEXT( VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkSubmitDebugUtilsMessageEXT( instance, messageSeverity, messageTypes, pCallbackData ); + } + + void vkTrimCommandPool( VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkTrimCommandPool( device, commandPool, flags ); + } + + void vkTrimCommandPoolKHR( VkDevice device, VkCommandPool 
commandPool, VkCommandPoolTrimFlags flags ) const VULKAN_HPP_NOEXCEPT + { + return ::vkTrimCommandPoolKHR( device, commandPool, flags ); + } + + void vkUninitializePerformanceApiINTEL( VkDevice device ) const VULKAN_HPP_NOEXCEPT + { + return ::vkUninitializePerformanceApiINTEL( device ); + } + + void vkUnmapMemory( VkDevice device, VkDeviceMemory memory ) const VULKAN_HPP_NOEXCEPT + { + return ::vkUnmapMemory( device, memory ); + } + + void vkUpdateDescriptorSetWithTemplate( VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkUpdateDescriptorSetWithTemplate( device, descriptorSet, descriptorUpdateTemplate, pData ); + } + + void vkUpdateDescriptorSetWithTemplateKHR( VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData ) const VULKAN_HPP_NOEXCEPT + { + return ::vkUpdateDescriptorSetWithTemplateKHR( device, descriptorSet, descriptorUpdateTemplate, pData ); + } + + void vkUpdateDescriptorSets( VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies ) const VULKAN_HPP_NOEXCEPT + { + return ::vkUpdateDescriptorSets( device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies ); + } + + VkResult vkWaitForFences( VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkWaitForFences( device, fenceCount, pFences, waitAll, timeout ); + } + + VkResult vkWaitSemaphores( VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkWaitSemaphores( device, pWaitInfo, timeout ); + } + + VkResult vkWaitSemaphoresKHR( VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout ) const VULKAN_HPP_NOEXCEPT + { + return ::vkWaitSemaphoresKHR( device, pWaitInfo, timeout ); + } + + VkResult vkWriteAccelerationStructuresPropertiesKHR( VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride ) const VULKAN_HPP_NOEXCEPT + { + return ::vkWriteAccelerationStructuresPropertiesKHR( device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride ); + } + }; +#endif + + class DispatchLoaderDynamic; +#if !defined(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC) +# if defined(VK_NO_PROTOTYPES) +# define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1 +# else +# define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 0 +# endif +#endif + +#if defined(_WIN32) && defined(VULKAN_HPP_STORAGE_SHARED) +# ifdef VULKAN_HPP_STORAGE_SHARED_EXPORT +# define VULKAN_HPP_STORAGE_API __declspec( dllexport ) +# else +# define VULKAN_HPP_STORAGE_API __declspec( dllimport ) +# endif +#else +# define VULKAN_HPP_STORAGE_API +#endif + +#if !defined(VULKAN_HPP_DEFAULT_DISPATCHER) +# if VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1 +# define VULKAN_HPP_DEFAULT_DISPATCHER ::VULKAN_HPP_NAMESPACE::defaultDispatchLoaderDynamic +# define VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE namespace VULKAN_HPP_NAMESPACE { VULKAN_HPP_STORAGE_API DispatchLoaderDynamic defaultDispatchLoaderDynamic; } + extern VULKAN_HPP_STORAGE_API DispatchLoaderDynamic defaultDispatchLoaderDynamic; +# else +# define VULKAN_HPP_DEFAULT_DISPATCHER ::VULKAN_HPP_NAMESPACE::DispatchLoaderStatic() +# 
define VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
+# endif
+#endif
+
+#if !defined(VULKAN_HPP_DEFAULT_DISPATCHER_TYPE)
+# if VULKAN_HPP_DISPATCH_LOADER_DYNAMIC == 1
+#  define VULKAN_HPP_DEFAULT_DISPATCHER_TYPE ::VULKAN_HPP_NAMESPACE::DispatchLoaderDynamic
+# else
+#  define VULKAN_HPP_DEFAULT_DISPATCHER_TYPE ::VULKAN_HPP_NAMESPACE::DispatchLoaderStatic
+# endif
+#endif
+
+#if defined( VULKAN_HPP_NO_DEFAULT_DISPATCHER )
+# define VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT
+# define VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT
+# define VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT
+#else
+# define VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT = {}
+# define VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT = nullptr
+# define VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT = VULKAN_HPP_DEFAULT_DISPATCHER
+#endif
+
+  struct AllocationCallbacks;
+
+  template <typename OwnerType, typename Dispatch>
+  class ObjectDestroy
+  {
+  public:
+    ObjectDestroy() = default;
+
+    ObjectDestroy( OwnerType owner,
+                   Optional<const AllocationCallbacks> allocationCallbacks
+                     VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT,
+                   Dispatch const & dispatch = VULKAN_HPP_DEFAULT_DISPATCHER ) VULKAN_HPP_NOEXCEPT
+      : m_owner( owner )
+      , m_allocationCallbacks( allocationCallbacks )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const VULKAN_HPP_NOEXCEPT { return m_owner; }
+    Optional<const AllocationCallbacks> getAllocator() const VULKAN_HPP_NOEXCEPT { return m_allocationCallbacks; }
+
+  protected:
+    template <typename T>
+    void destroy(T t) VULKAN_HPP_NOEXCEPT
+    {
+      VULKAN_HPP_ASSERT( m_owner && m_dispatch );
+      m_owner.destroy( t, m_allocationCallbacks, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner = {};
+    Optional<const AllocationCallbacks> m_allocationCallbacks = nullptr;
+    Dispatch const * m_dispatch = nullptr;
+  };
+
+  class NoParent;
+
+  template <typename Dispatch>
+  class ObjectDestroy<NoParent, Dispatch>
+  {
+  public:
+    ObjectDestroy() = default;
+
+    ObjectDestroy( Optional<const AllocationCallbacks> allocationCallbacks,
+                   Dispatch const & dispatch = VULKAN_HPP_DEFAULT_DISPATCHER ) VULKAN_HPP_NOEXCEPT
+      : m_allocationCallbacks( allocationCallbacks )
+      , m_dispatch( &dispatch )
+    {}
+
+    Optional<const AllocationCallbacks> getAllocator() const VULKAN_HPP_NOEXCEPT { return m_allocationCallbacks; }
+
+  protected:
+    template <typename T>
+    void destroy(T t) VULKAN_HPP_NOEXCEPT
+    {
+      VULKAN_HPP_ASSERT( m_dispatch );
+      t.destroy( m_allocationCallbacks, *m_dispatch );
+    }
+
+  private:
+    Optional<const AllocationCallbacks> m_allocationCallbacks = nullptr;
+    Dispatch const * m_dispatch = nullptr;
+  };
+
+  template <typename OwnerType, typename Dispatch>
+  class ObjectFree
+  {
+  public:
+    ObjectFree() = default;
+
+    ObjectFree( OwnerType owner,
+                Optional<const AllocationCallbacks> allocationCallbacks VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT,
+                Dispatch const & dispatch = VULKAN_HPP_DEFAULT_DISPATCHER ) VULKAN_HPP_NOEXCEPT
+      : m_owner( owner )
+      , m_allocationCallbacks( allocationCallbacks )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_owner;
+    }
+
+    Optional<const AllocationCallbacks> getAllocator() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_allocationCallbacks;
+    }
+
+  protected:
+    template <typename T>
+    void destroy( T t ) VULKAN_HPP_NOEXCEPT
+    {
+      VULKAN_HPP_ASSERT( m_owner && m_dispatch );
+      m_owner.free( t, m_allocationCallbacks, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner = {};
+    Optional<const AllocationCallbacks> m_allocationCallbacks = nullptr;
+    Dispatch const * m_dispatch = nullptr;
+  };
+
+  template <typename OwnerType, typename Dispatch>
+  class ObjectRelease
+  {
+  public:
+    ObjectRelease() = default;
+
+    ObjectRelease( OwnerType owner, Dispatch const & dispatch = VULKAN_HPP_DEFAULT_DISPATCHER ) VULKAN_HPP_NOEXCEPT
+      : m_owner( owner )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_owner;
+    }
+
+  protected:
+    template <typename T>
+    void destroy( T t ) VULKAN_HPP_NOEXCEPT
+    {
+      VULKAN_HPP_ASSERT( m_owner && m_dispatch );
+      m_owner.release( t, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner = {};
+    Dispatch const * m_dispatch = nullptr;
+  };
+
+  template <typename OwnerType, typename PoolType, typename Dispatch>
+  class PoolFree
+  {
+  public:
+    PoolFree() = default;
+
+    PoolFree( OwnerType owner,
+              PoolType pool,
+              Dispatch const & dispatch = VULKAN_HPP_DEFAULT_DISPATCHER ) VULKAN_HPP_NOEXCEPT
+      : m_owner( owner )
+      , m_pool( pool )
+      , m_dispatch( &dispatch )
+    {}
+
+    OwnerType getOwner() const VULKAN_HPP_NOEXCEPT { return m_owner; }
+    PoolType getPool() const VULKAN_HPP_NOEXCEPT { return m_pool; }
+
+  protected:
+    template <typename T>
+    void destroy(T t) VULKAN_HPP_NOEXCEPT
+    {
+      m_owner.free( m_pool, t, *m_dispatch );
+    }
+
+  private:
+    OwnerType m_owner = OwnerType();
+    PoolType m_pool = PoolType();
+    Dispatch const * m_dispatch = nullptr;
+  };
+
+  using Bool32 = uint32_t;
+  using DeviceAddress = uint64_t;
+  using DeviceSize = uint64_t;
+  using SampleMask = uint32_t;
+
+  template <typename EnumType, EnumType value>
+  struct CppType
+  {};
+
+  template <typename T>
+  struct isVulkanHandleType
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = false;
+  };
+
+  VULKAN_HPP_INLINE std::string toHexString( uint32_t value )
+  {
+    std::stringstream stream;
+    stream << std::hex << value;
+    return stream.str();
+  }
+
+  enum class AccelerationStructureBuildTypeKHR
+  {
+    eHost = VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR,
+    eDevice = VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR,
+    eHostOrDevice = VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( AccelerationStructureBuildTypeKHR value )
+  {
+    switch ( value )
+    {
+      case AccelerationStructureBuildTypeKHR::eHost : return "Host";
+      case AccelerationStructureBuildTypeKHR::eDevice : return "Device";
+      case AccelerationStructureBuildTypeKHR::eHostOrDevice : return "HostOrDevice";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class AccelerationStructureCompatibilityKHR
+  {
+    eCompatible = VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR,
+    eIncompatible = VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( AccelerationStructureCompatibilityKHR value )
+  {
+    switch ( value )
+    {
+      case AccelerationStructureCompatibilityKHR::eCompatible : return "Compatible";
+      case AccelerationStructureCompatibilityKHR::eIncompatible : return "Incompatible";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class AccelerationStructureCreateFlagBitsKHR : VkAccelerationStructureCreateFlagsKHR
+  {
+    eDeviceAddressCaptureReplay = VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( AccelerationStructureCreateFlagBitsKHR value )
+  {
+    switch ( value )
+    {
+      case AccelerationStructureCreateFlagBitsKHR::eDeviceAddressCaptureReplay : return "DeviceAddressCaptureReplay";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class AccelerationStructureMemoryRequirementsTypeNV
+  {
+    eObject = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV,
+    eBuildScratch = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV,
+    eUpdateScratch = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( AccelerationStructureMemoryRequirementsTypeNV value )
+  {
+    switch ( 
value ) + { + case AccelerationStructureMemoryRequirementsTypeNV::eObject : return "Object"; + case AccelerationStructureMemoryRequirementsTypeNV::eBuildScratch : return "BuildScratch"; + case AccelerationStructureMemoryRequirementsTypeNV::eUpdateScratch : return "UpdateScratch"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class AccelerationStructureTypeKHR + { + eTopLevel = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, + eBottomLevel = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, + eGeneric = VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR + }; + using AccelerationStructureTypeNV = AccelerationStructureTypeKHR; + + VULKAN_HPP_INLINE std::string to_string( AccelerationStructureTypeKHR value ) + { + switch ( value ) + { + case AccelerationStructureTypeKHR::eTopLevel : return "TopLevel"; + case AccelerationStructureTypeKHR::eBottomLevel : return "BottomLevel"; + case AccelerationStructureTypeKHR::eGeneric : return "Generic"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class AccessFlagBits : VkAccessFlags + { + eIndirectCommandRead = VK_ACCESS_INDIRECT_COMMAND_READ_BIT, + eIndexRead = VK_ACCESS_INDEX_READ_BIT, + eVertexAttributeRead = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, + eUniformRead = VK_ACCESS_UNIFORM_READ_BIT, + eInputAttachmentRead = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, + eShaderRead = VK_ACCESS_SHADER_READ_BIT, + eShaderWrite = VK_ACCESS_SHADER_WRITE_BIT, + eColorAttachmentRead = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, + eColorAttachmentWrite = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + eDepthStencilAttachmentRead = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, + eDepthStencilAttachmentWrite = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, + eTransferRead = VK_ACCESS_TRANSFER_READ_BIT, + eTransferWrite = VK_ACCESS_TRANSFER_WRITE_BIT, + eHostRead = VK_ACCESS_HOST_READ_BIT, + eHostWrite = VK_ACCESS_HOST_WRITE_BIT, + eMemoryRead = VK_ACCESS_MEMORY_READ_BIT, + eMemoryWrite = VK_ACCESS_MEMORY_WRITE_BIT, + eTransformFeedbackWriteEXT = VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, + eTransformFeedbackCounterReadEXT = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT, + eTransformFeedbackCounterWriteEXT = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT, + eConditionalRenderingReadEXT = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT, + eColorAttachmentReadNoncoherentEXT = VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT, + eAccelerationStructureReadKHR = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, + eAccelerationStructureWriteKHR = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + eShadingRateImageReadNV = VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV, + eFragmentDensityMapReadEXT = VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT, + eCommandPreprocessReadNV = VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV, + eCommandPreprocessWriteNV = VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV, + eAccelerationStructureReadNV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV, + eAccelerationStructureWriteNV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV, + eFragmentShadingRateAttachmentReadKHR = VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( AccessFlagBits value ) + { + switch ( value ) + { + case AccessFlagBits::eIndirectCommandRead : return "IndirectCommandRead"; + case AccessFlagBits::eIndexRead : return "IndexRead"; + case AccessFlagBits::eVertexAttributeRead : return "VertexAttributeRead"; + case AccessFlagBits::eUniformRead : 
return "UniformRead"; + case AccessFlagBits::eInputAttachmentRead : return "InputAttachmentRead"; + case AccessFlagBits::eShaderRead : return "ShaderRead"; + case AccessFlagBits::eShaderWrite : return "ShaderWrite"; + case AccessFlagBits::eColorAttachmentRead : return "ColorAttachmentRead"; + case AccessFlagBits::eColorAttachmentWrite : return "ColorAttachmentWrite"; + case AccessFlagBits::eDepthStencilAttachmentRead : return "DepthStencilAttachmentRead"; + case AccessFlagBits::eDepthStencilAttachmentWrite : return "DepthStencilAttachmentWrite"; + case AccessFlagBits::eTransferRead : return "TransferRead"; + case AccessFlagBits::eTransferWrite : return "TransferWrite"; + case AccessFlagBits::eHostRead : return "HostRead"; + case AccessFlagBits::eHostWrite : return "HostWrite"; + case AccessFlagBits::eMemoryRead : return "MemoryRead"; + case AccessFlagBits::eMemoryWrite : return "MemoryWrite"; + case AccessFlagBits::eTransformFeedbackWriteEXT : return "TransformFeedbackWriteEXT"; + case AccessFlagBits::eTransformFeedbackCounterReadEXT : return "TransformFeedbackCounterReadEXT"; + case AccessFlagBits::eTransformFeedbackCounterWriteEXT : return "TransformFeedbackCounterWriteEXT"; + case AccessFlagBits::eConditionalRenderingReadEXT : return "ConditionalRenderingReadEXT"; + case AccessFlagBits::eColorAttachmentReadNoncoherentEXT : return "ColorAttachmentReadNoncoherentEXT"; + case AccessFlagBits::eAccelerationStructureReadKHR : return "AccelerationStructureReadKHR"; + case AccessFlagBits::eAccelerationStructureWriteKHR : return "AccelerationStructureWriteKHR"; + case AccessFlagBits::eShadingRateImageReadNV : return "ShadingRateImageReadNV"; + case AccessFlagBits::eFragmentDensityMapReadEXT : return "FragmentDensityMapReadEXT"; + case AccessFlagBits::eCommandPreprocessReadNV : return "CommandPreprocessReadNV"; + case AccessFlagBits::eCommandPreprocessWriteNV : return "CommandPreprocessWriteNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class AcquireProfilingLockFlagBitsKHR : VkAcquireProfilingLockFlagsKHR + {}; + + VULKAN_HPP_INLINE std::string to_string( AcquireProfilingLockFlagBitsKHR ) + { + return "(void)"; + } + + enum class AttachmentDescriptionFlagBits : VkAttachmentDescriptionFlags + { + eMayAlias = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( AttachmentDescriptionFlagBits value ) + { + switch ( value ) + { + case AttachmentDescriptionFlagBits::eMayAlias : return "MayAlias"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class AttachmentLoadOp + { + eLoad = VK_ATTACHMENT_LOAD_OP_LOAD, + eClear = VK_ATTACHMENT_LOAD_OP_CLEAR, + eDontCare = VK_ATTACHMENT_LOAD_OP_DONT_CARE + }; + + VULKAN_HPP_INLINE std::string to_string( AttachmentLoadOp value ) + { + switch ( value ) + { + case AttachmentLoadOp::eLoad : return "Load"; + case AttachmentLoadOp::eClear : return "Clear"; + case AttachmentLoadOp::eDontCare : return "DontCare"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class AttachmentStoreOp + { + eStore = VK_ATTACHMENT_STORE_OP_STORE, + eDontCare = VK_ATTACHMENT_STORE_OP_DONT_CARE, + eNoneQCOM = VK_ATTACHMENT_STORE_OP_NONE_QCOM + }; + + VULKAN_HPP_INLINE std::string to_string( AttachmentStoreOp value ) + { + switch ( value ) + { + case AttachmentStoreOp::eStore : return "Store"; + case AttachmentStoreOp::eDontCare : return "DontCare"; + 
case AttachmentStoreOp::eNoneQCOM : return "NoneQCOM"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BlendFactor + { + eZero = VK_BLEND_FACTOR_ZERO, + eOne = VK_BLEND_FACTOR_ONE, + eSrcColor = VK_BLEND_FACTOR_SRC_COLOR, + eOneMinusSrcColor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, + eDstColor = VK_BLEND_FACTOR_DST_COLOR, + eOneMinusDstColor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR, + eSrcAlpha = VK_BLEND_FACTOR_SRC_ALPHA, + eOneMinusSrcAlpha = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, + eDstAlpha = VK_BLEND_FACTOR_DST_ALPHA, + eOneMinusDstAlpha = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, + eConstantColor = VK_BLEND_FACTOR_CONSTANT_COLOR, + eOneMinusConstantColor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, + eConstantAlpha = VK_BLEND_FACTOR_CONSTANT_ALPHA, + eOneMinusConstantAlpha = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, + eSrcAlphaSaturate = VK_BLEND_FACTOR_SRC_ALPHA_SATURATE, + eSrc1Color = VK_BLEND_FACTOR_SRC1_COLOR, + eOneMinusSrc1Color = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR, + eSrc1Alpha = VK_BLEND_FACTOR_SRC1_ALPHA, + eOneMinusSrc1Alpha = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA + }; + + VULKAN_HPP_INLINE std::string to_string( BlendFactor value ) + { + switch ( value ) + { + case BlendFactor::eZero : return "Zero"; + case BlendFactor::eOne : return "One"; + case BlendFactor::eSrcColor : return "SrcColor"; + case BlendFactor::eOneMinusSrcColor : return "OneMinusSrcColor"; + case BlendFactor::eDstColor : return "DstColor"; + case BlendFactor::eOneMinusDstColor : return "OneMinusDstColor"; + case BlendFactor::eSrcAlpha : return "SrcAlpha"; + case BlendFactor::eOneMinusSrcAlpha : return "OneMinusSrcAlpha"; + case BlendFactor::eDstAlpha : return "DstAlpha"; + case BlendFactor::eOneMinusDstAlpha : return "OneMinusDstAlpha"; + case BlendFactor::eConstantColor : return "ConstantColor"; + case BlendFactor::eOneMinusConstantColor : return "OneMinusConstantColor"; + case BlendFactor::eConstantAlpha : return "ConstantAlpha"; + case BlendFactor::eOneMinusConstantAlpha : return "OneMinusConstantAlpha"; + case BlendFactor::eSrcAlphaSaturate : return "SrcAlphaSaturate"; + case BlendFactor::eSrc1Color : return "Src1Color"; + case BlendFactor::eOneMinusSrc1Color : return "OneMinusSrc1Color"; + case BlendFactor::eSrc1Alpha : return "Src1Alpha"; + case BlendFactor::eOneMinusSrc1Alpha : return "OneMinusSrc1Alpha"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BlendOp + { + eAdd = VK_BLEND_OP_ADD, + eSubtract = VK_BLEND_OP_SUBTRACT, + eReverseSubtract = VK_BLEND_OP_REVERSE_SUBTRACT, + eMin = VK_BLEND_OP_MIN, + eMax = VK_BLEND_OP_MAX, + eZeroEXT = VK_BLEND_OP_ZERO_EXT, + eSrcEXT = VK_BLEND_OP_SRC_EXT, + eDstEXT = VK_BLEND_OP_DST_EXT, + eSrcOverEXT = VK_BLEND_OP_SRC_OVER_EXT, + eDstOverEXT = VK_BLEND_OP_DST_OVER_EXT, + eSrcInEXT = VK_BLEND_OP_SRC_IN_EXT, + eDstInEXT = VK_BLEND_OP_DST_IN_EXT, + eSrcOutEXT = VK_BLEND_OP_SRC_OUT_EXT, + eDstOutEXT = VK_BLEND_OP_DST_OUT_EXT, + eSrcAtopEXT = VK_BLEND_OP_SRC_ATOP_EXT, + eDstAtopEXT = VK_BLEND_OP_DST_ATOP_EXT, + eXorEXT = VK_BLEND_OP_XOR_EXT, + eMultiplyEXT = VK_BLEND_OP_MULTIPLY_EXT, + eScreenEXT = VK_BLEND_OP_SCREEN_EXT, + eOverlayEXT = VK_BLEND_OP_OVERLAY_EXT, + eDarkenEXT = VK_BLEND_OP_DARKEN_EXT, + eLightenEXT = VK_BLEND_OP_LIGHTEN_EXT, + eColordodgeEXT = VK_BLEND_OP_COLORDODGE_EXT, + eColorburnEXT = VK_BLEND_OP_COLORBURN_EXT, + eHardlightEXT = VK_BLEND_OP_HARDLIGHT_EXT, + eSoftlightEXT = VK_BLEND_OP_SOFTLIGHT_EXT, + 
eDifferenceEXT = VK_BLEND_OP_DIFFERENCE_EXT, + eExclusionEXT = VK_BLEND_OP_EXCLUSION_EXT, + eInvertEXT = VK_BLEND_OP_INVERT_EXT, + eInvertRgbEXT = VK_BLEND_OP_INVERT_RGB_EXT, + eLineardodgeEXT = VK_BLEND_OP_LINEARDODGE_EXT, + eLinearburnEXT = VK_BLEND_OP_LINEARBURN_EXT, + eVividlightEXT = VK_BLEND_OP_VIVIDLIGHT_EXT, + eLinearlightEXT = VK_BLEND_OP_LINEARLIGHT_EXT, + ePinlightEXT = VK_BLEND_OP_PINLIGHT_EXT, + eHardmixEXT = VK_BLEND_OP_HARDMIX_EXT, + eHslHueEXT = VK_BLEND_OP_HSL_HUE_EXT, + eHslSaturationEXT = VK_BLEND_OP_HSL_SATURATION_EXT, + eHslColorEXT = VK_BLEND_OP_HSL_COLOR_EXT, + eHslLuminosityEXT = VK_BLEND_OP_HSL_LUMINOSITY_EXT, + ePlusEXT = VK_BLEND_OP_PLUS_EXT, + ePlusClampedEXT = VK_BLEND_OP_PLUS_CLAMPED_EXT, + ePlusClampedAlphaEXT = VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT, + ePlusDarkerEXT = VK_BLEND_OP_PLUS_DARKER_EXT, + eMinusEXT = VK_BLEND_OP_MINUS_EXT, + eMinusClampedEXT = VK_BLEND_OP_MINUS_CLAMPED_EXT, + eContrastEXT = VK_BLEND_OP_CONTRAST_EXT, + eInvertOvgEXT = VK_BLEND_OP_INVERT_OVG_EXT, + eRedEXT = VK_BLEND_OP_RED_EXT, + eGreenEXT = VK_BLEND_OP_GREEN_EXT, + eBlueEXT = VK_BLEND_OP_BLUE_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( BlendOp value ) + { + switch ( value ) + { + case BlendOp::eAdd : return "Add"; + case BlendOp::eSubtract : return "Subtract"; + case BlendOp::eReverseSubtract : return "ReverseSubtract"; + case BlendOp::eMin : return "Min"; + case BlendOp::eMax : return "Max"; + case BlendOp::eZeroEXT : return "ZeroEXT"; + case BlendOp::eSrcEXT : return "SrcEXT"; + case BlendOp::eDstEXT : return "DstEXT"; + case BlendOp::eSrcOverEXT : return "SrcOverEXT"; + case BlendOp::eDstOverEXT : return "DstOverEXT"; + case BlendOp::eSrcInEXT : return "SrcInEXT"; + case BlendOp::eDstInEXT : return "DstInEXT"; + case BlendOp::eSrcOutEXT : return "SrcOutEXT"; + case BlendOp::eDstOutEXT : return "DstOutEXT"; + case BlendOp::eSrcAtopEXT : return "SrcAtopEXT"; + case BlendOp::eDstAtopEXT : return "DstAtopEXT"; + case BlendOp::eXorEXT : return "XorEXT"; + case BlendOp::eMultiplyEXT : return "MultiplyEXT"; + case BlendOp::eScreenEXT : return "ScreenEXT"; + case BlendOp::eOverlayEXT : return "OverlayEXT"; + case BlendOp::eDarkenEXT : return "DarkenEXT"; + case BlendOp::eLightenEXT : return "LightenEXT"; + case BlendOp::eColordodgeEXT : return "ColordodgeEXT"; + case BlendOp::eColorburnEXT : return "ColorburnEXT"; + case BlendOp::eHardlightEXT : return "HardlightEXT"; + case BlendOp::eSoftlightEXT : return "SoftlightEXT"; + case BlendOp::eDifferenceEXT : return "DifferenceEXT"; + case BlendOp::eExclusionEXT : return "ExclusionEXT"; + case BlendOp::eInvertEXT : return "InvertEXT"; + case BlendOp::eInvertRgbEXT : return "InvertRgbEXT"; + case BlendOp::eLineardodgeEXT : return "LineardodgeEXT"; + case BlendOp::eLinearburnEXT : return "LinearburnEXT"; + case BlendOp::eVividlightEXT : return "VividlightEXT"; + case BlendOp::eLinearlightEXT : return "LinearlightEXT"; + case BlendOp::ePinlightEXT : return "PinlightEXT"; + case BlendOp::eHardmixEXT : return "HardmixEXT"; + case BlendOp::eHslHueEXT : return "HslHueEXT"; + case BlendOp::eHslSaturationEXT : return "HslSaturationEXT"; + case BlendOp::eHslColorEXT : return "HslColorEXT"; + case BlendOp::eHslLuminosityEXT : return "HslLuminosityEXT"; + case BlendOp::ePlusEXT : return "PlusEXT"; + case BlendOp::ePlusClampedEXT : return "PlusClampedEXT"; + case BlendOp::ePlusClampedAlphaEXT : return "PlusClampedAlphaEXT"; + case BlendOp::ePlusDarkerEXT : return "PlusDarkerEXT"; + case BlendOp::eMinusEXT : return "MinusEXT"; + case 
BlendOp::eMinusClampedEXT : return "MinusClampedEXT"; + case BlendOp::eContrastEXT : return "ContrastEXT"; + case BlendOp::eInvertOvgEXT : return "InvertOvgEXT"; + case BlendOp::eRedEXT : return "RedEXT"; + case BlendOp::eGreenEXT : return "GreenEXT"; + case BlendOp::eBlueEXT : return "BlueEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BlendOverlapEXT + { + eUncorrelated = VK_BLEND_OVERLAP_UNCORRELATED_EXT, + eDisjoint = VK_BLEND_OVERLAP_DISJOINT_EXT, + eConjoint = VK_BLEND_OVERLAP_CONJOINT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( BlendOverlapEXT value ) + { + switch ( value ) + { + case BlendOverlapEXT::eUncorrelated : return "Uncorrelated"; + case BlendOverlapEXT::eDisjoint : return "Disjoint"; + case BlendOverlapEXT::eConjoint : return "Conjoint"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BorderColor + { + eFloatTransparentBlack = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, + eIntTransparentBlack = VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, + eFloatOpaqueBlack = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK, + eIntOpaqueBlack = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + eFloatOpaqueWhite = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE, + eIntOpaqueWhite = VK_BORDER_COLOR_INT_OPAQUE_WHITE, + eFloatCustomEXT = VK_BORDER_COLOR_FLOAT_CUSTOM_EXT, + eIntCustomEXT = VK_BORDER_COLOR_INT_CUSTOM_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( BorderColor value ) + { + switch ( value ) + { + case BorderColor::eFloatTransparentBlack : return "FloatTransparentBlack"; + case BorderColor::eIntTransparentBlack : return "IntTransparentBlack"; + case BorderColor::eFloatOpaqueBlack : return "FloatOpaqueBlack"; + case BorderColor::eIntOpaqueBlack : return "IntOpaqueBlack"; + case BorderColor::eFloatOpaqueWhite : return "FloatOpaqueWhite"; + case BorderColor::eIntOpaqueWhite : return "IntOpaqueWhite"; + case BorderColor::eFloatCustomEXT : return "FloatCustomEXT"; + case BorderColor::eIntCustomEXT : return "IntCustomEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BufferCreateFlagBits : VkBufferCreateFlags + { + eSparseBinding = VK_BUFFER_CREATE_SPARSE_BINDING_BIT, + eSparseResidency = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT, + eSparseAliased = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT, + eProtected = VK_BUFFER_CREATE_PROTECTED_BIT, + eDeviceAddressCaptureReplay = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + eDeviceAddressCaptureReplayEXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT, + eDeviceAddressCaptureReplayKHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( BufferCreateFlagBits value ) + { + switch ( value ) + { + case BufferCreateFlagBits::eSparseBinding : return "SparseBinding"; + case BufferCreateFlagBits::eSparseResidency : return "SparseResidency"; + case BufferCreateFlagBits::eSparseAliased : return "SparseAliased"; + case BufferCreateFlagBits::eProtected : return "Protected"; + case BufferCreateFlagBits::eDeviceAddressCaptureReplay : return "DeviceAddressCaptureReplay"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BufferUsageFlagBits : VkBufferUsageFlags + { + eTransferSrc = VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + eTransferDst = VK_BUFFER_USAGE_TRANSFER_DST_BIT, + eUniformTexelBuffer = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, + 
eStorageTexelBuffer = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, + eUniformBuffer = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, + eStorageBuffer = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, + eIndexBuffer = VK_BUFFER_USAGE_INDEX_BUFFER_BIT, + eVertexBuffer = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + eIndirectBuffer = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, + eShaderDeviceAddress = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + eTransformFeedbackBufferEXT = VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT, + eTransformFeedbackCounterBufferEXT = VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT, + eConditionalRenderingEXT = VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT, + eAccelerationStructureBuildInputReadOnlyKHR = VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR, + eAccelerationStructureStorageKHR = VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR, + eShaderBindingTableKHR = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, + eRayTracingNV = VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, + eShaderDeviceAddressEXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT, + eShaderDeviceAddressKHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( BufferUsageFlagBits value ) + { + switch ( value ) + { + case BufferUsageFlagBits::eTransferSrc : return "TransferSrc"; + case BufferUsageFlagBits::eTransferDst : return "TransferDst"; + case BufferUsageFlagBits::eUniformTexelBuffer : return "UniformTexelBuffer"; + case BufferUsageFlagBits::eStorageTexelBuffer : return "StorageTexelBuffer"; + case BufferUsageFlagBits::eUniformBuffer : return "UniformBuffer"; + case BufferUsageFlagBits::eStorageBuffer : return "StorageBuffer"; + case BufferUsageFlagBits::eIndexBuffer : return "IndexBuffer"; + case BufferUsageFlagBits::eVertexBuffer : return "VertexBuffer"; + case BufferUsageFlagBits::eIndirectBuffer : return "IndirectBuffer"; + case BufferUsageFlagBits::eShaderDeviceAddress : return "ShaderDeviceAddress"; + case BufferUsageFlagBits::eTransformFeedbackBufferEXT : return "TransformFeedbackBufferEXT"; + case BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT : return "TransformFeedbackCounterBufferEXT"; + case BufferUsageFlagBits::eConditionalRenderingEXT : return "ConditionalRenderingEXT"; + case BufferUsageFlagBits::eAccelerationStructureBuildInputReadOnlyKHR : return "AccelerationStructureBuildInputReadOnlyKHR"; + case BufferUsageFlagBits::eAccelerationStructureStorageKHR : return "AccelerationStructureStorageKHR"; + case BufferUsageFlagBits::eShaderBindingTableKHR : return "ShaderBindingTableKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BuildAccelerationStructureFlagBitsKHR : VkBuildAccelerationStructureFlagsKHR + { + eAllowUpdate = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR, + eAllowCompaction = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR, + ePreferFastTrace = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, + ePreferFastBuild = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR, + eLowMemory = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR + }; + using BuildAccelerationStructureFlagBitsNV = BuildAccelerationStructureFlagBitsKHR; + + VULKAN_HPP_INLINE std::string to_string( BuildAccelerationStructureFlagBitsKHR value ) + { + switch ( value ) + { + case BuildAccelerationStructureFlagBitsKHR::eAllowUpdate : return "AllowUpdate"; + case BuildAccelerationStructureFlagBitsKHR::eAllowCompaction : return "AllowCompaction"; + case 
BuildAccelerationStructureFlagBitsKHR::ePreferFastTrace : return "PreferFastTrace"; + case BuildAccelerationStructureFlagBitsKHR::ePreferFastBuild : return "PreferFastBuild"; + case BuildAccelerationStructureFlagBitsKHR::eLowMemory : return "LowMemory"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class BuildAccelerationStructureModeKHR + { + eBuild = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR, + eUpdate = VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( BuildAccelerationStructureModeKHR value ) + { + switch ( value ) + { + case BuildAccelerationStructureModeKHR::eBuild : return "Build"; + case BuildAccelerationStructureModeKHR::eUpdate : return "Update"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ChromaLocation + { + eCositedEven = VK_CHROMA_LOCATION_COSITED_EVEN, + eMidpoint = VK_CHROMA_LOCATION_MIDPOINT + }; + using ChromaLocationKHR = ChromaLocation; + + VULKAN_HPP_INLINE std::string to_string( ChromaLocation value ) + { + switch ( value ) + { + case ChromaLocation::eCositedEven : return "CositedEven"; + case ChromaLocation::eMidpoint : return "Midpoint"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CoarseSampleOrderTypeNV + { + eDefault = VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV, + eCustom = VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV, + ePixelMajor = VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV, + eSampleMajor = VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV + }; + + VULKAN_HPP_INLINE std::string to_string( CoarseSampleOrderTypeNV value ) + { + switch ( value ) + { + case CoarseSampleOrderTypeNV::eDefault : return "Default"; + case CoarseSampleOrderTypeNV::eCustom : return "Custom"; + case CoarseSampleOrderTypeNV::ePixelMajor : return "PixelMajor"; + case CoarseSampleOrderTypeNV::eSampleMajor : return "SampleMajor"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ColorComponentFlagBits : VkColorComponentFlags + { + eR = VK_COLOR_COMPONENT_R_BIT, + eG = VK_COLOR_COMPONENT_G_BIT, + eB = VK_COLOR_COMPONENT_B_BIT, + eA = VK_COLOR_COMPONENT_A_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( ColorComponentFlagBits value ) + { + switch ( value ) + { + case ColorComponentFlagBits::eR : return "R"; + case ColorComponentFlagBits::eG : return "G"; + case ColorComponentFlagBits::eB : return "B"; + case ColorComponentFlagBits::eA : return "A"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ColorSpaceKHR + { + eSrgbNonlinear = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + eDisplayP3NonlinearEXT = VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT, + eExtendedSrgbLinearEXT = VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT, + eDisplayP3LinearEXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, + eDciP3NonlinearEXT = VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT, + eBt709LinearEXT = VK_COLOR_SPACE_BT709_LINEAR_EXT, + eBt709NonlinearEXT = VK_COLOR_SPACE_BT709_NONLINEAR_EXT, + eBt2020LinearEXT = VK_COLOR_SPACE_BT2020_LINEAR_EXT, + eHdr10St2084EXT = VK_COLOR_SPACE_HDR10_ST2084_EXT, + eDolbyvisionEXT = VK_COLOR_SPACE_DOLBYVISION_EXT, + eHdr10HlgEXT = VK_COLOR_SPACE_HDR10_HLG_EXT, + eAdobergbLinearEXT = VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT, + eAdobergbNonlinearEXT = VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT, + ePassThroughEXT = 
VK_COLOR_SPACE_PASS_THROUGH_EXT, + eExtendedSrgbNonlinearEXT = VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT, + eDisplayNativeAMD = VK_COLOR_SPACE_DISPLAY_NATIVE_AMD, + eVkColorspaceSrgbNonlinear = VK_COLORSPACE_SRGB_NONLINEAR_KHR, + eDciP3LinearEXT = VK_COLOR_SPACE_DCI_P3_LINEAR_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ColorSpaceKHR value ) + { + switch ( value ) + { + case ColorSpaceKHR::eSrgbNonlinear : return "SrgbNonlinear"; + case ColorSpaceKHR::eDisplayP3NonlinearEXT : return "DisplayP3NonlinearEXT"; + case ColorSpaceKHR::eExtendedSrgbLinearEXT : return "ExtendedSrgbLinearEXT"; + case ColorSpaceKHR::eDisplayP3LinearEXT : return "DisplayP3LinearEXT"; + case ColorSpaceKHR::eDciP3NonlinearEXT : return "DciP3NonlinearEXT"; + case ColorSpaceKHR::eBt709LinearEXT : return "Bt709LinearEXT"; + case ColorSpaceKHR::eBt709NonlinearEXT : return "Bt709NonlinearEXT"; + case ColorSpaceKHR::eBt2020LinearEXT : return "Bt2020LinearEXT"; + case ColorSpaceKHR::eHdr10St2084EXT : return "Hdr10St2084EXT"; + case ColorSpaceKHR::eDolbyvisionEXT : return "DolbyvisionEXT"; + case ColorSpaceKHR::eHdr10HlgEXT : return "Hdr10HlgEXT"; + case ColorSpaceKHR::eAdobergbLinearEXT : return "AdobergbLinearEXT"; + case ColorSpaceKHR::eAdobergbNonlinearEXT : return "AdobergbNonlinearEXT"; + case ColorSpaceKHR::ePassThroughEXT : return "PassThroughEXT"; + case ColorSpaceKHR::eExtendedSrgbNonlinearEXT : return "ExtendedSrgbNonlinearEXT"; + case ColorSpaceKHR::eDisplayNativeAMD : return "DisplayNativeAMD"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CommandBufferLevel + { + ePrimary = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + eSecondary = VK_COMMAND_BUFFER_LEVEL_SECONDARY + }; + + VULKAN_HPP_INLINE std::string to_string( CommandBufferLevel value ) + { + switch ( value ) + { + case CommandBufferLevel::ePrimary : return "Primary"; + case CommandBufferLevel::eSecondary : return "Secondary"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CommandBufferResetFlagBits : VkCommandBufferResetFlags + { + eReleaseResources = VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( CommandBufferResetFlagBits value ) + { + switch ( value ) + { + case CommandBufferResetFlagBits::eReleaseResources : return "ReleaseResources"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CommandBufferUsageFlagBits : VkCommandBufferUsageFlags + { + eOneTimeSubmit = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + eRenderPassContinue = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, + eSimultaneousUse = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( CommandBufferUsageFlagBits value ) + { + switch ( value ) + { + case CommandBufferUsageFlagBits::eOneTimeSubmit : return "OneTimeSubmit"; + case CommandBufferUsageFlagBits::eRenderPassContinue : return "RenderPassContinue"; + case CommandBufferUsageFlagBits::eSimultaneousUse : return "SimultaneousUse"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CommandPoolCreateFlagBits : VkCommandPoolCreateFlags + { + eTransient = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, + eResetCommandBuffer = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, + eProtected = VK_COMMAND_POOL_CREATE_PROTECTED_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( 
CommandPoolCreateFlagBits value ) + { + switch ( value ) + { + case CommandPoolCreateFlagBits::eTransient : return "Transient"; + case CommandPoolCreateFlagBits::eResetCommandBuffer : return "ResetCommandBuffer"; + case CommandPoolCreateFlagBits::eProtected : return "Protected"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CommandPoolResetFlagBits : VkCommandPoolResetFlags + { + eReleaseResources = VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( CommandPoolResetFlagBits value ) + { + switch ( value ) + { + case CommandPoolResetFlagBits::eReleaseResources : return "ReleaseResources"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CompareOp + { + eNever = VK_COMPARE_OP_NEVER, + eLess = VK_COMPARE_OP_LESS, + eEqual = VK_COMPARE_OP_EQUAL, + eLessOrEqual = VK_COMPARE_OP_LESS_OR_EQUAL, + eGreater = VK_COMPARE_OP_GREATER, + eNotEqual = VK_COMPARE_OP_NOT_EQUAL, + eGreaterOrEqual = VK_COMPARE_OP_GREATER_OR_EQUAL, + eAlways = VK_COMPARE_OP_ALWAYS + }; + + VULKAN_HPP_INLINE std::string to_string( CompareOp value ) + { + switch ( value ) + { + case CompareOp::eNever : return "Never"; + case CompareOp::eLess : return "Less"; + case CompareOp::eEqual : return "Equal"; + case CompareOp::eLessOrEqual : return "LessOrEqual"; + case CompareOp::eGreater : return "Greater"; + case CompareOp::eNotEqual : return "NotEqual"; + case CompareOp::eGreaterOrEqual : return "GreaterOrEqual"; + case CompareOp::eAlways : return "Always"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ComponentSwizzle + { + eIdentity = VK_COMPONENT_SWIZZLE_IDENTITY, + eZero = VK_COMPONENT_SWIZZLE_ZERO, + eOne = VK_COMPONENT_SWIZZLE_ONE, + eR = VK_COMPONENT_SWIZZLE_R, + eG = VK_COMPONENT_SWIZZLE_G, + eB = VK_COMPONENT_SWIZZLE_B, + eA = VK_COMPONENT_SWIZZLE_A + }; + + VULKAN_HPP_INLINE std::string to_string( ComponentSwizzle value ) + { + switch ( value ) + { + case ComponentSwizzle::eIdentity : return "Identity"; + case ComponentSwizzle::eZero : return "Zero"; + case ComponentSwizzle::eOne : return "One"; + case ComponentSwizzle::eR : return "R"; + case ComponentSwizzle::eG : return "G"; + case ComponentSwizzle::eB : return "B"; + case ComponentSwizzle::eA : return "A"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ComponentTypeNV + { + eFloat16 = VK_COMPONENT_TYPE_FLOAT16_NV, + eFloat32 = VK_COMPONENT_TYPE_FLOAT32_NV, + eFloat64 = VK_COMPONENT_TYPE_FLOAT64_NV, + eSint8 = VK_COMPONENT_TYPE_SINT8_NV, + eSint16 = VK_COMPONENT_TYPE_SINT16_NV, + eSint32 = VK_COMPONENT_TYPE_SINT32_NV, + eSint64 = VK_COMPONENT_TYPE_SINT64_NV, + eUint8 = VK_COMPONENT_TYPE_UINT8_NV, + eUint16 = VK_COMPONENT_TYPE_UINT16_NV, + eUint32 = VK_COMPONENT_TYPE_UINT32_NV, + eUint64 = VK_COMPONENT_TYPE_UINT64_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ComponentTypeNV value ) + { + switch ( value ) + { + case ComponentTypeNV::eFloat16 : return "Float16"; + case ComponentTypeNV::eFloat32 : return "Float32"; + case ComponentTypeNV::eFloat64 : return "Float64"; + case ComponentTypeNV::eSint8 : return "Sint8"; + case ComponentTypeNV::eSint16 : return "Sint16"; + case ComponentTypeNV::eSint32 : return "Sint32"; + case ComponentTypeNV::eSint64 : return "Sint64"; + case ComponentTypeNV::eUint8 : return "Uint8"; + case 
ComponentTypeNV::eUint16 : return "Uint16"; + case ComponentTypeNV::eUint32 : return "Uint32"; + case ComponentTypeNV::eUint64 : return "Uint64"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CompositeAlphaFlagBitsKHR : VkCompositeAlphaFlagsKHR + { + eOpaque = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, + ePreMultiplied = VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR, + ePostMultiplied = VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR, + eInherit = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( CompositeAlphaFlagBitsKHR value ) + { + switch ( value ) + { + case CompositeAlphaFlagBitsKHR::eOpaque : return "Opaque"; + case CompositeAlphaFlagBitsKHR::ePreMultiplied : return "PreMultiplied"; + case CompositeAlphaFlagBitsKHR::ePostMultiplied : return "PostMultiplied"; + case CompositeAlphaFlagBitsKHR::eInherit : return "Inherit"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ConditionalRenderingFlagBitsEXT : VkConditionalRenderingFlagsEXT + { + eInverted = VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ConditionalRenderingFlagBitsEXT value ) + { + switch ( value ) + { + case ConditionalRenderingFlagBitsEXT::eInverted : return "Inverted"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ConservativeRasterizationModeEXT + { + eDisabled = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT, + eOverestimate = VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT, + eUnderestimate = VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ConservativeRasterizationModeEXT value ) + { + switch ( value ) + { + case ConservativeRasterizationModeEXT::eDisabled : return "Disabled"; + case ConservativeRasterizationModeEXT::eOverestimate : return "Overestimate"; + case ConservativeRasterizationModeEXT::eUnderestimate : return "Underestimate"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CopyAccelerationStructureModeKHR + { + eClone = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR, + eCompact = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, + eSerialize = VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR, + eDeserialize = VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR + }; + using CopyAccelerationStructureModeNV = CopyAccelerationStructureModeKHR; + + VULKAN_HPP_INLINE std::string to_string( CopyAccelerationStructureModeKHR value ) + { + switch ( value ) + { + case CopyAccelerationStructureModeKHR::eClone : return "Clone"; + case CopyAccelerationStructureModeKHR::eCompact : return "Compact"; + case CopyAccelerationStructureModeKHR::eSerialize : return "Serialize"; + case CopyAccelerationStructureModeKHR::eDeserialize : return "Deserialize"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CoverageModulationModeNV + { + eNone = VK_COVERAGE_MODULATION_MODE_NONE_NV, + eRgb = VK_COVERAGE_MODULATION_MODE_RGB_NV, + eAlpha = VK_COVERAGE_MODULATION_MODE_ALPHA_NV, + eRgba = VK_COVERAGE_MODULATION_MODE_RGBA_NV + }; + + VULKAN_HPP_INLINE std::string to_string( CoverageModulationModeNV value ) + { + switch ( value ) + { + case CoverageModulationModeNV::eNone : return "None"; + case CoverageModulationModeNV::eRgb : return "Rgb"; + case 
CoverageModulationModeNV::eAlpha : return "Alpha"; + case CoverageModulationModeNV::eRgba : return "Rgba"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CoverageReductionModeNV + { + eMerge = VK_COVERAGE_REDUCTION_MODE_MERGE_NV, + eTruncate = VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV + }; + + VULKAN_HPP_INLINE std::string to_string( CoverageReductionModeNV value ) + { + switch ( value ) + { + case CoverageReductionModeNV::eMerge : return "Merge"; + case CoverageReductionModeNV::eTruncate : return "Truncate"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class CullModeFlagBits : VkCullModeFlags + { + eNone = VK_CULL_MODE_NONE, + eFront = VK_CULL_MODE_FRONT_BIT, + eBack = VK_CULL_MODE_BACK_BIT, + eFrontAndBack = VK_CULL_MODE_FRONT_AND_BACK + }; + + VULKAN_HPP_INLINE std::string to_string( CullModeFlagBits value ) + { + switch ( value ) + { + case CullModeFlagBits::eNone : return "None"; + case CullModeFlagBits::eFront : return "Front"; + case CullModeFlagBits::eBack : return "Back"; + case CullModeFlagBits::eFrontAndBack : return "FrontAndBack"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DebugReportFlagBitsEXT : VkDebugReportFlagsEXT + { + eInformation = VK_DEBUG_REPORT_INFORMATION_BIT_EXT, + eWarning = VK_DEBUG_REPORT_WARNING_BIT_EXT, + ePerformanceWarning = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, + eError = VK_DEBUG_REPORT_ERROR_BIT_EXT, + eDebug = VK_DEBUG_REPORT_DEBUG_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DebugReportFlagBitsEXT value ) + { + switch ( value ) + { + case DebugReportFlagBitsEXT::eInformation : return "Information"; + case DebugReportFlagBitsEXT::eWarning : return "Warning"; + case DebugReportFlagBitsEXT::ePerformanceWarning : return "PerformanceWarning"; + case DebugReportFlagBitsEXT::eError : return "Error"; + case DebugReportFlagBitsEXT::eDebug : return "Debug"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DebugReportObjectTypeEXT + { + eUnknown = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, + eInstance = VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, + ePhysicalDevice = VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, + eDevice = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, + eQueue = VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, + eSemaphore = VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, + eCommandBuffer = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, + eFence = VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, + eDeviceMemory = VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, + eBuffer = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, + eImage = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, + eEvent = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, + eQueryPool = VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, + eBufferView = VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, + eImageView = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, + eShaderModule = VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, + ePipelineCache = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, + ePipelineLayout = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, + eRenderPass = VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, + ePipeline = VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, + eDescriptorSetLayout = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, + eSampler = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, + eDescriptorPool = 
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, + eDescriptorSet = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, + eFramebuffer = VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, + eCommandPool = VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, + eSurfaceKHR = VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, + eSwapchainKHR = VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, + eDebugReportCallbackEXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + eDisplayKHR = VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT, + eDisplayModeKHR = VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT, + eValidationCacheEXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + eSamplerYcbcrConversion = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + eDescriptorUpdateTemplate = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + eAccelerationStructureKHR = VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT, + eAccelerationStructureNV = VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT, + eDebugReport = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT, + eDescriptorUpdateTemplateKHR = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT, + eSamplerYcbcrConversionKHR = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT, + eValidationCache = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DebugReportObjectTypeEXT value ) + { + switch ( value ) + { + case DebugReportObjectTypeEXT::eUnknown : return "Unknown"; + case DebugReportObjectTypeEXT::eInstance : return "Instance"; + case DebugReportObjectTypeEXT::ePhysicalDevice : return "PhysicalDevice"; + case DebugReportObjectTypeEXT::eDevice : return "Device"; + case DebugReportObjectTypeEXT::eQueue : return "Queue"; + case DebugReportObjectTypeEXT::eSemaphore : return "Semaphore"; + case DebugReportObjectTypeEXT::eCommandBuffer : return "CommandBuffer"; + case DebugReportObjectTypeEXT::eFence : return "Fence"; + case DebugReportObjectTypeEXT::eDeviceMemory : return "DeviceMemory"; + case DebugReportObjectTypeEXT::eBuffer : return "Buffer"; + case DebugReportObjectTypeEXT::eImage : return "Image"; + case DebugReportObjectTypeEXT::eEvent : return "Event"; + case DebugReportObjectTypeEXT::eQueryPool : return "QueryPool"; + case DebugReportObjectTypeEXT::eBufferView : return "BufferView"; + case DebugReportObjectTypeEXT::eImageView : return "ImageView"; + case DebugReportObjectTypeEXT::eShaderModule : return "ShaderModule"; + case DebugReportObjectTypeEXT::ePipelineCache : return "PipelineCache"; + case DebugReportObjectTypeEXT::ePipelineLayout : return "PipelineLayout"; + case DebugReportObjectTypeEXT::eRenderPass : return "RenderPass"; + case DebugReportObjectTypeEXT::ePipeline : return "Pipeline"; + case DebugReportObjectTypeEXT::eDescriptorSetLayout : return "DescriptorSetLayout"; + case DebugReportObjectTypeEXT::eSampler : return "Sampler"; + case DebugReportObjectTypeEXT::eDescriptorPool : return "DescriptorPool"; + case DebugReportObjectTypeEXT::eDescriptorSet : return "DescriptorSet"; + case DebugReportObjectTypeEXT::eFramebuffer : return "Framebuffer"; + case DebugReportObjectTypeEXT::eCommandPool : return "CommandPool"; + case DebugReportObjectTypeEXT::eSurfaceKHR : return "SurfaceKHR"; + case DebugReportObjectTypeEXT::eSwapchainKHR : return "SwapchainKHR"; + case DebugReportObjectTypeEXT::eDebugReportCallbackEXT : return "DebugReportCallbackEXT"; + case DebugReportObjectTypeEXT::eDisplayKHR : return "DisplayKHR"; + case 
DebugReportObjectTypeEXT::eDisplayModeKHR : return "DisplayModeKHR"; + case DebugReportObjectTypeEXT::eValidationCacheEXT : return "ValidationCacheEXT"; + case DebugReportObjectTypeEXT::eSamplerYcbcrConversion : return "SamplerYcbcrConversion"; + case DebugReportObjectTypeEXT::eDescriptorUpdateTemplate : return "DescriptorUpdateTemplate"; + case DebugReportObjectTypeEXT::eAccelerationStructureKHR : return "AccelerationStructureKHR"; + case DebugReportObjectTypeEXT::eAccelerationStructureNV : return "AccelerationStructureNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DebugUtilsMessageSeverityFlagBitsEXT : VkDebugUtilsMessageSeverityFlagsEXT + { + eVerbose = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT, + eInfo = VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT, + eWarning = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT, + eError = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessageSeverityFlagBitsEXT value ) + { + switch ( value ) + { + case DebugUtilsMessageSeverityFlagBitsEXT::eVerbose : return "Verbose"; + case DebugUtilsMessageSeverityFlagBitsEXT::eInfo : return "Info"; + case DebugUtilsMessageSeverityFlagBitsEXT::eWarning : return "Warning"; + case DebugUtilsMessageSeverityFlagBitsEXT::eError : return "Error"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DebugUtilsMessageTypeFlagBitsEXT : VkDebugUtilsMessageTypeFlagsEXT + { + eGeneral = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT, + eValidation = VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT, + ePerformance = VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessageTypeFlagBitsEXT value ) + { + switch ( value ) + { + case DebugUtilsMessageTypeFlagBitsEXT::eGeneral : return "General"; + case DebugUtilsMessageTypeFlagBitsEXT::eValidation : return "Validation"; + case DebugUtilsMessageTypeFlagBitsEXT::ePerformance : return "Performance"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DependencyFlagBits : VkDependencyFlags + { + eByRegion = VK_DEPENDENCY_BY_REGION_BIT, + eDeviceGroup = VK_DEPENDENCY_DEVICE_GROUP_BIT, + eViewLocal = VK_DEPENDENCY_VIEW_LOCAL_BIT, + eDeviceGroupKHR = VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR, + eViewLocalKHR = VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( DependencyFlagBits value ) + { + switch ( value ) + { + case DependencyFlagBits::eByRegion : return "ByRegion"; + case DependencyFlagBits::eDeviceGroup : return "DeviceGroup"; + case DependencyFlagBits::eViewLocal : return "ViewLocal"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DescriptorBindingFlagBits : VkDescriptorBindingFlags + { + eUpdateAfterBind = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT, + eUpdateUnusedWhilePending = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT, + ePartiallyBound = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, + eVariableDescriptorCount = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT + }; + using DescriptorBindingFlagBitsEXT = DescriptorBindingFlagBits; + + VULKAN_HPP_INLINE std::string to_string( DescriptorBindingFlagBits value ) + { + switch ( value ) + { + case DescriptorBindingFlagBits::eUpdateAfterBind : return "UpdateAfterBind"; + case 
DescriptorBindingFlagBits::eUpdateUnusedWhilePending : return "UpdateUnusedWhilePending"; + case DescriptorBindingFlagBits::ePartiallyBound : return "PartiallyBound"; + case DescriptorBindingFlagBits::eVariableDescriptorCount : return "VariableDescriptorCount"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DescriptorPoolCreateFlagBits : VkDescriptorPoolCreateFlags + { + eFreeDescriptorSet = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, + eUpdateAfterBind = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, + eUpdateAfterBindEXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DescriptorPoolCreateFlagBits value ) + { + switch ( value ) + { + case DescriptorPoolCreateFlagBits::eFreeDescriptorSet : return "FreeDescriptorSet"; + case DescriptorPoolCreateFlagBits::eUpdateAfterBind : return "UpdateAfterBind"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DescriptorSetLayoutCreateFlagBits : VkDescriptorSetLayoutCreateFlags + { + eUpdateAfterBindPool = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + ePushDescriptorKHR = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, + eUpdateAfterBindPoolEXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DescriptorSetLayoutCreateFlagBits value ) + { + switch ( value ) + { + case DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool : return "UpdateAfterBindPool"; + case DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR : return "PushDescriptorKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DescriptorType + { + eSampler = VK_DESCRIPTOR_TYPE_SAMPLER, + eCombinedImageSampler = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + eSampledImage = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, + eStorageImage = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + eUniformTexelBuffer = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, + eStorageTexelBuffer = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, + eUniformBuffer = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + eStorageBuffer = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + eUniformBufferDynamic = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, + eStorageBufferDynamic = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, + eInputAttachment = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, + eInlineUniformBlockEXT = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT, + eAccelerationStructureKHR = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, + eAccelerationStructureNV = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV + }; + + VULKAN_HPP_INLINE std::string to_string( DescriptorType value ) + { + switch ( value ) + { + case DescriptorType::eSampler : return "Sampler"; + case DescriptorType::eCombinedImageSampler : return "CombinedImageSampler"; + case DescriptorType::eSampledImage : return "SampledImage"; + case DescriptorType::eStorageImage : return "StorageImage"; + case DescriptorType::eUniformTexelBuffer : return "UniformTexelBuffer"; + case DescriptorType::eStorageTexelBuffer : return "StorageTexelBuffer"; + case DescriptorType::eUniformBuffer : return "UniformBuffer"; + case DescriptorType::eStorageBuffer : return "StorageBuffer"; + case DescriptorType::eUniformBufferDynamic : return "UniformBufferDynamic"; + case DescriptorType::eStorageBufferDynamic : return "StorageBufferDynamic"; + case DescriptorType::eInputAttachment : return 
"InputAttachment"; + case DescriptorType::eInlineUniformBlockEXT : return "InlineUniformBlockEXT"; + case DescriptorType::eAccelerationStructureKHR : return "AccelerationStructureKHR"; + case DescriptorType::eAccelerationStructureNV : return "AccelerationStructureNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DescriptorUpdateTemplateType + { + eDescriptorSet = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + ePushDescriptorsKHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR + }; + using DescriptorUpdateTemplateTypeKHR = DescriptorUpdateTemplateType; + + VULKAN_HPP_INLINE std::string to_string( DescriptorUpdateTemplateType value ) + { + switch ( value ) + { + case DescriptorUpdateTemplateType::eDescriptorSet : return "DescriptorSet"; + case DescriptorUpdateTemplateType::ePushDescriptorsKHR : return "PushDescriptorsKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DeviceCreateFlagBits + {}; + + VULKAN_HPP_INLINE std::string to_string( DeviceCreateFlagBits ) + { + return "(void)"; + } + + enum class DeviceDiagnosticsConfigFlagBitsNV : VkDeviceDiagnosticsConfigFlagsNV + { + eEnableShaderDebugInfo = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV, + eEnableResourceTracking = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV, + eEnableAutomaticCheckpoints = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( DeviceDiagnosticsConfigFlagBitsNV value ) + { + switch ( value ) + { + case DeviceDiagnosticsConfigFlagBitsNV::eEnableShaderDebugInfo : return "EnableShaderDebugInfo"; + case DeviceDiagnosticsConfigFlagBitsNV::eEnableResourceTracking : return "EnableResourceTracking"; + case DeviceDiagnosticsConfigFlagBitsNV::eEnableAutomaticCheckpoints : return "EnableAutomaticCheckpoints"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DeviceEventTypeEXT + { + eDisplayHotplug = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DeviceEventTypeEXT value ) + { + switch ( value ) + { + case DeviceEventTypeEXT::eDisplayHotplug : return "DisplayHotplug"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DeviceGroupPresentModeFlagBitsKHR : VkDeviceGroupPresentModeFlagsKHR + { + eLocal = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR, + eRemote = VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR, + eSum = VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR, + eLocalMultiDevice = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( DeviceGroupPresentModeFlagBitsKHR value ) + { + switch ( value ) + { + case DeviceGroupPresentModeFlagBitsKHR::eLocal : return "Local"; + case DeviceGroupPresentModeFlagBitsKHR::eRemote : return "Remote"; + case DeviceGroupPresentModeFlagBitsKHR::eSum : return "Sum"; + case DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice : return "LocalMultiDevice"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DeviceMemoryReportEventTypeEXT + { + eAllocate = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, + eFree = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT, + eImport = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT, + eUnimport = 
VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT, + eAllocationFailed = VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DeviceMemoryReportEventTypeEXT value ) + { + switch ( value ) + { + case DeviceMemoryReportEventTypeEXT::eAllocate : return "Allocate"; + case DeviceMemoryReportEventTypeEXT::eFree : return "Free"; + case DeviceMemoryReportEventTypeEXT::eImport : return "Import"; + case DeviceMemoryReportEventTypeEXT::eUnimport : return "Unimport"; + case DeviceMemoryReportEventTypeEXT::eAllocationFailed : return "AllocationFailed"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DeviceQueueCreateFlagBits : VkDeviceQueueCreateFlags + { + eProtected = VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( DeviceQueueCreateFlagBits value ) + { + switch ( value ) + { + case DeviceQueueCreateFlagBits::eProtected : return "Protected"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DiscardRectangleModeEXT + { + eInclusive = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT, + eExclusive = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DiscardRectangleModeEXT value ) + { + switch ( value ) + { + case DiscardRectangleModeEXT::eInclusive : return "Inclusive"; + case DiscardRectangleModeEXT::eExclusive : return "Exclusive"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DisplayEventTypeEXT + { + eFirstPixelOut = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DisplayEventTypeEXT value ) + { + switch ( value ) + { + case DisplayEventTypeEXT::eFirstPixelOut : return "FirstPixelOut"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DisplayPlaneAlphaFlagBitsKHR : VkDisplayPlaneAlphaFlagsKHR + { + eOpaque = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR, + eGlobal = VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR, + ePerPixel = VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR, + ePerPixelPremultiplied = VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( DisplayPlaneAlphaFlagBitsKHR value ) + { + switch ( value ) + { + case DisplayPlaneAlphaFlagBitsKHR::eOpaque : return "Opaque"; + case DisplayPlaneAlphaFlagBitsKHR::eGlobal : return "Global"; + case DisplayPlaneAlphaFlagBitsKHR::ePerPixel : return "PerPixel"; + case DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied : return "PerPixelPremultiplied"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DisplayPowerStateEXT + { + eOff = VK_DISPLAY_POWER_STATE_OFF_EXT, + eSuspend = VK_DISPLAY_POWER_STATE_SUSPEND_EXT, + eOn = VK_DISPLAY_POWER_STATE_ON_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DisplayPowerStateEXT value ) + { + switch ( value ) + { + case DisplayPowerStateEXT::eOff : return "Off"; + case DisplayPowerStateEXT::eSuspend : return "Suspend"; + case DisplayPowerStateEXT::eOn : return "On"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DriverId + { + eAmdProprietary = VK_DRIVER_ID_AMD_PROPRIETARY, + eAmdOpenSource = VK_DRIVER_ID_AMD_OPEN_SOURCE, + eMesaRadv = VK_DRIVER_ID_MESA_RADV, + eNvidiaProprietary = 
VK_DRIVER_ID_NVIDIA_PROPRIETARY, + eIntelProprietaryWindows = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS, + eIntelOpenSourceMESA = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA, + eImaginationProprietary = VK_DRIVER_ID_IMAGINATION_PROPRIETARY, + eQualcommProprietary = VK_DRIVER_ID_QUALCOMM_PROPRIETARY, + eArmProprietary = VK_DRIVER_ID_ARM_PROPRIETARY, + eGoogleSwiftshader = VK_DRIVER_ID_GOOGLE_SWIFTSHADER, + eGgpProprietary = VK_DRIVER_ID_GGP_PROPRIETARY, + eBroadcomProprietary = VK_DRIVER_ID_BROADCOM_PROPRIETARY, + eMesaLlvmpipe = VK_DRIVER_ID_MESA_LLVMPIPE, + eMoltenvk = VK_DRIVER_ID_MOLTENVK, + eIntelOpenSourceMesa = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR + }; + using DriverIdKHR = DriverId; + + VULKAN_HPP_INLINE std::string to_string( DriverId value ) + { + switch ( value ) + { + case DriverId::eAmdProprietary : return "AmdProprietary"; + case DriverId::eAmdOpenSource : return "AmdOpenSource"; + case DriverId::eMesaRadv : return "MesaRadv"; + case DriverId::eNvidiaProprietary : return "NvidiaProprietary"; + case DriverId::eIntelProprietaryWindows : return "IntelProprietaryWindows"; + case DriverId::eIntelOpenSourceMESA : return "IntelOpenSourceMESA"; + case DriverId::eImaginationProprietary : return "ImaginationProprietary"; + case DriverId::eQualcommProprietary : return "QualcommProprietary"; + case DriverId::eArmProprietary : return "ArmProprietary"; + case DriverId::eGoogleSwiftshader : return "GoogleSwiftshader"; + case DriverId::eGgpProprietary : return "GgpProprietary"; + case DriverId::eBroadcomProprietary : return "BroadcomProprietary"; + case DriverId::eMesaLlvmpipe : return "MesaLlvmpipe"; + case DriverId::eMoltenvk : return "Moltenvk"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class DynamicState + { + eViewport = VK_DYNAMIC_STATE_VIEWPORT, + eScissor = VK_DYNAMIC_STATE_SCISSOR, + eLineWidth = VK_DYNAMIC_STATE_LINE_WIDTH, + eDepthBias = VK_DYNAMIC_STATE_DEPTH_BIAS, + eBlendConstants = VK_DYNAMIC_STATE_BLEND_CONSTANTS, + eDepthBounds = VK_DYNAMIC_STATE_DEPTH_BOUNDS, + eStencilCompareMask = VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, + eStencilWriteMask = VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, + eStencilReference = VK_DYNAMIC_STATE_STENCIL_REFERENCE, + eViewportWScalingNV = VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, + eDiscardRectangleEXT = VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, + eSampleLocationsEXT = VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, + eRayTracingPipelineStackSizeKHR = VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR, + eViewportShadingRatePaletteNV = VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV, + eViewportCoarseSampleOrderNV = VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV, + eExclusiveScissorNV = VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV, + eFragmentShadingRateKHR = VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR, + eLineStippleEXT = VK_DYNAMIC_STATE_LINE_STIPPLE_EXT, + eCullModeEXT = VK_DYNAMIC_STATE_CULL_MODE_EXT, + eFrontFaceEXT = VK_DYNAMIC_STATE_FRONT_FACE_EXT, + ePrimitiveTopologyEXT = VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT, + eViewportWithCountEXT = VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT, + eScissorWithCountEXT = VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT, + eVertexInputBindingStrideEXT = VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT, + eDepthTestEnableEXT = VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT, + eDepthWriteEnableEXT = VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT, + eDepthCompareOpEXT = VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT, + eDepthBoundsTestEnableEXT = VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT, + 
eStencilTestEnableEXT = VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT, + eStencilOpEXT = VK_DYNAMIC_STATE_STENCIL_OP_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( DynamicState value ) + { + switch ( value ) + { + case DynamicState::eViewport : return "Viewport"; + case DynamicState::eScissor : return "Scissor"; + case DynamicState::eLineWidth : return "LineWidth"; + case DynamicState::eDepthBias : return "DepthBias"; + case DynamicState::eBlendConstants : return "BlendConstants"; + case DynamicState::eDepthBounds : return "DepthBounds"; + case DynamicState::eStencilCompareMask : return "StencilCompareMask"; + case DynamicState::eStencilWriteMask : return "StencilWriteMask"; + case DynamicState::eStencilReference : return "StencilReference"; + case DynamicState::eViewportWScalingNV : return "ViewportWScalingNV"; + case DynamicState::eDiscardRectangleEXT : return "DiscardRectangleEXT"; + case DynamicState::eSampleLocationsEXT : return "SampleLocationsEXT"; + case DynamicState::eRayTracingPipelineStackSizeKHR : return "RayTracingPipelineStackSizeKHR"; + case DynamicState::eViewportShadingRatePaletteNV : return "ViewportShadingRatePaletteNV"; + case DynamicState::eViewportCoarseSampleOrderNV : return "ViewportCoarseSampleOrderNV"; + case DynamicState::eExclusiveScissorNV : return "ExclusiveScissorNV"; + case DynamicState::eFragmentShadingRateKHR : return "FragmentShadingRateKHR"; + case DynamicState::eLineStippleEXT : return "LineStippleEXT"; + case DynamicState::eCullModeEXT : return "CullModeEXT"; + case DynamicState::eFrontFaceEXT : return "FrontFaceEXT"; + case DynamicState::ePrimitiveTopologyEXT : return "PrimitiveTopologyEXT"; + case DynamicState::eViewportWithCountEXT : return "ViewportWithCountEXT"; + case DynamicState::eScissorWithCountEXT : return "ScissorWithCountEXT"; + case DynamicState::eVertexInputBindingStrideEXT : return "VertexInputBindingStrideEXT"; + case DynamicState::eDepthTestEnableEXT : return "DepthTestEnableEXT"; + case DynamicState::eDepthWriteEnableEXT : return "DepthWriteEnableEXT"; + case DynamicState::eDepthCompareOpEXT : return "DepthCompareOpEXT"; + case DynamicState::eDepthBoundsTestEnableEXT : return "DepthBoundsTestEnableEXT"; + case DynamicState::eStencilTestEnableEXT : return "StencilTestEnableEXT"; + case DynamicState::eStencilOpEXT : return "StencilOpEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalFenceFeatureFlagBits : VkExternalFenceFeatureFlags + { + eExportable = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + eImportable = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT + }; + using ExternalFenceFeatureFlagBitsKHR = ExternalFenceFeatureFlagBits; + + VULKAN_HPP_INLINE std::string to_string( ExternalFenceFeatureFlagBits value ) + { + switch ( value ) + { + case ExternalFenceFeatureFlagBits::eExportable : return "Exportable"; + case ExternalFenceFeatureFlagBits::eImportable : return "Importable"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalFenceHandleTypeFlagBits : VkExternalFenceHandleTypeFlags + { + eOpaqueFd = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueWin32 = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32Kmt = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eSyncFd = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT + }; + using ExternalFenceHandleTypeFlagBitsKHR = ExternalFenceHandleTypeFlagBits; + + VULKAN_HPP_INLINE std::string to_string( 
ExternalFenceHandleTypeFlagBits value ) + { + switch ( value ) + { + case ExternalFenceHandleTypeFlagBits::eOpaqueFd : return "OpaqueFd"; + case ExternalFenceHandleTypeFlagBits::eOpaqueWin32 : return "OpaqueWin32"; + case ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; + case ExternalFenceHandleTypeFlagBits::eSyncFd : return "SyncFd"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalMemoryFeatureFlagBits : VkExternalMemoryFeatureFlags + { + eDedicatedOnly = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + eExportable = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + eImportable = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT + }; + using ExternalMemoryFeatureFlagBitsKHR = ExternalMemoryFeatureFlagBits; + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryFeatureFlagBits value ) + { + switch ( value ) + { + case ExternalMemoryFeatureFlagBits::eDedicatedOnly : return "DedicatedOnly"; + case ExternalMemoryFeatureFlagBits::eExportable : return "Exportable"; + case ExternalMemoryFeatureFlagBits::eImportable : return "Importable"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalMemoryFeatureFlagBitsNV : VkExternalMemoryFeatureFlagsNV + { + eDedicatedOnly = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV, + eExportable = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV, + eImportable = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryFeatureFlagBitsNV value ) + { + switch ( value ) + { + case ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly : return "DedicatedOnly"; + case ExternalMemoryFeatureFlagBitsNV::eExportable : return "Exportable"; + case ExternalMemoryFeatureFlagBitsNV::eImportable : return "Importable"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalMemoryHandleTypeFlagBits : VkExternalMemoryHandleTypeFlags + { + eOpaqueFd = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueWin32 = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32Kmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eD3D11Texture = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + eD3D11TextureKmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + eD3D12Heap = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + eD3D12Resource = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + eDmaBufEXT = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, + eAndroidHardwareBufferANDROID = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID, + eHostAllocationEXT = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT, + eHostMappedForeignMemoryEXT = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT + }; + using ExternalMemoryHandleTypeFlagBitsKHR = ExternalMemoryHandleTypeFlagBits; + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryHandleTypeFlagBits value ) + { + switch ( value ) + { + case ExternalMemoryHandleTypeFlagBits::eOpaqueFd : return "OpaqueFd"; + case ExternalMemoryHandleTypeFlagBits::eOpaqueWin32 : return "OpaqueWin32"; + case ExternalMemoryHandleTypeFlagBits::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; + case ExternalMemoryHandleTypeFlagBits::eD3D11Texture : return "D3D11Texture"; + case ExternalMemoryHandleTypeFlagBits::eD3D11TextureKmt : return "D3D11TextureKmt"; + case ExternalMemoryHandleTypeFlagBits::eD3D12Heap : return 
"D3D12Heap"; + case ExternalMemoryHandleTypeFlagBits::eD3D12Resource : return "D3D12Resource"; + case ExternalMemoryHandleTypeFlagBits::eDmaBufEXT : return "DmaBufEXT"; + case ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID : return "AndroidHardwareBufferANDROID"; + case ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT : return "HostAllocationEXT"; + case ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT : return "HostMappedForeignMemoryEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalMemoryHandleTypeFlagBitsNV : VkExternalMemoryHandleTypeFlagsNV + { + eOpaqueWin32 = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV, + eOpaqueWin32Kmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV, + eD3D11Image = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV, + eD3D11ImageKmt = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryHandleTypeFlagBitsNV value ) + { + switch ( value ) + { + case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32 : return "OpaqueWin32"; + case ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; + case ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image : return "D3D11Image"; + case ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt : return "D3D11ImageKmt"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalSemaphoreFeatureFlagBits : VkExternalSemaphoreFeatureFlags + { + eExportable = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + eImportable = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT + }; + using ExternalSemaphoreFeatureFlagBitsKHR = ExternalSemaphoreFeatureFlagBits; + + VULKAN_HPP_INLINE std::string to_string( ExternalSemaphoreFeatureFlagBits value ) + { + switch ( value ) + { + case ExternalSemaphoreFeatureFlagBits::eExportable : return "Exportable"; + case ExternalSemaphoreFeatureFlagBits::eImportable : return "Importable"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ExternalSemaphoreHandleTypeFlagBits : VkExternalSemaphoreHandleTypeFlags + { + eOpaqueFd = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + eOpaqueWin32 = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + eOpaqueWin32Kmt = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + eD3D12Fence = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + eSyncFd = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + eD3D11Fence = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT + }; + using ExternalSemaphoreHandleTypeFlagBitsKHR = ExternalSemaphoreHandleTypeFlagBits; + + VULKAN_HPP_INLINE std::string to_string( ExternalSemaphoreHandleTypeFlagBits value ) + { + switch ( value ) + { + case ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd : return "OpaqueFd"; + case ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32 : return "OpaqueWin32"; + case ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt : return "OpaqueWin32Kmt"; + case ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence : return "D3D12Fence"; + case ExternalSemaphoreHandleTypeFlagBits::eSyncFd : return "SyncFd"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class FenceCreateFlagBits : VkFenceCreateFlags + { + eSignaled = VK_FENCE_CREATE_SIGNALED_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( FenceCreateFlagBits 
value ) + { + switch ( value ) + { + case FenceCreateFlagBits::eSignaled : return "Signaled"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class FenceImportFlagBits : VkFenceImportFlags + { + eTemporary = VK_FENCE_IMPORT_TEMPORARY_BIT + }; + using FenceImportFlagBitsKHR = FenceImportFlagBits; + + VULKAN_HPP_INLINE std::string to_string( FenceImportFlagBits value ) + { + switch ( value ) + { + case FenceImportFlagBits::eTemporary : return "Temporary"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class Filter + { + eNearest = VK_FILTER_NEAREST, + eLinear = VK_FILTER_LINEAR, + eCubicIMG = VK_FILTER_CUBIC_IMG, + eCubicEXT = VK_FILTER_CUBIC_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( Filter value ) + { + switch ( value ) + { + case Filter::eNearest : return "Nearest"; + case Filter::eLinear : return "Linear"; + case Filter::eCubicIMG : return "CubicIMG"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class Format + { + eUndefined = VK_FORMAT_UNDEFINED, + eR4G4UnormPack8 = VK_FORMAT_R4G4_UNORM_PACK8, + eR4G4B4A4UnormPack16 = VK_FORMAT_R4G4B4A4_UNORM_PACK16, + eB4G4R4A4UnormPack16 = VK_FORMAT_B4G4R4A4_UNORM_PACK16, + eR5G6B5UnormPack16 = VK_FORMAT_R5G6B5_UNORM_PACK16, + eB5G6R5UnormPack16 = VK_FORMAT_B5G6R5_UNORM_PACK16, + eR5G5B5A1UnormPack16 = VK_FORMAT_R5G5B5A1_UNORM_PACK16, + eB5G5R5A1UnormPack16 = VK_FORMAT_B5G5R5A1_UNORM_PACK16, + eA1R5G5B5UnormPack16 = VK_FORMAT_A1R5G5B5_UNORM_PACK16, + eR8Unorm = VK_FORMAT_R8_UNORM, + eR8Snorm = VK_FORMAT_R8_SNORM, + eR8Uscaled = VK_FORMAT_R8_USCALED, + eR8Sscaled = VK_FORMAT_R8_SSCALED, + eR8Uint = VK_FORMAT_R8_UINT, + eR8Sint = VK_FORMAT_R8_SINT, + eR8Srgb = VK_FORMAT_R8_SRGB, + eR8G8Unorm = VK_FORMAT_R8G8_UNORM, + eR8G8Snorm = VK_FORMAT_R8G8_SNORM, + eR8G8Uscaled = VK_FORMAT_R8G8_USCALED, + eR8G8Sscaled = VK_FORMAT_R8G8_SSCALED, + eR8G8Uint = VK_FORMAT_R8G8_UINT, + eR8G8Sint = VK_FORMAT_R8G8_SINT, + eR8G8Srgb = VK_FORMAT_R8G8_SRGB, + eR8G8B8Unorm = VK_FORMAT_R8G8B8_UNORM, + eR8G8B8Snorm = VK_FORMAT_R8G8B8_SNORM, + eR8G8B8Uscaled = VK_FORMAT_R8G8B8_USCALED, + eR8G8B8Sscaled = VK_FORMAT_R8G8B8_SSCALED, + eR8G8B8Uint = VK_FORMAT_R8G8B8_UINT, + eR8G8B8Sint = VK_FORMAT_R8G8B8_SINT, + eR8G8B8Srgb = VK_FORMAT_R8G8B8_SRGB, + eB8G8R8Unorm = VK_FORMAT_B8G8R8_UNORM, + eB8G8R8Snorm = VK_FORMAT_B8G8R8_SNORM, + eB8G8R8Uscaled = VK_FORMAT_B8G8R8_USCALED, + eB8G8R8Sscaled = VK_FORMAT_B8G8R8_SSCALED, + eB8G8R8Uint = VK_FORMAT_B8G8R8_UINT, + eB8G8R8Sint = VK_FORMAT_B8G8R8_SINT, + eB8G8R8Srgb = VK_FORMAT_B8G8R8_SRGB, + eR8G8B8A8Unorm = VK_FORMAT_R8G8B8A8_UNORM, + eR8G8B8A8Snorm = VK_FORMAT_R8G8B8A8_SNORM, + eR8G8B8A8Uscaled = VK_FORMAT_R8G8B8A8_USCALED, + eR8G8B8A8Sscaled = VK_FORMAT_R8G8B8A8_SSCALED, + eR8G8B8A8Uint = VK_FORMAT_R8G8B8A8_UINT, + eR8G8B8A8Sint = VK_FORMAT_R8G8B8A8_SINT, + eR8G8B8A8Srgb = VK_FORMAT_R8G8B8A8_SRGB, + eB8G8R8A8Unorm = VK_FORMAT_B8G8R8A8_UNORM, + eB8G8R8A8Snorm = VK_FORMAT_B8G8R8A8_SNORM, + eB8G8R8A8Uscaled = VK_FORMAT_B8G8R8A8_USCALED, + eB8G8R8A8Sscaled = VK_FORMAT_B8G8R8A8_SSCALED, + eB8G8R8A8Uint = VK_FORMAT_B8G8R8A8_UINT, + eB8G8R8A8Sint = VK_FORMAT_B8G8R8A8_SINT, + eB8G8R8A8Srgb = VK_FORMAT_B8G8R8A8_SRGB, + eA8B8G8R8UnormPack32 = VK_FORMAT_A8B8G8R8_UNORM_PACK32, + eA8B8G8R8SnormPack32 = VK_FORMAT_A8B8G8R8_SNORM_PACK32, + eA8B8G8R8UscaledPack32 = VK_FORMAT_A8B8G8R8_USCALED_PACK32, + eA8B8G8R8SscaledPack32 = 
VK_FORMAT_A8B8G8R8_SSCALED_PACK32, + eA8B8G8R8UintPack32 = VK_FORMAT_A8B8G8R8_UINT_PACK32, + eA8B8G8R8SintPack32 = VK_FORMAT_A8B8G8R8_SINT_PACK32, + eA8B8G8R8SrgbPack32 = VK_FORMAT_A8B8G8R8_SRGB_PACK32, + eA2R10G10B10UnormPack32 = VK_FORMAT_A2R10G10B10_UNORM_PACK32, + eA2R10G10B10SnormPack32 = VK_FORMAT_A2R10G10B10_SNORM_PACK32, + eA2R10G10B10UscaledPack32 = VK_FORMAT_A2R10G10B10_USCALED_PACK32, + eA2R10G10B10SscaledPack32 = VK_FORMAT_A2R10G10B10_SSCALED_PACK32, + eA2R10G10B10UintPack32 = VK_FORMAT_A2R10G10B10_UINT_PACK32, + eA2R10G10B10SintPack32 = VK_FORMAT_A2R10G10B10_SINT_PACK32, + eA2B10G10R10UnormPack32 = VK_FORMAT_A2B10G10R10_UNORM_PACK32, + eA2B10G10R10SnormPack32 = VK_FORMAT_A2B10G10R10_SNORM_PACK32, + eA2B10G10R10UscaledPack32 = VK_FORMAT_A2B10G10R10_USCALED_PACK32, + eA2B10G10R10SscaledPack32 = VK_FORMAT_A2B10G10R10_SSCALED_PACK32, + eA2B10G10R10UintPack32 = VK_FORMAT_A2B10G10R10_UINT_PACK32, + eA2B10G10R10SintPack32 = VK_FORMAT_A2B10G10R10_SINT_PACK32, + eR16Unorm = VK_FORMAT_R16_UNORM, + eR16Snorm = VK_FORMAT_R16_SNORM, + eR16Uscaled = VK_FORMAT_R16_USCALED, + eR16Sscaled = VK_FORMAT_R16_SSCALED, + eR16Uint = VK_FORMAT_R16_UINT, + eR16Sint = VK_FORMAT_R16_SINT, + eR16Sfloat = VK_FORMAT_R16_SFLOAT, + eR16G16Unorm = VK_FORMAT_R16G16_UNORM, + eR16G16Snorm = VK_FORMAT_R16G16_SNORM, + eR16G16Uscaled = VK_FORMAT_R16G16_USCALED, + eR16G16Sscaled = VK_FORMAT_R16G16_SSCALED, + eR16G16Uint = VK_FORMAT_R16G16_UINT, + eR16G16Sint = VK_FORMAT_R16G16_SINT, + eR16G16Sfloat = VK_FORMAT_R16G16_SFLOAT, + eR16G16B16Unorm = VK_FORMAT_R16G16B16_UNORM, + eR16G16B16Snorm = VK_FORMAT_R16G16B16_SNORM, + eR16G16B16Uscaled = VK_FORMAT_R16G16B16_USCALED, + eR16G16B16Sscaled = VK_FORMAT_R16G16B16_SSCALED, + eR16G16B16Uint = VK_FORMAT_R16G16B16_UINT, + eR16G16B16Sint = VK_FORMAT_R16G16B16_SINT, + eR16G16B16Sfloat = VK_FORMAT_R16G16B16_SFLOAT, + eR16G16B16A16Unorm = VK_FORMAT_R16G16B16A16_UNORM, + eR16G16B16A16Snorm = VK_FORMAT_R16G16B16A16_SNORM, + eR16G16B16A16Uscaled = VK_FORMAT_R16G16B16A16_USCALED, + eR16G16B16A16Sscaled = VK_FORMAT_R16G16B16A16_SSCALED, + eR16G16B16A16Uint = VK_FORMAT_R16G16B16A16_UINT, + eR16G16B16A16Sint = VK_FORMAT_R16G16B16A16_SINT, + eR16G16B16A16Sfloat = VK_FORMAT_R16G16B16A16_SFLOAT, + eR32Uint = VK_FORMAT_R32_UINT, + eR32Sint = VK_FORMAT_R32_SINT, + eR32Sfloat = VK_FORMAT_R32_SFLOAT, + eR32G32Uint = VK_FORMAT_R32G32_UINT, + eR32G32Sint = VK_FORMAT_R32G32_SINT, + eR32G32Sfloat = VK_FORMAT_R32G32_SFLOAT, + eR32G32B32Uint = VK_FORMAT_R32G32B32_UINT, + eR32G32B32Sint = VK_FORMAT_R32G32B32_SINT, + eR32G32B32Sfloat = VK_FORMAT_R32G32B32_SFLOAT, + eR32G32B32A32Uint = VK_FORMAT_R32G32B32A32_UINT, + eR32G32B32A32Sint = VK_FORMAT_R32G32B32A32_SINT, + eR32G32B32A32Sfloat = VK_FORMAT_R32G32B32A32_SFLOAT, + eR64Uint = VK_FORMAT_R64_UINT, + eR64Sint = VK_FORMAT_R64_SINT, + eR64Sfloat = VK_FORMAT_R64_SFLOAT, + eR64G64Uint = VK_FORMAT_R64G64_UINT, + eR64G64Sint = VK_FORMAT_R64G64_SINT, + eR64G64Sfloat = VK_FORMAT_R64G64_SFLOAT, + eR64G64B64Uint = VK_FORMAT_R64G64B64_UINT, + eR64G64B64Sint = VK_FORMAT_R64G64B64_SINT, + eR64G64B64Sfloat = VK_FORMAT_R64G64B64_SFLOAT, + eR64G64B64A64Uint = VK_FORMAT_R64G64B64A64_UINT, + eR64G64B64A64Sint = VK_FORMAT_R64G64B64A64_SINT, + eR64G64B64A64Sfloat = VK_FORMAT_R64G64B64A64_SFLOAT, + eB10G11R11UfloatPack32 = VK_FORMAT_B10G11R11_UFLOAT_PACK32, + eE5B9G9R9UfloatPack32 = VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, + eD16Unorm = VK_FORMAT_D16_UNORM, + eX8D24UnormPack32 = VK_FORMAT_X8_D24_UNORM_PACK32, + eD32Sfloat = VK_FORMAT_D32_SFLOAT, + eS8Uint = VK_FORMAT_S8_UINT, + 
eD16UnormS8Uint = VK_FORMAT_D16_UNORM_S8_UINT, + eD24UnormS8Uint = VK_FORMAT_D24_UNORM_S8_UINT, + eD32SfloatS8Uint = VK_FORMAT_D32_SFLOAT_S8_UINT, + eBc1RgbUnormBlock = VK_FORMAT_BC1_RGB_UNORM_BLOCK, + eBc1RgbSrgbBlock = VK_FORMAT_BC1_RGB_SRGB_BLOCK, + eBc1RgbaUnormBlock = VK_FORMAT_BC1_RGBA_UNORM_BLOCK, + eBc1RgbaSrgbBlock = VK_FORMAT_BC1_RGBA_SRGB_BLOCK, + eBc2UnormBlock = VK_FORMAT_BC2_UNORM_BLOCK, + eBc2SrgbBlock = VK_FORMAT_BC2_SRGB_BLOCK, + eBc3UnormBlock = VK_FORMAT_BC3_UNORM_BLOCK, + eBc3SrgbBlock = VK_FORMAT_BC3_SRGB_BLOCK, + eBc4UnormBlock = VK_FORMAT_BC4_UNORM_BLOCK, + eBc4SnormBlock = VK_FORMAT_BC4_SNORM_BLOCK, + eBc5UnormBlock = VK_FORMAT_BC5_UNORM_BLOCK, + eBc5SnormBlock = VK_FORMAT_BC5_SNORM_BLOCK, + eBc6HUfloatBlock = VK_FORMAT_BC6H_UFLOAT_BLOCK, + eBc6HSfloatBlock = VK_FORMAT_BC6H_SFLOAT_BLOCK, + eBc7UnormBlock = VK_FORMAT_BC7_UNORM_BLOCK, + eBc7SrgbBlock = VK_FORMAT_BC7_SRGB_BLOCK, + eEtc2R8G8B8UnormBlock = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, + eEtc2R8G8B8SrgbBlock = VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, + eEtc2R8G8B8A1UnormBlock = VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, + eEtc2R8G8B8A1SrgbBlock = VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, + eEtc2R8G8B8A8UnormBlock = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, + eEtc2R8G8B8A8SrgbBlock = VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, + eEacR11UnormBlock = VK_FORMAT_EAC_R11_UNORM_BLOCK, + eEacR11SnormBlock = VK_FORMAT_EAC_R11_SNORM_BLOCK, + eEacR11G11UnormBlock = VK_FORMAT_EAC_R11G11_UNORM_BLOCK, + eEacR11G11SnormBlock = VK_FORMAT_EAC_R11G11_SNORM_BLOCK, + eAstc4x4UnormBlock = VK_FORMAT_ASTC_4x4_UNORM_BLOCK, + eAstc4x4SrgbBlock = VK_FORMAT_ASTC_4x4_SRGB_BLOCK, + eAstc5x4UnormBlock = VK_FORMAT_ASTC_5x4_UNORM_BLOCK, + eAstc5x4SrgbBlock = VK_FORMAT_ASTC_5x4_SRGB_BLOCK, + eAstc5x5UnormBlock = VK_FORMAT_ASTC_5x5_UNORM_BLOCK, + eAstc5x5SrgbBlock = VK_FORMAT_ASTC_5x5_SRGB_BLOCK, + eAstc6x5UnormBlock = VK_FORMAT_ASTC_6x5_UNORM_BLOCK, + eAstc6x5SrgbBlock = VK_FORMAT_ASTC_6x5_SRGB_BLOCK, + eAstc6x6UnormBlock = VK_FORMAT_ASTC_6x6_UNORM_BLOCK, + eAstc6x6SrgbBlock = VK_FORMAT_ASTC_6x6_SRGB_BLOCK, + eAstc8x5UnormBlock = VK_FORMAT_ASTC_8x5_UNORM_BLOCK, + eAstc8x5SrgbBlock = VK_FORMAT_ASTC_8x5_SRGB_BLOCK, + eAstc8x6UnormBlock = VK_FORMAT_ASTC_8x6_UNORM_BLOCK, + eAstc8x6SrgbBlock = VK_FORMAT_ASTC_8x6_SRGB_BLOCK, + eAstc8x8UnormBlock = VK_FORMAT_ASTC_8x8_UNORM_BLOCK, + eAstc8x8SrgbBlock = VK_FORMAT_ASTC_8x8_SRGB_BLOCK, + eAstc10x5UnormBlock = VK_FORMAT_ASTC_10x5_UNORM_BLOCK, + eAstc10x5SrgbBlock = VK_FORMAT_ASTC_10x5_SRGB_BLOCK, + eAstc10x6UnormBlock = VK_FORMAT_ASTC_10x6_UNORM_BLOCK, + eAstc10x6SrgbBlock = VK_FORMAT_ASTC_10x6_SRGB_BLOCK, + eAstc10x8UnormBlock = VK_FORMAT_ASTC_10x8_UNORM_BLOCK, + eAstc10x8SrgbBlock = VK_FORMAT_ASTC_10x8_SRGB_BLOCK, + eAstc10x10UnormBlock = VK_FORMAT_ASTC_10x10_UNORM_BLOCK, + eAstc10x10SrgbBlock = VK_FORMAT_ASTC_10x10_SRGB_BLOCK, + eAstc12x10UnormBlock = VK_FORMAT_ASTC_12x10_UNORM_BLOCK, + eAstc12x10SrgbBlock = VK_FORMAT_ASTC_12x10_SRGB_BLOCK, + eAstc12x12UnormBlock = VK_FORMAT_ASTC_12x12_UNORM_BLOCK, + eAstc12x12SrgbBlock = VK_FORMAT_ASTC_12x12_SRGB_BLOCK, + eG8B8G8R8422Unorm = VK_FORMAT_G8B8G8R8_422_UNORM, + eB8G8R8G8422Unorm = VK_FORMAT_B8G8R8G8_422_UNORM, + eG8B8R83Plane420Unorm = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + eG8B8R82Plane420Unorm = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + eG8B8R83Plane422Unorm = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + eG8B8R82Plane422Unorm = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + eG8B8R83Plane444Unorm = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + eR10X6UnormPack16 = VK_FORMAT_R10X6_UNORM_PACK16, + 
eR10X6G10X6Unorm2Pack16 = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + eR10X6G10X6B10X6A10X6Unorm4Pack16 = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + eG10X6B10X6G10X6R10X6422Unorm4Pack16 = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + eB10X6G10X6R10X6G10X6422Unorm4Pack16 = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + eG10X6B10X6R10X63Plane420Unorm3Pack16 = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + eG10X6B10X6R10X62Plane420Unorm3Pack16 = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + eG10X6B10X6R10X63Plane422Unorm3Pack16 = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + eG10X6B10X6R10X62Plane422Unorm3Pack16 = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + eG10X6B10X6R10X63Plane444Unorm3Pack16 = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + eR12X4UnormPack16 = VK_FORMAT_R12X4_UNORM_PACK16, + eR12X4G12X4Unorm2Pack16 = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + eR12X4G12X4B12X4A12X4Unorm4Pack16 = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + eG12X4B12X4G12X4R12X4422Unorm4Pack16 = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + eB12X4G12X4R12X4G12X4422Unorm4Pack16 = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + eG12X4B12X4R12X43Plane420Unorm3Pack16 = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + eG12X4B12X4R12X42Plane420Unorm3Pack16 = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + eG12X4B12X4R12X43Plane422Unorm3Pack16 = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + eG12X4B12X4R12X42Plane422Unorm3Pack16 = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + eG12X4B12X4R12X43Plane444Unorm3Pack16 = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + eG16B16G16R16422Unorm = VK_FORMAT_G16B16G16R16_422_UNORM, + eB16G16R16G16422Unorm = VK_FORMAT_B16G16R16G16_422_UNORM, + eG16B16R163Plane420Unorm = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + eG16B16R162Plane420Unorm = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + eG16B16R163Plane422Unorm = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + eG16B16R162Plane422Unorm = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + eG16B16R163Plane444Unorm = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + ePvrtc12BppUnormBlockIMG = VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG, + ePvrtc14BppUnormBlockIMG = VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG, + ePvrtc22BppUnormBlockIMG = VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG, + ePvrtc24BppUnormBlockIMG = VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG, + ePvrtc12BppSrgbBlockIMG = VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG, + ePvrtc14BppSrgbBlockIMG = VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG, + ePvrtc22BppSrgbBlockIMG = VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG, + ePvrtc24BppSrgbBlockIMG = VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG, + eAstc4x4SfloatBlockEXT = VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT, + eAstc5x4SfloatBlockEXT = VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT, + eAstc5x5SfloatBlockEXT = VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT, + eAstc6x5SfloatBlockEXT = VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT, + eAstc6x6SfloatBlockEXT = VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT, + eAstc8x5SfloatBlockEXT = VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT, + eAstc8x6SfloatBlockEXT = VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT, + eAstc8x8SfloatBlockEXT = VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT, + eAstc10x5SfloatBlockEXT = VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT, + eAstc10x6SfloatBlockEXT = VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT, + eAstc10x8SfloatBlockEXT = VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT, + eAstc10x10SfloatBlockEXT = VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT, + eAstc12x10SfloatBlockEXT = VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT, + eAstc12x12SfloatBlockEXT = 
VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT, + eA4R4G4B4UnormPack16EXT = VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT, + eA4B4G4R4UnormPack16EXT = VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT, + eB10X6G10X6R10X6G10X6422Unorm4Pack16KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR, + eB12X4G12X4R12X4G12X4422Unorm4Pack16KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR, + eB16G16R16G16422UnormKHR = VK_FORMAT_B16G16R16G16_422_UNORM_KHR, + eB8G8R8G8422UnormKHR = VK_FORMAT_B8G8R8G8_422_UNORM_KHR, + eG10X6B10X6G10X6R10X6422Unorm4Pack16KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR, + eG10X6B10X6R10X62Plane420Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR, + eG10X6B10X6R10X62Plane422Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR, + eG10X6B10X6R10X63Plane420Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR, + eG10X6B10X6R10X63Plane422Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR, + eG10X6B10X6R10X63Plane444Unorm3Pack16KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR, + eG12X4B12X4G12X4R12X4422Unorm4Pack16KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR, + eG12X4B12X4R12X42Plane420Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR, + eG12X4B12X4R12X42Plane422Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR, + eG12X4B12X4R12X43Plane420Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR, + eG12X4B12X4R12X43Plane422Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR, + eG12X4B12X4R12X43Plane444Unorm3Pack16KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR, + eG16B16G16R16422UnormKHR = VK_FORMAT_G16B16G16R16_422_UNORM_KHR, + eG16B16R162Plane420UnormKHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR, + eG16B16R162Plane422UnormKHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR, + eG16B16R163Plane420UnormKHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR, + eG16B16R163Plane422UnormKHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR, + eG16B16R163Plane444UnormKHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR, + eG8B8G8R8422UnormKHR = VK_FORMAT_G8B8G8R8_422_UNORM_KHR, + eG8B8R82Plane420UnormKHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR, + eG8B8R82Plane422UnormKHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR, + eG8B8R83Plane420UnormKHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR, + eG8B8R83Plane422UnormKHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR, + eG8B8R83Plane444UnormKHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR, + eR10X6G10X6B10X6A10X6Unorm4Pack16KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR, + eR10X6G10X6Unorm2Pack16KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR, + eR10X6UnormPack16KHR = VK_FORMAT_R10X6_UNORM_PACK16_KHR, + eR12X4G12X4B12X4A12X4Unorm4Pack16KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR, + eR12X4G12X4Unorm2Pack16KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR, + eR12X4UnormPack16KHR = VK_FORMAT_R12X4_UNORM_PACK16_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( Format value ) + { + switch ( value ) + { + case Format::eUndefined : return "Undefined"; + case Format::eR4G4UnormPack8 : return "R4G4UnormPack8"; + case Format::eR4G4B4A4UnormPack16 : return "R4G4B4A4UnormPack16"; + case Format::eB4G4R4A4UnormPack16 : return "B4G4R4A4UnormPack16"; + case Format::eR5G6B5UnormPack16 : return "R5G6B5UnormPack16"; + case Format::eB5G6R5UnormPack16 : return "B5G6R5UnormPack16"; + case Format::eR5G5B5A1UnormPack16 : return "R5G5B5A1UnormPack16"; + case 
Format::eB5G5R5A1UnormPack16 : return "B5G5R5A1UnormPack16"; + case Format::eA1R5G5B5UnormPack16 : return "A1R5G5B5UnormPack16"; + case Format::eR8Unorm : return "R8Unorm"; + case Format::eR8Snorm : return "R8Snorm"; + case Format::eR8Uscaled : return "R8Uscaled"; + case Format::eR8Sscaled : return "R8Sscaled"; + case Format::eR8Uint : return "R8Uint"; + case Format::eR8Sint : return "R8Sint"; + case Format::eR8Srgb : return "R8Srgb"; + case Format::eR8G8Unorm : return "R8G8Unorm"; + case Format::eR8G8Snorm : return "R8G8Snorm"; + case Format::eR8G8Uscaled : return "R8G8Uscaled"; + case Format::eR8G8Sscaled : return "R8G8Sscaled"; + case Format::eR8G8Uint : return "R8G8Uint"; + case Format::eR8G8Sint : return "R8G8Sint"; + case Format::eR8G8Srgb : return "R8G8Srgb"; + case Format::eR8G8B8Unorm : return "R8G8B8Unorm"; + case Format::eR8G8B8Snorm : return "R8G8B8Snorm"; + case Format::eR8G8B8Uscaled : return "R8G8B8Uscaled"; + case Format::eR8G8B8Sscaled : return "R8G8B8Sscaled"; + case Format::eR8G8B8Uint : return "R8G8B8Uint"; + case Format::eR8G8B8Sint : return "R8G8B8Sint"; + case Format::eR8G8B8Srgb : return "R8G8B8Srgb"; + case Format::eB8G8R8Unorm : return "B8G8R8Unorm"; + case Format::eB8G8R8Snorm : return "B8G8R8Snorm"; + case Format::eB8G8R8Uscaled : return "B8G8R8Uscaled"; + case Format::eB8G8R8Sscaled : return "B8G8R8Sscaled"; + case Format::eB8G8R8Uint : return "B8G8R8Uint"; + case Format::eB8G8R8Sint : return "B8G8R8Sint"; + case Format::eB8G8R8Srgb : return "B8G8R8Srgb"; + case Format::eR8G8B8A8Unorm : return "R8G8B8A8Unorm"; + case Format::eR8G8B8A8Snorm : return "R8G8B8A8Snorm"; + case Format::eR8G8B8A8Uscaled : return "R8G8B8A8Uscaled"; + case Format::eR8G8B8A8Sscaled : return "R8G8B8A8Sscaled"; + case Format::eR8G8B8A8Uint : return "R8G8B8A8Uint"; + case Format::eR8G8B8A8Sint : return "R8G8B8A8Sint"; + case Format::eR8G8B8A8Srgb : return "R8G8B8A8Srgb"; + case Format::eB8G8R8A8Unorm : return "B8G8R8A8Unorm"; + case Format::eB8G8R8A8Snorm : return "B8G8R8A8Snorm"; + case Format::eB8G8R8A8Uscaled : return "B8G8R8A8Uscaled"; + case Format::eB8G8R8A8Sscaled : return "B8G8R8A8Sscaled"; + case Format::eB8G8R8A8Uint : return "B8G8R8A8Uint"; + case Format::eB8G8R8A8Sint : return "B8G8R8A8Sint"; + case Format::eB8G8R8A8Srgb : return "B8G8R8A8Srgb"; + case Format::eA8B8G8R8UnormPack32 : return "A8B8G8R8UnormPack32"; + case Format::eA8B8G8R8SnormPack32 : return "A8B8G8R8SnormPack32"; + case Format::eA8B8G8R8UscaledPack32 : return "A8B8G8R8UscaledPack32"; + case Format::eA8B8G8R8SscaledPack32 : return "A8B8G8R8SscaledPack32"; + case Format::eA8B8G8R8UintPack32 : return "A8B8G8R8UintPack32"; + case Format::eA8B8G8R8SintPack32 : return "A8B8G8R8SintPack32"; + case Format::eA8B8G8R8SrgbPack32 : return "A8B8G8R8SrgbPack32"; + case Format::eA2R10G10B10UnormPack32 : return "A2R10G10B10UnormPack32"; + case Format::eA2R10G10B10SnormPack32 : return "A2R10G10B10SnormPack32"; + case Format::eA2R10G10B10UscaledPack32 : return "A2R10G10B10UscaledPack32"; + case Format::eA2R10G10B10SscaledPack32 : return "A2R10G10B10SscaledPack32"; + case Format::eA2R10G10B10UintPack32 : return "A2R10G10B10UintPack32"; + case Format::eA2R10G10B10SintPack32 : return "A2R10G10B10SintPack32"; + case Format::eA2B10G10R10UnormPack32 : return "A2B10G10R10UnormPack32"; + case Format::eA2B10G10R10SnormPack32 : return "A2B10G10R10SnormPack32"; + case Format::eA2B10G10R10UscaledPack32 : return "A2B10G10R10UscaledPack32"; + case Format::eA2B10G10R10SscaledPack32 : return "A2B10G10R10SscaledPack32"; + case 
Format::eA2B10G10R10UintPack32 : return "A2B10G10R10UintPack32"; + case Format::eA2B10G10R10SintPack32 : return "A2B10G10R10SintPack32"; + case Format::eR16Unorm : return "R16Unorm"; + case Format::eR16Snorm : return "R16Snorm"; + case Format::eR16Uscaled : return "R16Uscaled"; + case Format::eR16Sscaled : return "R16Sscaled"; + case Format::eR16Uint : return "R16Uint"; + case Format::eR16Sint : return "R16Sint"; + case Format::eR16Sfloat : return "R16Sfloat"; + case Format::eR16G16Unorm : return "R16G16Unorm"; + case Format::eR16G16Snorm : return "R16G16Snorm"; + case Format::eR16G16Uscaled : return "R16G16Uscaled"; + case Format::eR16G16Sscaled : return "R16G16Sscaled"; + case Format::eR16G16Uint : return "R16G16Uint"; + case Format::eR16G16Sint : return "R16G16Sint"; + case Format::eR16G16Sfloat : return "R16G16Sfloat"; + case Format::eR16G16B16Unorm : return "R16G16B16Unorm"; + case Format::eR16G16B16Snorm : return "R16G16B16Snorm"; + case Format::eR16G16B16Uscaled : return "R16G16B16Uscaled"; + case Format::eR16G16B16Sscaled : return "R16G16B16Sscaled"; + case Format::eR16G16B16Uint : return "R16G16B16Uint"; + case Format::eR16G16B16Sint : return "R16G16B16Sint"; + case Format::eR16G16B16Sfloat : return "R16G16B16Sfloat"; + case Format::eR16G16B16A16Unorm : return "R16G16B16A16Unorm"; + case Format::eR16G16B16A16Snorm : return "R16G16B16A16Snorm"; + case Format::eR16G16B16A16Uscaled : return "R16G16B16A16Uscaled"; + case Format::eR16G16B16A16Sscaled : return "R16G16B16A16Sscaled"; + case Format::eR16G16B16A16Uint : return "R16G16B16A16Uint"; + case Format::eR16G16B16A16Sint : return "R16G16B16A16Sint"; + case Format::eR16G16B16A16Sfloat : return "R16G16B16A16Sfloat"; + case Format::eR32Uint : return "R32Uint"; + case Format::eR32Sint : return "R32Sint"; + case Format::eR32Sfloat : return "R32Sfloat"; + case Format::eR32G32Uint : return "R32G32Uint"; + case Format::eR32G32Sint : return "R32G32Sint"; + case Format::eR32G32Sfloat : return "R32G32Sfloat"; + case Format::eR32G32B32Uint : return "R32G32B32Uint"; + case Format::eR32G32B32Sint : return "R32G32B32Sint"; + case Format::eR32G32B32Sfloat : return "R32G32B32Sfloat"; + case Format::eR32G32B32A32Uint : return "R32G32B32A32Uint"; + case Format::eR32G32B32A32Sint : return "R32G32B32A32Sint"; + case Format::eR32G32B32A32Sfloat : return "R32G32B32A32Sfloat"; + case Format::eR64Uint : return "R64Uint"; + case Format::eR64Sint : return "R64Sint"; + case Format::eR64Sfloat : return "R64Sfloat"; + case Format::eR64G64Uint : return "R64G64Uint"; + case Format::eR64G64Sint : return "R64G64Sint"; + case Format::eR64G64Sfloat : return "R64G64Sfloat"; + case Format::eR64G64B64Uint : return "R64G64B64Uint"; + case Format::eR64G64B64Sint : return "R64G64B64Sint"; + case Format::eR64G64B64Sfloat : return "R64G64B64Sfloat"; + case Format::eR64G64B64A64Uint : return "R64G64B64A64Uint"; + case Format::eR64G64B64A64Sint : return "R64G64B64A64Sint"; + case Format::eR64G64B64A64Sfloat : return "R64G64B64A64Sfloat"; + case Format::eB10G11R11UfloatPack32 : return "B10G11R11UfloatPack32"; + case Format::eE5B9G9R9UfloatPack32 : return "E5B9G9R9UfloatPack32"; + case Format::eD16Unorm : return "D16Unorm"; + case Format::eX8D24UnormPack32 : return "X8D24UnormPack32"; + case Format::eD32Sfloat : return "D32Sfloat"; + case Format::eS8Uint : return "S8Uint"; + case Format::eD16UnormS8Uint : return "D16UnormS8Uint"; + case Format::eD24UnormS8Uint : return "D24UnormS8Uint"; + case Format::eD32SfloatS8Uint : return "D32SfloatS8Uint"; + case Format::eBc1RgbUnormBlock 
: return "Bc1RgbUnormBlock"; + case Format::eBc1RgbSrgbBlock : return "Bc1RgbSrgbBlock"; + case Format::eBc1RgbaUnormBlock : return "Bc1RgbaUnormBlock"; + case Format::eBc1RgbaSrgbBlock : return "Bc1RgbaSrgbBlock"; + case Format::eBc2UnormBlock : return "Bc2UnormBlock"; + case Format::eBc2SrgbBlock : return "Bc2SrgbBlock"; + case Format::eBc3UnormBlock : return "Bc3UnormBlock"; + case Format::eBc3SrgbBlock : return "Bc3SrgbBlock"; + case Format::eBc4UnormBlock : return "Bc4UnormBlock"; + case Format::eBc4SnormBlock : return "Bc4SnormBlock"; + case Format::eBc5UnormBlock : return "Bc5UnormBlock"; + case Format::eBc5SnormBlock : return "Bc5SnormBlock"; + case Format::eBc6HUfloatBlock : return "Bc6HUfloatBlock"; + case Format::eBc6HSfloatBlock : return "Bc6HSfloatBlock"; + case Format::eBc7UnormBlock : return "Bc7UnormBlock"; + case Format::eBc7SrgbBlock : return "Bc7SrgbBlock"; + case Format::eEtc2R8G8B8UnormBlock : return "Etc2R8G8B8UnormBlock"; + case Format::eEtc2R8G8B8SrgbBlock : return "Etc2R8G8B8SrgbBlock"; + case Format::eEtc2R8G8B8A1UnormBlock : return "Etc2R8G8B8A1UnormBlock"; + case Format::eEtc2R8G8B8A1SrgbBlock : return "Etc2R8G8B8A1SrgbBlock"; + case Format::eEtc2R8G8B8A8UnormBlock : return "Etc2R8G8B8A8UnormBlock"; + case Format::eEtc2R8G8B8A8SrgbBlock : return "Etc2R8G8B8A8SrgbBlock"; + case Format::eEacR11UnormBlock : return "EacR11UnormBlock"; + case Format::eEacR11SnormBlock : return "EacR11SnormBlock"; + case Format::eEacR11G11UnormBlock : return "EacR11G11UnormBlock"; + case Format::eEacR11G11SnormBlock : return "EacR11G11SnormBlock"; + case Format::eAstc4x4UnormBlock : return "Astc4x4UnormBlock"; + case Format::eAstc4x4SrgbBlock : return "Astc4x4SrgbBlock"; + case Format::eAstc5x4UnormBlock : return "Astc5x4UnormBlock"; + case Format::eAstc5x4SrgbBlock : return "Astc5x4SrgbBlock"; + case Format::eAstc5x5UnormBlock : return "Astc5x5UnormBlock"; + case Format::eAstc5x5SrgbBlock : return "Astc5x5SrgbBlock"; + case Format::eAstc6x5UnormBlock : return "Astc6x5UnormBlock"; + case Format::eAstc6x5SrgbBlock : return "Astc6x5SrgbBlock"; + case Format::eAstc6x6UnormBlock : return "Astc6x6UnormBlock"; + case Format::eAstc6x6SrgbBlock : return "Astc6x6SrgbBlock"; + case Format::eAstc8x5UnormBlock : return "Astc8x5UnormBlock"; + case Format::eAstc8x5SrgbBlock : return "Astc8x5SrgbBlock"; + case Format::eAstc8x6UnormBlock : return "Astc8x6UnormBlock"; + case Format::eAstc8x6SrgbBlock : return "Astc8x6SrgbBlock"; + case Format::eAstc8x8UnormBlock : return "Astc8x8UnormBlock"; + case Format::eAstc8x8SrgbBlock : return "Astc8x8SrgbBlock"; + case Format::eAstc10x5UnormBlock : return "Astc10x5UnormBlock"; + case Format::eAstc10x5SrgbBlock : return "Astc10x5SrgbBlock"; + case Format::eAstc10x6UnormBlock : return "Astc10x6UnormBlock"; + case Format::eAstc10x6SrgbBlock : return "Astc10x6SrgbBlock"; + case Format::eAstc10x8UnormBlock : return "Astc10x8UnormBlock"; + case Format::eAstc10x8SrgbBlock : return "Astc10x8SrgbBlock"; + case Format::eAstc10x10UnormBlock : return "Astc10x10UnormBlock"; + case Format::eAstc10x10SrgbBlock : return "Astc10x10SrgbBlock"; + case Format::eAstc12x10UnormBlock : return "Astc12x10UnormBlock"; + case Format::eAstc12x10SrgbBlock : return "Astc12x10SrgbBlock"; + case Format::eAstc12x12UnormBlock : return "Astc12x12UnormBlock"; + case Format::eAstc12x12SrgbBlock : return "Astc12x12SrgbBlock"; + case Format::eG8B8G8R8422Unorm : return "G8B8G8R8422Unorm"; + case Format::eB8G8R8G8422Unorm : return "B8G8R8G8422Unorm"; + case Format::eG8B8R83Plane420Unorm : return 
"G8B8R83Plane420Unorm"; + case Format::eG8B8R82Plane420Unorm : return "G8B8R82Plane420Unorm"; + case Format::eG8B8R83Plane422Unorm : return "G8B8R83Plane422Unorm"; + case Format::eG8B8R82Plane422Unorm : return "G8B8R82Plane422Unorm"; + case Format::eG8B8R83Plane444Unorm : return "G8B8R83Plane444Unorm"; + case Format::eR10X6UnormPack16 : return "R10X6UnormPack16"; + case Format::eR10X6G10X6Unorm2Pack16 : return "R10X6G10X6Unorm2Pack16"; + case Format::eR10X6G10X6B10X6A10X6Unorm4Pack16 : return "R10X6G10X6B10X6A10X6Unorm4Pack16"; + case Format::eG10X6B10X6G10X6R10X6422Unorm4Pack16 : return "G10X6B10X6G10X6R10X6422Unorm4Pack16"; + case Format::eB10X6G10X6R10X6G10X6422Unorm4Pack16 : return "B10X6G10X6R10X6G10X6422Unorm4Pack16"; + case Format::eG10X6B10X6R10X63Plane420Unorm3Pack16 : return "G10X6B10X6R10X63Plane420Unorm3Pack16"; + case Format::eG10X6B10X6R10X62Plane420Unorm3Pack16 : return "G10X6B10X6R10X62Plane420Unorm3Pack16"; + case Format::eG10X6B10X6R10X63Plane422Unorm3Pack16 : return "G10X6B10X6R10X63Plane422Unorm3Pack16"; + case Format::eG10X6B10X6R10X62Plane422Unorm3Pack16 : return "G10X6B10X6R10X62Plane422Unorm3Pack16"; + case Format::eG10X6B10X6R10X63Plane444Unorm3Pack16 : return "G10X6B10X6R10X63Plane444Unorm3Pack16"; + case Format::eR12X4UnormPack16 : return "R12X4UnormPack16"; + case Format::eR12X4G12X4Unorm2Pack16 : return "R12X4G12X4Unorm2Pack16"; + case Format::eR12X4G12X4B12X4A12X4Unorm4Pack16 : return "R12X4G12X4B12X4A12X4Unorm4Pack16"; + case Format::eG12X4B12X4G12X4R12X4422Unorm4Pack16 : return "G12X4B12X4G12X4R12X4422Unorm4Pack16"; + case Format::eB12X4G12X4R12X4G12X4422Unorm4Pack16 : return "B12X4G12X4R12X4G12X4422Unorm4Pack16"; + case Format::eG12X4B12X4R12X43Plane420Unorm3Pack16 : return "G12X4B12X4R12X43Plane420Unorm3Pack16"; + case Format::eG12X4B12X4R12X42Plane420Unorm3Pack16 : return "G12X4B12X4R12X42Plane420Unorm3Pack16"; + case Format::eG12X4B12X4R12X43Plane422Unorm3Pack16 : return "G12X4B12X4R12X43Plane422Unorm3Pack16"; + case Format::eG12X4B12X4R12X42Plane422Unorm3Pack16 : return "G12X4B12X4R12X42Plane422Unorm3Pack16"; + case Format::eG12X4B12X4R12X43Plane444Unorm3Pack16 : return "G12X4B12X4R12X43Plane444Unorm3Pack16"; + case Format::eG16B16G16R16422Unorm : return "G16B16G16R16422Unorm"; + case Format::eB16G16R16G16422Unorm : return "B16G16R16G16422Unorm"; + case Format::eG16B16R163Plane420Unorm : return "G16B16R163Plane420Unorm"; + case Format::eG16B16R162Plane420Unorm : return "G16B16R162Plane420Unorm"; + case Format::eG16B16R163Plane422Unorm : return "G16B16R163Plane422Unorm"; + case Format::eG16B16R162Plane422Unorm : return "G16B16R162Plane422Unorm"; + case Format::eG16B16R163Plane444Unorm : return "G16B16R163Plane444Unorm"; + case Format::ePvrtc12BppUnormBlockIMG : return "Pvrtc12BppUnormBlockIMG"; + case Format::ePvrtc14BppUnormBlockIMG : return "Pvrtc14BppUnormBlockIMG"; + case Format::ePvrtc22BppUnormBlockIMG : return "Pvrtc22BppUnormBlockIMG"; + case Format::ePvrtc24BppUnormBlockIMG : return "Pvrtc24BppUnormBlockIMG"; + case Format::ePvrtc12BppSrgbBlockIMG : return "Pvrtc12BppSrgbBlockIMG"; + case Format::ePvrtc14BppSrgbBlockIMG : return "Pvrtc14BppSrgbBlockIMG"; + case Format::ePvrtc22BppSrgbBlockIMG : return "Pvrtc22BppSrgbBlockIMG"; + case Format::ePvrtc24BppSrgbBlockIMG : return "Pvrtc24BppSrgbBlockIMG"; + case Format::eAstc4x4SfloatBlockEXT : return "Astc4x4SfloatBlockEXT"; + case Format::eAstc5x4SfloatBlockEXT : return "Astc5x4SfloatBlockEXT"; + case Format::eAstc5x5SfloatBlockEXT : return "Astc5x5SfloatBlockEXT"; + case 
Format::eAstc6x5SfloatBlockEXT : return "Astc6x5SfloatBlockEXT"; + case Format::eAstc6x6SfloatBlockEXT : return "Astc6x6SfloatBlockEXT"; + case Format::eAstc8x5SfloatBlockEXT : return "Astc8x5SfloatBlockEXT"; + case Format::eAstc8x6SfloatBlockEXT : return "Astc8x6SfloatBlockEXT"; + case Format::eAstc8x8SfloatBlockEXT : return "Astc8x8SfloatBlockEXT"; + case Format::eAstc10x5SfloatBlockEXT : return "Astc10x5SfloatBlockEXT"; + case Format::eAstc10x6SfloatBlockEXT : return "Astc10x6SfloatBlockEXT"; + case Format::eAstc10x8SfloatBlockEXT : return "Astc10x8SfloatBlockEXT"; + case Format::eAstc10x10SfloatBlockEXT : return "Astc10x10SfloatBlockEXT"; + case Format::eAstc12x10SfloatBlockEXT : return "Astc12x10SfloatBlockEXT"; + case Format::eAstc12x12SfloatBlockEXT : return "Astc12x12SfloatBlockEXT"; + case Format::eA4R4G4B4UnormPack16EXT : return "A4R4G4B4UnormPack16EXT"; + case Format::eA4B4G4R4UnormPack16EXT : return "A4B4G4R4UnormPack16EXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class FormatFeatureFlagBits : VkFormatFeatureFlags + { + eSampledImage = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, + eStorageImage = VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT, + eStorageImageAtomic = VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT, + eUniformTexelBuffer = VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT, + eStorageTexelBuffer = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT, + eStorageTexelBufferAtomic = VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT, + eVertexBuffer = VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT, + eColorAttachment = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, + eColorAttachmentBlend = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT, + eDepthStencilAttachment = VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, + eBlitSrc = VK_FORMAT_FEATURE_BLIT_SRC_BIT, + eBlitDst = VK_FORMAT_FEATURE_BLIT_DST_BIT, + eSampledImageFilterLinear = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT, + eTransferSrc = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + eTransferDst = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + eMidpointChromaSamples = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + eSampledImageYcbcrConversionLinearFilter = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + eSampledImageYcbcrConversionSeparateReconstructionFilter = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + eSampledImageYcbcrConversionChromaReconstructionExplicit = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + eSampledImageYcbcrConversionChromaReconstructionExplicitForceable = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + eDisjoint = VK_FORMAT_FEATURE_DISJOINT_BIT, + eCositedChromaSamples = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + eSampledImageFilterMinmax = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, + eSampledImageFilterCubicIMG = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, + eAccelerationStructureVertexBufferKHR = VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR, + eFragmentDensityMapEXT = VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT, + eFragmentShadingRateAttachmentKHR = VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + eCositedChromaSamplesKHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR, + eDisjointKHR = VK_FORMAT_FEATURE_DISJOINT_BIT_KHR, + eMidpointChromaSamplesKHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR, + eSampledImageFilterCubicEXT = 
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT, + eSampledImageFilterMinmaxEXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT, + eSampledImageYcbcrConversionChromaReconstructionExplicitKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR, + eSampledImageYcbcrConversionChromaReconstructionExplicitForceableKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR, + eSampledImageYcbcrConversionLinearFilterKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR, + eSampledImageYcbcrConversionSeparateReconstructionFilterKHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR, + eTransferDstKHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR, + eTransferSrcKHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( FormatFeatureFlagBits value ) + { + switch ( value ) + { + case FormatFeatureFlagBits::eSampledImage : return "SampledImage"; + case FormatFeatureFlagBits::eStorageImage : return "StorageImage"; + case FormatFeatureFlagBits::eStorageImageAtomic : return "StorageImageAtomic"; + case FormatFeatureFlagBits::eUniformTexelBuffer : return "UniformTexelBuffer"; + case FormatFeatureFlagBits::eStorageTexelBuffer : return "StorageTexelBuffer"; + case FormatFeatureFlagBits::eStorageTexelBufferAtomic : return "StorageTexelBufferAtomic"; + case FormatFeatureFlagBits::eVertexBuffer : return "VertexBuffer"; + case FormatFeatureFlagBits::eColorAttachment : return "ColorAttachment"; + case FormatFeatureFlagBits::eColorAttachmentBlend : return "ColorAttachmentBlend"; + case FormatFeatureFlagBits::eDepthStencilAttachment : return "DepthStencilAttachment"; + case FormatFeatureFlagBits::eBlitSrc : return "BlitSrc"; + case FormatFeatureFlagBits::eBlitDst : return "BlitDst"; + case FormatFeatureFlagBits::eSampledImageFilterLinear : return "SampledImageFilterLinear"; + case FormatFeatureFlagBits::eTransferSrc : return "TransferSrc"; + case FormatFeatureFlagBits::eTransferDst : return "TransferDst"; + case FormatFeatureFlagBits::eMidpointChromaSamples : return "MidpointChromaSamples"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter : return "SampledImageYcbcrConversionLinearFilter"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter : return "SampledImageYcbcrConversionSeparateReconstructionFilter"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit : return "SampledImageYcbcrConversionChromaReconstructionExplicit"; + case FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable : return "SampledImageYcbcrConversionChromaReconstructionExplicitForceable"; + case FormatFeatureFlagBits::eDisjoint : return "Disjoint"; + case FormatFeatureFlagBits::eCositedChromaSamples : return "CositedChromaSamples"; + case FormatFeatureFlagBits::eSampledImageFilterMinmax : return "SampledImageFilterMinmax"; + case FormatFeatureFlagBits::eSampledImageFilterCubicIMG : return "SampledImageFilterCubicIMG"; + case FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR : return "AccelerationStructureVertexBufferKHR"; + case FormatFeatureFlagBits::eFragmentDensityMapEXT : return "FragmentDensityMapEXT"; + case FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR : return "FragmentShadingRateAttachmentKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + 
+  enum class FragmentShadingRateCombinerOpKHR
+  {
+    eKeep = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR,
+    eReplace = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR,
+    eMin = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR,
+    eMax = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR,
+    eMul = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( FragmentShadingRateCombinerOpKHR value )
+  {
+    switch ( value )
+    {
+      case FragmentShadingRateCombinerOpKHR::eKeep : return "Keep";
+      case FragmentShadingRateCombinerOpKHR::eReplace : return "Replace";
+      case FragmentShadingRateCombinerOpKHR::eMin : return "Min";
+      case FragmentShadingRateCombinerOpKHR::eMax : return "Max";
+      case FragmentShadingRateCombinerOpKHR::eMul : return "Mul";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class FragmentShadingRateNV
+  {
+    e1InvocationPerPixel = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV,
+    e1InvocationPer1X2Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV,
+    e1InvocationPer2X1Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV,
+    e1InvocationPer2X2Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV,
+    e1InvocationPer2X4Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV,
+    e1InvocationPer4X2Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV,
+    e1InvocationPer4X4Pixels = VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV,
+    e2InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV,
+    e4InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV,
+    e8InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV,
+    e16InvocationsPerPixel = VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV,
+    eNoInvocations = VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( FragmentShadingRateNV value )
+  {
+    switch ( value )
+    {
+      case FragmentShadingRateNV::e1InvocationPerPixel : return "1InvocationPerPixel";
+      case FragmentShadingRateNV::e1InvocationPer1X2Pixels : return "1InvocationPer1X2Pixels";
+      case FragmentShadingRateNV::e1InvocationPer2X1Pixels : return "1InvocationPer2X1Pixels";
+      case FragmentShadingRateNV::e1InvocationPer2X2Pixels : return "1InvocationPer2X2Pixels";
+      case FragmentShadingRateNV::e1InvocationPer2X4Pixels : return "1InvocationPer2X4Pixels";
+      case FragmentShadingRateNV::e1InvocationPer4X2Pixels : return "1InvocationPer4X2Pixels";
+      case FragmentShadingRateNV::e1InvocationPer4X4Pixels : return "1InvocationPer4X4Pixels";
+      case FragmentShadingRateNV::e2InvocationsPerPixel : return "2InvocationsPerPixel";
+      case FragmentShadingRateNV::e4InvocationsPerPixel : return "4InvocationsPerPixel";
+      case FragmentShadingRateNV::e8InvocationsPerPixel : return "8InvocationsPerPixel";
+      case FragmentShadingRateNV::e16InvocationsPerPixel : return "16InvocationsPerPixel";
+      case FragmentShadingRateNV::eNoInvocations : return "NoInvocations";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class FragmentShadingRateTypeNV
+  {
+    eFragmentSize = VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV,
+    eEnums = VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( FragmentShadingRateTypeNV value )
+  {
+    switch ( value )
+    {
+      case FragmentShadingRateTypeNV::eFragmentSize : return "FragmentSize";
+      case FragmentShadingRateTypeNV::eEnums : return "Enums";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class FramebufferCreateFlagBits : VkFramebufferCreateFlags
+  {
+    eImageless = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT,
+    eImagelessKHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( FramebufferCreateFlagBits value )
+  {
+    switch ( value )
+    {
+      case FramebufferCreateFlagBits::eImageless : return "Imageless";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class FrontFace
+  {
+    eCounterClockwise = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+    eClockwise = VK_FRONT_FACE_CLOCKWISE
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( FrontFace value )
+  {
+    switch ( value )
+    {
+      case FrontFace::eCounterClockwise : return "CounterClockwise";
+      case FrontFace::eClockwise : return "Clockwise";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+  enum class FullScreenExclusiveEXT
+  {
+    eDefault = VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT,
+    eAllowed = VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT,
+    eDisallowed = VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT,
+    eApplicationControlled = VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( FullScreenExclusiveEXT value )
+  {
+    switch ( value )
+    {
+      case FullScreenExclusiveEXT::eDefault : return "Default";
+      case FullScreenExclusiveEXT::eAllowed : return "Allowed";
+      case FullScreenExclusiveEXT::eDisallowed : return "Disallowed";
+      case FullScreenExclusiveEXT::eApplicationControlled : return "ApplicationControlled";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+  enum class GeometryFlagBitsKHR : VkGeometryFlagsKHR
+  {
+    eOpaque = VK_GEOMETRY_OPAQUE_BIT_KHR,
+    eNoDuplicateAnyHitInvocation = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR
+  };
+  using GeometryFlagBitsNV = GeometryFlagBitsKHR;
+
+  VULKAN_HPP_INLINE std::string to_string( GeometryFlagBitsKHR value )
+  {
+    switch ( value )
+    {
+      case GeometryFlagBitsKHR::eOpaque : return "Opaque";
+      case GeometryFlagBitsKHR::eNoDuplicateAnyHitInvocation : return "NoDuplicateAnyHitInvocation";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class GeometryInstanceFlagBitsKHR : VkGeometryInstanceFlagsKHR
+  {
+    eTriangleFacingCullDisable = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR,
+    eTriangleFrontCounterclockwise = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR,
+    eForceOpaque = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR,
+    eForceNoOpaque = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR,
+    eTriangleCullDisable = VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV
+  };
+  using GeometryInstanceFlagBitsNV = GeometryInstanceFlagBitsKHR;
+
+  VULKAN_HPP_INLINE std::string to_string( GeometryInstanceFlagBitsKHR value )
+  {
+    switch ( value )
+    {
+      case GeometryInstanceFlagBitsKHR::eTriangleFacingCullDisable : return "TriangleFacingCullDisable";
+      case GeometryInstanceFlagBitsKHR::eTriangleFrontCounterclockwise : return "TriangleFrontCounterclockwise";
+      case GeometryInstanceFlagBitsKHR::eForceOpaque : return "ForceOpaque";
+      case GeometryInstanceFlagBitsKHR::eForceNoOpaque : return "ForceNoOpaque";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+ } + } + + enum class GeometryTypeKHR + { + eTriangles = VK_GEOMETRY_TYPE_TRIANGLES_KHR, + eAabbs = VK_GEOMETRY_TYPE_AABBS_KHR, + eInstances = VK_GEOMETRY_TYPE_INSTANCES_KHR + }; + using GeometryTypeNV = GeometryTypeKHR; + + VULKAN_HPP_INLINE std::string to_string( GeometryTypeKHR value ) + { + switch ( value ) + { + case GeometryTypeKHR::eTriangles : return "Triangles"; + case GeometryTypeKHR::eAabbs : return "Aabbs"; + case GeometryTypeKHR::eInstances : return "Instances"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageAspectFlagBits : VkImageAspectFlags + { + eColor = VK_IMAGE_ASPECT_COLOR_BIT, + eDepth = VK_IMAGE_ASPECT_DEPTH_BIT, + eStencil = VK_IMAGE_ASPECT_STENCIL_BIT, + eMetadata = VK_IMAGE_ASPECT_METADATA_BIT, + ePlane0 = VK_IMAGE_ASPECT_PLANE_0_BIT, + ePlane1 = VK_IMAGE_ASPECT_PLANE_1_BIT, + ePlane2 = VK_IMAGE_ASPECT_PLANE_2_BIT, + eMemoryPlane0EXT = VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT, + eMemoryPlane1EXT = VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT, + eMemoryPlane2EXT = VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT, + eMemoryPlane3EXT = VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT, + ePlane0KHR = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR, + ePlane1KHR = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR, + ePlane2KHR = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ImageAspectFlagBits value ) + { + switch ( value ) + { + case ImageAspectFlagBits::eColor : return "Color"; + case ImageAspectFlagBits::eDepth : return "Depth"; + case ImageAspectFlagBits::eStencil : return "Stencil"; + case ImageAspectFlagBits::eMetadata : return "Metadata"; + case ImageAspectFlagBits::ePlane0 : return "Plane0"; + case ImageAspectFlagBits::ePlane1 : return "Plane1"; + case ImageAspectFlagBits::ePlane2 : return "Plane2"; + case ImageAspectFlagBits::eMemoryPlane0EXT : return "MemoryPlane0EXT"; + case ImageAspectFlagBits::eMemoryPlane1EXT : return "MemoryPlane1EXT"; + case ImageAspectFlagBits::eMemoryPlane2EXT : return "MemoryPlane2EXT"; + case ImageAspectFlagBits::eMemoryPlane3EXT : return "MemoryPlane3EXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageCreateFlagBits : VkImageCreateFlags + { + eSparseBinding = VK_IMAGE_CREATE_SPARSE_BINDING_BIT, + eSparseResidency = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, + eSparseAliased = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT, + eMutableFormat = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, + eCubeCompatible = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, + eAlias = VK_IMAGE_CREATE_ALIAS_BIT, + eSplitInstanceBindRegions = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + e2DArrayCompatible = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + eBlockTexelViewCompatible = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + eExtendedUsage = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + eProtected = VK_IMAGE_CREATE_PROTECTED_BIT, + eDisjoint = VK_IMAGE_CREATE_DISJOINT_BIT, + eCornerSampledNV = VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV, + eSampleLocationsCompatibleDepthEXT = VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT, + eSubsampledEXT = VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT, + e2DArrayCompatibleKHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, + eAliasKHR = VK_IMAGE_CREATE_ALIAS_BIT_KHR, + eBlockTexelViewCompatibleKHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, + eDisjointKHR = VK_IMAGE_CREATE_DISJOINT_BIT_KHR, + eExtendedUsageKHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR, + eSplitInstanceBindRegionsKHR = 
VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ImageCreateFlagBits value ) + { + switch ( value ) + { + case ImageCreateFlagBits::eSparseBinding : return "SparseBinding"; + case ImageCreateFlagBits::eSparseResidency : return "SparseResidency"; + case ImageCreateFlagBits::eSparseAliased : return "SparseAliased"; + case ImageCreateFlagBits::eMutableFormat : return "MutableFormat"; + case ImageCreateFlagBits::eCubeCompatible : return "CubeCompatible"; + case ImageCreateFlagBits::eAlias : return "Alias"; + case ImageCreateFlagBits::eSplitInstanceBindRegions : return "SplitInstanceBindRegions"; + case ImageCreateFlagBits::e2DArrayCompatible : return "2DArrayCompatible"; + case ImageCreateFlagBits::eBlockTexelViewCompatible : return "BlockTexelViewCompatible"; + case ImageCreateFlagBits::eExtendedUsage : return "ExtendedUsage"; + case ImageCreateFlagBits::eProtected : return "Protected"; + case ImageCreateFlagBits::eDisjoint : return "Disjoint"; + case ImageCreateFlagBits::eCornerSampledNV : return "CornerSampledNV"; + case ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT : return "SampleLocationsCompatibleDepthEXT"; + case ImageCreateFlagBits::eSubsampledEXT : return "SubsampledEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageLayout + { + eUndefined = VK_IMAGE_LAYOUT_UNDEFINED, + eGeneral = VK_IMAGE_LAYOUT_GENERAL, + eColorAttachmentOptimal = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + eDepthStencilAttachmentOptimal = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, + eDepthStencilReadOnlyOptimal = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, + eShaderReadOnlyOptimal = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + eTransferSrcOptimal = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + eTransferDstOptimal = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + ePreinitialized = VK_IMAGE_LAYOUT_PREINITIALIZED, + eDepthReadOnlyStencilAttachmentOptimal = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + eDepthAttachmentStencilReadOnlyOptimal = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + eDepthAttachmentOptimal = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, + eDepthReadOnlyOptimal = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, + eStencilAttachmentOptimal = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, + eStencilReadOnlyOptimal = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, + ePresentSrcKHR = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + eSharedPresentKHR = VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, + eShadingRateOptimalNV = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, + eFragmentDensityMapOptimalEXT = VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, + eDepthAttachmentOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR, + eDepthAttachmentStencilReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR, + eDepthReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR, + eDepthReadOnlyStencilAttachmentOptimalKHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR, + eFragmentShadingRateAttachmentOptimalKHR = VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR, + eStencilAttachmentOptimalKHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR, + eStencilReadOnlyOptimalKHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ImageLayout value ) + { + switch ( value ) + { + case ImageLayout::eUndefined : return "Undefined"; + case ImageLayout::eGeneral : return "General"; + case 
ImageLayout::eColorAttachmentOptimal : return "ColorAttachmentOptimal"; + case ImageLayout::eDepthStencilAttachmentOptimal : return "DepthStencilAttachmentOptimal"; + case ImageLayout::eDepthStencilReadOnlyOptimal : return "DepthStencilReadOnlyOptimal"; + case ImageLayout::eShaderReadOnlyOptimal : return "ShaderReadOnlyOptimal"; + case ImageLayout::eTransferSrcOptimal : return "TransferSrcOptimal"; + case ImageLayout::eTransferDstOptimal : return "TransferDstOptimal"; + case ImageLayout::ePreinitialized : return "Preinitialized"; + case ImageLayout::eDepthReadOnlyStencilAttachmentOptimal : return "DepthReadOnlyStencilAttachmentOptimal"; + case ImageLayout::eDepthAttachmentStencilReadOnlyOptimal : return "DepthAttachmentStencilReadOnlyOptimal"; + case ImageLayout::eDepthAttachmentOptimal : return "DepthAttachmentOptimal"; + case ImageLayout::eDepthReadOnlyOptimal : return "DepthReadOnlyOptimal"; + case ImageLayout::eStencilAttachmentOptimal : return "StencilAttachmentOptimal"; + case ImageLayout::eStencilReadOnlyOptimal : return "StencilReadOnlyOptimal"; + case ImageLayout::ePresentSrcKHR : return "PresentSrcKHR"; + case ImageLayout::eSharedPresentKHR : return "SharedPresentKHR"; + case ImageLayout::eShadingRateOptimalNV : return "ShadingRateOptimalNV"; + case ImageLayout::eFragmentDensityMapOptimalEXT : return "FragmentDensityMapOptimalEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageTiling + { + eOptimal = VK_IMAGE_TILING_OPTIMAL, + eLinear = VK_IMAGE_TILING_LINEAR, + eDrmFormatModifierEXT = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ImageTiling value ) + { + switch ( value ) + { + case ImageTiling::eOptimal : return "Optimal"; + case ImageTiling::eLinear : return "Linear"; + case ImageTiling::eDrmFormatModifierEXT : return "DrmFormatModifierEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageType + { + e1D = VK_IMAGE_TYPE_1D, + e2D = VK_IMAGE_TYPE_2D, + e3D = VK_IMAGE_TYPE_3D + }; + + VULKAN_HPP_INLINE std::string to_string( ImageType value ) + { + switch ( value ) + { + case ImageType::e1D : return "1D"; + case ImageType::e2D : return "2D"; + case ImageType::e3D : return "3D"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageUsageFlagBits : VkImageUsageFlags + { + eTransferSrc = VK_IMAGE_USAGE_TRANSFER_SRC_BIT, + eTransferDst = VK_IMAGE_USAGE_TRANSFER_DST_BIT, + eSampled = VK_IMAGE_USAGE_SAMPLED_BIT, + eStorage = VK_IMAGE_USAGE_STORAGE_BIT, + eColorAttachment = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, + eDepthStencilAttachment = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, + eTransientAttachment = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT, + eInputAttachment = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, + eShadingRateImageNV = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, + eFragmentDensityMapEXT = VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, + eFragmentShadingRateAttachmentKHR = VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ImageUsageFlagBits value ) + { + switch ( value ) + { + case ImageUsageFlagBits::eTransferSrc : return "TransferSrc"; + case ImageUsageFlagBits::eTransferDst : return "TransferDst"; + case ImageUsageFlagBits::eSampled : return "Sampled"; + case ImageUsageFlagBits::eStorage : return "Storage"; + case ImageUsageFlagBits::eColorAttachment : 
return "ColorAttachment"; + case ImageUsageFlagBits::eDepthStencilAttachment : return "DepthStencilAttachment"; + case ImageUsageFlagBits::eTransientAttachment : return "TransientAttachment"; + case ImageUsageFlagBits::eInputAttachment : return "InputAttachment"; + case ImageUsageFlagBits::eShadingRateImageNV : return "ShadingRateImageNV"; + case ImageUsageFlagBits::eFragmentDensityMapEXT : return "FragmentDensityMapEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageViewCreateFlagBits : VkImageViewCreateFlags + { + eFragmentDensityMapDynamicEXT = VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT, + eFragmentDensityMapDeferredEXT = VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ImageViewCreateFlagBits value ) + { + switch ( value ) + { + case ImageViewCreateFlagBits::eFragmentDensityMapDynamicEXT : return "FragmentDensityMapDynamicEXT"; + case ImageViewCreateFlagBits::eFragmentDensityMapDeferredEXT : return "FragmentDensityMapDeferredEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ImageViewType + { + e1D = VK_IMAGE_VIEW_TYPE_1D, + e2D = VK_IMAGE_VIEW_TYPE_2D, + e3D = VK_IMAGE_VIEW_TYPE_3D, + eCube = VK_IMAGE_VIEW_TYPE_CUBE, + e1DArray = VK_IMAGE_VIEW_TYPE_1D_ARRAY, + e2DArray = VK_IMAGE_VIEW_TYPE_2D_ARRAY, + eCubeArray = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY + }; + + VULKAN_HPP_INLINE std::string to_string( ImageViewType value ) + { + switch ( value ) + { + case ImageViewType::e1D : return "1D"; + case ImageViewType::e2D : return "2D"; + case ImageViewType::e3D : return "3D"; + case ImageViewType::eCube : return "Cube"; + case ImageViewType::e1DArray : return "1DArray"; + case ImageViewType::e2DArray : return "2DArray"; + case ImageViewType::eCubeArray : return "CubeArray"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class IndexType + { + eUint16 = VK_INDEX_TYPE_UINT16, + eUint32 = VK_INDEX_TYPE_UINT32, + eNoneKHR = VK_INDEX_TYPE_NONE_KHR, + eUint8EXT = VK_INDEX_TYPE_UINT8_EXT, + eNoneNV = VK_INDEX_TYPE_NONE_NV + }; + + VULKAN_HPP_INLINE std::string to_string( IndexType value ) + { + switch ( value ) + { + case IndexType::eUint16 : return "Uint16"; + case IndexType::eUint32 : return "Uint32"; + case IndexType::eNoneKHR : return "NoneKHR"; + case IndexType::eUint8EXT : return "Uint8EXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class IndirectCommandsLayoutUsageFlagBitsNV : VkIndirectCommandsLayoutUsageFlagsNV + { + eExplicitPreprocess = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV, + eIndexedSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV, + eUnorderedSequences = VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( IndirectCommandsLayoutUsageFlagBitsNV value ) + { + switch ( value ) + { + case IndirectCommandsLayoutUsageFlagBitsNV::eExplicitPreprocess : return "ExplicitPreprocess"; + case IndirectCommandsLayoutUsageFlagBitsNV::eIndexedSequences : return "IndexedSequences"; + case IndirectCommandsLayoutUsageFlagBitsNV::eUnorderedSequences : return "UnorderedSequences"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class IndirectCommandsTokenTypeNV + { + 
eShaderGroup = VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV, + eStateFlags = VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV, + eIndexBuffer = VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV, + eVertexBuffer = VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV, + ePushConstant = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV, + eDrawIndexed = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV, + eDraw = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV, + eDrawTasks = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV + }; + + VULKAN_HPP_INLINE std::string to_string( IndirectCommandsTokenTypeNV value ) + { + switch ( value ) + { + case IndirectCommandsTokenTypeNV::eShaderGroup : return "ShaderGroup"; + case IndirectCommandsTokenTypeNV::eStateFlags : return "StateFlags"; + case IndirectCommandsTokenTypeNV::eIndexBuffer : return "IndexBuffer"; + case IndirectCommandsTokenTypeNV::eVertexBuffer : return "VertexBuffer"; + case IndirectCommandsTokenTypeNV::ePushConstant : return "PushConstant"; + case IndirectCommandsTokenTypeNV::eDrawIndexed : return "DrawIndexed"; + case IndirectCommandsTokenTypeNV::eDraw : return "Draw"; + case IndirectCommandsTokenTypeNV::eDrawTasks : return "DrawTasks"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class IndirectStateFlagBitsNV : VkIndirectStateFlagsNV + { + eFlagFrontface = VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( IndirectStateFlagBitsNV value ) + { + switch ( value ) + { + case IndirectStateFlagBitsNV::eFlagFrontface : return "FlagFrontface"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class InstanceCreateFlagBits + {}; + + VULKAN_HPP_INLINE std::string to_string( InstanceCreateFlagBits ) + { + return "(void)"; + } + + enum class InternalAllocationType + { + eExecutable = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + }; + + VULKAN_HPP_INLINE std::string to_string( InternalAllocationType value ) + { + switch ( value ) + { + case InternalAllocationType::eExecutable : return "Executable"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class LineRasterizationModeEXT + { + eDefault = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT, + eRectangular = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT, + eBresenham = VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT, + eRectangularSmooth = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( LineRasterizationModeEXT value ) + { + switch ( value ) + { + case LineRasterizationModeEXT::eDefault : return "Default"; + case LineRasterizationModeEXT::eRectangular : return "Rectangular"; + case LineRasterizationModeEXT::eBresenham : return "Bresenham"; + case LineRasterizationModeEXT::eRectangularSmooth : return "RectangularSmooth"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class LogicOp + { + eClear = VK_LOGIC_OP_CLEAR, + eAnd = VK_LOGIC_OP_AND, + eAndReverse = VK_LOGIC_OP_AND_REVERSE, + eCopy = VK_LOGIC_OP_COPY, + eAndInverted = VK_LOGIC_OP_AND_INVERTED, + eNoOp = VK_LOGIC_OP_NO_OP, + eXor = VK_LOGIC_OP_XOR, + eOr = VK_LOGIC_OP_OR, + eNor = VK_LOGIC_OP_NOR, + eEquivalent = VK_LOGIC_OP_EQUIVALENT, + eInvert = VK_LOGIC_OP_INVERT, + eOrReverse = VK_LOGIC_OP_OR_REVERSE, + eCopyInverted = VK_LOGIC_OP_COPY_INVERTED, + eOrInverted = VK_LOGIC_OP_OR_INVERTED, + eNand = VK_LOGIC_OP_NAND, + eSet = 
VK_LOGIC_OP_SET + }; + + VULKAN_HPP_INLINE std::string to_string( LogicOp value ) + { + switch ( value ) + { + case LogicOp::eClear : return "Clear"; + case LogicOp::eAnd : return "And"; + case LogicOp::eAndReverse : return "AndReverse"; + case LogicOp::eCopy : return "Copy"; + case LogicOp::eAndInverted : return "AndInverted"; + case LogicOp::eNoOp : return "NoOp"; + case LogicOp::eXor : return "Xor"; + case LogicOp::eOr : return "Or"; + case LogicOp::eNor : return "Nor"; + case LogicOp::eEquivalent : return "Equivalent"; + case LogicOp::eInvert : return "Invert"; + case LogicOp::eOrReverse : return "OrReverse"; + case LogicOp::eCopyInverted : return "CopyInverted"; + case LogicOp::eOrInverted : return "OrInverted"; + case LogicOp::eNand : return "Nand"; + case LogicOp::eSet : return "Set"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class MemoryAllocateFlagBits : VkMemoryAllocateFlags + { + eDeviceMask = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + eDeviceAddress = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT, + eDeviceAddressCaptureReplay = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT + }; + using MemoryAllocateFlagBitsKHR = MemoryAllocateFlagBits; + + VULKAN_HPP_INLINE std::string to_string( MemoryAllocateFlagBits value ) + { + switch ( value ) + { + case MemoryAllocateFlagBits::eDeviceMask : return "DeviceMask"; + case MemoryAllocateFlagBits::eDeviceAddress : return "DeviceAddress"; + case MemoryAllocateFlagBits::eDeviceAddressCaptureReplay : return "DeviceAddressCaptureReplay"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class MemoryHeapFlagBits : VkMemoryHeapFlags + { + eDeviceLocal = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT, + eMultiInstance = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + eMultiInstanceKHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( MemoryHeapFlagBits value ) + { + switch ( value ) + { + case MemoryHeapFlagBits::eDeviceLocal : return "DeviceLocal"; + case MemoryHeapFlagBits::eMultiInstance : return "MultiInstance"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class MemoryOverallocationBehaviorAMD + { + eDefault = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD, + eAllowed = VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD, + eDisallowed = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD + }; + + VULKAN_HPP_INLINE std::string to_string( MemoryOverallocationBehaviorAMD value ) + { + switch ( value ) + { + case MemoryOverallocationBehaviorAMD::eDefault : return "Default"; + case MemoryOverallocationBehaviorAMD::eAllowed : return "Allowed"; + case MemoryOverallocationBehaviorAMD::eDisallowed : return "Disallowed"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class MemoryPropertyFlagBits : VkMemoryPropertyFlags + { + eDeviceLocal = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, + eHostVisible = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, + eHostCoherent = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + eHostCached = VK_MEMORY_PROPERTY_HOST_CACHED_BIT, + eLazilyAllocated = VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, + eProtected = VK_MEMORY_PROPERTY_PROTECTED_BIT, + eDeviceCoherentAMD = VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD, + eDeviceUncachedAMD = VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD + }; + + VULKAN_HPP_INLINE std::string to_string( MemoryPropertyFlagBits value ) + { + switch ( value ) + { 
+ case MemoryPropertyFlagBits::eDeviceLocal : return "DeviceLocal"; + case MemoryPropertyFlagBits::eHostVisible : return "HostVisible"; + case MemoryPropertyFlagBits::eHostCoherent : return "HostCoherent"; + case MemoryPropertyFlagBits::eHostCached : return "HostCached"; + case MemoryPropertyFlagBits::eLazilyAllocated : return "LazilyAllocated"; + case MemoryPropertyFlagBits::eProtected : return "Protected"; + case MemoryPropertyFlagBits::eDeviceCoherentAMD : return "DeviceCoherentAMD"; + case MemoryPropertyFlagBits::eDeviceUncachedAMD : return "DeviceUncachedAMD"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ObjectType + { + eUnknown = VK_OBJECT_TYPE_UNKNOWN, + eInstance = VK_OBJECT_TYPE_INSTANCE, + ePhysicalDevice = VK_OBJECT_TYPE_PHYSICAL_DEVICE, + eDevice = VK_OBJECT_TYPE_DEVICE, + eQueue = VK_OBJECT_TYPE_QUEUE, + eSemaphore = VK_OBJECT_TYPE_SEMAPHORE, + eCommandBuffer = VK_OBJECT_TYPE_COMMAND_BUFFER, + eFence = VK_OBJECT_TYPE_FENCE, + eDeviceMemory = VK_OBJECT_TYPE_DEVICE_MEMORY, + eBuffer = VK_OBJECT_TYPE_BUFFER, + eImage = VK_OBJECT_TYPE_IMAGE, + eEvent = VK_OBJECT_TYPE_EVENT, + eQueryPool = VK_OBJECT_TYPE_QUERY_POOL, + eBufferView = VK_OBJECT_TYPE_BUFFER_VIEW, + eImageView = VK_OBJECT_TYPE_IMAGE_VIEW, + eShaderModule = VK_OBJECT_TYPE_SHADER_MODULE, + ePipelineCache = VK_OBJECT_TYPE_PIPELINE_CACHE, + ePipelineLayout = VK_OBJECT_TYPE_PIPELINE_LAYOUT, + eRenderPass = VK_OBJECT_TYPE_RENDER_PASS, + ePipeline = VK_OBJECT_TYPE_PIPELINE, + eDescriptorSetLayout = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT, + eSampler = VK_OBJECT_TYPE_SAMPLER, + eDescriptorPool = VK_OBJECT_TYPE_DESCRIPTOR_POOL, + eDescriptorSet = VK_OBJECT_TYPE_DESCRIPTOR_SET, + eFramebuffer = VK_OBJECT_TYPE_FRAMEBUFFER, + eCommandPool = VK_OBJECT_TYPE_COMMAND_POOL, + eSamplerYcbcrConversion = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + eDescriptorUpdateTemplate = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + eSurfaceKHR = VK_OBJECT_TYPE_SURFACE_KHR, + eSwapchainKHR = VK_OBJECT_TYPE_SWAPCHAIN_KHR, + eDisplayKHR = VK_OBJECT_TYPE_DISPLAY_KHR, + eDisplayModeKHR = VK_OBJECT_TYPE_DISPLAY_MODE_KHR, + eDebugReportCallbackEXT = VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT, + eDebugUtilsMessengerEXT = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, + eAccelerationStructureKHR = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR, + eValidationCacheEXT = VK_OBJECT_TYPE_VALIDATION_CACHE_EXT, + eAccelerationStructureNV = VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV, + ePerformanceConfigurationINTEL = VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL, + eDeferredOperationKHR = VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR, + eIndirectCommandsLayoutNV = VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV, + ePrivateDataSlotEXT = VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT, + eDescriptorUpdateTemplateKHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR, + eSamplerYcbcrConversionKHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ObjectType value ) + { + switch ( value ) + { + case ObjectType::eUnknown : return "Unknown"; + case ObjectType::eInstance : return "Instance"; + case ObjectType::ePhysicalDevice : return "PhysicalDevice"; + case ObjectType::eDevice : return "Device"; + case ObjectType::eQueue : return "Queue"; + case ObjectType::eSemaphore : return "Semaphore"; + case ObjectType::eCommandBuffer : return "CommandBuffer"; + case ObjectType::eFence : return "Fence"; + case ObjectType::eDeviceMemory : return "DeviceMemory"; + case ObjectType::eBuffer : return "Buffer"; + 
case ObjectType::eImage : return "Image";
+      case ObjectType::eEvent : return "Event";
+      case ObjectType::eQueryPool : return "QueryPool";
+      case ObjectType::eBufferView : return "BufferView";
+      case ObjectType::eImageView : return "ImageView";
+      case ObjectType::eShaderModule : return "ShaderModule";
+      case ObjectType::ePipelineCache : return "PipelineCache";
+      case ObjectType::ePipelineLayout : return "PipelineLayout";
+      case ObjectType::eRenderPass : return "RenderPass";
+      case ObjectType::ePipeline : return "Pipeline";
+      case ObjectType::eDescriptorSetLayout : return "DescriptorSetLayout";
+      case ObjectType::eSampler : return "Sampler";
+      case ObjectType::eDescriptorPool : return "DescriptorPool";
+      case ObjectType::eDescriptorSet : return "DescriptorSet";
+      case ObjectType::eFramebuffer : return "Framebuffer";
+      case ObjectType::eCommandPool : return "CommandPool";
+      case ObjectType::eSamplerYcbcrConversion : return "SamplerYcbcrConversion";
+      case ObjectType::eDescriptorUpdateTemplate : return "DescriptorUpdateTemplate";
+      case ObjectType::eSurfaceKHR : return "SurfaceKHR";
+      case ObjectType::eSwapchainKHR : return "SwapchainKHR";
+      case ObjectType::eDisplayKHR : return "DisplayKHR";
+      case ObjectType::eDisplayModeKHR : return "DisplayModeKHR";
+      case ObjectType::eDebugReportCallbackEXT : return "DebugReportCallbackEXT";
+      case ObjectType::eDebugUtilsMessengerEXT : return "DebugUtilsMessengerEXT";
+      case ObjectType::eAccelerationStructureKHR : return "AccelerationStructureKHR";
+      case ObjectType::eValidationCacheEXT : return "ValidationCacheEXT";
+      case ObjectType::eAccelerationStructureNV : return "AccelerationStructureNV";
+      case ObjectType::ePerformanceConfigurationINTEL : return "PerformanceConfigurationINTEL";
+      case ObjectType::eDeferredOperationKHR : return "DeferredOperationKHR";
+      case ObjectType::eIndirectCommandsLayoutNV : return "IndirectCommandsLayoutNV";
+      case ObjectType::ePrivateDataSlotEXT : return "PrivateDataSlotEXT";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  template <ObjectType value>
+  struct cpp_type
+  {};
+
+  enum class PeerMemoryFeatureFlagBits : VkPeerMemoryFeatureFlags
+  {
+    eCopySrc = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT,
+    eCopyDst = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT,
+    eGenericSrc = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT,
+    eGenericDst = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT
+  };
+  using PeerMemoryFeatureFlagBitsKHR = PeerMemoryFeatureFlagBits;
+
+  VULKAN_HPP_INLINE std::string to_string( PeerMemoryFeatureFlagBits value )
+  {
+    switch ( value )
+    {
+      case PeerMemoryFeatureFlagBits::eCopySrc : return "CopySrc";
+      case PeerMemoryFeatureFlagBits::eCopyDst : return "CopyDst";
+      case PeerMemoryFeatureFlagBits::eGenericSrc : return "GenericSrc";
+      case PeerMemoryFeatureFlagBits::eGenericDst : return "GenericDst";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class PerformanceConfigurationTypeINTEL
+  {
+    eCommandQueueMetricsDiscoveryActivated = VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( PerformanceConfigurationTypeINTEL value )
+  {
+    switch ( value )
+    {
+      case PerformanceConfigurationTypeINTEL::eCommandQueueMetricsDiscoveryActivated : return "CommandQueueMetricsDiscoveryActivated";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class PerformanceCounterDescriptionFlagBitsKHR :
VkPerformanceCounterDescriptionFlagsKHR + { + ePerformanceImpacting = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + eConcurrentlyImpacted = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceCounterDescriptionFlagBitsKHR value ) + { + switch ( value ) + { + case PerformanceCounterDescriptionFlagBitsKHR::ePerformanceImpacting : return "PerformanceImpacting"; + case PerformanceCounterDescriptionFlagBitsKHR::eConcurrentlyImpacted : return "ConcurrentlyImpacted"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PerformanceCounterScopeKHR + { + eCommandBuffer = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + eRenderPass = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + eCommand = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + eVkQueryScopeCommandBuffer = VK_QUERY_SCOPE_COMMAND_BUFFER_KHR, + eVkQueryScopeCommand = VK_QUERY_SCOPE_COMMAND_KHR, + eVkQueryScopeRenderPass = VK_QUERY_SCOPE_RENDER_PASS_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceCounterScopeKHR value ) + { + switch ( value ) + { + case PerformanceCounterScopeKHR::eCommandBuffer : return "CommandBuffer"; + case PerformanceCounterScopeKHR::eRenderPass : return "RenderPass"; + case PerformanceCounterScopeKHR::eCommand : return "Command"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PerformanceCounterStorageKHR + { + eInt32 = VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR, + eInt64 = VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR, + eUint32 = VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR, + eUint64 = VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR, + eFloat32 = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR, + eFloat64 = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceCounterStorageKHR value ) + { + switch ( value ) + { + case PerformanceCounterStorageKHR::eInt32 : return "Int32"; + case PerformanceCounterStorageKHR::eInt64 : return "Int64"; + case PerformanceCounterStorageKHR::eUint32 : return "Uint32"; + case PerformanceCounterStorageKHR::eUint64 : return "Uint64"; + case PerformanceCounterStorageKHR::eFloat32 : return "Float32"; + case PerformanceCounterStorageKHR::eFloat64 : return "Float64"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PerformanceCounterUnitKHR + { + eGeneric = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR, + ePercentage = VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR, + eNanoseconds = VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR, + eBytes = VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR, + eBytesPerSecond = VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR, + eKelvin = VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR, + eWatts = VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR, + eVolts = VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR, + eAmps = VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR, + eHertz = VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR, + eCycles = VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceCounterUnitKHR value ) + { + switch ( value ) + { + case PerformanceCounterUnitKHR::eGeneric : return "Generic"; + case PerformanceCounterUnitKHR::ePercentage : return "Percentage"; + case PerformanceCounterUnitKHR::eNanoseconds : return "Nanoseconds"; + case PerformanceCounterUnitKHR::eBytes : return "Bytes"; + case PerformanceCounterUnitKHR::eBytesPerSecond : 
return "BytesPerSecond"; + case PerformanceCounterUnitKHR::eKelvin : return "Kelvin"; + case PerformanceCounterUnitKHR::eWatts : return "Watts"; + case PerformanceCounterUnitKHR::eVolts : return "Volts"; + case PerformanceCounterUnitKHR::eAmps : return "Amps"; + case PerformanceCounterUnitKHR::eHertz : return "Hertz"; + case PerformanceCounterUnitKHR::eCycles : return "Cycles"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PerformanceOverrideTypeINTEL + { + eNullHardware = VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL, + eFlushGpuCaches = VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceOverrideTypeINTEL value ) + { + switch ( value ) + { + case PerformanceOverrideTypeINTEL::eNullHardware : return "NullHardware"; + case PerformanceOverrideTypeINTEL::eFlushGpuCaches : return "FlushGpuCaches"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PerformanceParameterTypeINTEL + { + eHwCountersSupported = VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL, + eStreamMarkerValidBits = VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceParameterTypeINTEL value ) + { + switch ( value ) + { + case PerformanceParameterTypeINTEL::eHwCountersSupported : return "HwCountersSupported"; + case PerformanceParameterTypeINTEL::eStreamMarkerValidBits : return "StreamMarkerValidBits"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PerformanceValueTypeINTEL + { + eUint32 = VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL, + eUint64 = VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL, + eFloat = VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL, + eBool = VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL, + eString = VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL + }; + + VULKAN_HPP_INLINE std::string to_string( PerformanceValueTypeINTEL value ) + { + switch ( value ) + { + case PerformanceValueTypeINTEL::eUint32 : return "Uint32"; + case PerformanceValueTypeINTEL::eUint64 : return "Uint64"; + case PerformanceValueTypeINTEL::eFloat : return "Float"; + case PerformanceValueTypeINTEL::eBool : return "Bool"; + case PerformanceValueTypeINTEL::eString : return "String"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PhysicalDeviceType + { + eOther = VK_PHYSICAL_DEVICE_TYPE_OTHER, + eIntegratedGpu = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU, + eDiscreteGpu = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU, + eVirtualGpu = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU, + eCpu = VK_PHYSICAL_DEVICE_TYPE_CPU + }; + + VULKAN_HPP_INLINE std::string to_string( PhysicalDeviceType value ) + { + switch ( value ) + { + case PhysicalDeviceType::eOther : return "Other"; + case PhysicalDeviceType::eIntegratedGpu : return "IntegratedGpu"; + case PhysicalDeviceType::eDiscreteGpu : return "DiscreteGpu"; + case PhysicalDeviceType::eVirtualGpu : return "VirtualGpu"; + case PhysicalDeviceType::eCpu : return "Cpu"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineBindPoint + { + eGraphics = VK_PIPELINE_BIND_POINT_GRAPHICS, + eCompute = VK_PIPELINE_BIND_POINT_COMPUTE, + eRayTracingKHR = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + eRayTracingNV = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV + }; + + 
VULKAN_HPP_INLINE std::string to_string( PipelineBindPoint value ) + { + switch ( value ) + { + case PipelineBindPoint::eGraphics : return "Graphics"; + case PipelineBindPoint::eCompute : return "Compute"; + case PipelineBindPoint::eRayTracingKHR : return "RayTracingKHR"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineCacheCreateFlagBits : VkPipelineCacheCreateFlags + { + eExternallySynchronizedEXT = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineCacheCreateFlagBits value ) + { + switch ( value ) + { + case PipelineCacheCreateFlagBits::eExternallySynchronizedEXT : return "ExternallySynchronizedEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineCacheHeaderVersion + { + eOne = VK_PIPELINE_CACHE_HEADER_VERSION_ONE + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineCacheHeaderVersion value ) + { + switch ( value ) + { + case PipelineCacheHeaderVersion::eOne : return "One"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineCompilerControlFlagBitsAMD : VkPipelineCompilerControlFlagsAMD + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineCompilerControlFlagBitsAMD ) + { + return "(void)"; + } + + enum class PipelineCreateFlagBits : VkPipelineCreateFlags + { + eDisableOptimization = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, + eAllowDerivatives = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT, + eDerivative = VK_PIPELINE_CREATE_DERIVATIVE_BIT, + eViewIndexFromDeviceIndex = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + eDispatchBase = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + eRayTracingNoNullAnyHitShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR, + eRayTracingNoNullClosestHitShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR, + eRayTracingNoNullMissShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR, + eRayTracingNoNullIntersectionShadersKHR = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR, + eRayTracingSkipTrianglesKHR = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR, + eRayTracingSkipAabbsKHR = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR, + eRayTracingShaderGroupHandleCaptureReplayKHR = VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR, + eDeferCompileNV = VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV, + eCaptureStatisticsKHR = VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR, + eCaptureInternalRepresentationsKHR = VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR, + eIndirectBindableNV = VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV, + eLibraryKHR = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR, + eFailOnPipelineCompileRequiredEXT = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT, + eEarlyReturnOnFailureEXT = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT, + eDispatchBaseKHR = VK_PIPELINE_CREATE_DISPATCH_BASE_KHR, + eViewIndexFromDeviceIndexKHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineCreateFlagBits value ) + { + switch ( value ) + { + case PipelineCreateFlagBits::eDisableOptimization : return "DisableOptimization"; + case PipelineCreateFlagBits::eAllowDerivatives : return "AllowDerivatives"; + case PipelineCreateFlagBits::eDerivative : return "Derivative"; 
+ case PipelineCreateFlagBits::eViewIndexFromDeviceIndex : return "ViewIndexFromDeviceIndex"; + case PipelineCreateFlagBits::eDispatchBase : return "DispatchBase"; + case PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR : return "RayTracingNoNullAnyHitShadersKHR"; + case PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR : return "RayTracingNoNullClosestHitShadersKHR"; + case PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR : return "RayTracingNoNullMissShadersKHR"; + case PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR : return "RayTracingNoNullIntersectionShadersKHR"; + case PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR : return "RayTracingSkipTrianglesKHR"; + case PipelineCreateFlagBits::eRayTracingSkipAabbsKHR : return "RayTracingSkipAabbsKHR"; + case PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR : return "RayTracingShaderGroupHandleCaptureReplayKHR"; + case PipelineCreateFlagBits::eDeferCompileNV : return "DeferCompileNV"; + case PipelineCreateFlagBits::eCaptureStatisticsKHR : return "CaptureStatisticsKHR"; + case PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR : return "CaptureInternalRepresentationsKHR"; + case PipelineCreateFlagBits::eIndirectBindableNV : return "IndirectBindableNV"; + case PipelineCreateFlagBits::eLibraryKHR : return "LibraryKHR"; + case PipelineCreateFlagBits::eFailOnPipelineCompileRequiredEXT : return "FailOnPipelineCompileRequiredEXT"; + case PipelineCreateFlagBits::eEarlyReturnOnFailureEXT : return "EarlyReturnOnFailureEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineCreationFeedbackFlagBitsEXT : VkPipelineCreationFeedbackFlagsEXT + { + eValid = VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT, + eApplicationPipelineCacheHit = VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT, + eBasePipelineAcceleration = VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineCreationFeedbackFlagBitsEXT value ) + { + switch ( value ) + { + case PipelineCreationFeedbackFlagBitsEXT::eValid : return "Valid"; + case PipelineCreationFeedbackFlagBitsEXT::eApplicationPipelineCacheHit : return "ApplicationPipelineCacheHit"; + case PipelineCreationFeedbackFlagBitsEXT::eBasePipelineAcceleration : return "BasePipelineAcceleration"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineExecutableStatisticFormatKHR + { + eBool32 = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR, + eInt64 = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR, + eUint64 = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR, + eFloat64 = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineExecutableStatisticFormatKHR value ) + { + switch ( value ) + { + case PipelineExecutableStatisticFormatKHR::eBool32 : return "Bool32"; + case PipelineExecutableStatisticFormatKHR::eInt64 : return "Int64"; + case PipelineExecutableStatisticFormatKHR::eUint64 : return "Uint64"; + case PipelineExecutableStatisticFormatKHR::eFloat64 : return "Float64"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineShaderStageCreateFlagBits : VkPipelineShaderStageCreateFlags + { + eAllowVaryingSubgroupSizeEXT = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT, + 
eRequireFullSubgroupsEXT = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineShaderStageCreateFlagBits value ) + { + switch ( value ) + { + case PipelineShaderStageCreateFlagBits::eAllowVaryingSubgroupSizeEXT : return "AllowVaryingSubgroupSizeEXT"; + case PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT : return "RequireFullSubgroupsEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PipelineStageFlagBits : VkPipelineStageFlags + { + eTopOfPipe = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + eDrawIndirect = VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, + eVertexInput = VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, + eVertexShader = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, + eTessellationControlShader = VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, + eTessellationEvaluationShader = VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, + eGeometryShader = VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, + eFragmentShader = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, + eEarlyFragmentTests = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, + eLateFragmentTests = VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, + eColorAttachmentOutput = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + eComputeShader = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, + eTransfer = VK_PIPELINE_STAGE_TRANSFER_BIT, + eBottomOfPipe = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + eHost = VK_PIPELINE_STAGE_HOST_BIT, + eAllGraphics = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, + eAllCommands = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, + eTransformFeedbackEXT = VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT, + eConditionalRenderingEXT = VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, + eAccelerationStructureBuildKHR = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + eRayTracingShaderKHR = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, + eShadingRateImageNV = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, + eTaskShaderNV = VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV, + eMeshShaderNV = VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV, + eFragmentDensityProcessEXT = VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT, + eCommandPreprocessNV = VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV, + eAccelerationStructureBuildNV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV, + eFragmentShadingRateAttachmentKHR = VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, + eRayTracingShaderNV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( PipelineStageFlagBits value ) + { + switch ( value ) + { + case PipelineStageFlagBits::eTopOfPipe : return "TopOfPipe"; + case PipelineStageFlagBits::eDrawIndirect : return "DrawIndirect"; + case PipelineStageFlagBits::eVertexInput : return "VertexInput"; + case PipelineStageFlagBits::eVertexShader : return "VertexShader"; + case PipelineStageFlagBits::eTessellationControlShader : return "TessellationControlShader"; + case PipelineStageFlagBits::eTessellationEvaluationShader : return "TessellationEvaluationShader"; + case PipelineStageFlagBits::eGeometryShader : return "GeometryShader"; + case PipelineStageFlagBits::eFragmentShader : return "FragmentShader"; + case PipelineStageFlagBits::eEarlyFragmentTests : return "EarlyFragmentTests"; + case PipelineStageFlagBits::eLateFragmentTests : return "LateFragmentTests"; + case PipelineStageFlagBits::eColorAttachmentOutput : return "ColorAttachmentOutput"; + case PipelineStageFlagBits::eComputeShader : return "ComputeShader"; + case 
PipelineStageFlagBits::eTransfer : return "Transfer"; + case PipelineStageFlagBits::eBottomOfPipe : return "BottomOfPipe"; + case PipelineStageFlagBits::eHost : return "Host"; + case PipelineStageFlagBits::eAllGraphics : return "AllGraphics"; + case PipelineStageFlagBits::eAllCommands : return "AllCommands"; + case PipelineStageFlagBits::eTransformFeedbackEXT : return "TransformFeedbackEXT"; + case PipelineStageFlagBits::eConditionalRenderingEXT : return "ConditionalRenderingEXT"; + case PipelineStageFlagBits::eAccelerationStructureBuildKHR : return "AccelerationStructureBuildKHR"; + case PipelineStageFlagBits::eRayTracingShaderKHR : return "RayTracingShaderKHR"; + case PipelineStageFlagBits::eShadingRateImageNV : return "ShadingRateImageNV"; + case PipelineStageFlagBits::eTaskShaderNV : return "TaskShaderNV"; + case PipelineStageFlagBits::eMeshShaderNV : return "MeshShaderNV"; + case PipelineStageFlagBits::eFragmentDensityProcessEXT : return "FragmentDensityProcessEXT"; + case PipelineStageFlagBits::eCommandPreprocessNV : return "CommandPreprocessNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PointClippingBehavior + { + eAllClipPlanes = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + eUserClipPlanesOnly = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY + }; + using PointClippingBehaviorKHR = PointClippingBehavior; + + VULKAN_HPP_INLINE std::string to_string( PointClippingBehavior value ) + { + switch ( value ) + { + case PointClippingBehavior::eAllClipPlanes : return "AllClipPlanes"; + case PointClippingBehavior::eUserClipPlanesOnly : return "UserClipPlanesOnly"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PolygonMode + { + eFill = VK_POLYGON_MODE_FILL, + eLine = VK_POLYGON_MODE_LINE, + ePoint = VK_POLYGON_MODE_POINT, + eFillRectangleNV = VK_POLYGON_MODE_FILL_RECTANGLE_NV + }; + + VULKAN_HPP_INLINE std::string to_string( PolygonMode value ) + { + switch ( value ) + { + case PolygonMode::eFill : return "Fill"; + case PolygonMode::eLine : return "Line"; + case PolygonMode::ePoint : return "Point"; + case PolygonMode::eFillRectangleNV : return "FillRectangleNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PresentModeKHR + { + eImmediate = VK_PRESENT_MODE_IMMEDIATE_KHR, + eMailbox = VK_PRESENT_MODE_MAILBOX_KHR, + eFifo = VK_PRESENT_MODE_FIFO_KHR, + eFifoRelaxed = VK_PRESENT_MODE_FIFO_RELAXED_KHR, + eSharedDemandRefresh = VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR, + eSharedContinuousRefresh = VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( PresentModeKHR value ) + { + switch ( value ) + { + case PresentModeKHR::eImmediate : return "Immediate"; + case PresentModeKHR::eMailbox : return "Mailbox"; + case PresentModeKHR::eFifo : return "Fifo"; + case PresentModeKHR::eFifoRelaxed : return "FifoRelaxed"; + case PresentModeKHR::eSharedDemandRefresh : return "SharedDemandRefresh"; + case PresentModeKHR::eSharedContinuousRefresh : return "SharedContinuousRefresh"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PrimitiveTopology + { + ePointList = VK_PRIMITIVE_TOPOLOGY_POINT_LIST, + eLineList = VK_PRIMITIVE_TOPOLOGY_LINE_LIST, + eLineStrip = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP, + eTriangleList = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, + eTriangleStrip = 
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, + eTriangleFan = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN, + eLineListWithAdjacency = VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY, + eLineStripWithAdjacency = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY, + eTriangleListWithAdjacency = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY, + eTriangleStripWithAdjacency = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY, + ePatchList = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST + }; + + VULKAN_HPP_INLINE std::string to_string( PrimitiveTopology value ) + { + switch ( value ) + { + case PrimitiveTopology::ePointList : return "PointList"; + case PrimitiveTopology::eLineList : return "LineList"; + case PrimitiveTopology::eLineStrip : return "LineStrip"; + case PrimitiveTopology::eTriangleList : return "TriangleList"; + case PrimitiveTopology::eTriangleStrip : return "TriangleStrip"; + case PrimitiveTopology::eTriangleFan : return "TriangleFan"; + case PrimitiveTopology::eLineListWithAdjacency : return "LineListWithAdjacency"; + case PrimitiveTopology::eLineStripWithAdjacency : return "LineStripWithAdjacency"; + case PrimitiveTopology::eTriangleListWithAdjacency : return "TriangleListWithAdjacency"; + case PrimitiveTopology::eTriangleStripWithAdjacency : return "TriangleStripWithAdjacency"; + case PrimitiveTopology::ePatchList : return "PatchList"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class PrivateDataSlotCreateFlagBitsEXT : VkPrivateDataSlotCreateFlagsEXT + {}; + + VULKAN_HPP_INLINE std::string to_string( PrivateDataSlotCreateFlagBitsEXT ) + { + return "(void)"; + } + + enum class QueryControlFlagBits : VkQueryControlFlags + { + ePrecise = VK_QUERY_CONTROL_PRECISE_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( QueryControlFlagBits value ) + { + switch ( value ) + { + case QueryControlFlagBits::ePrecise : return "Precise"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class QueryPipelineStatisticFlagBits : VkQueryPipelineStatisticFlags + { + eInputAssemblyVertices = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT, + eInputAssemblyPrimitives = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT, + eVertexShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT, + eGeometryShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT, + eGeometryShaderPrimitives = VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT, + eClippingInvocations = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT, + eClippingPrimitives = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT, + eFragmentShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT, + eTessellationControlShaderPatches = VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT, + eTessellationEvaluationShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT, + eComputeShaderInvocations = VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( QueryPipelineStatisticFlagBits value ) + { + switch ( value ) + { + case QueryPipelineStatisticFlagBits::eInputAssemblyVertices : return "InputAssemblyVertices"; + case QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives : return "InputAssemblyPrimitives"; + case QueryPipelineStatisticFlagBits::eVertexShaderInvocations : return "VertexShaderInvocations"; + case 
QueryPipelineStatisticFlagBits::eGeometryShaderInvocations : return "GeometryShaderInvocations"; + case QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives : return "GeometryShaderPrimitives"; + case QueryPipelineStatisticFlagBits::eClippingInvocations : return "ClippingInvocations"; + case QueryPipelineStatisticFlagBits::eClippingPrimitives : return "ClippingPrimitives"; + case QueryPipelineStatisticFlagBits::eFragmentShaderInvocations : return "FragmentShaderInvocations"; + case QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches : return "TessellationControlShaderPatches"; + case QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations : return "TessellationEvaluationShaderInvocations"; + case QueryPipelineStatisticFlagBits::eComputeShaderInvocations : return "ComputeShaderInvocations"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class QueryPoolCreateFlagBits + {}; + + VULKAN_HPP_INLINE std::string to_string( QueryPoolCreateFlagBits ) + { + return "(void)"; + } + + enum class QueryPoolSamplingModeINTEL + { + eManual = VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL + }; + + VULKAN_HPP_INLINE std::string to_string( QueryPoolSamplingModeINTEL value ) + { + switch ( value ) + { + case QueryPoolSamplingModeINTEL::eManual : return "Manual"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class QueryResultFlagBits : VkQueryResultFlags + { + e64 = VK_QUERY_RESULT_64_BIT, + eWait = VK_QUERY_RESULT_WAIT_BIT, + eWithAvailability = VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, + ePartial = VK_QUERY_RESULT_PARTIAL_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( QueryResultFlagBits value ) + { + switch ( value ) + { + case QueryResultFlagBits::e64 : return "64"; + case QueryResultFlagBits::eWait : return "Wait"; + case QueryResultFlagBits::eWithAvailability : return "WithAvailability"; + case QueryResultFlagBits::ePartial : return "Partial"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class QueryType + { + eOcclusion = VK_QUERY_TYPE_OCCLUSION, + ePipelineStatistics = VK_QUERY_TYPE_PIPELINE_STATISTICS, + eTimestamp = VK_QUERY_TYPE_TIMESTAMP, + eTransformFeedbackStreamEXT = VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT, + ePerformanceQueryKHR = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR, + eAccelerationStructureCompactedSizeKHR = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR, + eAccelerationStructureSerializationSizeKHR = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR, + eAccelerationStructureCompactedSizeNV = VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV, + ePerformanceQueryINTEL = VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL + }; + + VULKAN_HPP_INLINE std::string to_string( QueryType value ) + { + switch ( value ) + { + case QueryType::eOcclusion : return "Occlusion"; + case QueryType::ePipelineStatistics : return "PipelineStatistics"; + case QueryType::eTimestamp : return "Timestamp"; + case QueryType::eTransformFeedbackStreamEXT : return "TransformFeedbackStreamEXT"; + case QueryType::ePerformanceQueryKHR : return "PerformanceQueryKHR"; + case QueryType::eAccelerationStructureCompactedSizeKHR : return "AccelerationStructureCompactedSizeKHR"; + case QueryType::eAccelerationStructureSerializationSizeKHR : return "AccelerationStructureSerializationSizeKHR"; + case QueryType::eAccelerationStructureCompactedSizeNV : return 
"AccelerationStructureCompactedSizeNV"; + case QueryType::ePerformanceQueryINTEL : return "PerformanceQueryINTEL"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class QueueFlagBits : VkQueueFlags + { + eGraphics = VK_QUEUE_GRAPHICS_BIT, + eCompute = VK_QUEUE_COMPUTE_BIT, + eTransfer = VK_QUEUE_TRANSFER_BIT, + eSparseBinding = VK_QUEUE_SPARSE_BINDING_BIT, + eProtected = VK_QUEUE_PROTECTED_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( QueueFlagBits value ) + { + switch ( value ) + { + case QueueFlagBits::eGraphics : return "Graphics"; + case QueueFlagBits::eCompute : return "Compute"; + case QueueFlagBits::eTransfer : return "Transfer"; + case QueueFlagBits::eSparseBinding : return "SparseBinding"; + case QueueFlagBits::eProtected : return "Protected"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class QueueGlobalPriorityEXT + { + eLow = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT, + eMedium = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT, + eHigh = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT, + eRealtime = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( QueueGlobalPriorityEXT value ) + { + switch ( value ) + { + case QueueGlobalPriorityEXT::eLow : return "Low"; + case QueueGlobalPriorityEXT::eMedium : return "Medium"; + case QueueGlobalPriorityEXT::eHigh : return "High"; + case QueueGlobalPriorityEXT::eRealtime : return "Realtime"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class RasterizationOrderAMD + { + eStrict = VK_RASTERIZATION_ORDER_STRICT_AMD, + eRelaxed = VK_RASTERIZATION_ORDER_RELAXED_AMD + }; + + VULKAN_HPP_INLINE std::string to_string( RasterizationOrderAMD value ) + { + switch ( value ) + { + case RasterizationOrderAMD::eStrict : return "Strict"; + case RasterizationOrderAMD::eRelaxed : return "Relaxed"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class RayTracingShaderGroupTypeKHR + { + eGeneral = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, + eTrianglesHitGroup = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, + eProceduralHitGroup = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR + }; + using RayTracingShaderGroupTypeNV = RayTracingShaderGroupTypeKHR; + + VULKAN_HPP_INLINE std::string to_string( RayTracingShaderGroupTypeKHR value ) + { + switch ( value ) + { + case RayTracingShaderGroupTypeKHR::eGeneral : return "General"; + case RayTracingShaderGroupTypeKHR::eTrianglesHitGroup : return "TrianglesHitGroup"; + case RayTracingShaderGroupTypeKHR::eProceduralHitGroup : return "ProceduralHitGroup"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class RenderPassCreateFlagBits : VkRenderPassCreateFlags + { + eTransformQCOM = VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM + }; + + VULKAN_HPP_INLINE std::string to_string( RenderPassCreateFlagBits value ) + { + switch ( value ) + { + case RenderPassCreateFlagBits::eTransformQCOM : return "TransformQCOM"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ResolveModeFlagBits : VkResolveModeFlags + { + eNone = VK_RESOLVE_MODE_NONE, + eSampleZero = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT, + eAverage = VK_RESOLVE_MODE_AVERAGE_BIT, + eMin = VK_RESOLVE_MODE_MIN_BIT, + eMax = VK_RESOLVE_MODE_MAX_BIT 
+ }; + using ResolveModeFlagBitsKHR = ResolveModeFlagBits; + + VULKAN_HPP_INLINE std::string to_string( ResolveModeFlagBits value ) + { + switch ( value ) + { + case ResolveModeFlagBits::eNone : return "None"; + case ResolveModeFlagBits::eSampleZero : return "SampleZero"; + case ResolveModeFlagBits::eAverage : return "Average"; + case ResolveModeFlagBits::eMin : return "Min"; + case ResolveModeFlagBits::eMax : return "Max"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class Result + { + eSuccess = VK_SUCCESS, + eNotReady = VK_NOT_READY, + eTimeout = VK_TIMEOUT, + eEventSet = VK_EVENT_SET, + eEventReset = VK_EVENT_RESET, + eIncomplete = VK_INCOMPLETE, + eErrorOutOfHostMemory = VK_ERROR_OUT_OF_HOST_MEMORY, + eErrorOutOfDeviceMemory = VK_ERROR_OUT_OF_DEVICE_MEMORY, + eErrorInitializationFailed = VK_ERROR_INITIALIZATION_FAILED, + eErrorDeviceLost = VK_ERROR_DEVICE_LOST, + eErrorMemoryMapFailed = VK_ERROR_MEMORY_MAP_FAILED, + eErrorLayerNotPresent = VK_ERROR_LAYER_NOT_PRESENT, + eErrorExtensionNotPresent = VK_ERROR_EXTENSION_NOT_PRESENT, + eErrorFeatureNotPresent = VK_ERROR_FEATURE_NOT_PRESENT, + eErrorIncompatibleDriver = VK_ERROR_INCOMPATIBLE_DRIVER, + eErrorTooManyObjects = VK_ERROR_TOO_MANY_OBJECTS, + eErrorFormatNotSupported = VK_ERROR_FORMAT_NOT_SUPPORTED, + eErrorFragmentedPool = VK_ERROR_FRAGMENTED_POOL, + eErrorUnknown = VK_ERROR_UNKNOWN, + eErrorOutOfPoolMemory = VK_ERROR_OUT_OF_POOL_MEMORY, + eErrorInvalidExternalHandle = VK_ERROR_INVALID_EXTERNAL_HANDLE, + eErrorFragmentation = VK_ERROR_FRAGMENTATION, + eErrorInvalidOpaqueCaptureAddress = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + eErrorSurfaceLostKHR = VK_ERROR_SURFACE_LOST_KHR, + eErrorNativeWindowInUseKHR = VK_ERROR_NATIVE_WINDOW_IN_USE_KHR, + eSuboptimalKHR = VK_SUBOPTIMAL_KHR, + eErrorOutOfDateKHR = VK_ERROR_OUT_OF_DATE_KHR, + eErrorIncompatibleDisplayKHR = VK_ERROR_INCOMPATIBLE_DISPLAY_KHR, + eErrorValidationFailedEXT = VK_ERROR_VALIDATION_FAILED_EXT, + eErrorInvalidShaderNV = VK_ERROR_INVALID_SHADER_NV, + eErrorInvalidDrmFormatModifierPlaneLayoutEXT = VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT, + eErrorNotPermittedEXT = VK_ERROR_NOT_PERMITTED_EXT, + eErrorFullScreenExclusiveModeLostEXT = VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT, + eThreadIdleKHR = VK_THREAD_IDLE_KHR, + eThreadDoneKHR = VK_THREAD_DONE_KHR, + eOperationDeferredKHR = VK_OPERATION_DEFERRED_KHR, + eOperationNotDeferredKHR = VK_OPERATION_NOT_DEFERRED_KHR, + ePipelineCompileRequiredEXT = VK_PIPELINE_COMPILE_REQUIRED_EXT, + eErrorFragmentationEXT = VK_ERROR_FRAGMENTATION_EXT, + eErrorInvalidDeviceAddressEXT = VK_ERROR_INVALID_DEVICE_ADDRESS_EXT, + eErrorInvalidExternalHandleKHR = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR, + eErrorInvalidOpaqueCaptureAddressKHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR, + eErrorOutOfPoolMemoryKHR = VK_ERROR_OUT_OF_POOL_MEMORY_KHR, + eErrorPipelineCompileRequiredEXT = VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( Result value ) + { + switch ( value ) + { + case Result::eSuccess : return "Success"; + case Result::eNotReady : return "NotReady"; + case Result::eTimeout : return "Timeout"; + case Result::eEventSet : return "EventSet"; + case Result::eEventReset : return "EventReset"; + case Result::eIncomplete : return "Incomplete"; + case Result::eErrorOutOfHostMemory : return "ErrorOutOfHostMemory"; + case Result::eErrorOutOfDeviceMemory : return "ErrorOutOfDeviceMemory"; + case 
Result::eErrorInitializationFailed : return "ErrorInitializationFailed"; + case Result::eErrorDeviceLost : return "ErrorDeviceLost"; + case Result::eErrorMemoryMapFailed : return "ErrorMemoryMapFailed"; + case Result::eErrorLayerNotPresent : return "ErrorLayerNotPresent"; + case Result::eErrorExtensionNotPresent : return "ErrorExtensionNotPresent"; + case Result::eErrorFeatureNotPresent : return "ErrorFeatureNotPresent"; + case Result::eErrorIncompatibleDriver : return "ErrorIncompatibleDriver"; + case Result::eErrorTooManyObjects : return "ErrorTooManyObjects"; + case Result::eErrorFormatNotSupported : return "ErrorFormatNotSupported"; + case Result::eErrorFragmentedPool : return "ErrorFragmentedPool"; + case Result::eErrorUnknown : return "ErrorUnknown"; + case Result::eErrorOutOfPoolMemory : return "ErrorOutOfPoolMemory"; + case Result::eErrorInvalidExternalHandle : return "ErrorInvalidExternalHandle"; + case Result::eErrorFragmentation : return "ErrorFragmentation"; + case Result::eErrorInvalidOpaqueCaptureAddress : return "ErrorInvalidOpaqueCaptureAddress"; + case Result::eErrorSurfaceLostKHR : return "ErrorSurfaceLostKHR"; + case Result::eErrorNativeWindowInUseKHR : return "ErrorNativeWindowInUseKHR"; + case Result::eSuboptimalKHR : return "SuboptimalKHR"; + case Result::eErrorOutOfDateKHR : return "ErrorOutOfDateKHR"; + case Result::eErrorIncompatibleDisplayKHR : return "ErrorIncompatibleDisplayKHR"; + case Result::eErrorValidationFailedEXT : return "ErrorValidationFailedEXT"; + case Result::eErrorInvalidShaderNV : return "ErrorInvalidShaderNV"; + case Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT : return "ErrorInvalidDrmFormatModifierPlaneLayoutEXT"; + case Result::eErrorNotPermittedEXT : return "ErrorNotPermittedEXT"; + case Result::eErrorFullScreenExclusiveModeLostEXT : return "ErrorFullScreenExclusiveModeLostEXT"; + case Result::eThreadIdleKHR : return "ThreadIdleKHR"; + case Result::eThreadDoneKHR : return "ThreadDoneKHR"; + case Result::eOperationDeferredKHR : return "OperationDeferredKHR"; + case Result::eOperationNotDeferredKHR : return "OperationNotDeferredKHR"; + case Result::ePipelineCompileRequiredEXT : return "PipelineCompileRequiredEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SampleCountFlagBits : VkSampleCountFlags + { + e1 = VK_SAMPLE_COUNT_1_BIT, + e2 = VK_SAMPLE_COUNT_2_BIT, + e4 = VK_SAMPLE_COUNT_4_BIT, + e8 = VK_SAMPLE_COUNT_8_BIT, + e16 = VK_SAMPLE_COUNT_16_BIT, + e32 = VK_SAMPLE_COUNT_32_BIT, + e64 = VK_SAMPLE_COUNT_64_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( SampleCountFlagBits value ) + { + switch ( value ) + { + case SampleCountFlagBits::e1 : return "1"; + case SampleCountFlagBits::e2 : return "2"; + case SampleCountFlagBits::e4 : return "4"; + case SampleCountFlagBits::e8 : return "8"; + case SampleCountFlagBits::e16 : return "16"; + case SampleCountFlagBits::e32 : return "32"; + case SampleCountFlagBits::e64 : return "64"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SamplerAddressMode + { + eRepeat = VK_SAMPLER_ADDRESS_MODE_REPEAT, + eMirroredRepeat = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT, + eClampToEdge = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + eClampToBorder = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, + eMirrorClampToEdge = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, + eMirrorClampToEdgeKHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR + }; + + VULKAN_HPP_INLINE 
std::string to_string( SamplerAddressMode value ) + { + switch ( value ) + { + case SamplerAddressMode::eRepeat : return "Repeat"; + case SamplerAddressMode::eMirroredRepeat : return "MirroredRepeat"; + case SamplerAddressMode::eClampToEdge : return "ClampToEdge"; + case SamplerAddressMode::eClampToBorder : return "ClampToBorder"; + case SamplerAddressMode::eMirrorClampToEdge : return "MirrorClampToEdge"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SamplerCreateFlagBits : VkSamplerCreateFlags + { + eSubsampledEXT = VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT, + eSubsampledCoarseReconstructionEXT = VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( SamplerCreateFlagBits value ) + { + switch ( value ) + { + case SamplerCreateFlagBits::eSubsampledEXT : return "SubsampledEXT"; + case SamplerCreateFlagBits::eSubsampledCoarseReconstructionEXT : return "SubsampledCoarseReconstructionEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SamplerMipmapMode + { + eNearest = VK_SAMPLER_MIPMAP_MODE_NEAREST, + eLinear = VK_SAMPLER_MIPMAP_MODE_LINEAR + }; + + VULKAN_HPP_INLINE std::string to_string( SamplerMipmapMode value ) + { + switch ( value ) + { + case SamplerMipmapMode::eNearest : return "Nearest"; + case SamplerMipmapMode::eLinear : return "Linear"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SamplerReductionMode + { + eWeightedAverage = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE, + eMin = VK_SAMPLER_REDUCTION_MODE_MIN, + eMax = VK_SAMPLER_REDUCTION_MODE_MAX + }; + using SamplerReductionModeEXT = SamplerReductionMode; + + VULKAN_HPP_INLINE std::string to_string( SamplerReductionMode value ) + { + switch ( value ) + { + case SamplerReductionMode::eWeightedAverage : return "WeightedAverage"; + case SamplerReductionMode::eMin : return "Min"; + case SamplerReductionMode::eMax : return "Max"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SamplerYcbcrModelConversion + { + eRgbIdentity = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + eYcbcrIdentity = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + eYcbcr709 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + eYcbcr601 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + eYcbcr2020 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 + }; + using SamplerYcbcrModelConversionKHR = SamplerYcbcrModelConversion; + + VULKAN_HPP_INLINE std::string to_string( SamplerYcbcrModelConversion value ) + { + switch ( value ) + { + case SamplerYcbcrModelConversion::eRgbIdentity : return "RgbIdentity"; + case SamplerYcbcrModelConversion::eYcbcrIdentity : return "YcbcrIdentity"; + case SamplerYcbcrModelConversion::eYcbcr709 : return "Ycbcr709"; + case SamplerYcbcrModelConversion::eYcbcr601 : return "Ycbcr601"; + case SamplerYcbcrModelConversion::eYcbcr2020 : return "Ycbcr2020"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SamplerYcbcrRange + { + eItuFull = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + eItuNarrow = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW + }; + using SamplerYcbcrRangeKHR = SamplerYcbcrRange; + + VULKAN_HPP_INLINE std::string to_string( SamplerYcbcrRange value ) + { + switch ( value ) + { + case SamplerYcbcrRange::eItuFull : return "ItuFull"; + case 
SamplerYcbcrRange::eItuNarrow : return "ItuNarrow"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ScopeNV + { + eDevice = VK_SCOPE_DEVICE_NV, + eWorkgroup = VK_SCOPE_WORKGROUP_NV, + eSubgroup = VK_SCOPE_SUBGROUP_NV, + eQueueFamily = VK_SCOPE_QUEUE_FAMILY_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ScopeNV value ) + { + switch ( value ) + { + case ScopeNV::eDevice : return "Device"; + case ScopeNV::eWorkgroup : return "Workgroup"; + case ScopeNV::eSubgroup : return "Subgroup"; + case ScopeNV::eQueueFamily : return "QueueFamily"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SemaphoreImportFlagBits : VkSemaphoreImportFlags + { + eTemporary = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT + }; + using SemaphoreImportFlagBitsKHR = SemaphoreImportFlagBits; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreImportFlagBits value ) + { + switch ( value ) + { + case SemaphoreImportFlagBits::eTemporary : return "Temporary"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SemaphoreType + { + eBinary = VK_SEMAPHORE_TYPE_BINARY, + eTimeline = VK_SEMAPHORE_TYPE_TIMELINE + }; + using SemaphoreTypeKHR = SemaphoreType; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreType value ) + { + switch ( value ) + { + case SemaphoreType::eBinary : return "Binary"; + case SemaphoreType::eTimeline : return "Timeline"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SemaphoreWaitFlagBits : VkSemaphoreWaitFlags + { + eAny = VK_SEMAPHORE_WAIT_ANY_BIT + }; + using SemaphoreWaitFlagBitsKHR = SemaphoreWaitFlagBits; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreWaitFlagBits value ) + { + switch ( value ) + { + case SemaphoreWaitFlagBits::eAny : return "Any"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ShaderCorePropertiesFlagBitsAMD : VkShaderCorePropertiesFlagsAMD + {}; + + VULKAN_HPP_INLINE std::string to_string( ShaderCorePropertiesFlagBitsAMD ) + { + return "(void)"; + } + + enum class ShaderFloatControlsIndependence + { + e32BitOnly = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY, + eAll = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL, + eNone = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE + }; + using ShaderFloatControlsIndependenceKHR = ShaderFloatControlsIndependence; + + VULKAN_HPP_INLINE std::string to_string( ShaderFloatControlsIndependence value ) + { + switch ( value ) + { + case ShaderFloatControlsIndependence::e32BitOnly : return "32BitOnly"; + case ShaderFloatControlsIndependence::eAll : return "All"; + case ShaderFloatControlsIndependence::eNone : return "None"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ShaderGroupShaderKHR + { + eGeneral = VK_SHADER_GROUP_SHADER_GENERAL_KHR, + eClosestHit = VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR, + eAnyHit = VK_SHADER_GROUP_SHADER_ANY_HIT_KHR, + eIntersection = VK_SHADER_GROUP_SHADER_INTERSECTION_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( ShaderGroupShaderKHR value ) + { + switch ( value ) + { + case ShaderGroupShaderKHR::eGeneral : return "General"; + case ShaderGroupShaderKHR::eClosestHit : return "ClosestHit"; + case ShaderGroupShaderKHR::eAnyHit : return "AnyHit"; + case 
ShaderGroupShaderKHR::eIntersection : return "Intersection"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ShaderInfoTypeAMD + { + eStatistics = VK_SHADER_INFO_TYPE_STATISTICS_AMD, + eBinary = VK_SHADER_INFO_TYPE_BINARY_AMD, + eDisassembly = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD + }; + + VULKAN_HPP_INLINE std::string to_string( ShaderInfoTypeAMD value ) + { + switch ( value ) + { + case ShaderInfoTypeAMD::eStatistics : return "Statistics"; + case ShaderInfoTypeAMD::eBinary : return "Binary"; + case ShaderInfoTypeAMD::eDisassembly : return "Disassembly"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ShaderModuleCreateFlagBits : VkShaderModuleCreateFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( ShaderModuleCreateFlagBits ) + { + return "(void)"; + } + + enum class ShaderStageFlagBits : VkShaderStageFlags + { + eVertex = VK_SHADER_STAGE_VERTEX_BIT, + eTessellationControl = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, + eTessellationEvaluation = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, + eGeometry = VK_SHADER_STAGE_GEOMETRY_BIT, + eFragment = VK_SHADER_STAGE_FRAGMENT_BIT, + eCompute = VK_SHADER_STAGE_COMPUTE_BIT, + eAllGraphics = VK_SHADER_STAGE_ALL_GRAPHICS, + eAll = VK_SHADER_STAGE_ALL, + eRaygenKHR = VK_SHADER_STAGE_RAYGEN_BIT_KHR, + eAnyHitKHR = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, + eClosestHitKHR = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, + eMissKHR = VK_SHADER_STAGE_MISS_BIT_KHR, + eIntersectionKHR = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, + eCallableKHR = VK_SHADER_STAGE_CALLABLE_BIT_KHR, + eTaskNV = VK_SHADER_STAGE_TASK_BIT_NV, + eMeshNV = VK_SHADER_STAGE_MESH_BIT_NV, + eAnyHitNV = VK_SHADER_STAGE_ANY_HIT_BIT_NV, + eCallableNV = VK_SHADER_STAGE_CALLABLE_BIT_NV, + eClosestHitNV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV, + eIntersectionNV = VK_SHADER_STAGE_INTERSECTION_BIT_NV, + eMissNV = VK_SHADER_STAGE_MISS_BIT_NV, + eRaygenNV = VK_SHADER_STAGE_RAYGEN_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ShaderStageFlagBits value ) + { + switch ( value ) + { + case ShaderStageFlagBits::eVertex : return "Vertex"; + case ShaderStageFlagBits::eTessellationControl : return "TessellationControl"; + case ShaderStageFlagBits::eTessellationEvaluation : return "TessellationEvaluation"; + case ShaderStageFlagBits::eGeometry : return "Geometry"; + case ShaderStageFlagBits::eFragment : return "Fragment"; + case ShaderStageFlagBits::eCompute : return "Compute"; + case ShaderStageFlagBits::eAllGraphics : return "AllGraphics"; + case ShaderStageFlagBits::eAll : return "All"; + case ShaderStageFlagBits::eRaygenKHR : return "RaygenKHR"; + case ShaderStageFlagBits::eAnyHitKHR : return "AnyHitKHR"; + case ShaderStageFlagBits::eClosestHitKHR : return "ClosestHitKHR"; + case ShaderStageFlagBits::eMissKHR : return "MissKHR"; + case ShaderStageFlagBits::eIntersectionKHR : return "IntersectionKHR"; + case ShaderStageFlagBits::eCallableKHR : return "CallableKHR"; + case ShaderStageFlagBits::eTaskNV : return "TaskNV"; + case ShaderStageFlagBits::eMeshNV : return "MeshNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ShadingRatePaletteEntryNV + { + eNoInvocations = VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV, + e16InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV, + e8InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV, + 
e4InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV, + e2InvocationsPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV, + e1InvocationPerPixel = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV, + e1InvocationPer2X1Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV, + e1InvocationPer1X2Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV, + e1InvocationPer2X2Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV, + e1InvocationPer4X2Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV, + e1InvocationPer2X4Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV, + e1InvocationPer4X4Pixels = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ShadingRatePaletteEntryNV value ) + { + switch ( value ) + { + case ShadingRatePaletteEntryNV::eNoInvocations : return "NoInvocations"; + case ShadingRatePaletteEntryNV::e16InvocationsPerPixel : return "16InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e8InvocationsPerPixel : return "8InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e4InvocationsPerPixel : return "4InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e2InvocationsPerPixel : return "2InvocationsPerPixel"; + case ShadingRatePaletteEntryNV::e1InvocationPerPixel : return "1InvocationPerPixel"; + case ShadingRatePaletteEntryNV::e1InvocationPer2X1Pixels : return "1InvocationPer2X1Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer1X2Pixels : return "1InvocationPer1X2Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer2X2Pixels : return "1InvocationPer2X2Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer4X2Pixels : return "1InvocationPer4X2Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer2X4Pixels : return "1InvocationPer2X4Pixels"; + case ShadingRatePaletteEntryNV::e1InvocationPer4X4Pixels : return "1InvocationPer4X4Pixels"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SharingMode + { + eExclusive = VK_SHARING_MODE_EXCLUSIVE, + eConcurrent = VK_SHARING_MODE_CONCURRENT + }; + + VULKAN_HPP_INLINE std::string to_string( SharingMode value ) + { + switch ( value ) + { + case SharingMode::eExclusive : return "Exclusive"; + case SharingMode::eConcurrent : return "Concurrent"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SparseImageFormatFlagBits : VkSparseImageFormatFlags + { + eSingleMiptail = VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT, + eAlignedMipSize = VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT, + eNonstandardBlockSize = VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( SparseImageFormatFlagBits value ) + { + switch ( value ) + { + case SparseImageFormatFlagBits::eSingleMiptail : return "SingleMiptail"; + case SparseImageFormatFlagBits::eAlignedMipSize : return "AlignedMipSize"; + case SparseImageFormatFlagBits::eNonstandardBlockSize : return "NonstandardBlockSize"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SparseMemoryBindFlagBits : VkSparseMemoryBindFlags + { + eMetadata = VK_SPARSE_MEMORY_BIND_METADATA_BIT + }; + + VULKAN_HPP_INLINE std::string to_string( SparseMemoryBindFlagBits value ) + { + switch ( value ) + { + case SparseMemoryBindFlagBits::eMetadata : return 
"Metadata"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class StencilFaceFlagBits : VkStencilFaceFlags + { + eFront = VK_STENCIL_FACE_FRONT_BIT, + eBack = VK_STENCIL_FACE_BACK_BIT, + eFrontAndBack = VK_STENCIL_FACE_FRONT_AND_BACK, + eVkStencilFrontAndBack = VK_STENCIL_FRONT_AND_BACK + }; + + VULKAN_HPP_INLINE std::string to_string( StencilFaceFlagBits value ) + { + switch ( value ) + { + case StencilFaceFlagBits::eFront : return "Front"; + case StencilFaceFlagBits::eBack : return "Back"; + case StencilFaceFlagBits::eFrontAndBack : return "FrontAndBack"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class StencilOp + { + eKeep = VK_STENCIL_OP_KEEP, + eZero = VK_STENCIL_OP_ZERO, + eReplace = VK_STENCIL_OP_REPLACE, + eIncrementAndClamp = VK_STENCIL_OP_INCREMENT_AND_CLAMP, + eDecrementAndClamp = VK_STENCIL_OP_DECREMENT_AND_CLAMP, + eInvert = VK_STENCIL_OP_INVERT, + eIncrementAndWrap = VK_STENCIL_OP_INCREMENT_AND_WRAP, + eDecrementAndWrap = VK_STENCIL_OP_DECREMENT_AND_WRAP + }; + + VULKAN_HPP_INLINE std::string to_string( StencilOp value ) + { + switch ( value ) + { + case StencilOp::eKeep : return "Keep"; + case StencilOp::eZero : return "Zero"; + case StencilOp::eReplace : return "Replace"; + case StencilOp::eIncrementAndClamp : return "IncrementAndClamp"; + case StencilOp::eDecrementAndClamp : return "DecrementAndClamp"; + case StencilOp::eInvert : return "Invert"; + case StencilOp::eIncrementAndWrap : return "IncrementAndWrap"; + case StencilOp::eDecrementAndWrap : return "DecrementAndWrap"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class StructureType + { + eApplicationInfo = VK_STRUCTURE_TYPE_APPLICATION_INFO, + eInstanceCreateInfo = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + eDeviceQueueCreateInfo = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + eDeviceCreateInfo = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + eSubmitInfo = VK_STRUCTURE_TYPE_SUBMIT_INFO, + eMemoryAllocateInfo = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + eMappedMemoryRange = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, + eBindSparseInfo = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, + eFenceCreateInfo = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + eSemaphoreCreateInfo = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + eEventCreateInfo = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, + eQueryPoolCreateInfo = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, + eBufferCreateInfo = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + eBufferViewCreateInfo = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, + eImageCreateInfo = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + eImageViewCreateInfo = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + eShaderModuleCreateInfo = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, + ePipelineCacheCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, + ePipelineShaderStageCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + ePipelineVertexInputStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + ePipelineInputAssemblyStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + ePipelineTessellationStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, + ePipelineViewportStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, + ePipelineRasterizationStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + ePipelineMultisampleStateCreateInfo = 
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + ePipelineDepthStencilStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, + ePipelineColorBlendStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + ePipelineDynamicStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, + eGraphicsPipelineCreateInfo = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, + eComputePipelineCreateInfo = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + ePipelineLayoutCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + eSamplerCreateInfo = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + eDescriptorSetLayoutCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + eDescriptorPoolCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + eDescriptorSetAllocateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + eWriteDescriptorSet = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + eCopyDescriptorSet = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET, + eFramebufferCreateInfo = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + eRenderPassCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + eCommandPoolCreateInfo = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + eCommandBufferAllocateInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + eCommandBufferInheritanceInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, + eCommandBufferBeginInfo = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + eRenderPassBeginInfo = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, + eBufferMemoryBarrier = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, + eImageMemoryBarrier = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + eMemoryBarrier = VK_STRUCTURE_TYPE_MEMORY_BARRIER, + eLoaderInstanceCreateInfo = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO, + eLoaderDeviceCreateInfo = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO, + ePhysicalDeviceSubgroupProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES, + eBindBufferMemoryInfo = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + eBindImageMemoryInfo = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + ePhysicalDevice16BitStorageFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + eMemoryDedicatedRequirements = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + eMemoryDedicatedAllocateInfo = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + eMemoryAllocateFlagsInfo = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + eDeviceGroupRenderPassBeginInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + eDeviceGroupCommandBufferBeginInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + eDeviceGroupSubmitInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + eDeviceGroupBindSparseInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + eBindBufferMemoryDeviceGroupInfo = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + eBindImageMemoryDeviceGroupInfo = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + ePhysicalDeviceGroupProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + eDeviceGroupDeviceCreateInfo = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + eBufferMemoryRequirementsInfo2 = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + eImageMemoryRequirementsInfo2 = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + eImageSparseMemoryRequirementsInfo2 = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + eMemoryRequirements2 = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + eSparseImageMemoryRequirements2 = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + 
ePhysicalDeviceFeatures2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + ePhysicalDeviceProperties2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + eFormatProperties2 = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + eImageFormatProperties2 = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + ePhysicalDeviceImageFormatInfo2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + eQueueFamilyProperties2 = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + ePhysicalDeviceMemoryProperties2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + eSparseImageFormatProperties2 = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + ePhysicalDeviceSparseImageFormatInfo2 = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + ePhysicalDevicePointClippingProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + eRenderPassInputAttachmentAspectCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + eImageViewUsageCreateInfo = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + ePipelineTessellationDomainOriginStateCreateInfo = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + eRenderPassMultiviewCreateInfo = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + ePhysicalDeviceMultiviewFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + ePhysicalDeviceMultiviewProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + ePhysicalDeviceVariablePointersFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + eProtectedSubmitInfo = VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO, + ePhysicalDeviceProtectedMemoryFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES, + ePhysicalDeviceProtectedMemoryProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES, + eDeviceQueueInfo2 = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, + eSamplerYcbcrConversionCreateInfo = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + eSamplerYcbcrConversionInfo = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + eBindImagePlaneMemoryInfo = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + eImagePlaneMemoryRequirementsInfo = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + ePhysicalDeviceSamplerYcbcrConversionFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + eSamplerYcbcrConversionImageFormatProperties = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + eDescriptorUpdateTemplateCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + ePhysicalDeviceExternalImageFormatInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + eExternalImageFormatProperties = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + ePhysicalDeviceExternalBufferInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + eExternalBufferProperties = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + ePhysicalDeviceIdProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + eExternalMemoryBufferCreateInfo = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + eExternalMemoryImageCreateInfo = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + eExportMemoryAllocateInfo = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + ePhysicalDeviceExternalFenceInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + eExternalFenceProperties = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + eExportFenceCreateInfo = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + eExportSemaphoreCreateInfo = 
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + ePhysicalDeviceExternalSemaphoreInfo = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + eExternalSemaphoreProperties = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + ePhysicalDeviceMaintenance3Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + eDescriptorSetLayoutSupport = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + ePhysicalDeviceShaderDrawParametersFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, + ePhysicalDeviceVulkan11Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES, + ePhysicalDeviceVulkan11Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES, + ePhysicalDeviceVulkan12Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES, + ePhysicalDeviceVulkan12Properties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES, + eImageFormatListCreateInfo = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO, + eAttachmentDescription2 = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2, + eAttachmentReference2 = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2, + eSubpassDescription2 = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2, + eSubpassDependency2 = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2, + eRenderPassCreateInfo2 = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2, + eSubpassBeginInfo = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO, + eSubpassEndInfo = VK_STRUCTURE_TYPE_SUBPASS_END_INFO, + ePhysicalDevice8BitStorageFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, + ePhysicalDeviceDriverProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES, + ePhysicalDeviceShaderAtomicInt64Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, + ePhysicalDeviceShaderFloat16Int8Features = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + ePhysicalDeviceFloatControlsProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, + eDescriptorSetLayoutBindingFlagsCreateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, + ePhysicalDeviceDescriptorIndexingFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES, + ePhysicalDeviceDescriptorIndexingProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, + eDescriptorSetVariableDescriptorCountAllocateInfo = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, + eDescriptorSetVariableDescriptorCountLayoutSupport = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, + ePhysicalDeviceDepthStencilResolveProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, + eSubpassDescriptionDepthStencilResolve = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, + ePhysicalDeviceScalarBlockLayoutFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES, + eImageStencilUsageCreateInfo = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO, + ePhysicalDeviceSamplerFilterMinmaxProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES, + eSamplerReductionModeCreateInfo = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO, + ePhysicalDeviceVulkanMemoryModelFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, + ePhysicalDeviceImagelessFramebufferFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, + eFramebufferAttachmentsCreateInfo = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, + eFramebufferAttachmentImageInfo = 
VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO, + eRenderPassAttachmentBeginInfo = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO, + ePhysicalDeviceUniformBufferStandardLayoutFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES, + ePhysicalDeviceShaderSubgroupExtendedTypesFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, + ePhysicalDeviceSeparateDepthStencilLayoutsFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, + eAttachmentReferenceStencilLayout = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT, + eAttachmentDescriptionStencilLayout = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, + ePhysicalDeviceHostQueryResetFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, + ePhysicalDeviceTimelineSemaphoreFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, + ePhysicalDeviceTimelineSemaphoreProperties = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, + eSemaphoreTypeCreateInfo = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO, + eTimelineSemaphoreSubmitInfo = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, + eSemaphoreWaitInfo = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, + eSemaphoreSignalInfo = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, + ePhysicalDeviceBufferDeviceAddressFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, + eBufferDeviceAddressInfo = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + eBufferOpaqueCaptureAddressCreateInfo = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, + eMemoryOpaqueCaptureAddressAllocateInfo = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, + eDeviceMemoryOpaqueCaptureAddressInfo = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, + eSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + ePresentInfoKHR = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + eDeviceGroupPresentCapabilitiesKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR, + eImageSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR, + eBindImageMemorySwapchainInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR, + eAcquireNextImageInfoKHR = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR, + eDeviceGroupPresentInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR, + eDeviceGroupSwapchainCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR, + eDisplayModeCreateInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR, + eDisplaySurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR, + eDisplayPresentInfoKHR = VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR, + eXlibSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, + eXcbSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR, + eWaylandSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, + eAndroidSurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + eWin32SurfaceCreateInfoKHR = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, + eDebugReportCallbackCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + ePipelineRasterizationStateRasterizationOrderAMD = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD, + eDebugMarkerObjectNameInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT, + eDebugMarkerObjectTagInfoEXT = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT, + eDebugMarkerMarkerInfoEXT = 
VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT, + eDedicatedAllocationImageCreateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV, + eDedicatedAllocationBufferCreateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV, + eDedicatedAllocationMemoryAllocateInfoNV = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV, + ePhysicalDeviceTransformFeedbackFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT, + ePhysicalDeviceTransformFeedbackPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT, + ePipelineRasterizationStateStreamCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT, + eImageViewHandleInfoNVX = VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX, + eImageViewAddressPropertiesNVX = VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX, + eTextureLodGatherFormatPropertiesAMD = VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD, + eStreamDescriptorSurfaceCreateInfoGGP = VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP, + ePhysicalDeviceCornerSampledImageFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV, + eExternalMemoryImageCreateInfoNV = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV, + eExportMemoryAllocateInfoNV = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV, + eImportMemoryWin32HandleInfoNV = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV, + eExportMemoryWin32HandleInfoNV = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV, + eWin32KeyedMutexAcquireReleaseInfoNV = VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV, + eValidationFlagsEXT = VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT, + eViSurfaceCreateInfoNN = VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN, + ePhysicalDeviceTextureCompressionAstcHdrFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT, + eImageViewAstcDecodeModeEXT = VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT, + ePhysicalDeviceAstcDecodeFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT, + eImportMemoryWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, + eExportMemoryWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR, + eMemoryWin32HandlePropertiesKHR = VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR, + eMemoryGetWin32HandleInfoKHR = VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, + eImportMemoryFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, + eMemoryFdPropertiesKHR = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR, + eMemoryGetFdInfoKHR = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, + eWin32KeyedMutexAcquireReleaseInfoKHR = VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR, + eImportSemaphoreWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, + eExportSemaphoreWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, + eD3D12FenceSubmitInfoKHR = VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR, + eSemaphoreGetWin32HandleInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, + eImportSemaphoreFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, + eSemaphoreGetFdInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, + ePhysicalDevicePushDescriptorPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR, + eCommandBufferInheritanceConditionalRenderingInfoEXT = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT, + 
ePhysicalDeviceConditionalRenderingFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT, + eConditionalRenderingBeginInfoEXT = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT, + ePresentRegionsKHR = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR, + ePipelineViewportWScalingStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV, + eSurfaceCapabilities2EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + eDisplayPowerInfoEXT = VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT, + eDeviceEventInfoEXT = VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT, + eDisplayEventInfoEXT = VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT, + eSwapchainCounterCreateInfoEXT = VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT, + ePresentTimesInfoGOOGLE = VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE, + ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX, + ePipelineViewportSwizzleStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV, + ePhysicalDeviceDiscardRectanglePropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT, + ePipelineDiscardRectangleStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT, + ePhysicalDeviceConservativeRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT, + ePipelineRasterizationConservativeStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT, + ePhysicalDeviceDepthClipEnableFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT, + ePipelineRasterizationDepthClipStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT, + eHdrMetadataEXT = VK_STRUCTURE_TYPE_HDR_METADATA_EXT, + eSharedPresentSurfaceCapabilitiesKHR = VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR, + eImportFenceWin32HandleInfoKHR = VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, + eExportFenceWin32HandleInfoKHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR, + eFenceGetWin32HandleInfoKHR = VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, + eImportFenceFdInfoKHR = VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, + eFenceGetFdInfoKHR = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, + ePhysicalDevicePerformanceQueryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR, + ePhysicalDevicePerformanceQueryPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR, + eQueryPoolPerformanceCreateInfoKHR = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR, + ePerformanceQuerySubmitInfoKHR = VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR, + eAcquireProfilingLockInfoKHR = VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR, + ePerformanceCounterKHR = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR, + ePerformanceCounterDescriptionKHR = VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR, + ePhysicalDeviceSurfaceInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR, + eSurfaceCapabilities2KHR = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR, + eSurfaceFormat2KHR = VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR, + eDisplayProperties2KHR = VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR, + eDisplayPlaneProperties2KHR = VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR, + eDisplayModeProperties2KHR = VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR, + eDisplayPlaneInfo2KHR = 
VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR, + eDisplayPlaneCapabilities2KHR = VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR, + eIosSurfaceCreateInfoMVK = VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK, + eMacosSurfaceCreateInfoMVK = VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK, + eDebugUtilsObjectNameInfoEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, + eDebugUtilsObjectTagInfoEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT, + eDebugUtilsLabelEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT, + eDebugUtilsMessengerCallbackDataEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT, + eDebugUtilsMessengerCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + eAndroidHardwareBufferUsageANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID, + eAndroidHardwareBufferPropertiesANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID, + eAndroidHardwareBufferFormatPropertiesANDROID = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID, + eImportAndroidHardwareBufferInfoANDROID = VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, + eMemoryGetAndroidHardwareBufferInfoANDROID = VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID, + eExternalFormatANDROID = VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID, + ePhysicalDeviceInlineUniformBlockFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT, + ePhysicalDeviceInlineUniformBlockPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT, + eWriteDescriptorSetInlineUniformBlockEXT = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT, + eDescriptorPoolInlineUniformBlockCreateInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT, + eSampleLocationsInfoEXT = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT, + eRenderPassSampleLocationsBeginInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT, + ePipelineSampleLocationsStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT, + ePhysicalDeviceSampleLocationsPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT, + eMultisamplePropertiesEXT = VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT, + ePhysicalDeviceBlendOperationAdvancedFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT, + ePhysicalDeviceBlendOperationAdvancedPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT, + ePipelineColorBlendAdvancedStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT, + ePipelineCoverageToColorStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV, + eWriteDescriptorSetAccelerationStructureKHR = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, + eAccelerationStructureBuildGeometryInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, + eAccelerationStructureDeviceAddressInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR, + eAccelerationStructureGeometryAabbsDataKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, + eAccelerationStructureGeometryInstancesDataKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, + eAccelerationStructureGeometryTrianglesDataKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, + eAccelerationStructureGeometryKHR = 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR, + eAccelerationStructureVersionInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR, + eCopyAccelerationStructureInfoKHR = VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR, + eCopyAccelerationStructureToMemoryInfoKHR = VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR, + eCopyMemoryToAccelerationStructureInfoKHR = VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR, + ePhysicalDeviceAccelerationStructureFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR, + ePhysicalDeviceAccelerationStructurePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR, + eAccelerationStructureCreateInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR, + eAccelerationStructureBuildSizesInfoKHR = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR, + ePhysicalDeviceRayTracingPipelineFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR, + ePhysicalDeviceRayTracingPipelinePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR, + eRayTracingPipelineCreateInfoKHR = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR, + eRayTracingShaderGroupCreateInfoKHR = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR, + eRayTracingPipelineInterfaceCreateInfoKHR = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR, + ePhysicalDeviceRayQueryFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR, + ePipelineCoverageModulationStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV, + ePhysicalDeviceShaderSmBuiltinsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV, + ePhysicalDeviceShaderSmBuiltinsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV, + eDrmFormatModifierPropertiesListEXT = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT, + ePhysicalDeviceImageDrmFormatModifierInfoEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT, + eImageDrmFormatModifierListCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT, + eImageDrmFormatModifierExplicitCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT, + eImageDrmFormatModifierPropertiesEXT = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT, + eValidationCacheCreateInfoEXT = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT, + eShaderModuleValidationCacheCreateInfoEXT = VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT, + ePhysicalDevicePortabilitySubsetFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR, + ePhysicalDevicePortabilitySubsetPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR, + ePipelineViewportShadingRateImageStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV, + ePhysicalDeviceShadingRateImageFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV, + ePhysicalDeviceShadingRateImagePropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV, + ePipelineViewportCoarseSampleOrderStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV, + eRayTracingPipelineCreateInfoNV = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, + eAccelerationStructureCreateInfoNV = 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV, + eGeometryNV = VK_STRUCTURE_TYPE_GEOMETRY_NV, + eGeometryTrianglesNV = VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV, + eGeometryAabbNV = VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV, + eBindAccelerationStructureMemoryInfoNV = VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV, + eWriteDescriptorSetAccelerationStructureNV = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV, + eAccelerationStructureMemoryRequirementsInfoNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, + ePhysicalDeviceRayTracingPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV, + eRayTracingShaderGroupCreateInfoNV = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV, + eAccelerationStructureInfoNV = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV, + ePhysicalDeviceRepresentativeFragmentTestFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV, + ePipelineRepresentativeFragmentTestStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV, + ePhysicalDeviceImageViewImageFormatInfoEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT, + eFilterCubicImageViewImageFormatPropertiesEXT = VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT, + eDeviceQueueGlobalPriorityCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT, + eImportMemoryHostPointerInfoEXT = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT, + eMemoryHostPointerPropertiesEXT = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT, + ePhysicalDeviceExternalMemoryHostPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT, + ePhysicalDeviceShaderClockFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR, + ePipelineCompilerControlCreateInfoAMD = VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD, + eCalibratedTimestampInfoEXT = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT, + ePhysicalDeviceShaderCorePropertiesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD, + eDeviceMemoryOverallocationCreateInfoAMD = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD, + ePhysicalDeviceVertexAttributeDivisorPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT, + ePipelineVertexInputDivisorStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT, + ePhysicalDeviceVertexAttributeDivisorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT, + ePresentFrameTokenGGP = VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP, + ePipelineCreationFeedbackCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT, + ePhysicalDeviceComputeShaderDerivativesFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV, + ePhysicalDeviceMeshShaderFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV, + ePhysicalDeviceMeshShaderPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV, + ePhysicalDeviceFragmentShaderBarycentricFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV, + ePhysicalDeviceShaderImageFootprintFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV, + ePipelineViewportExclusiveScissorStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV, + 
ePhysicalDeviceExclusiveScissorFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV, + eCheckpointDataNV = VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV, + eQueueFamilyCheckpointPropertiesNV = VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV, + ePhysicalDeviceShaderIntegerFunctions2FeaturesINTEL = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL, + eQueryPoolPerformanceQueryCreateInfoINTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL, + eInitializePerformanceApiInfoINTEL = VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL, + ePerformanceMarkerInfoINTEL = VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL, + ePerformanceStreamMarkerInfoINTEL = VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL, + ePerformanceOverrideInfoINTEL = VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL, + ePerformanceConfigurationAcquireInfoINTEL = VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL, + ePhysicalDevicePciBusInfoPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT, + eDisplayNativeHdrSurfaceCapabilitiesAMD = VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD, + eSwapchainDisplayNativeHdrCreateInfoAMD = VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD, + eImagepipeSurfaceCreateInfoFUCHSIA = VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA, + ePhysicalDeviceShaderTerminateInvocationFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR, + eMetalSurfaceCreateInfoEXT = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, + ePhysicalDeviceFragmentDensityMapFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT, + ePhysicalDeviceFragmentDensityMapPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT, + eRenderPassFragmentDensityMapCreateInfoEXT = VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT, + ePhysicalDeviceSubgroupSizeControlPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT, + ePipelineShaderStageRequiredSubgroupSizeCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT, + ePhysicalDeviceSubgroupSizeControlFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT, + eFragmentShadingRateAttachmentInfoKHR = VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR, + ePipelineFragmentShadingRateStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR, + ePhysicalDeviceFragmentShadingRatePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR, + ePhysicalDeviceFragmentShadingRateFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR, + ePhysicalDeviceFragmentShadingRateKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR, + ePhysicalDeviceShaderCoreProperties2AMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD, + ePhysicalDeviceCoherentMemoryFeaturesAMD = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD, + ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT, + ePhysicalDeviceMemoryBudgetPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT, + ePhysicalDeviceMemoryPriorityFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT, + eMemoryPriorityAllocateInfoEXT = 
VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT, + eSurfaceProtectedCapabilitiesKHR = VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR, + ePhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV, + ePhysicalDeviceBufferDeviceAddressFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT, + eBufferDeviceAddressCreateInfoEXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT, + ePhysicalDeviceToolPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT, + eValidationFeaturesEXT = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT, + ePhysicalDeviceCooperativeMatrixFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV, + eCooperativeMatrixPropertiesNV = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV, + ePhysicalDeviceCooperativeMatrixPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV, + ePhysicalDeviceCoverageReductionModeFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV, + ePipelineCoverageReductionStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV, + eFramebufferMixedSamplesCombinationNV = VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV, + ePhysicalDeviceFragmentShaderInterlockFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT, + ePhysicalDeviceYcbcrImageArraysFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT, + eSurfaceFullScreenExclusiveInfoEXT = VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT, + eSurfaceCapabilitiesFullScreenExclusiveEXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT, + eSurfaceFullScreenExclusiveWin32InfoEXT = VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT, + eHeadlessSurfaceCreateInfoEXT = VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT, + ePhysicalDeviceLineRasterizationFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT, + ePipelineRasterizationLineStateCreateInfoEXT = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT, + ePhysicalDeviceLineRasterizationPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT, + ePhysicalDeviceShaderAtomicFloatFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT, + ePhysicalDeviceIndexTypeUint8FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT, + ePhysicalDeviceExtendedDynamicStateFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT, + ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR, + ePipelineInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR, + ePipelineExecutablePropertiesKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR, + ePipelineExecutableInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR, + ePipelineExecutableStatisticKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR, + ePipelineExecutableInternalRepresentationKHR = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR, + ePhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT, + ePhysicalDeviceDeviceGeneratedCommandsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV, + eGraphicsShaderGroupCreateInfoNV = 
VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV, + eGraphicsPipelineShaderGroupsCreateInfoNV = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV, + eIndirectCommandsLayoutTokenNV = VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV, + eIndirectCommandsLayoutCreateInfoNV = VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV, + eGeneratedCommandsInfoNV = VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV, + eGeneratedCommandsMemoryRequirementsInfoNV = VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV, + ePhysicalDeviceDeviceGeneratedCommandsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV, + ePhysicalDeviceTexelBufferAlignmentFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT, + ePhysicalDeviceTexelBufferAlignmentPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT, + eCommandBufferInheritanceRenderPassTransformInfoQCOM = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM, + eRenderPassTransformBeginInfoQCOM = VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM, + ePhysicalDeviceDeviceMemoryReportFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT, + eDeviceDeviceMemoryReportCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT, + eDeviceMemoryReportCallbackDataEXT = VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT, + ePhysicalDeviceRobustness2FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT, + ePhysicalDeviceRobustness2PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT, + eSamplerCustomBorderColorCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT, + ePhysicalDeviceCustomBorderColorPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT, + ePhysicalDeviceCustomBorderColorFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT, + ePipelineLibraryCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR, + ePhysicalDevicePrivateDataFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT, + eDevicePrivateDataCreateInfoEXT = VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT, + ePrivateDataSlotCreateInfoEXT = VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT, + ePhysicalDevicePipelineCreationCacheControlFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT, + ePhysicalDeviceDiagnosticsConfigFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV, + eDeviceDiagnosticsConfigCreateInfoNV = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV, + ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV, + ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV, + ePipelineFragmentShadingRateEnumStateCreateInfoNV = VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV, + ePhysicalDeviceFragmentDensityMap2FeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT, + ePhysicalDeviceFragmentDensityMap2PropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT, + eCopyCommandTransformInfoQCOM = VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM, + ePhysicalDeviceImageRobustnessFeaturesEXT = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT, + eCopyBufferInfo2KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR, + eCopyImageInfo2KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR, + eCopyBufferToImageInfo2KHR = VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR, + eCopyImageToBufferInfo2KHR = VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR, + eBlitImageInfo2KHR = VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR, + eResolveImageInfo2KHR = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR, + eBufferCopy2KHR = VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR, + eImageCopy2KHR = VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR, + eImageBlit2KHR = VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR, + eBufferImageCopy2KHR = VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR, + eImageResolve2KHR = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR, + ePhysicalDevice4444FormatsFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT, + eDirectfbSurfaceCreateInfoEXT = VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT, + eAttachmentDescription2KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR, + eAttachmentDescriptionStencilLayoutKHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR, + eAttachmentReference2KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR, + eAttachmentReferenceStencilLayoutKHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR, + eBindBufferMemoryDeviceGroupInfoKHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR, + eBindBufferMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, + eBindImageMemoryDeviceGroupInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR, + eBindImageMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, + eBindImagePlaneMemoryInfoKHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR, + eBufferDeviceAddressInfoEXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT, + eBufferDeviceAddressInfoKHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR, + eBufferMemoryRequirementsInfo2KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR, + eBufferOpaqueCaptureAddressCreateInfoKHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR, + eDebugReportCreateInfoEXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT, + eDescriptorSetLayoutBindingFlagsCreateInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT, + eDescriptorSetLayoutSupportKHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR, + eDescriptorSetVariableDescriptorCountAllocateInfoEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT, + eDescriptorSetVariableDescriptorCountLayoutSupportEXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT, + eDescriptorUpdateTemplateCreateInfoKHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR, + eDeviceGroupBindSparseInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR, + eDeviceGroupCommandBufferBeginInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR, + eDeviceGroupDeviceCreateInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR, + eDeviceGroupRenderPassBeginInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR, + eDeviceGroupSubmitInfoKHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR, + eDeviceMemoryOpaqueCaptureAddressInfoKHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR, + eExportFenceCreateInfoKHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, + eExportMemoryAllocateInfoKHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, + 
eExportSemaphoreCreateInfoKHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, + eExternalBufferPropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, + eExternalFencePropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, + eExternalImageFormatPropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR, + eExternalMemoryBufferCreateInfoKHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, + eExternalMemoryImageCreateInfoKHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR, + eExternalSemaphorePropertiesKHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, + eFormatProperties2KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR, + eFramebufferAttachmentsCreateInfoKHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR, + eFramebufferAttachmentImageInfoKHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR, + eImageFormatListCreateInfoKHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR, + eImageFormatProperties2KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR, + eImageMemoryRequirementsInfo2KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR, + eImagePlaneMemoryRequirementsInfoKHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR, + eImageSparseMemoryRequirementsInfo2KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR, + eImageStencilUsageCreateInfoEXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT, + eImageViewUsageCreateInfoKHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR, + eMemoryAllocateFlagsInfoKHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR, + eMemoryDedicatedAllocateInfoKHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, + eMemoryDedicatedRequirementsKHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR, + eMemoryOpaqueCaptureAddressAllocateInfoKHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR, + eMemoryRequirements2KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR, + ePhysicalDevice16BitStorageFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR, + ePhysicalDevice8BitStorageFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR, + ePhysicalDeviceBufferAddressFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT, + ePhysicalDeviceBufferDeviceAddressFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR, + ePhysicalDeviceDepthStencilResolvePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR, + ePhysicalDeviceDescriptorIndexingFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT, + ePhysicalDeviceDescriptorIndexingPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT, + ePhysicalDeviceDriverPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR, + ePhysicalDeviceExternalBufferInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, + ePhysicalDeviceExternalFenceInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, + ePhysicalDeviceExternalImageFormatInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR, + ePhysicalDeviceExternalSemaphoreInfoKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, + ePhysicalDeviceFeatures2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR, + ePhysicalDeviceFloat16Int8FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR, + ePhysicalDeviceFloatControlsPropertiesKHR = 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR, + ePhysicalDeviceGroupPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR, + ePhysicalDeviceHostQueryResetFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT, + ePhysicalDeviceIdPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR, + ePhysicalDeviceImagelessFramebufferFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR, + ePhysicalDeviceImageFormatInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR, + ePhysicalDeviceMaintenance3PropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR, + ePhysicalDeviceMemoryProperties2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR, + ePhysicalDeviceMultiviewFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR, + ePhysicalDeviceMultiviewPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR, + ePhysicalDevicePointClippingPropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR, + ePhysicalDeviceProperties2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR, + ePhysicalDeviceSamplerFilterMinmaxPropertiesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT, + ePhysicalDeviceSamplerYcbcrConversionFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR, + ePhysicalDeviceScalarBlockLayoutFeaturesEXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT, + ePhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR, + ePhysicalDeviceShaderAtomicInt64FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR, + ePhysicalDeviceShaderDrawParameterFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES, + ePhysicalDeviceShaderFloat16Int8FeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR, + ePhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR, + ePhysicalDeviceSparseImageFormatInfo2KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR, + ePhysicalDeviceTimelineSemaphoreFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR, + ePhysicalDeviceTimelineSemaphorePropertiesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR, + ePhysicalDeviceUniformBufferStandardLayoutFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR, + ePhysicalDeviceVariablePointersFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR, + ePhysicalDeviceVariablePointerFeatures = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES, + ePhysicalDeviceVariablePointerFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR, + ePhysicalDeviceVulkanMemoryModelFeaturesKHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR, + ePipelineTessellationDomainOriginStateCreateInfoKHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR, + eQueryPoolCreateInfoINTEL = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL, + eQueueFamilyProperties2KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR, + eRenderPassAttachmentBeginInfoKHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR, + eRenderPassCreateInfo2KHR = 
VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR, + eRenderPassInputAttachmentAspectCreateInfoKHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR, + eRenderPassMultiviewCreateInfoKHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR, + eSamplerReductionModeCreateInfoEXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT, + eSamplerYcbcrConversionCreateInfoKHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR, + eSamplerYcbcrConversionImageFormatPropertiesKHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR, + eSamplerYcbcrConversionInfoKHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR, + eSemaphoreSignalInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR, + eSemaphoreTypeCreateInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR, + eSemaphoreWaitInfoKHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR, + eSparseImageFormatProperties2KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR, + eSparseImageMemoryRequirements2KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR, + eSubpassBeginInfoKHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR, + eSubpassDependency2KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR, + eSubpassDescription2KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR, + eSubpassDescriptionDepthStencilResolveKHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR, + eSubpassEndInfoKHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR, + eTimelineSemaphoreSubmitInfoKHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR + }; + + VULKAN_HPP_INLINE std::string to_string( StructureType value ) + { + switch ( value ) + { + case StructureType::eApplicationInfo : return "ApplicationInfo"; + case StructureType::eInstanceCreateInfo : return "InstanceCreateInfo"; + case StructureType::eDeviceQueueCreateInfo : return "DeviceQueueCreateInfo"; + case StructureType::eDeviceCreateInfo : return "DeviceCreateInfo"; + case StructureType::eSubmitInfo : return "SubmitInfo"; + case StructureType::eMemoryAllocateInfo : return "MemoryAllocateInfo"; + case StructureType::eMappedMemoryRange : return "MappedMemoryRange"; + case StructureType::eBindSparseInfo : return "BindSparseInfo"; + case StructureType::eFenceCreateInfo : return "FenceCreateInfo"; + case StructureType::eSemaphoreCreateInfo : return "SemaphoreCreateInfo"; + case StructureType::eEventCreateInfo : return "EventCreateInfo"; + case StructureType::eQueryPoolCreateInfo : return "QueryPoolCreateInfo"; + case StructureType::eBufferCreateInfo : return "BufferCreateInfo"; + case StructureType::eBufferViewCreateInfo : return "BufferViewCreateInfo"; + case StructureType::eImageCreateInfo : return "ImageCreateInfo"; + case StructureType::eImageViewCreateInfo : return "ImageViewCreateInfo"; + case StructureType::eShaderModuleCreateInfo : return "ShaderModuleCreateInfo"; + case StructureType::ePipelineCacheCreateInfo : return "PipelineCacheCreateInfo"; + case StructureType::ePipelineShaderStageCreateInfo : return "PipelineShaderStageCreateInfo"; + case StructureType::ePipelineVertexInputStateCreateInfo : return "PipelineVertexInputStateCreateInfo"; + case StructureType::ePipelineInputAssemblyStateCreateInfo : return "PipelineInputAssemblyStateCreateInfo"; + case StructureType::ePipelineTessellationStateCreateInfo : return "PipelineTessellationStateCreateInfo"; + case StructureType::ePipelineViewportStateCreateInfo : return "PipelineViewportStateCreateInfo"; + case StructureType::ePipelineRasterizationStateCreateInfo : return 
"PipelineRasterizationStateCreateInfo"; + case StructureType::ePipelineMultisampleStateCreateInfo : return "PipelineMultisampleStateCreateInfo"; + case StructureType::ePipelineDepthStencilStateCreateInfo : return "PipelineDepthStencilStateCreateInfo"; + case StructureType::ePipelineColorBlendStateCreateInfo : return "PipelineColorBlendStateCreateInfo"; + case StructureType::ePipelineDynamicStateCreateInfo : return "PipelineDynamicStateCreateInfo"; + case StructureType::eGraphicsPipelineCreateInfo : return "GraphicsPipelineCreateInfo"; + case StructureType::eComputePipelineCreateInfo : return "ComputePipelineCreateInfo"; + case StructureType::ePipelineLayoutCreateInfo : return "PipelineLayoutCreateInfo"; + case StructureType::eSamplerCreateInfo : return "SamplerCreateInfo"; + case StructureType::eDescriptorSetLayoutCreateInfo : return "DescriptorSetLayoutCreateInfo"; + case StructureType::eDescriptorPoolCreateInfo : return "DescriptorPoolCreateInfo"; + case StructureType::eDescriptorSetAllocateInfo : return "DescriptorSetAllocateInfo"; + case StructureType::eWriteDescriptorSet : return "WriteDescriptorSet"; + case StructureType::eCopyDescriptorSet : return "CopyDescriptorSet"; + case StructureType::eFramebufferCreateInfo : return "FramebufferCreateInfo"; + case StructureType::eRenderPassCreateInfo : return "RenderPassCreateInfo"; + case StructureType::eCommandPoolCreateInfo : return "CommandPoolCreateInfo"; + case StructureType::eCommandBufferAllocateInfo : return "CommandBufferAllocateInfo"; + case StructureType::eCommandBufferInheritanceInfo : return "CommandBufferInheritanceInfo"; + case StructureType::eCommandBufferBeginInfo : return "CommandBufferBeginInfo"; + case StructureType::eRenderPassBeginInfo : return "RenderPassBeginInfo"; + case StructureType::eBufferMemoryBarrier : return "BufferMemoryBarrier"; + case StructureType::eImageMemoryBarrier : return "ImageMemoryBarrier"; + case StructureType::eMemoryBarrier : return "MemoryBarrier"; + case StructureType::eLoaderInstanceCreateInfo : return "LoaderInstanceCreateInfo"; + case StructureType::eLoaderDeviceCreateInfo : return "LoaderDeviceCreateInfo"; + case StructureType::ePhysicalDeviceSubgroupProperties : return "PhysicalDeviceSubgroupProperties"; + case StructureType::eBindBufferMemoryInfo : return "BindBufferMemoryInfo"; + case StructureType::eBindImageMemoryInfo : return "BindImageMemoryInfo"; + case StructureType::ePhysicalDevice16BitStorageFeatures : return "PhysicalDevice16BitStorageFeatures"; + case StructureType::eMemoryDedicatedRequirements : return "MemoryDedicatedRequirements"; + case StructureType::eMemoryDedicatedAllocateInfo : return "MemoryDedicatedAllocateInfo"; + case StructureType::eMemoryAllocateFlagsInfo : return "MemoryAllocateFlagsInfo"; + case StructureType::eDeviceGroupRenderPassBeginInfo : return "DeviceGroupRenderPassBeginInfo"; + case StructureType::eDeviceGroupCommandBufferBeginInfo : return "DeviceGroupCommandBufferBeginInfo"; + case StructureType::eDeviceGroupSubmitInfo : return "DeviceGroupSubmitInfo"; + case StructureType::eDeviceGroupBindSparseInfo : return "DeviceGroupBindSparseInfo"; + case StructureType::eBindBufferMemoryDeviceGroupInfo : return "BindBufferMemoryDeviceGroupInfo"; + case StructureType::eBindImageMemoryDeviceGroupInfo : return "BindImageMemoryDeviceGroupInfo"; + case StructureType::ePhysicalDeviceGroupProperties : return "PhysicalDeviceGroupProperties"; + case StructureType::eDeviceGroupDeviceCreateInfo : return "DeviceGroupDeviceCreateInfo"; + case 
StructureType::eBufferMemoryRequirementsInfo2 : return "BufferMemoryRequirementsInfo2"; + case StructureType::eImageMemoryRequirementsInfo2 : return "ImageMemoryRequirementsInfo2"; + case StructureType::eImageSparseMemoryRequirementsInfo2 : return "ImageSparseMemoryRequirementsInfo2"; + case StructureType::eMemoryRequirements2 : return "MemoryRequirements2"; + case StructureType::eSparseImageMemoryRequirements2 : return "SparseImageMemoryRequirements2"; + case StructureType::ePhysicalDeviceFeatures2 : return "PhysicalDeviceFeatures2"; + case StructureType::ePhysicalDeviceProperties2 : return "PhysicalDeviceProperties2"; + case StructureType::eFormatProperties2 : return "FormatProperties2"; + case StructureType::eImageFormatProperties2 : return "ImageFormatProperties2"; + case StructureType::ePhysicalDeviceImageFormatInfo2 : return "PhysicalDeviceImageFormatInfo2"; + case StructureType::eQueueFamilyProperties2 : return "QueueFamilyProperties2"; + case StructureType::ePhysicalDeviceMemoryProperties2 : return "PhysicalDeviceMemoryProperties2"; + case StructureType::eSparseImageFormatProperties2 : return "SparseImageFormatProperties2"; + case StructureType::ePhysicalDeviceSparseImageFormatInfo2 : return "PhysicalDeviceSparseImageFormatInfo2"; + case StructureType::ePhysicalDevicePointClippingProperties : return "PhysicalDevicePointClippingProperties"; + case StructureType::eRenderPassInputAttachmentAspectCreateInfo : return "RenderPassInputAttachmentAspectCreateInfo"; + case StructureType::eImageViewUsageCreateInfo : return "ImageViewUsageCreateInfo"; + case StructureType::ePipelineTessellationDomainOriginStateCreateInfo : return "PipelineTessellationDomainOriginStateCreateInfo"; + case StructureType::eRenderPassMultiviewCreateInfo : return "RenderPassMultiviewCreateInfo"; + case StructureType::ePhysicalDeviceMultiviewFeatures : return "PhysicalDeviceMultiviewFeatures"; + case StructureType::ePhysicalDeviceMultiviewProperties : return "PhysicalDeviceMultiviewProperties"; + case StructureType::ePhysicalDeviceVariablePointersFeatures : return "PhysicalDeviceVariablePointersFeatures"; + case StructureType::eProtectedSubmitInfo : return "ProtectedSubmitInfo"; + case StructureType::ePhysicalDeviceProtectedMemoryFeatures : return "PhysicalDeviceProtectedMemoryFeatures"; + case StructureType::ePhysicalDeviceProtectedMemoryProperties : return "PhysicalDeviceProtectedMemoryProperties"; + case StructureType::eDeviceQueueInfo2 : return "DeviceQueueInfo2"; + case StructureType::eSamplerYcbcrConversionCreateInfo : return "SamplerYcbcrConversionCreateInfo"; + case StructureType::eSamplerYcbcrConversionInfo : return "SamplerYcbcrConversionInfo"; + case StructureType::eBindImagePlaneMemoryInfo : return "BindImagePlaneMemoryInfo"; + case StructureType::eImagePlaneMemoryRequirementsInfo : return "ImagePlaneMemoryRequirementsInfo"; + case StructureType::ePhysicalDeviceSamplerYcbcrConversionFeatures : return "PhysicalDeviceSamplerYcbcrConversionFeatures"; + case StructureType::eSamplerYcbcrConversionImageFormatProperties : return "SamplerYcbcrConversionImageFormatProperties"; + case StructureType::eDescriptorUpdateTemplateCreateInfo : return "DescriptorUpdateTemplateCreateInfo"; + case StructureType::ePhysicalDeviceExternalImageFormatInfo : return "PhysicalDeviceExternalImageFormatInfo"; + case StructureType::eExternalImageFormatProperties : return "ExternalImageFormatProperties"; + case StructureType::ePhysicalDeviceExternalBufferInfo : return "PhysicalDeviceExternalBufferInfo"; + case 
StructureType::eExternalBufferProperties : return "ExternalBufferProperties"; + case StructureType::ePhysicalDeviceIdProperties : return "PhysicalDeviceIdProperties"; + case StructureType::eExternalMemoryBufferCreateInfo : return "ExternalMemoryBufferCreateInfo"; + case StructureType::eExternalMemoryImageCreateInfo : return "ExternalMemoryImageCreateInfo"; + case StructureType::eExportMemoryAllocateInfo : return "ExportMemoryAllocateInfo"; + case StructureType::ePhysicalDeviceExternalFenceInfo : return "PhysicalDeviceExternalFenceInfo"; + case StructureType::eExternalFenceProperties : return "ExternalFenceProperties"; + case StructureType::eExportFenceCreateInfo : return "ExportFenceCreateInfo"; + case StructureType::eExportSemaphoreCreateInfo : return "ExportSemaphoreCreateInfo"; + case StructureType::ePhysicalDeviceExternalSemaphoreInfo : return "PhysicalDeviceExternalSemaphoreInfo"; + case StructureType::eExternalSemaphoreProperties : return "ExternalSemaphoreProperties"; + case StructureType::ePhysicalDeviceMaintenance3Properties : return "PhysicalDeviceMaintenance3Properties"; + case StructureType::eDescriptorSetLayoutSupport : return "DescriptorSetLayoutSupport"; + case StructureType::ePhysicalDeviceShaderDrawParametersFeatures : return "PhysicalDeviceShaderDrawParametersFeatures"; + case StructureType::ePhysicalDeviceVulkan11Features : return "PhysicalDeviceVulkan11Features"; + case StructureType::ePhysicalDeviceVulkan11Properties : return "PhysicalDeviceVulkan11Properties"; + case StructureType::ePhysicalDeviceVulkan12Features : return "PhysicalDeviceVulkan12Features"; + case StructureType::ePhysicalDeviceVulkan12Properties : return "PhysicalDeviceVulkan12Properties"; + case StructureType::eImageFormatListCreateInfo : return "ImageFormatListCreateInfo"; + case StructureType::eAttachmentDescription2 : return "AttachmentDescription2"; + case StructureType::eAttachmentReference2 : return "AttachmentReference2"; + case StructureType::eSubpassDescription2 : return "SubpassDescription2"; + case StructureType::eSubpassDependency2 : return "SubpassDependency2"; + case StructureType::eRenderPassCreateInfo2 : return "RenderPassCreateInfo2"; + case StructureType::eSubpassBeginInfo : return "SubpassBeginInfo"; + case StructureType::eSubpassEndInfo : return "SubpassEndInfo"; + case StructureType::ePhysicalDevice8BitStorageFeatures : return "PhysicalDevice8BitStorageFeatures"; + case StructureType::ePhysicalDeviceDriverProperties : return "PhysicalDeviceDriverProperties"; + case StructureType::ePhysicalDeviceShaderAtomicInt64Features : return "PhysicalDeviceShaderAtomicInt64Features"; + case StructureType::ePhysicalDeviceShaderFloat16Int8Features : return "PhysicalDeviceShaderFloat16Int8Features"; + case StructureType::ePhysicalDeviceFloatControlsProperties : return "PhysicalDeviceFloatControlsProperties"; + case StructureType::eDescriptorSetLayoutBindingFlagsCreateInfo : return "DescriptorSetLayoutBindingFlagsCreateInfo"; + case StructureType::ePhysicalDeviceDescriptorIndexingFeatures : return "PhysicalDeviceDescriptorIndexingFeatures"; + case StructureType::ePhysicalDeviceDescriptorIndexingProperties : return "PhysicalDeviceDescriptorIndexingProperties"; + case StructureType::eDescriptorSetVariableDescriptorCountAllocateInfo : return "DescriptorSetVariableDescriptorCountAllocateInfo"; + case StructureType::eDescriptorSetVariableDescriptorCountLayoutSupport : return "DescriptorSetVariableDescriptorCountLayoutSupport"; + case StructureType::ePhysicalDeviceDepthStencilResolveProperties : return 
"PhysicalDeviceDepthStencilResolveProperties"; + case StructureType::eSubpassDescriptionDepthStencilResolve : return "SubpassDescriptionDepthStencilResolve"; + case StructureType::ePhysicalDeviceScalarBlockLayoutFeatures : return "PhysicalDeviceScalarBlockLayoutFeatures"; + case StructureType::eImageStencilUsageCreateInfo : return "ImageStencilUsageCreateInfo"; + case StructureType::ePhysicalDeviceSamplerFilterMinmaxProperties : return "PhysicalDeviceSamplerFilterMinmaxProperties"; + case StructureType::eSamplerReductionModeCreateInfo : return "SamplerReductionModeCreateInfo"; + case StructureType::ePhysicalDeviceVulkanMemoryModelFeatures : return "PhysicalDeviceVulkanMemoryModelFeatures"; + case StructureType::ePhysicalDeviceImagelessFramebufferFeatures : return "PhysicalDeviceImagelessFramebufferFeatures"; + case StructureType::eFramebufferAttachmentsCreateInfo : return "FramebufferAttachmentsCreateInfo"; + case StructureType::eFramebufferAttachmentImageInfo : return "FramebufferAttachmentImageInfo"; + case StructureType::eRenderPassAttachmentBeginInfo : return "RenderPassAttachmentBeginInfo"; + case StructureType::ePhysicalDeviceUniformBufferStandardLayoutFeatures : return "PhysicalDeviceUniformBufferStandardLayoutFeatures"; + case StructureType::ePhysicalDeviceShaderSubgroupExtendedTypesFeatures : return "PhysicalDeviceShaderSubgroupExtendedTypesFeatures"; + case StructureType::ePhysicalDeviceSeparateDepthStencilLayoutsFeatures : return "PhysicalDeviceSeparateDepthStencilLayoutsFeatures"; + case StructureType::eAttachmentReferenceStencilLayout : return "AttachmentReferenceStencilLayout"; + case StructureType::eAttachmentDescriptionStencilLayout : return "AttachmentDescriptionStencilLayout"; + case StructureType::ePhysicalDeviceHostQueryResetFeatures : return "PhysicalDeviceHostQueryResetFeatures"; + case StructureType::ePhysicalDeviceTimelineSemaphoreFeatures : return "PhysicalDeviceTimelineSemaphoreFeatures"; + case StructureType::ePhysicalDeviceTimelineSemaphoreProperties : return "PhysicalDeviceTimelineSemaphoreProperties"; + case StructureType::eSemaphoreTypeCreateInfo : return "SemaphoreTypeCreateInfo"; + case StructureType::eTimelineSemaphoreSubmitInfo : return "TimelineSemaphoreSubmitInfo"; + case StructureType::eSemaphoreWaitInfo : return "SemaphoreWaitInfo"; + case StructureType::eSemaphoreSignalInfo : return "SemaphoreSignalInfo"; + case StructureType::ePhysicalDeviceBufferDeviceAddressFeatures : return "PhysicalDeviceBufferDeviceAddressFeatures"; + case StructureType::eBufferDeviceAddressInfo : return "BufferDeviceAddressInfo"; + case StructureType::eBufferOpaqueCaptureAddressCreateInfo : return "BufferOpaqueCaptureAddressCreateInfo"; + case StructureType::eMemoryOpaqueCaptureAddressAllocateInfo : return "MemoryOpaqueCaptureAddressAllocateInfo"; + case StructureType::eDeviceMemoryOpaqueCaptureAddressInfo : return "DeviceMemoryOpaqueCaptureAddressInfo"; + case StructureType::eSwapchainCreateInfoKHR : return "SwapchainCreateInfoKHR"; + case StructureType::ePresentInfoKHR : return "PresentInfoKHR"; + case StructureType::eDeviceGroupPresentCapabilitiesKHR : return "DeviceGroupPresentCapabilitiesKHR"; + case StructureType::eImageSwapchainCreateInfoKHR : return "ImageSwapchainCreateInfoKHR"; + case StructureType::eBindImageMemorySwapchainInfoKHR : return "BindImageMemorySwapchainInfoKHR"; + case StructureType::eAcquireNextImageInfoKHR : return "AcquireNextImageInfoKHR"; + case StructureType::eDeviceGroupPresentInfoKHR : return "DeviceGroupPresentInfoKHR"; + case 
StructureType::eDeviceGroupSwapchainCreateInfoKHR : return "DeviceGroupSwapchainCreateInfoKHR"; + case StructureType::eDisplayModeCreateInfoKHR : return "DisplayModeCreateInfoKHR"; + case StructureType::eDisplaySurfaceCreateInfoKHR : return "DisplaySurfaceCreateInfoKHR"; + case StructureType::eDisplayPresentInfoKHR : return "DisplayPresentInfoKHR"; + case StructureType::eXlibSurfaceCreateInfoKHR : return "XlibSurfaceCreateInfoKHR"; + case StructureType::eXcbSurfaceCreateInfoKHR : return "XcbSurfaceCreateInfoKHR"; + case StructureType::eWaylandSurfaceCreateInfoKHR : return "WaylandSurfaceCreateInfoKHR"; + case StructureType::eAndroidSurfaceCreateInfoKHR : return "AndroidSurfaceCreateInfoKHR"; + case StructureType::eWin32SurfaceCreateInfoKHR : return "Win32SurfaceCreateInfoKHR"; + case StructureType::eDebugReportCallbackCreateInfoEXT : return "DebugReportCallbackCreateInfoEXT"; + case StructureType::ePipelineRasterizationStateRasterizationOrderAMD : return "PipelineRasterizationStateRasterizationOrderAMD"; + case StructureType::eDebugMarkerObjectNameInfoEXT : return "DebugMarkerObjectNameInfoEXT"; + case StructureType::eDebugMarkerObjectTagInfoEXT : return "DebugMarkerObjectTagInfoEXT"; + case StructureType::eDebugMarkerMarkerInfoEXT : return "DebugMarkerMarkerInfoEXT"; + case StructureType::eDedicatedAllocationImageCreateInfoNV : return "DedicatedAllocationImageCreateInfoNV"; + case StructureType::eDedicatedAllocationBufferCreateInfoNV : return "DedicatedAllocationBufferCreateInfoNV"; + case StructureType::eDedicatedAllocationMemoryAllocateInfoNV : return "DedicatedAllocationMemoryAllocateInfoNV"; + case StructureType::ePhysicalDeviceTransformFeedbackFeaturesEXT : return "PhysicalDeviceTransformFeedbackFeaturesEXT"; + case StructureType::ePhysicalDeviceTransformFeedbackPropertiesEXT : return "PhysicalDeviceTransformFeedbackPropertiesEXT"; + case StructureType::ePipelineRasterizationStateStreamCreateInfoEXT : return "PipelineRasterizationStateStreamCreateInfoEXT"; + case StructureType::eImageViewHandleInfoNVX : return "ImageViewHandleInfoNVX"; + case StructureType::eImageViewAddressPropertiesNVX : return "ImageViewAddressPropertiesNVX"; + case StructureType::eTextureLodGatherFormatPropertiesAMD : return "TextureLodGatherFormatPropertiesAMD"; + case StructureType::eStreamDescriptorSurfaceCreateInfoGGP : return "StreamDescriptorSurfaceCreateInfoGGP"; + case StructureType::ePhysicalDeviceCornerSampledImageFeaturesNV : return "PhysicalDeviceCornerSampledImageFeaturesNV"; + case StructureType::eExternalMemoryImageCreateInfoNV : return "ExternalMemoryImageCreateInfoNV"; + case StructureType::eExportMemoryAllocateInfoNV : return "ExportMemoryAllocateInfoNV"; + case StructureType::eImportMemoryWin32HandleInfoNV : return "ImportMemoryWin32HandleInfoNV"; + case StructureType::eExportMemoryWin32HandleInfoNV : return "ExportMemoryWin32HandleInfoNV"; + case StructureType::eWin32KeyedMutexAcquireReleaseInfoNV : return "Win32KeyedMutexAcquireReleaseInfoNV"; + case StructureType::eValidationFlagsEXT : return "ValidationFlagsEXT"; + case StructureType::eViSurfaceCreateInfoNN : return "ViSurfaceCreateInfoNN"; + case StructureType::ePhysicalDeviceTextureCompressionAstcHdrFeaturesEXT : return "PhysicalDeviceTextureCompressionAstcHdrFeaturesEXT"; + case StructureType::eImageViewAstcDecodeModeEXT : return "ImageViewAstcDecodeModeEXT"; + case StructureType::ePhysicalDeviceAstcDecodeFeaturesEXT : return "PhysicalDeviceAstcDecodeFeaturesEXT"; + case StructureType::eImportMemoryWin32HandleInfoKHR : return 
"ImportMemoryWin32HandleInfoKHR"; + case StructureType::eExportMemoryWin32HandleInfoKHR : return "ExportMemoryWin32HandleInfoKHR"; + case StructureType::eMemoryWin32HandlePropertiesKHR : return "MemoryWin32HandlePropertiesKHR"; + case StructureType::eMemoryGetWin32HandleInfoKHR : return "MemoryGetWin32HandleInfoKHR"; + case StructureType::eImportMemoryFdInfoKHR : return "ImportMemoryFdInfoKHR"; + case StructureType::eMemoryFdPropertiesKHR : return "MemoryFdPropertiesKHR"; + case StructureType::eMemoryGetFdInfoKHR : return "MemoryGetFdInfoKHR"; + case StructureType::eWin32KeyedMutexAcquireReleaseInfoKHR : return "Win32KeyedMutexAcquireReleaseInfoKHR"; + case StructureType::eImportSemaphoreWin32HandleInfoKHR : return "ImportSemaphoreWin32HandleInfoKHR"; + case StructureType::eExportSemaphoreWin32HandleInfoKHR : return "ExportSemaphoreWin32HandleInfoKHR"; + case StructureType::eD3D12FenceSubmitInfoKHR : return "D3D12FenceSubmitInfoKHR"; + case StructureType::eSemaphoreGetWin32HandleInfoKHR : return "SemaphoreGetWin32HandleInfoKHR"; + case StructureType::eImportSemaphoreFdInfoKHR : return "ImportSemaphoreFdInfoKHR"; + case StructureType::eSemaphoreGetFdInfoKHR : return "SemaphoreGetFdInfoKHR"; + case StructureType::ePhysicalDevicePushDescriptorPropertiesKHR : return "PhysicalDevicePushDescriptorPropertiesKHR"; + case StructureType::eCommandBufferInheritanceConditionalRenderingInfoEXT : return "CommandBufferInheritanceConditionalRenderingInfoEXT"; + case StructureType::ePhysicalDeviceConditionalRenderingFeaturesEXT : return "PhysicalDeviceConditionalRenderingFeaturesEXT"; + case StructureType::eConditionalRenderingBeginInfoEXT : return "ConditionalRenderingBeginInfoEXT"; + case StructureType::ePresentRegionsKHR : return "PresentRegionsKHR"; + case StructureType::ePipelineViewportWScalingStateCreateInfoNV : return "PipelineViewportWScalingStateCreateInfoNV"; + case StructureType::eSurfaceCapabilities2EXT : return "SurfaceCapabilities2EXT"; + case StructureType::eDisplayPowerInfoEXT : return "DisplayPowerInfoEXT"; + case StructureType::eDeviceEventInfoEXT : return "DeviceEventInfoEXT"; + case StructureType::eDisplayEventInfoEXT : return "DisplayEventInfoEXT"; + case StructureType::eSwapchainCounterCreateInfoEXT : return "SwapchainCounterCreateInfoEXT"; + case StructureType::ePresentTimesInfoGOOGLE : return "PresentTimesInfoGOOGLE"; + case StructureType::ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX : return "PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX"; + case StructureType::ePipelineViewportSwizzleStateCreateInfoNV : return "PipelineViewportSwizzleStateCreateInfoNV"; + case StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT : return "PhysicalDeviceDiscardRectanglePropertiesEXT"; + case StructureType::ePipelineDiscardRectangleStateCreateInfoEXT : return "PipelineDiscardRectangleStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceConservativeRasterizationPropertiesEXT : return "PhysicalDeviceConservativeRasterizationPropertiesEXT"; + case StructureType::ePipelineRasterizationConservativeStateCreateInfoEXT : return "PipelineRasterizationConservativeStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceDepthClipEnableFeaturesEXT : return "PhysicalDeviceDepthClipEnableFeaturesEXT"; + case StructureType::ePipelineRasterizationDepthClipStateCreateInfoEXT : return "PipelineRasterizationDepthClipStateCreateInfoEXT"; + case StructureType::eHdrMetadataEXT : return "HdrMetadataEXT"; + case StructureType::eSharedPresentSurfaceCapabilitiesKHR : return 
"SharedPresentSurfaceCapabilitiesKHR"; + case StructureType::eImportFenceWin32HandleInfoKHR : return "ImportFenceWin32HandleInfoKHR"; + case StructureType::eExportFenceWin32HandleInfoKHR : return "ExportFenceWin32HandleInfoKHR"; + case StructureType::eFenceGetWin32HandleInfoKHR : return "FenceGetWin32HandleInfoKHR"; + case StructureType::eImportFenceFdInfoKHR : return "ImportFenceFdInfoKHR"; + case StructureType::eFenceGetFdInfoKHR : return "FenceGetFdInfoKHR"; + case StructureType::ePhysicalDevicePerformanceQueryFeaturesKHR : return "PhysicalDevicePerformanceQueryFeaturesKHR"; + case StructureType::ePhysicalDevicePerformanceQueryPropertiesKHR : return "PhysicalDevicePerformanceQueryPropertiesKHR"; + case StructureType::eQueryPoolPerformanceCreateInfoKHR : return "QueryPoolPerformanceCreateInfoKHR"; + case StructureType::ePerformanceQuerySubmitInfoKHR : return "PerformanceQuerySubmitInfoKHR"; + case StructureType::eAcquireProfilingLockInfoKHR : return "AcquireProfilingLockInfoKHR"; + case StructureType::ePerformanceCounterKHR : return "PerformanceCounterKHR"; + case StructureType::ePerformanceCounterDescriptionKHR : return "PerformanceCounterDescriptionKHR"; + case StructureType::ePhysicalDeviceSurfaceInfo2KHR : return "PhysicalDeviceSurfaceInfo2KHR"; + case StructureType::eSurfaceCapabilities2KHR : return "SurfaceCapabilities2KHR"; + case StructureType::eSurfaceFormat2KHR : return "SurfaceFormat2KHR"; + case StructureType::eDisplayProperties2KHR : return "DisplayProperties2KHR"; + case StructureType::eDisplayPlaneProperties2KHR : return "DisplayPlaneProperties2KHR"; + case StructureType::eDisplayModeProperties2KHR : return "DisplayModeProperties2KHR"; + case StructureType::eDisplayPlaneInfo2KHR : return "DisplayPlaneInfo2KHR"; + case StructureType::eDisplayPlaneCapabilities2KHR : return "DisplayPlaneCapabilities2KHR"; + case StructureType::eIosSurfaceCreateInfoMVK : return "IosSurfaceCreateInfoMVK"; + case StructureType::eMacosSurfaceCreateInfoMVK : return "MacosSurfaceCreateInfoMVK"; + case StructureType::eDebugUtilsObjectNameInfoEXT : return "DebugUtilsObjectNameInfoEXT"; + case StructureType::eDebugUtilsObjectTagInfoEXT : return "DebugUtilsObjectTagInfoEXT"; + case StructureType::eDebugUtilsLabelEXT : return "DebugUtilsLabelEXT"; + case StructureType::eDebugUtilsMessengerCallbackDataEXT : return "DebugUtilsMessengerCallbackDataEXT"; + case StructureType::eDebugUtilsMessengerCreateInfoEXT : return "DebugUtilsMessengerCreateInfoEXT"; + case StructureType::eAndroidHardwareBufferUsageANDROID : return "AndroidHardwareBufferUsageANDROID"; + case StructureType::eAndroidHardwareBufferPropertiesANDROID : return "AndroidHardwareBufferPropertiesANDROID"; + case StructureType::eAndroidHardwareBufferFormatPropertiesANDROID : return "AndroidHardwareBufferFormatPropertiesANDROID"; + case StructureType::eImportAndroidHardwareBufferInfoANDROID : return "ImportAndroidHardwareBufferInfoANDROID"; + case StructureType::eMemoryGetAndroidHardwareBufferInfoANDROID : return "MemoryGetAndroidHardwareBufferInfoANDROID"; + case StructureType::eExternalFormatANDROID : return "ExternalFormatANDROID"; + case StructureType::ePhysicalDeviceInlineUniformBlockFeaturesEXT : return "PhysicalDeviceInlineUniformBlockFeaturesEXT"; + case StructureType::ePhysicalDeviceInlineUniformBlockPropertiesEXT : return "PhysicalDeviceInlineUniformBlockPropertiesEXT"; + case StructureType::eWriteDescriptorSetInlineUniformBlockEXT : return "WriteDescriptorSetInlineUniformBlockEXT"; + case 
StructureType::eDescriptorPoolInlineUniformBlockCreateInfoEXT : return "DescriptorPoolInlineUniformBlockCreateInfoEXT"; + case StructureType::eSampleLocationsInfoEXT : return "SampleLocationsInfoEXT"; + case StructureType::eRenderPassSampleLocationsBeginInfoEXT : return "RenderPassSampleLocationsBeginInfoEXT"; + case StructureType::ePipelineSampleLocationsStateCreateInfoEXT : return "PipelineSampleLocationsStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceSampleLocationsPropertiesEXT : return "PhysicalDeviceSampleLocationsPropertiesEXT"; + case StructureType::eMultisamplePropertiesEXT : return "MultisamplePropertiesEXT"; + case StructureType::ePhysicalDeviceBlendOperationAdvancedFeaturesEXT : return "PhysicalDeviceBlendOperationAdvancedFeaturesEXT"; + case StructureType::ePhysicalDeviceBlendOperationAdvancedPropertiesEXT : return "PhysicalDeviceBlendOperationAdvancedPropertiesEXT"; + case StructureType::ePipelineColorBlendAdvancedStateCreateInfoEXT : return "PipelineColorBlendAdvancedStateCreateInfoEXT"; + case StructureType::ePipelineCoverageToColorStateCreateInfoNV : return "PipelineCoverageToColorStateCreateInfoNV"; + case StructureType::eWriteDescriptorSetAccelerationStructureKHR : return "WriteDescriptorSetAccelerationStructureKHR"; + case StructureType::eAccelerationStructureBuildGeometryInfoKHR : return "AccelerationStructureBuildGeometryInfoKHR"; + case StructureType::eAccelerationStructureDeviceAddressInfoKHR : return "AccelerationStructureDeviceAddressInfoKHR"; + case StructureType::eAccelerationStructureGeometryAabbsDataKHR : return "AccelerationStructureGeometryAabbsDataKHR"; + case StructureType::eAccelerationStructureGeometryInstancesDataKHR : return "AccelerationStructureGeometryInstancesDataKHR"; + case StructureType::eAccelerationStructureGeometryTrianglesDataKHR : return "AccelerationStructureGeometryTrianglesDataKHR"; + case StructureType::eAccelerationStructureGeometryKHR : return "AccelerationStructureGeometryKHR"; + case StructureType::eAccelerationStructureVersionInfoKHR : return "AccelerationStructureVersionInfoKHR"; + case StructureType::eCopyAccelerationStructureInfoKHR : return "CopyAccelerationStructureInfoKHR"; + case StructureType::eCopyAccelerationStructureToMemoryInfoKHR : return "CopyAccelerationStructureToMemoryInfoKHR"; + case StructureType::eCopyMemoryToAccelerationStructureInfoKHR : return "CopyMemoryToAccelerationStructureInfoKHR"; + case StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR : return "PhysicalDeviceAccelerationStructureFeaturesKHR"; + case StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR : return "PhysicalDeviceAccelerationStructurePropertiesKHR"; + case StructureType::eAccelerationStructureCreateInfoKHR : return "AccelerationStructureCreateInfoKHR"; + case StructureType::eAccelerationStructureBuildSizesInfoKHR : return "AccelerationStructureBuildSizesInfoKHR"; + case StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR : return "PhysicalDeviceRayTracingPipelineFeaturesKHR"; + case StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR : return "PhysicalDeviceRayTracingPipelinePropertiesKHR"; + case StructureType::eRayTracingPipelineCreateInfoKHR : return "RayTracingPipelineCreateInfoKHR"; + case StructureType::eRayTracingShaderGroupCreateInfoKHR : return "RayTracingShaderGroupCreateInfoKHR"; + case StructureType::eRayTracingPipelineInterfaceCreateInfoKHR : return "RayTracingPipelineInterfaceCreateInfoKHR"; + case StructureType::ePhysicalDeviceRayQueryFeaturesKHR : return 
"PhysicalDeviceRayQueryFeaturesKHR"; + case StructureType::ePipelineCoverageModulationStateCreateInfoNV : return "PipelineCoverageModulationStateCreateInfoNV"; + case StructureType::ePhysicalDeviceShaderSmBuiltinsFeaturesNV : return "PhysicalDeviceShaderSmBuiltinsFeaturesNV"; + case StructureType::ePhysicalDeviceShaderSmBuiltinsPropertiesNV : return "PhysicalDeviceShaderSmBuiltinsPropertiesNV"; + case StructureType::eDrmFormatModifierPropertiesListEXT : return "DrmFormatModifierPropertiesListEXT"; + case StructureType::ePhysicalDeviceImageDrmFormatModifierInfoEXT : return "PhysicalDeviceImageDrmFormatModifierInfoEXT"; + case StructureType::eImageDrmFormatModifierListCreateInfoEXT : return "ImageDrmFormatModifierListCreateInfoEXT"; + case StructureType::eImageDrmFormatModifierExplicitCreateInfoEXT : return "ImageDrmFormatModifierExplicitCreateInfoEXT"; + case StructureType::eImageDrmFormatModifierPropertiesEXT : return "ImageDrmFormatModifierPropertiesEXT"; + case StructureType::eValidationCacheCreateInfoEXT : return "ValidationCacheCreateInfoEXT"; + case StructureType::eShaderModuleValidationCacheCreateInfoEXT : return "ShaderModuleValidationCacheCreateInfoEXT"; + case StructureType::ePhysicalDevicePortabilitySubsetFeaturesKHR : return "PhysicalDevicePortabilitySubsetFeaturesKHR"; + case StructureType::ePhysicalDevicePortabilitySubsetPropertiesKHR : return "PhysicalDevicePortabilitySubsetPropertiesKHR"; + case StructureType::ePipelineViewportShadingRateImageStateCreateInfoNV : return "PipelineViewportShadingRateImageStateCreateInfoNV"; + case StructureType::ePhysicalDeviceShadingRateImageFeaturesNV : return "PhysicalDeviceShadingRateImageFeaturesNV"; + case StructureType::ePhysicalDeviceShadingRateImagePropertiesNV : return "PhysicalDeviceShadingRateImagePropertiesNV"; + case StructureType::ePipelineViewportCoarseSampleOrderStateCreateInfoNV : return "PipelineViewportCoarseSampleOrderStateCreateInfoNV"; + case StructureType::eRayTracingPipelineCreateInfoNV : return "RayTracingPipelineCreateInfoNV"; + case StructureType::eAccelerationStructureCreateInfoNV : return "AccelerationStructureCreateInfoNV"; + case StructureType::eGeometryNV : return "GeometryNV"; + case StructureType::eGeometryTrianglesNV : return "GeometryTrianglesNV"; + case StructureType::eGeometryAabbNV : return "GeometryAabbNV"; + case StructureType::eBindAccelerationStructureMemoryInfoNV : return "BindAccelerationStructureMemoryInfoNV"; + case StructureType::eWriteDescriptorSetAccelerationStructureNV : return "WriteDescriptorSetAccelerationStructureNV"; + case StructureType::eAccelerationStructureMemoryRequirementsInfoNV : return "AccelerationStructureMemoryRequirementsInfoNV"; + case StructureType::ePhysicalDeviceRayTracingPropertiesNV : return "PhysicalDeviceRayTracingPropertiesNV"; + case StructureType::eRayTracingShaderGroupCreateInfoNV : return "RayTracingShaderGroupCreateInfoNV"; + case StructureType::eAccelerationStructureInfoNV : return "AccelerationStructureInfoNV"; + case StructureType::ePhysicalDeviceRepresentativeFragmentTestFeaturesNV : return "PhysicalDeviceRepresentativeFragmentTestFeaturesNV"; + case StructureType::ePipelineRepresentativeFragmentTestStateCreateInfoNV : return "PipelineRepresentativeFragmentTestStateCreateInfoNV"; + case StructureType::ePhysicalDeviceImageViewImageFormatInfoEXT : return "PhysicalDeviceImageViewImageFormatInfoEXT"; + case StructureType::eFilterCubicImageViewImageFormatPropertiesEXT : return "FilterCubicImageViewImageFormatPropertiesEXT"; + case 
StructureType::eDeviceQueueGlobalPriorityCreateInfoEXT : return "DeviceQueueGlobalPriorityCreateInfoEXT"; + case StructureType::eImportMemoryHostPointerInfoEXT : return "ImportMemoryHostPointerInfoEXT"; + case StructureType::eMemoryHostPointerPropertiesEXT : return "MemoryHostPointerPropertiesEXT"; + case StructureType::ePhysicalDeviceExternalMemoryHostPropertiesEXT : return "PhysicalDeviceExternalMemoryHostPropertiesEXT"; + case StructureType::ePhysicalDeviceShaderClockFeaturesKHR : return "PhysicalDeviceShaderClockFeaturesKHR"; + case StructureType::ePipelineCompilerControlCreateInfoAMD : return "PipelineCompilerControlCreateInfoAMD"; + case StructureType::eCalibratedTimestampInfoEXT : return "CalibratedTimestampInfoEXT"; + case StructureType::ePhysicalDeviceShaderCorePropertiesAMD : return "PhysicalDeviceShaderCorePropertiesAMD"; + case StructureType::eDeviceMemoryOverallocationCreateInfoAMD : return "DeviceMemoryOverallocationCreateInfoAMD"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT : return "PhysicalDeviceVertexAttributeDivisorPropertiesEXT"; + case StructureType::ePipelineVertexInputDivisorStateCreateInfoEXT : return "PipelineVertexInputDivisorStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesEXT : return "PhysicalDeviceVertexAttributeDivisorFeaturesEXT"; + case StructureType::ePresentFrameTokenGGP : return "PresentFrameTokenGGP"; + case StructureType::ePipelineCreationFeedbackCreateInfoEXT : return "PipelineCreationFeedbackCreateInfoEXT"; + case StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV : return "PhysicalDeviceComputeShaderDerivativesFeaturesNV"; + case StructureType::ePhysicalDeviceMeshShaderFeaturesNV : return "PhysicalDeviceMeshShaderFeaturesNV"; + case StructureType::ePhysicalDeviceMeshShaderPropertiesNV : return "PhysicalDeviceMeshShaderPropertiesNV"; + case StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesNV : return "PhysicalDeviceFragmentShaderBarycentricFeaturesNV"; + case StructureType::ePhysicalDeviceShaderImageFootprintFeaturesNV : return "PhysicalDeviceShaderImageFootprintFeaturesNV"; + case StructureType::ePipelineViewportExclusiveScissorStateCreateInfoNV : return "PipelineViewportExclusiveScissorStateCreateInfoNV"; + case StructureType::ePhysicalDeviceExclusiveScissorFeaturesNV : return "PhysicalDeviceExclusiveScissorFeaturesNV"; + case StructureType::eCheckpointDataNV : return "CheckpointDataNV"; + case StructureType::eQueueFamilyCheckpointPropertiesNV : return "QueueFamilyCheckpointPropertiesNV"; + case StructureType::ePhysicalDeviceShaderIntegerFunctions2FeaturesINTEL : return "PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL"; + case StructureType::eQueryPoolPerformanceQueryCreateInfoINTEL : return "QueryPoolPerformanceQueryCreateInfoINTEL"; + case StructureType::eInitializePerformanceApiInfoINTEL : return "InitializePerformanceApiInfoINTEL"; + case StructureType::ePerformanceMarkerInfoINTEL : return "PerformanceMarkerInfoINTEL"; + case StructureType::ePerformanceStreamMarkerInfoINTEL : return "PerformanceStreamMarkerInfoINTEL"; + case StructureType::ePerformanceOverrideInfoINTEL : return "PerformanceOverrideInfoINTEL"; + case StructureType::ePerformanceConfigurationAcquireInfoINTEL : return "PerformanceConfigurationAcquireInfoINTEL"; + case StructureType::ePhysicalDevicePciBusInfoPropertiesEXT : return "PhysicalDevicePciBusInfoPropertiesEXT"; + case StructureType::eDisplayNativeHdrSurfaceCapabilitiesAMD : return "DisplayNativeHdrSurfaceCapabilitiesAMD"; + 
case StructureType::eSwapchainDisplayNativeHdrCreateInfoAMD : return "SwapchainDisplayNativeHdrCreateInfoAMD"; + case StructureType::eImagepipeSurfaceCreateInfoFUCHSIA : return "ImagepipeSurfaceCreateInfoFUCHSIA"; + case StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR : return "PhysicalDeviceShaderTerminateInvocationFeaturesKHR"; + case StructureType::eMetalSurfaceCreateInfoEXT : return "MetalSurfaceCreateInfoEXT"; + case StructureType::ePhysicalDeviceFragmentDensityMapFeaturesEXT : return "PhysicalDeviceFragmentDensityMapFeaturesEXT"; + case StructureType::ePhysicalDeviceFragmentDensityMapPropertiesEXT : return "PhysicalDeviceFragmentDensityMapPropertiesEXT"; + case StructureType::eRenderPassFragmentDensityMapCreateInfoEXT : return "RenderPassFragmentDensityMapCreateInfoEXT"; + case StructureType::ePhysicalDeviceSubgroupSizeControlPropertiesEXT : return "PhysicalDeviceSubgroupSizeControlPropertiesEXT"; + case StructureType::ePipelineShaderStageRequiredSubgroupSizeCreateInfoEXT : return "PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT"; + case StructureType::ePhysicalDeviceSubgroupSizeControlFeaturesEXT : return "PhysicalDeviceSubgroupSizeControlFeaturesEXT"; + case StructureType::eFragmentShadingRateAttachmentInfoKHR : return "FragmentShadingRateAttachmentInfoKHR"; + case StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR : return "PipelineFragmentShadingRateStateCreateInfoKHR"; + case StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR : return "PhysicalDeviceFragmentShadingRatePropertiesKHR"; + case StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR : return "PhysicalDeviceFragmentShadingRateFeaturesKHR"; + case StructureType::ePhysicalDeviceFragmentShadingRateKHR : return "PhysicalDeviceFragmentShadingRateKHR"; + case StructureType::ePhysicalDeviceShaderCoreProperties2AMD : return "PhysicalDeviceShaderCoreProperties2AMD"; + case StructureType::ePhysicalDeviceCoherentMemoryFeaturesAMD : return "PhysicalDeviceCoherentMemoryFeaturesAMD"; + case StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT : return "PhysicalDeviceShaderImageAtomicInt64FeaturesEXT"; + case StructureType::ePhysicalDeviceMemoryBudgetPropertiesEXT : return "PhysicalDeviceMemoryBudgetPropertiesEXT"; + case StructureType::ePhysicalDeviceMemoryPriorityFeaturesEXT : return "PhysicalDeviceMemoryPriorityFeaturesEXT"; + case StructureType::eMemoryPriorityAllocateInfoEXT : return "MemoryPriorityAllocateInfoEXT"; + case StructureType::eSurfaceProtectedCapabilitiesKHR : return "SurfaceProtectedCapabilitiesKHR"; + case StructureType::ePhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV : return "PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV"; + case StructureType::ePhysicalDeviceBufferDeviceAddressFeaturesEXT : return "PhysicalDeviceBufferDeviceAddressFeaturesEXT"; + case StructureType::eBufferDeviceAddressCreateInfoEXT : return "BufferDeviceAddressCreateInfoEXT"; + case StructureType::ePhysicalDeviceToolPropertiesEXT : return "PhysicalDeviceToolPropertiesEXT"; + case StructureType::eValidationFeaturesEXT : return "ValidationFeaturesEXT"; + case StructureType::ePhysicalDeviceCooperativeMatrixFeaturesNV : return "PhysicalDeviceCooperativeMatrixFeaturesNV"; + case StructureType::eCooperativeMatrixPropertiesNV : return "CooperativeMatrixPropertiesNV"; + case StructureType::ePhysicalDeviceCooperativeMatrixPropertiesNV : return "PhysicalDeviceCooperativeMatrixPropertiesNV"; + case StructureType::ePhysicalDeviceCoverageReductionModeFeaturesNV : return 
"PhysicalDeviceCoverageReductionModeFeaturesNV"; + case StructureType::ePipelineCoverageReductionStateCreateInfoNV : return "PipelineCoverageReductionStateCreateInfoNV"; + case StructureType::eFramebufferMixedSamplesCombinationNV : return "FramebufferMixedSamplesCombinationNV"; + case StructureType::ePhysicalDeviceFragmentShaderInterlockFeaturesEXT : return "PhysicalDeviceFragmentShaderInterlockFeaturesEXT"; + case StructureType::ePhysicalDeviceYcbcrImageArraysFeaturesEXT : return "PhysicalDeviceYcbcrImageArraysFeaturesEXT"; + case StructureType::eSurfaceFullScreenExclusiveInfoEXT : return "SurfaceFullScreenExclusiveInfoEXT"; + case StructureType::eSurfaceCapabilitiesFullScreenExclusiveEXT : return "SurfaceCapabilitiesFullScreenExclusiveEXT"; + case StructureType::eSurfaceFullScreenExclusiveWin32InfoEXT : return "SurfaceFullScreenExclusiveWin32InfoEXT"; + case StructureType::eHeadlessSurfaceCreateInfoEXT : return "HeadlessSurfaceCreateInfoEXT"; + case StructureType::ePhysicalDeviceLineRasterizationFeaturesEXT : return "PhysicalDeviceLineRasterizationFeaturesEXT"; + case StructureType::ePipelineRasterizationLineStateCreateInfoEXT : return "PipelineRasterizationLineStateCreateInfoEXT"; + case StructureType::ePhysicalDeviceLineRasterizationPropertiesEXT : return "PhysicalDeviceLineRasterizationPropertiesEXT"; + case StructureType::ePhysicalDeviceShaderAtomicFloatFeaturesEXT : return "PhysicalDeviceShaderAtomicFloatFeaturesEXT"; + case StructureType::ePhysicalDeviceIndexTypeUint8FeaturesEXT : return "PhysicalDeviceIndexTypeUint8FeaturesEXT"; + case StructureType::ePhysicalDeviceExtendedDynamicStateFeaturesEXT : return "PhysicalDeviceExtendedDynamicStateFeaturesEXT"; + case StructureType::ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR : return "PhysicalDevicePipelineExecutablePropertiesFeaturesKHR"; + case StructureType::ePipelineInfoKHR : return "PipelineInfoKHR"; + case StructureType::ePipelineExecutablePropertiesKHR : return "PipelineExecutablePropertiesKHR"; + case StructureType::ePipelineExecutableInfoKHR : return "PipelineExecutableInfoKHR"; + case StructureType::ePipelineExecutableStatisticKHR : return "PipelineExecutableStatisticKHR"; + case StructureType::ePipelineExecutableInternalRepresentationKHR : return "PipelineExecutableInternalRepresentationKHR"; + case StructureType::ePhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT : return "PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT"; + case StructureType::ePhysicalDeviceDeviceGeneratedCommandsPropertiesNV : return "PhysicalDeviceDeviceGeneratedCommandsPropertiesNV"; + case StructureType::eGraphicsShaderGroupCreateInfoNV : return "GraphicsShaderGroupCreateInfoNV"; + case StructureType::eGraphicsPipelineShaderGroupsCreateInfoNV : return "GraphicsPipelineShaderGroupsCreateInfoNV"; + case StructureType::eIndirectCommandsLayoutTokenNV : return "IndirectCommandsLayoutTokenNV"; + case StructureType::eIndirectCommandsLayoutCreateInfoNV : return "IndirectCommandsLayoutCreateInfoNV"; + case StructureType::eGeneratedCommandsInfoNV : return "GeneratedCommandsInfoNV"; + case StructureType::eGeneratedCommandsMemoryRequirementsInfoNV : return "GeneratedCommandsMemoryRequirementsInfoNV"; + case StructureType::ePhysicalDeviceDeviceGeneratedCommandsFeaturesNV : return "PhysicalDeviceDeviceGeneratedCommandsFeaturesNV"; + case StructureType::ePhysicalDeviceTexelBufferAlignmentFeaturesEXT : return "PhysicalDeviceTexelBufferAlignmentFeaturesEXT"; + case StructureType::ePhysicalDeviceTexelBufferAlignmentPropertiesEXT : return 
"PhysicalDeviceTexelBufferAlignmentPropertiesEXT"; + case StructureType::eCommandBufferInheritanceRenderPassTransformInfoQCOM : return "CommandBufferInheritanceRenderPassTransformInfoQCOM"; + case StructureType::eRenderPassTransformBeginInfoQCOM : return "RenderPassTransformBeginInfoQCOM"; + case StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT : return "PhysicalDeviceDeviceMemoryReportFeaturesEXT"; + case StructureType::eDeviceDeviceMemoryReportCreateInfoEXT : return "DeviceDeviceMemoryReportCreateInfoEXT"; + case StructureType::eDeviceMemoryReportCallbackDataEXT : return "DeviceMemoryReportCallbackDataEXT"; + case StructureType::ePhysicalDeviceRobustness2FeaturesEXT : return "PhysicalDeviceRobustness2FeaturesEXT"; + case StructureType::ePhysicalDeviceRobustness2PropertiesEXT : return "PhysicalDeviceRobustness2PropertiesEXT"; + case StructureType::eSamplerCustomBorderColorCreateInfoEXT : return "SamplerCustomBorderColorCreateInfoEXT"; + case StructureType::ePhysicalDeviceCustomBorderColorPropertiesEXT : return "PhysicalDeviceCustomBorderColorPropertiesEXT"; + case StructureType::ePhysicalDeviceCustomBorderColorFeaturesEXT : return "PhysicalDeviceCustomBorderColorFeaturesEXT"; + case StructureType::ePipelineLibraryCreateInfoKHR : return "PipelineLibraryCreateInfoKHR"; + case StructureType::ePhysicalDevicePrivateDataFeaturesEXT : return "PhysicalDevicePrivateDataFeaturesEXT"; + case StructureType::eDevicePrivateDataCreateInfoEXT : return "DevicePrivateDataCreateInfoEXT"; + case StructureType::ePrivateDataSlotCreateInfoEXT : return "PrivateDataSlotCreateInfoEXT"; + case StructureType::ePhysicalDevicePipelineCreationCacheControlFeaturesEXT : return "PhysicalDevicePipelineCreationCacheControlFeaturesEXT"; + case StructureType::ePhysicalDeviceDiagnosticsConfigFeaturesNV : return "PhysicalDeviceDiagnosticsConfigFeaturesNV"; + case StructureType::eDeviceDiagnosticsConfigCreateInfoNV : return "DeviceDiagnosticsConfigCreateInfoNV"; + case StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV : return "PhysicalDeviceFragmentShadingRateEnumsPropertiesNV"; + case StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV : return "PhysicalDeviceFragmentShadingRateEnumsFeaturesNV"; + case StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV : return "PipelineFragmentShadingRateEnumStateCreateInfoNV"; + case StructureType::ePhysicalDeviceFragmentDensityMap2FeaturesEXT : return "PhysicalDeviceFragmentDensityMap2FeaturesEXT"; + case StructureType::ePhysicalDeviceFragmentDensityMap2PropertiesEXT : return "PhysicalDeviceFragmentDensityMap2PropertiesEXT"; + case StructureType::eCopyCommandTransformInfoQCOM : return "CopyCommandTransformInfoQCOM"; + case StructureType::ePhysicalDeviceImageRobustnessFeaturesEXT : return "PhysicalDeviceImageRobustnessFeaturesEXT"; + case StructureType::eCopyBufferInfo2KHR : return "CopyBufferInfo2KHR"; + case StructureType::eCopyImageInfo2KHR : return "CopyImageInfo2KHR"; + case StructureType::eCopyBufferToImageInfo2KHR : return "CopyBufferToImageInfo2KHR"; + case StructureType::eCopyImageToBufferInfo2KHR : return "CopyImageToBufferInfo2KHR"; + case StructureType::eBlitImageInfo2KHR : return "BlitImageInfo2KHR"; + case StructureType::eResolveImageInfo2KHR : return "ResolveImageInfo2KHR"; + case StructureType::eBufferCopy2KHR : return "BufferCopy2KHR"; + case StructureType::eImageCopy2KHR : return "ImageCopy2KHR"; + case StructureType::eImageBlit2KHR : return "ImageBlit2KHR"; + case StructureType::eBufferImageCopy2KHR : return 
"BufferImageCopy2KHR"; + case StructureType::eImageResolve2KHR : return "ImageResolve2KHR"; + case StructureType::ePhysicalDevice4444FormatsFeaturesEXT : return "PhysicalDevice4444FormatsFeaturesEXT"; + case StructureType::eDirectfbSurfaceCreateInfoEXT : return "DirectfbSurfaceCreateInfoEXT"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SubgroupFeatureFlagBits : VkSubgroupFeatureFlags + { + eBasic = VK_SUBGROUP_FEATURE_BASIC_BIT, + eVote = VK_SUBGROUP_FEATURE_VOTE_BIT, + eArithmetic = VK_SUBGROUP_FEATURE_ARITHMETIC_BIT, + eBallot = VK_SUBGROUP_FEATURE_BALLOT_BIT, + eShuffle = VK_SUBGROUP_FEATURE_SHUFFLE_BIT, + eShuffleRelative = VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT, + eClustered = VK_SUBGROUP_FEATURE_CLUSTERED_BIT, + eQuad = VK_SUBGROUP_FEATURE_QUAD_BIT, + ePartitionedNV = VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV + }; + + VULKAN_HPP_INLINE std::string to_string( SubgroupFeatureFlagBits value ) + { + switch ( value ) + { + case SubgroupFeatureFlagBits::eBasic : return "Basic"; + case SubgroupFeatureFlagBits::eVote : return "Vote"; + case SubgroupFeatureFlagBits::eArithmetic : return "Arithmetic"; + case SubgroupFeatureFlagBits::eBallot : return "Ballot"; + case SubgroupFeatureFlagBits::eShuffle : return "Shuffle"; + case SubgroupFeatureFlagBits::eShuffleRelative : return "ShuffleRelative"; + case SubgroupFeatureFlagBits::eClustered : return "Clustered"; + case SubgroupFeatureFlagBits::eQuad : return "Quad"; + case SubgroupFeatureFlagBits::ePartitionedNV : return "PartitionedNV"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SubpassContents + { + eInline = VK_SUBPASS_CONTENTS_INLINE, + eSecondaryCommandBuffers = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS + }; + + VULKAN_HPP_INLINE std::string to_string( SubpassContents value ) + { + switch ( value ) + { + case SubpassContents::eInline : return "Inline"; + case SubpassContents::eSecondaryCommandBuffers : return "SecondaryCommandBuffers"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SubpassDescriptionFlagBits : VkSubpassDescriptionFlags + { + ePerViewAttributesNVX = VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX, + ePerViewPositionXOnlyNVX = VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX, + eFragmentRegionQCOM = VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM, + eShaderResolveQCOM = VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM + }; + + VULKAN_HPP_INLINE std::string to_string( SubpassDescriptionFlagBits value ) + { + switch ( value ) + { + case SubpassDescriptionFlagBits::ePerViewAttributesNVX : return "PerViewAttributesNVX"; + case SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX : return "PerViewPositionXOnlyNVX"; + case SubpassDescriptionFlagBits::eFragmentRegionQCOM : return "FragmentRegionQCOM"; + case SubpassDescriptionFlagBits::eShaderResolveQCOM : return "ShaderResolveQCOM"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class SurfaceCounterFlagBitsEXT : VkSurfaceCounterFlagsEXT + { + eVblank = VK_SURFACE_COUNTER_VBLANK_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( SurfaceCounterFlagBitsEXT value ) + { + switch ( value ) + { + case SurfaceCounterFlagBitsEXT::eVblank : return "Vblank"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class 
SurfaceTransformFlagBitsKHR : VkSurfaceTransformFlagsKHR
+  {
+    eIdentity = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR,
+    eRotate90 = VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR,
+    eRotate180 = VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR,
+    eRotate270 = VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR,
+    eHorizontalMirror = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR,
+    eHorizontalMirrorRotate90 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR,
+    eHorizontalMirrorRotate180 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR,
+    eHorizontalMirrorRotate270 = VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR,
+    eInherit = VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( SurfaceTransformFlagBitsKHR value )
+  {
+    switch ( value )
+    {
+      case SurfaceTransformFlagBitsKHR::eIdentity : return "Identity";
+      case SurfaceTransformFlagBitsKHR::eRotate90 : return "Rotate90";
+      case SurfaceTransformFlagBitsKHR::eRotate180 : return "Rotate180";
+      case SurfaceTransformFlagBitsKHR::eRotate270 : return "Rotate270";
+      case SurfaceTransformFlagBitsKHR::eHorizontalMirror : return "HorizontalMirror";
+      case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90 : return "HorizontalMirrorRotate90";
+      case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180 : return "HorizontalMirrorRotate180";
+      case SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270 : return "HorizontalMirrorRotate270";
+      case SurfaceTransformFlagBitsKHR::eInherit : return "Inherit";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class SwapchainCreateFlagBitsKHR : VkSwapchainCreateFlagsKHR
+  {
+    eSplitInstanceBindRegions = VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR,
+    eProtected = VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR,
+    eMutableFormat = VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( SwapchainCreateFlagBitsKHR value )
+  {
+    switch ( value )
+    {
+      case SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions : return "SplitInstanceBindRegions";
+      case SwapchainCreateFlagBitsKHR::eProtected : return "Protected";
+      case SwapchainCreateFlagBitsKHR::eMutableFormat : return "MutableFormat";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class SystemAllocationScope
+  {
+    eCommand = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+    eObject = VK_SYSTEM_ALLOCATION_SCOPE_OBJECT,
+    eCache = VK_SYSTEM_ALLOCATION_SCOPE_CACHE,
+    eDevice = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
+    eInstance = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE
+  };
+
+  VULKAN_HPP_INLINE std::string to_string( SystemAllocationScope value )
+  {
+    switch ( value )
+    {
+      case SystemAllocationScope::eCommand : return "Command";
+      case SystemAllocationScope::eObject : return "Object";
+      case SystemAllocationScope::eCache : return "Cache";
+      case SystemAllocationScope::eDevice : return "Device";
+      case SystemAllocationScope::eInstance : return "Instance";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  enum class TessellationDomainOrigin
+  {
+    eUpperLeft = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    eLowerLeft = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT
+  };
+  using TessellationDomainOriginKHR = TessellationDomainOrigin;
+
+  VULKAN_HPP_INLINE std::string to_string( TessellationDomainOrigin value )
+  {
+    switch ( value )
+    {
+      case TessellationDomainOrigin::eUpperLeft : return "UpperLeft";
+      case
TessellationDomainOrigin::eLowerLeft : return "LowerLeft"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class TimeDomainEXT + { + eDevice = VK_TIME_DOMAIN_DEVICE_EXT, + eClockMonotonic = VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT, + eClockMonotonicRaw = VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT, + eQueryPerformanceCounter = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( TimeDomainEXT value ) + { + switch ( value ) + { + case TimeDomainEXT::eDevice : return "Device"; + case TimeDomainEXT::eClockMonotonic : return "ClockMonotonic"; + case TimeDomainEXT::eClockMonotonicRaw : return "ClockMonotonicRaw"; + case TimeDomainEXT::eQueryPerformanceCounter : return "QueryPerformanceCounter"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ToolPurposeFlagBitsEXT : VkToolPurposeFlagsEXT + { + eValidation = VK_TOOL_PURPOSE_VALIDATION_BIT_EXT, + eProfiling = VK_TOOL_PURPOSE_PROFILING_BIT_EXT, + eTracing = VK_TOOL_PURPOSE_TRACING_BIT_EXT, + eAdditionalFeatures = VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT, + eModifyingFeatures = VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT, + eDebugReporting = VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT, + eDebugMarkers = VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ToolPurposeFlagBitsEXT value ) + { + switch ( value ) + { + case ToolPurposeFlagBitsEXT::eValidation : return "Validation"; + case ToolPurposeFlagBitsEXT::eProfiling : return "Profiling"; + case ToolPurposeFlagBitsEXT::eTracing : return "Tracing"; + case ToolPurposeFlagBitsEXT::eAdditionalFeatures : return "AdditionalFeatures"; + case ToolPurposeFlagBitsEXT::eModifyingFeatures : return "ModifyingFeatures"; + case ToolPurposeFlagBitsEXT::eDebugReporting : return "DebugReporting"; + case ToolPurposeFlagBitsEXT::eDebugMarkers : return "DebugMarkers"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ValidationCacheHeaderVersionEXT + { + eOne = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ValidationCacheHeaderVersionEXT value ) + { + switch ( value ) + { + case ValidationCacheHeaderVersionEXT::eOne : return "One"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ValidationCheckEXT + { + eAll = VK_VALIDATION_CHECK_ALL_EXT, + eShaders = VK_VALIDATION_CHECK_SHADERS_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ValidationCheckEXT value ) + { + switch ( value ) + { + case ValidationCheckEXT::eAll : return "All"; + case ValidationCheckEXT::eShaders : return "Shaders"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ValidationFeatureDisableEXT + { + eAll = VK_VALIDATION_FEATURE_DISABLE_ALL_EXT, + eShaders = VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT, + eThreadSafety = VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT, + eApiParameters = VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT, + eObjectLifetimes = VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT, + eCoreChecks = VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT, + eUniqueHandles = VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ValidationFeatureDisableEXT value ) + { + switch ( value ) + { + case ValidationFeatureDisableEXT::eAll : 
return "All"; + case ValidationFeatureDisableEXT::eShaders : return "Shaders"; + case ValidationFeatureDisableEXT::eThreadSafety : return "ThreadSafety"; + case ValidationFeatureDisableEXT::eApiParameters : return "ApiParameters"; + case ValidationFeatureDisableEXT::eObjectLifetimes : return "ObjectLifetimes"; + case ValidationFeatureDisableEXT::eCoreChecks : return "CoreChecks"; + case ValidationFeatureDisableEXT::eUniqueHandles : return "UniqueHandles"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ValidationFeatureEnableEXT + { + eGpuAssisted = VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT, + eGpuAssistedReserveBindingSlot = VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT, + eBestPractices = VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT, + eDebugPrintf = VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT, + eSynchronizationValidation = VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT + }; + + VULKAN_HPP_INLINE std::string to_string( ValidationFeatureEnableEXT value ) + { + switch ( value ) + { + case ValidationFeatureEnableEXT::eGpuAssisted : return "GpuAssisted"; + case ValidationFeatureEnableEXT::eGpuAssistedReserveBindingSlot : return "GpuAssistedReserveBindingSlot"; + case ValidationFeatureEnableEXT::eBestPractices : return "BestPractices"; + case ValidationFeatureEnableEXT::eDebugPrintf : return "DebugPrintf"; + case ValidationFeatureEnableEXT::eSynchronizationValidation : return "SynchronizationValidation"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class VendorId + { + eVIV = VK_VENDOR_ID_VIV, + eVSI = VK_VENDOR_ID_VSI, + eKazan = VK_VENDOR_ID_KAZAN, + eCodeplay = VK_VENDOR_ID_CODEPLAY, + eMESA = VK_VENDOR_ID_MESA + }; + + VULKAN_HPP_INLINE std::string to_string( VendorId value ) + { + switch ( value ) + { + case VendorId::eVIV : return "VIV"; + case VendorId::eVSI : return "VSI"; + case VendorId::eKazan : return "Kazan"; + case VendorId::eCodeplay : return "Codeplay"; + case VendorId::eMESA : return "MESA"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class VertexInputRate + { + eVertex = VK_VERTEX_INPUT_RATE_VERTEX, + eInstance = VK_VERTEX_INPUT_RATE_INSTANCE + }; + + VULKAN_HPP_INLINE std::string to_string( VertexInputRate value ) + { + switch ( value ) + { + case VertexInputRate::eVertex : return "Vertex"; + case VertexInputRate::eInstance : return "Instance"; + default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast( value ) ) + " )"; + } + } + + enum class ViewportCoordinateSwizzleNV + { + ePositiveX = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, + eNegativeX = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV, + ePositiveY = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV, + eNegativeY = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV, + ePositiveZ = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV, + eNegativeZ = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV, + ePositiveW = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV, + eNegativeW = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV + }; + + VULKAN_HPP_INLINE std::string to_string( ViewportCoordinateSwizzleNV value ) + { + switch ( value ) + { + case ViewportCoordinateSwizzleNV::ePositiveX : return "PositiveX"; + case ViewportCoordinateSwizzleNV::eNegativeX : return "NegativeX"; + case ViewportCoordinateSwizzleNV::ePositiveY : return "PositiveY"; + case 
ViewportCoordinateSwizzleNV::eNegativeY : return "NegativeY";
+      case ViewportCoordinateSwizzleNV::ePositiveZ : return "PositiveZ";
+      case ViewportCoordinateSwizzleNV::eNegativeZ : return "NegativeZ";
+      case ViewportCoordinateSwizzleNV::ePositiveW : return "PositiveW";
+      case ViewportCoordinateSwizzleNV::eNegativeW : return "NegativeW";
+      default: return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString( static_cast<uint32_t>( value ) ) + " )";
+    }
+  }
+
+  template <typename T>
+  struct IndexTypeValue
+  {};
+
+  template <>
+  struct IndexTypeValue<uint16_t>
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint16;
+  };
+
+  template <>
+  struct CppType<IndexType, IndexType::eUint16>
+  {
+    using Type = uint16_t;
+  };
+
+  template <>
+  struct IndexTypeValue<uint32_t>
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint32;
+  };
+
+  template <>
+  struct CppType<IndexType, IndexType::eUint32>
+  {
+    using Type = uint32_t;
+  };
+
+  template <>
+  struct IndexTypeValue<uint8_t>
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR IndexType value = IndexType::eUint8EXT;
+  };
+
+  template <>
+  struct CppType<IndexType, IndexType::eUint8EXT>
+  {
+    using Type = uint8_t;
+  };
+
+
+  using AccelerationStructureCreateFlagsKHR = Flags<AccelerationStructureCreateFlagBitsKHR>;
+
+  template <> struct FlagTraits<AccelerationStructureCreateFlagBitsKHR>
+  {
+    enum : VkFlags
+    {
+      allFlags = VkFlags(AccelerationStructureCreateFlagBitsKHR::eDeviceAddressCaptureReplay)
+    };
+  };
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator|( AccelerationStructureCreateFlagBitsKHR bit0, AccelerationStructureCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT
+  {
+    return AccelerationStructureCreateFlagsKHR( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator&( AccelerationStructureCreateFlagBitsKHR bit0, AccelerationStructureCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT
+  {
+    return AccelerationStructureCreateFlagsKHR( bit0 ) & bit1;
+  }
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator^( AccelerationStructureCreateFlagBitsKHR bit0, AccelerationStructureCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT
+  {
+    return AccelerationStructureCreateFlagsKHR( bit0 ) ^ bit1;
+  }
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccelerationStructureCreateFlagsKHR operator~( AccelerationStructureCreateFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT
+  {
+    return ~( AccelerationStructureCreateFlagsKHR( bits ) );
+  }
+
+  VULKAN_HPP_INLINE std::string to_string( AccelerationStructureCreateFlagsKHR value )
+  {
+
+    if ( !value ) return "{}";
+    std::string result;
+
+    if ( value & AccelerationStructureCreateFlagBitsKHR::eDeviceAddressCaptureReplay ) result += "DeviceAddressCaptureReplay | ";
+    return "{ " + result.substr(0, result.size() - 3) + " }";
+  }
+
+
+  using AccessFlags = Flags<AccessFlagBits>;
+
+  template <> struct FlagTraits<AccessFlagBits>
+  {
+    enum : VkFlags
+    {
+      allFlags = VkFlags(AccessFlagBits::eIndirectCommandRead) | VkFlags(AccessFlagBits::eIndexRead) | VkFlags(AccessFlagBits::eVertexAttributeRead) | VkFlags(AccessFlagBits::eUniformRead) | VkFlags(AccessFlagBits::eInputAttachmentRead) | VkFlags(AccessFlagBits::eShaderRead) | VkFlags(AccessFlagBits::eShaderWrite) | VkFlags(AccessFlagBits::eColorAttachmentRead) | VkFlags(AccessFlagBits::eColorAttachmentWrite) | VkFlags(AccessFlagBits::eDepthStencilAttachmentRead) | VkFlags(AccessFlagBits::eDepthStencilAttachmentWrite) | VkFlags(AccessFlagBits::eTransferRead) | VkFlags(AccessFlagBits::eTransferWrite) | VkFlags(AccessFlagBits::eHostRead) | VkFlags(AccessFlagBits::eHostWrite) | VkFlags(AccessFlagBits::eMemoryRead) | VkFlags(AccessFlagBits::eMemoryWrite) | 
VkFlags(AccessFlagBits::eTransformFeedbackWriteEXT) | VkFlags(AccessFlagBits::eTransformFeedbackCounterReadEXT) | VkFlags(AccessFlagBits::eTransformFeedbackCounterWriteEXT) | VkFlags(AccessFlagBits::eConditionalRenderingReadEXT) | VkFlags(AccessFlagBits::eColorAttachmentReadNoncoherentEXT) | VkFlags(AccessFlagBits::eAccelerationStructureReadKHR) | VkFlags(AccessFlagBits::eAccelerationStructureWriteKHR) | VkFlags(AccessFlagBits::eShadingRateImageReadNV) | VkFlags(AccessFlagBits::eFragmentDensityMapReadEXT) | VkFlags(AccessFlagBits::eCommandPreprocessReadNV) | VkFlags(AccessFlagBits::eCommandPreprocessWriteNV) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccessFlags operator|( AccessFlagBits bit0, AccessFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return AccessFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccessFlags operator&( AccessFlagBits bit0, AccessFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return AccessFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccessFlags operator^( AccessFlagBits bit0, AccessFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return AccessFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AccessFlags operator~( AccessFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( AccessFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( AccessFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & AccessFlagBits::eIndirectCommandRead ) result += "IndirectCommandRead | "; + if ( value & AccessFlagBits::eIndexRead ) result += "IndexRead | "; + if ( value & AccessFlagBits::eVertexAttributeRead ) result += "VertexAttributeRead | "; + if ( value & AccessFlagBits::eUniformRead ) result += "UniformRead | "; + if ( value & AccessFlagBits::eInputAttachmentRead ) result += "InputAttachmentRead | "; + if ( value & AccessFlagBits::eShaderRead ) result += "ShaderRead | "; + if ( value & AccessFlagBits::eShaderWrite ) result += "ShaderWrite | "; + if ( value & AccessFlagBits::eColorAttachmentRead ) result += "ColorAttachmentRead | "; + if ( value & AccessFlagBits::eColorAttachmentWrite ) result += "ColorAttachmentWrite | "; + if ( value & AccessFlagBits::eDepthStencilAttachmentRead ) result += "DepthStencilAttachmentRead | "; + if ( value & AccessFlagBits::eDepthStencilAttachmentWrite ) result += "DepthStencilAttachmentWrite | "; + if ( value & AccessFlagBits::eTransferRead ) result += "TransferRead | "; + if ( value & AccessFlagBits::eTransferWrite ) result += "TransferWrite | "; + if ( value & AccessFlagBits::eHostRead ) result += "HostRead | "; + if ( value & AccessFlagBits::eHostWrite ) result += "HostWrite | "; + if ( value & AccessFlagBits::eMemoryRead ) result += "MemoryRead | "; + if ( value & AccessFlagBits::eMemoryWrite ) result += "MemoryWrite | "; + if ( value & AccessFlagBits::eTransformFeedbackWriteEXT ) result += "TransformFeedbackWriteEXT | "; + if ( value & AccessFlagBits::eTransformFeedbackCounterReadEXT ) result += "TransformFeedbackCounterReadEXT | "; + if ( value & AccessFlagBits::eTransformFeedbackCounterWriteEXT ) result += "TransformFeedbackCounterWriteEXT | "; + if ( value & AccessFlagBits::eConditionalRenderingReadEXT ) result += "ConditionalRenderingReadEXT | "; + if ( value & AccessFlagBits::eColorAttachmentReadNoncoherentEXT ) result += "ColorAttachmentReadNoncoherentEXT | "; + if ( value & AccessFlagBits::eAccelerationStructureReadKHR ) result += "AccelerationStructureReadKHR | "; + if ( value & AccessFlagBits::eAccelerationStructureWriteKHR ) result += 
"AccelerationStructureWriteKHR | "; + if ( value & AccessFlagBits::eShadingRateImageReadNV ) result += "ShadingRateImageReadNV | "; + if ( value & AccessFlagBits::eFragmentDensityMapReadEXT ) result += "FragmentDensityMapReadEXT | "; + if ( value & AccessFlagBits::eCommandPreprocessReadNV ) result += "CommandPreprocessReadNV | "; + if ( value & AccessFlagBits::eCommandPreprocessWriteNV ) result += "CommandPreprocessWriteNV | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using AcquireProfilingLockFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( AcquireProfilingLockFlagsKHR ) + { + + return "{}"; + } + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + enum class AndroidSurfaceCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( AndroidSurfaceCreateFlagBitsKHR ) + { + return "(void)"; + } + + using AndroidSurfaceCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( AndroidSurfaceCreateFlagsKHR ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + using AttachmentDescriptionFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(AttachmentDescriptionFlagBits::eMayAlias) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AttachmentDescriptionFlags operator|( AttachmentDescriptionFlagBits bit0, AttachmentDescriptionFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return AttachmentDescriptionFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AttachmentDescriptionFlags operator&( AttachmentDescriptionFlagBits bit0, AttachmentDescriptionFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return AttachmentDescriptionFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AttachmentDescriptionFlags operator^( AttachmentDescriptionFlagBits bit0, AttachmentDescriptionFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return AttachmentDescriptionFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AttachmentDescriptionFlags operator~( AttachmentDescriptionFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( AttachmentDescriptionFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( AttachmentDescriptionFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & AttachmentDescriptionFlagBits::eMayAlias ) result += "MayAlias | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using BufferCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(BufferCreateFlagBits::eSparseBinding) | VkFlags(BufferCreateFlagBits::eSparseResidency) | VkFlags(BufferCreateFlagBits::eSparseAliased) | VkFlags(BufferCreateFlagBits::eProtected) | VkFlags(BufferCreateFlagBits::eDeviceAddressCaptureReplay) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferCreateFlags operator|( BufferCreateFlagBits bit0, BufferCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return BufferCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferCreateFlags operator&( BufferCreateFlagBits bit0, BufferCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return BufferCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferCreateFlags operator^( BufferCreateFlagBits bit0, BufferCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return BufferCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferCreateFlags operator~( BufferCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( BufferCreateFlags( bits ) ); + } + + 
VULKAN_HPP_INLINE std::string to_string( BufferCreateFlags value )
+  {
+
+    if ( !value ) return "{}";
+    std::string result;
+
+    if ( value & BufferCreateFlagBits::eSparseBinding ) result += "SparseBinding | ";
+    if ( value & BufferCreateFlagBits::eSparseResidency ) result += "SparseResidency | ";
+    if ( value & BufferCreateFlagBits::eSparseAliased ) result += "SparseAliased | ";
+    if ( value & BufferCreateFlagBits::eProtected ) result += "Protected | ";
+    if ( value & BufferCreateFlagBits::eDeviceAddressCaptureReplay ) result += "DeviceAddressCaptureReplay | ";
+    return "{ " + result.substr(0, result.size() - 3) + " }";
+  }
+
+
+  using BufferUsageFlags = Flags<BufferUsageFlagBits>;
+
+  template <> struct FlagTraits<BufferUsageFlagBits>
+  {
+    enum : VkFlags
+    {
+      allFlags = VkFlags(BufferUsageFlagBits::eTransferSrc) | VkFlags(BufferUsageFlagBits::eTransferDst) | VkFlags(BufferUsageFlagBits::eUniformTexelBuffer) | VkFlags(BufferUsageFlagBits::eStorageTexelBuffer) | VkFlags(BufferUsageFlagBits::eUniformBuffer) | VkFlags(BufferUsageFlagBits::eStorageBuffer) | VkFlags(BufferUsageFlagBits::eIndexBuffer) | VkFlags(BufferUsageFlagBits::eVertexBuffer) | VkFlags(BufferUsageFlagBits::eIndirectBuffer) | VkFlags(BufferUsageFlagBits::eShaderDeviceAddress) | VkFlags(BufferUsageFlagBits::eTransformFeedbackBufferEXT) | VkFlags(BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT) | VkFlags(BufferUsageFlagBits::eConditionalRenderingEXT) | VkFlags(BufferUsageFlagBits::eAccelerationStructureBuildInputReadOnlyKHR) | VkFlags(BufferUsageFlagBits::eAccelerationStructureStorageKHR) | VkFlags(BufferUsageFlagBits::eShaderBindingTableKHR)
+    };
+  };
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferUsageFlags operator|( BufferUsageFlagBits bit0, BufferUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT
+  {
+    return BufferUsageFlags( bit0 ) | bit1;
+  }
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferUsageFlags operator&( BufferUsageFlagBits bit0, BufferUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT
+  {
+    return BufferUsageFlags( bit0 ) & bit1;
+  }
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferUsageFlags operator^( BufferUsageFlagBits bit0, BufferUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT
+  {
+    return BufferUsageFlags( bit0 ) ^ bit1;
+  }
+
+  VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BufferUsageFlags operator~( BufferUsageFlagBits bits ) VULKAN_HPP_NOEXCEPT
+  {
+    return ~( BufferUsageFlags( bits ) );
+  }
+
+  VULKAN_HPP_INLINE std::string to_string( BufferUsageFlags value )
+  {
+
+    if ( !value ) return "{}";
+    std::string result;
+
+    if ( value & BufferUsageFlagBits::eTransferSrc ) result += "TransferSrc | ";
+    if ( value & BufferUsageFlagBits::eTransferDst ) result += "TransferDst | ";
+    if ( value & BufferUsageFlagBits::eUniformTexelBuffer ) result += "UniformTexelBuffer | ";
+    if ( value & BufferUsageFlagBits::eStorageTexelBuffer ) result += "StorageTexelBuffer | ";
+    if ( value & BufferUsageFlagBits::eUniformBuffer ) result += "UniformBuffer | ";
+    if ( value & BufferUsageFlagBits::eStorageBuffer ) result += "StorageBuffer | ";
+    if ( value & BufferUsageFlagBits::eIndexBuffer ) result += "IndexBuffer | ";
+    if ( value & BufferUsageFlagBits::eVertexBuffer ) result += "VertexBuffer | ";
+    if ( value & BufferUsageFlagBits::eIndirectBuffer ) result += "IndirectBuffer | ";
+    if ( value & BufferUsageFlagBits::eShaderDeviceAddress ) result += "ShaderDeviceAddress | ";
+    if ( value & BufferUsageFlagBits::eTransformFeedbackBufferEXT ) result += "TransformFeedbackBufferEXT | ";
+    if ( value & BufferUsageFlagBits::eTransformFeedbackCounterBufferEXT ) result +=
"TransformFeedbackCounterBufferEXT | "; + if ( value & BufferUsageFlagBits::eConditionalRenderingEXT ) result += "ConditionalRenderingEXT | "; + if ( value & BufferUsageFlagBits::eAccelerationStructureBuildInputReadOnlyKHR ) result += "AccelerationStructureBuildInputReadOnlyKHR | "; + if ( value & BufferUsageFlagBits::eAccelerationStructureStorageKHR ) result += "AccelerationStructureStorageKHR | "; + if ( value & BufferUsageFlagBits::eShaderBindingTableKHR ) result += "ShaderBindingTableKHR | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class BufferViewCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( BufferViewCreateFlagBits ) + { + return "(void)"; + } + + using BufferViewCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( BufferViewCreateFlags ) + { + + return "{}"; + } + + + using BuildAccelerationStructureFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(BuildAccelerationStructureFlagBitsKHR::eAllowUpdate) | VkFlags(BuildAccelerationStructureFlagBitsKHR::eAllowCompaction) | VkFlags(BuildAccelerationStructureFlagBitsKHR::ePreferFastTrace) | VkFlags(BuildAccelerationStructureFlagBitsKHR::ePreferFastBuild) | VkFlags(BuildAccelerationStructureFlagBitsKHR::eLowMemory) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BuildAccelerationStructureFlagsKHR operator|( BuildAccelerationStructureFlagBitsKHR bit0, BuildAccelerationStructureFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return BuildAccelerationStructureFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BuildAccelerationStructureFlagsKHR operator&( BuildAccelerationStructureFlagBitsKHR bit0, BuildAccelerationStructureFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return BuildAccelerationStructureFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BuildAccelerationStructureFlagsKHR operator^( BuildAccelerationStructureFlagBitsKHR bit0, BuildAccelerationStructureFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return BuildAccelerationStructureFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR BuildAccelerationStructureFlagsKHR operator~( BuildAccelerationStructureFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( BuildAccelerationStructureFlagsKHR( bits ) ); + } + + using BuildAccelerationStructureFlagsNV = BuildAccelerationStructureFlagsKHR; + + VULKAN_HPP_INLINE std::string to_string( BuildAccelerationStructureFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & BuildAccelerationStructureFlagBitsKHR::eAllowUpdate ) result += "AllowUpdate | "; + if ( value & BuildAccelerationStructureFlagBitsKHR::eAllowCompaction ) result += "AllowCompaction | "; + if ( value & BuildAccelerationStructureFlagBitsKHR::ePreferFastTrace ) result += "PreferFastTrace | "; + if ( value & BuildAccelerationStructureFlagBitsKHR::ePreferFastBuild ) result += "PreferFastBuild | "; + if ( value & BuildAccelerationStructureFlagBitsKHR::eLowMemory ) result += "LowMemory | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ColorComponentFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ColorComponentFlagBits::eR) | VkFlags(ColorComponentFlagBits::eG) | VkFlags(ColorComponentFlagBits::eB) | VkFlags(ColorComponentFlagBits::eA) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ColorComponentFlags operator|( ColorComponentFlagBits bit0, ColorComponentFlagBits bit1 ) 
VULKAN_HPP_NOEXCEPT + { + return ColorComponentFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ColorComponentFlags operator&( ColorComponentFlagBits bit0, ColorComponentFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ColorComponentFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ColorComponentFlags operator^( ColorComponentFlagBits bit0, ColorComponentFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ColorComponentFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ColorComponentFlags operator~( ColorComponentFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ColorComponentFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ColorComponentFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ColorComponentFlagBits::eR ) result += "R | "; + if ( value & ColorComponentFlagBits::eG ) result += "G | "; + if ( value & ColorComponentFlagBits::eB ) result += "B | "; + if ( value & ColorComponentFlagBits::eA ) result += "A | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using CommandBufferResetFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(CommandBufferResetFlagBits::eReleaseResources) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferResetFlags operator|( CommandBufferResetFlagBits bit0, CommandBufferResetFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandBufferResetFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferResetFlags operator&( CommandBufferResetFlagBits bit0, CommandBufferResetFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandBufferResetFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferResetFlags operator^( CommandBufferResetFlagBits bit0, CommandBufferResetFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandBufferResetFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferResetFlags operator~( CommandBufferResetFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( CommandBufferResetFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( CommandBufferResetFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & CommandBufferResetFlagBits::eReleaseResources ) result += "ReleaseResources | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using CommandBufferUsageFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(CommandBufferUsageFlagBits::eOneTimeSubmit) | VkFlags(CommandBufferUsageFlagBits::eRenderPassContinue) | VkFlags(CommandBufferUsageFlagBits::eSimultaneousUse) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferUsageFlags operator|( CommandBufferUsageFlagBits bit0, CommandBufferUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandBufferUsageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferUsageFlags operator&( CommandBufferUsageFlagBits bit0, CommandBufferUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandBufferUsageFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferUsageFlags operator^( CommandBufferUsageFlagBits bit0, CommandBufferUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandBufferUsageFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandBufferUsageFlags operator~( CommandBufferUsageFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( 
CommandBufferUsageFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( CommandBufferUsageFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & CommandBufferUsageFlagBits::eOneTimeSubmit ) result += "OneTimeSubmit | "; + if ( value & CommandBufferUsageFlagBits::eRenderPassContinue ) result += "RenderPassContinue | "; + if ( value & CommandBufferUsageFlagBits::eSimultaneousUse ) result += "SimultaneousUse | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using CommandPoolCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(CommandPoolCreateFlagBits::eTransient) | VkFlags(CommandPoolCreateFlagBits::eResetCommandBuffer) | VkFlags(CommandPoolCreateFlagBits::eProtected) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolCreateFlags operator|( CommandPoolCreateFlagBits bit0, CommandPoolCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandPoolCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolCreateFlags operator&( CommandPoolCreateFlagBits bit0, CommandPoolCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandPoolCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolCreateFlags operator^( CommandPoolCreateFlagBits bit0, CommandPoolCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandPoolCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolCreateFlags operator~( CommandPoolCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( CommandPoolCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( CommandPoolCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & CommandPoolCreateFlagBits::eTransient ) result += "Transient | "; + if ( value & CommandPoolCreateFlagBits::eResetCommandBuffer ) result += "ResetCommandBuffer | "; + if ( value & CommandPoolCreateFlagBits::eProtected ) result += "Protected | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using CommandPoolResetFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(CommandPoolResetFlagBits::eReleaseResources) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolResetFlags operator|( CommandPoolResetFlagBits bit0, CommandPoolResetFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandPoolResetFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolResetFlags operator&( CommandPoolResetFlagBits bit0, CommandPoolResetFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandPoolResetFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolResetFlags operator^( CommandPoolResetFlagBits bit0, CommandPoolResetFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CommandPoolResetFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CommandPoolResetFlags operator~( CommandPoolResetFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( CommandPoolResetFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( CommandPoolResetFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & CommandPoolResetFlagBits::eReleaseResources ) result += "ReleaseResources | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class CommandPoolTrimFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( CommandPoolTrimFlagBits ) + { + return "(void)"; + } + + 
using CommandPoolTrimFlags = Flags; + + using CommandPoolTrimFlagsKHR = CommandPoolTrimFlags; + + VULKAN_HPP_INLINE std::string to_string( CommandPoolTrimFlags ) + { + + return "{}"; + } + + + using CompositeAlphaFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(CompositeAlphaFlagBitsKHR::eOpaque) | VkFlags(CompositeAlphaFlagBitsKHR::ePreMultiplied) | VkFlags(CompositeAlphaFlagBitsKHR::ePostMultiplied) | VkFlags(CompositeAlphaFlagBitsKHR::eInherit) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CompositeAlphaFlagsKHR operator|( CompositeAlphaFlagBitsKHR bit0, CompositeAlphaFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return CompositeAlphaFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CompositeAlphaFlagsKHR operator&( CompositeAlphaFlagBitsKHR bit0, CompositeAlphaFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return CompositeAlphaFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CompositeAlphaFlagsKHR operator^( CompositeAlphaFlagBitsKHR bit0, CompositeAlphaFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return CompositeAlphaFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CompositeAlphaFlagsKHR operator~( CompositeAlphaFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( CompositeAlphaFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( CompositeAlphaFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & CompositeAlphaFlagBitsKHR::eOpaque ) result += "Opaque | "; + if ( value & CompositeAlphaFlagBitsKHR::ePreMultiplied ) result += "PreMultiplied | "; + if ( value & CompositeAlphaFlagBitsKHR::ePostMultiplied ) result += "PostMultiplied | "; + if ( value & CompositeAlphaFlagBitsKHR::eInherit ) result += "Inherit | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ConditionalRenderingFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ConditionalRenderingFlagBitsEXT::eInverted) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ConditionalRenderingFlagsEXT operator|( ConditionalRenderingFlagBitsEXT bit0, ConditionalRenderingFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return ConditionalRenderingFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ConditionalRenderingFlagsEXT operator&( ConditionalRenderingFlagBitsEXT bit0, ConditionalRenderingFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return ConditionalRenderingFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ConditionalRenderingFlagsEXT operator^( ConditionalRenderingFlagBitsEXT bit0, ConditionalRenderingFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return ConditionalRenderingFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ConditionalRenderingFlagsEXT operator~( ConditionalRenderingFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ConditionalRenderingFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ConditionalRenderingFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ConditionalRenderingFlagBitsEXT::eInverted ) result += "Inverted | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using CullModeFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(CullModeFlagBits::eNone) | VkFlags(CullModeFlagBits::eFront) | VkFlags(CullModeFlagBits::eBack) | VkFlags(CullModeFlagBits::eFrontAndBack) + }; + }; + + 
VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CullModeFlags operator|( CullModeFlagBits bit0, CullModeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CullModeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CullModeFlags operator&( CullModeFlagBits bit0, CullModeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CullModeFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CullModeFlags operator^( CullModeFlagBits bit0, CullModeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return CullModeFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR CullModeFlags operator~( CullModeFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( CullModeFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( CullModeFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & CullModeFlagBits::eFront ) result += "Front | "; + if ( value & CullModeFlagBits::eBack ) result += "Back | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using DebugReportFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DebugReportFlagBitsEXT::eInformation) | VkFlags(DebugReportFlagBitsEXT::eWarning) | VkFlags(DebugReportFlagBitsEXT::ePerformanceWarning) | VkFlags(DebugReportFlagBitsEXT::eError) | VkFlags(DebugReportFlagBitsEXT::eDebug) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugReportFlagsEXT operator|( DebugReportFlagBitsEXT bit0, DebugReportFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugReportFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugReportFlagsEXT operator&( DebugReportFlagBitsEXT bit0, DebugReportFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugReportFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugReportFlagsEXT operator^( DebugReportFlagBitsEXT bit0, DebugReportFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugReportFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugReportFlagsEXT operator~( DebugReportFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DebugReportFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DebugReportFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DebugReportFlagBitsEXT::eInformation ) result += "Information | "; + if ( value & DebugReportFlagBitsEXT::eWarning ) result += "Warning | "; + if ( value & DebugReportFlagBitsEXT::ePerformanceWarning ) result += "PerformanceWarning | "; + if ( value & DebugReportFlagBitsEXT::eError ) result += "Error | "; + if ( value & DebugReportFlagBitsEXT::eDebug ) result += "Debug | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using DebugUtilsMessageSeverityFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eVerbose) | VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eInfo) | VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eWarning) | VkFlags(DebugUtilsMessageSeverityFlagBitsEXT::eError) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageSeverityFlagsEXT operator|( DebugUtilsMessageSeverityFlagBitsEXT bit0, DebugUtilsMessageSeverityFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugUtilsMessageSeverityFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageSeverityFlagsEXT operator&( DebugUtilsMessageSeverityFlagBitsEXT bit0, DebugUtilsMessageSeverityFlagBitsEXT bit1 ) 
VULKAN_HPP_NOEXCEPT + { + return DebugUtilsMessageSeverityFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageSeverityFlagsEXT operator^( DebugUtilsMessageSeverityFlagBitsEXT bit0, DebugUtilsMessageSeverityFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugUtilsMessageSeverityFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageSeverityFlagsEXT operator~( DebugUtilsMessageSeverityFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DebugUtilsMessageSeverityFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessageSeverityFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DebugUtilsMessageSeverityFlagBitsEXT::eVerbose ) result += "Verbose | "; + if ( value & DebugUtilsMessageSeverityFlagBitsEXT::eInfo ) result += "Info | "; + if ( value & DebugUtilsMessageSeverityFlagBitsEXT::eWarning ) result += "Warning | "; + if ( value & DebugUtilsMessageSeverityFlagBitsEXT::eError ) result += "Error | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using DebugUtilsMessageTypeFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DebugUtilsMessageTypeFlagBitsEXT::eGeneral) | VkFlags(DebugUtilsMessageTypeFlagBitsEXT::eValidation) | VkFlags(DebugUtilsMessageTypeFlagBitsEXT::ePerformance) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageTypeFlagsEXT operator|( DebugUtilsMessageTypeFlagBitsEXT bit0, DebugUtilsMessageTypeFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugUtilsMessageTypeFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageTypeFlagsEXT operator&( DebugUtilsMessageTypeFlagBitsEXT bit0, DebugUtilsMessageTypeFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugUtilsMessageTypeFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageTypeFlagsEXT operator^( DebugUtilsMessageTypeFlagBitsEXT bit0, DebugUtilsMessageTypeFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return DebugUtilsMessageTypeFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DebugUtilsMessageTypeFlagsEXT operator~( DebugUtilsMessageTypeFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DebugUtilsMessageTypeFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessageTypeFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DebugUtilsMessageTypeFlagBitsEXT::eGeneral ) result += "General | "; + if ( value & DebugUtilsMessageTypeFlagBitsEXT::eValidation ) result += "Validation | "; + if ( value & DebugUtilsMessageTypeFlagBitsEXT::ePerformance ) result += "Performance | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class DebugUtilsMessengerCallbackDataFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessengerCallbackDataFlagBitsEXT ) + { + return "(void)"; + } + + using DebugUtilsMessengerCallbackDataFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessengerCallbackDataFlagsEXT ) + { + + return "{}"; + } + + enum class DebugUtilsMessengerCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessengerCreateFlagBitsEXT ) + { + return "(void)"; + } + + using DebugUtilsMessengerCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( DebugUtilsMessengerCreateFlagsEXT ) + { + + return "{}"; + } + + + using 
DependencyFlags = Flags<DependencyFlagBits>; + + template <> struct FlagTraits<DependencyFlagBits> + { + enum : VkFlags + { + allFlags = VkFlags(DependencyFlagBits::eByRegion) | VkFlags(DependencyFlagBits::eDeviceGroup) | VkFlags(DependencyFlagBits::eViewLocal) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DependencyFlags operator|( DependencyFlagBits bit0, DependencyFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DependencyFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DependencyFlags operator&( DependencyFlagBits bit0, DependencyFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DependencyFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DependencyFlags operator^( DependencyFlagBits bit0, DependencyFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DependencyFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DependencyFlags operator~( DependencyFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DependencyFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DependencyFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DependencyFlagBits::eByRegion ) result += "ByRegion | "; + if ( value & DependencyFlagBits::eDeviceGroup ) result += "DeviceGroup | "; + if ( value & DependencyFlagBits::eViewLocal ) result += "ViewLocal | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using DescriptorBindingFlags = Flags<DescriptorBindingFlagBits>; + + template <> struct FlagTraits<DescriptorBindingFlagBits> + { + enum : VkFlags + { + allFlags = VkFlags(DescriptorBindingFlagBits::eUpdateAfterBind) | VkFlags(DescriptorBindingFlagBits::eUpdateUnusedWhilePending) | VkFlags(DescriptorBindingFlagBits::ePartiallyBound) | VkFlags(DescriptorBindingFlagBits::eVariableDescriptorCount) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorBindingFlags operator|( DescriptorBindingFlagBits bit0, DescriptorBindingFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorBindingFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorBindingFlags operator&( DescriptorBindingFlagBits bit0, DescriptorBindingFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorBindingFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorBindingFlags operator^( DescriptorBindingFlagBits bit0, DescriptorBindingFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorBindingFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorBindingFlags operator~( DescriptorBindingFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DescriptorBindingFlags( bits ) ); + } + + using DescriptorBindingFlagsEXT = DescriptorBindingFlags; + + VULKAN_HPP_INLINE std::string to_string( DescriptorBindingFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DescriptorBindingFlagBits::eUpdateAfterBind ) result += "UpdateAfterBind | "; + if ( value & DescriptorBindingFlagBits::eUpdateUnusedWhilePending ) result += "UpdateUnusedWhilePending | "; + if ( value & DescriptorBindingFlagBits::ePartiallyBound ) result += "PartiallyBound | "; + if ( value & DescriptorBindingFlagBits::eVariableDescriptorCount ) result += "VariableDescriptorCount | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using DescriptorPoolCreateFlags = Flags<DescriptorPoolCreateFlagBits>; + + template <> struct FlagTraits<DescriptorPoolCreateFlagBits> + { + enum : VkFlags + { + allFlags = VkFlags(DescriptorPoolCreateFlagBits::eFreeDescriptorSet) | VkFlags(DescriptorPoolCreateFlagBits::eUpdateAfterBind) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR
DescriptorPoolCreateFlags operator|( DescriptorPoolCreateFlagBits bit0, DescriptorPoolCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorPoolCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorPoolCreateFlags operator&( DescriptorPoolCreateFlagBits bit0, DescriptorPoolCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorPoolCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorPoolCreateFlags operator^( DescriptorPoolCreateFlagBits bit0, DescriptorPoolCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorPoolCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorPoolCreateFlags operator~( DescriptorPoolCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DescriptorPoolCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DescriptorPoolCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DescriptorPoolCreateFlagBits::eFreeDescriptorSet ) result += "FreeDescriptorSet | "; + if ( value & DescriptorPoolCreateFlagBits::eUpdateAfterBind ) result += "UpdateAfterBind | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class DescriptorPoolResetFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DescriptorPoolResetFlagBits ) + { + return "(void)"; + } + + using DescriptorPoolResetFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( DescriptorPoolResetFlags ) + { + + return "{}"; + } + + + using DescriptorSetLayoutCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool) | VkFlags(DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorSetLayoutCreateFlags operator|( DescriptorSetLayoutCreateFlagBits bit0, DescriptorSetLayoutCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorSetLayoutCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorSetLayoutCreateFlags operator&( DescriptorSetLayoutCreateFlagBits bit0, DescriptorSetLayoutCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorSetLayoutCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorSetLayoutCreateFlags operator^( DescriptorSetLayoutCreateFlagBits bit0, DescriptorSetLayoutCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DescriptorSetLayoutCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DescriptorSetLayoutCreateFlags operator~( DescriptorSetLayoutCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DescriptorSetLayoutCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DescriptorSetLayoutCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DescriptorSetLayoutCreateFlagBits::eUpdateAfterBindPool ) result += "UpdateAfterBindPool | "; + if ( value & DescriptorSetLayoutCreateFlagBits::ePushDescriptorKHR ) result += "PushDescriptorKHR | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class DescriptorUpdateTemplateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DescriptorUpdateTemplateCreateFlagBits ) + { + return "(void)"; + } + + using DescriptorUpdateTemplateCreateFlags = Flags; + + using DescriptorUpdateTemplateCreateFlagsKHR = DescriptorUpdateTemplateCreateFlags; + + VULKAN_HPP_INLINE std::string to_string( 
DescriptorUpdateTemplateCreateFlags ) + { + + return "{}"; + } + + + using DeviceCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( DeviceCreateFlags ) + { + + return "{}"; + } + + + using DeviceDiagnosticsConfigFlagsNV = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DeviceDiagnosticsConfigFlagBitsNV::eEnableShaderDebugInfo) | VkFlags(DeviceDiagnosticsConfigFlagBitsNV::eEnableResourceTracking) | VkFlags(DeviceDiagnosticsConfigFlagBitsNV::eEnableAutomaticCheckpoints) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigFlagsNV operator|( DeviceDiagnosticsConfigFlagBitsNV bit0, DeviceDiagnosticsConfigFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceDiagnosticsConfigFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigFlagsNV operator&( DeviceDiagnosticsConfigFlagBitsNV bit0, DeviceDiagnosticsConfigFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceDiagnosticsConfigFlagsNV( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigFlagsNV operator^( DeviceDiagnosticsConfigFlagBitsNV bit0, DeviceDiagnosticsConfigFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceDiagnosticsConfigFlagsNV( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigFlagsNV operator~( DeviceDiagnosticsConfigFlagBitsNV bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DeviceDiagnosticsConfigFlagsNV( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DeviceDiagnosticsConfigFlagsNV value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DeviceDiagnosticsConfigFlagBitsNV::eEnableShaderDebugInfo ) result += "EnableShaderDebugInfo | "; + if ( value & DeviceDiagnosticsConfigFlagBitsNV::eEnableResourceTracking ) result += "EnableResourceTracking | "; + if ( value & DeviceDiagnosticsConfigFlagBitsNV::eEnableAutomaticCheckpoints ) result += "EnableAutomaticCheckpoints | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using DeviceGroupPresentModeFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DeviceGroupPresentModeFlagBitsKHR::eLocal) | VkFlags(DeviceGroupPresentModeFlagBitsKHR::eRemote) | VkFlags(DeviceGroupPresentModeFlagBitsKHR::eSum) | VkFlags(DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceGroupPresentModeFlagsKHR operator|( DeviceGroupPresentModeFlagBitsKHR bit0, DeviceGroupPresentModeFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceGroupPresentModeFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceGroupPresentModeFlagsKHR operator&( DeviceGroupPresentModeFlagBitsKHR bit0, DeviceGroupPresentModeFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceGroupPresentModeFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceGroupPresentModeFlagsKHR operator^( DeviceGroupPresentModeFlagBitsKHR bit0, DeviceGroupPresentModeFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceGroupPresentModeFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceGroupPresentModeFlagsKHR operator~( DeviceGroupPresentModeFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DeviceGroupPresentModeFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DeviceGroupPresentModeFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & 
DeviceGroupPresentModeFlagBitsKHR::eLocal ) result += "Local | "; + if ( value & DeviceGroupPresentModeFlagBitsKHR::eRemote ) result += "Remote | "; + if ( value & DeviceGroupPresentModeFlagBitsKHR::eSum ) result += "Sum | "; + if ( value & DeviceGroupPresentModeFlagBitsKHR::eLocalMultiDevice ) result += "LocalMultiDevice | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class DeviceMemoryReportFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DeviceMemoryReportFlagBitsEXT ) + { + return "(void)"; + } + + using DeviceMemoryReportFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( DeviceMemoryReportFlagsEXT ) + { + + return "{}"; + } + + + using DeviceQueueCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DeviceQueueCreateFlagBits::eProtected) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceQueueCreateFlags operator|( DeviceQueueCreateFlagBits bit0, DeviceQueueCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceQueueCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceQueueCreateFlags operator&( DeviceQueueCreateFlagBits bit0, DeviceQueueCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceQueueCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceQueueCreateFlags operator^( DeviceQueueCreateFlagBits bit0, DeviceQueueCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return DeviceQueueCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DeviceQueueCreateFlags operator~( DeviceQueueCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DeviceQueueCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DeviceQueueCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DeviceQueueCreateFlagBits::eProtected ) result += "Protected | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + enum class DirectFBSurfaceCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DirectFBSurfaceCreateFlagBitsEXT ) + { + return "(void)"; + } + + using DirectFBSurfaceCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( DirectFBSurfaceCreateFlagsEXT ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + enum class DisplayModeCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DisplayModeCreateFlagBitsKHR ) + { + return "(void)"; + } + + using DisplayModeCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( DisplayModeCreateFlagsKHR ) + { + + return "{}"; + } + + + using DisplayPlaneAlphaFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(DisplayPlaneAlphaFlagBitsKHR::eOpaque) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::eGlobal) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::ePerPixel) | VkFlags(DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DisplayPlaneAlphaFlagsKHR operator|( DisplayPlaneAlphaFlagBitsKHR bit0, DisplayPlaneAlphaFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return DisplayPlaneAlphaFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DisplayPlaneAlphaFlagsKHR operator&( DisplayPlaneAlphaFlagBitsKHR bit0, DisplayPlaneAlphaFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return DisplayPlaneAlphaFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR 
DisplayPlaneAlphaFlagsKHR operator^( DisplayPlaneAlphaFlagBitsKHR bit0, DisplayPlaneAlphaFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return DisplayPlaneAlphaFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DisplayPlaneAlphaFlagsKHR operator~( DisplayPlaneAlphaFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( DisplayPlaneAlphaFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( DisplayPlaneAlphaFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & DisplayPlaneAlphaFlagBitsKHR::eOpaque ) result += "Opaque | "; + if ( value & DisplayPlaneAlphaFlagBitsKHR::eGlobal ) result += "Global | "; + if ( value & DisplayPlaneAlphaFlagBitsKHR::ePerPixel ) result += "PerPixel | "; + if ( value & DisplayPlaneAlphaFlagBitsKHR::ePerPixelPremultiplied ) result += "PerPixelPremultiplied | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class DisplaySurfaceCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( DisplaySurfaceCreateFlagBitsKHR ) + { + return "(void)"; + } + + using DisplaySurfaceCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( DisplaySurfaceCreateFlagsKHR ) + { + + return "{}"; + } + + enum class EventCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( EventCreateFlagBits ) + { + return "(void)"; + } + + using EventCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( EventCreateFlags ) + { + + return "{}"; + } + + + using ExternalFenceFeatureFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalFenceFeatureFlagBits::eExportable) | VkFlags(ExternalFenceFeatureFlagBits::eImportable) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceFeatureFlags operator|( ExternalFenceFeatureFlagBits bit0, ExternalFenceFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalFenceFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceFeatureFlags operator&( ExternalFenceFeatureFlagBits bit0, ExternalFenceFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalFenceFeatureFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceFeatureFlags operator^( ExternalFenceFeatureFlagBits bit0, ExternalFenceFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalFenceFeatureFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceFeatureFlags operator~( ExternalFenceFeatureFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalFenceFeatureFlags( bits ) ); + } + + using ExternalFenceFeatureFlagsKHR = ExternalFenceFeatureFlags; + + VULKAN_HPP_INLINE std::string to_string( ExternalFenceFeatureFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalFenceFeatureFlagBits::eExportable ) result += "Exportable | "; + if ( value & ExternalFenceFeatureFlagBits::eImportable ) result += "Importable | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalFenceHandleTypeFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalFenceHandleTypeFlagBits::eOpaqueFd) | VkFlags(ExternalFenceHandleTypeFlagBits::eOpaqueWin32) | VkFlags(ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt) | VkFlags(ExternalFenceHandleTypeFlagBits::eSyncFd) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceHandleTypeFlags operator|( ExternalFenceHandleTypeFlagBits bit0, 
ExternalFenceHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalFenceHandleTypeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceHandleTypeFlags operator&( ExternalFenceHandleTypeFlagBits bit0, ExternalFenceHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalFenceHandleTypeFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceHandleTypeFlags operator^( ExternalFenceHandleTypeFlagBits bit0, ExternalFenceHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalFenceHandleTypeFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalFenceHandleTypeFlags operator~( ExternalFenceHandleTypeFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalFenceHandleTypeFlags( bits ) ); + } + + using ExternalFenceHandleTypeFlagsKHR = ExternalFenceHandleTypeFlags; + + VULKAN_HPP_INLINE std::string to_string( ExternalFenceHandleTypeFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalFenceHandleTypeFlagBits::eOpaqueFd ) result += "OpaqueFd | "; + if ( value & ExternalFenceHandleTypeFlagBits::eOpaqueWin32 ) result += "OpaqueWin32 | "; + if ( value & ExternalFenceHandleTypeFlagBits::eOpaqueWin32Kmt ) result += "OpaqueWin32Kmt | "; + if ( value & ExternalFenceHandleTypeFlagBits::eSyncFd ) result += "SyncFd | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalMemoryFeatureFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalMemoryFeatureFlagBits::eDedicatedOnly) | VkFlags(ExternalMemoryFeatureFlagBits::eExportable) | VkFlags(ExternalMemoryFeatureFlagBits::eImportable) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlags operator|( ExternalMemoryFeatureFlagBits bit0, ExternalMemoryFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlags operator&( ExternalMemoryFeatureFlagBits bit0, ExternalMemoryFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryFeatureFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlags operator^( ExternalMemoryFeatureFlagBits bit0, ExternalMemoryFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryFeatureFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlags operator~( ExternalMemoryFeatureFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalMemoryFeatureFlags( bits ) ); + } + + using ExternalMemoryFeatureFlagsKHR = ExternalMemoryFeatureFlags; + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryFeatureFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalMemoryFeatureFlagBits::eDedicatedOnly ) result += "DedicatedOnly | "; + if ( value & ExternalMemoryFeatureFlagBits::eExportable ) result += "Exportable | "; + if ( value & ExternalMemoryFeatureFlagBits::eImportable ) result += "Importable | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalMemoryFeatureFlagsNV = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly) | VkFlags(ExternalMemoryFeatureFlagBitsNV::eExportable) | VkFlags(ExternalMemoryFeatureFlagBitsNV::eImportable) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlagsNV operator|( 
ExternalMemoryFeatureFlagBitsNV bit0, ExternalMemoryFeatureFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryFeatureFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlagsNV operator&( ExternalMemoryFeatureFlagBitsNV bit0, ExternalMemoryFeatureFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryFeatureFlagsNV( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlagsNV operator^( ExternalMemoryFeatureFlagBitsNV bit0, ExternalMemoryFeatureFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryFeatureFlagsNV( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryFeatureFlagsNV operator~( ExternalMemoryFeatureFlagBitsNV bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalMemoryFeatureFlagsNV( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryFeatureFlagsNV value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalMemoryFeatureFlagBitsNV::eDedicatedOnly ) result += "DedicatedOnly | "; + if ( value & ExternalMemoryFeatureFlagBitsNV::eExportable ) result += "Exportable | "; + if ( value & ExternalMemoryFeatureFlagBitsNV::eImportable ) result += "Importable | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalMemoryHandleTypeFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalMemoryHandleTypeFlagBits::eOpaqueFd) | VkFlags(ExternalMemoryHandleTypeFlagBits::eOpaqueWin32) | VkFlags(ExternalMemoryHandleTypeFlagBits::eOpaqueWin32Kmt) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D11Texture) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D11TextureKmt) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D12Heap) | VkFlags(ExternalMemoryHandleTypeFlagBits::eD3D12Resource) | VkFlags(ExternalMemoryHandleTypeFlagBits::eDmaBufEXT) | VkFlags(ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID) | VkFlags(ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT) | VkFlags(ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlags operator|( ExternalMemoryHandleTypeFlagBits bit0, ExternalMemoryHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryHandleTypeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlags operator&( ExternalMemoryHandleTypeFlagBits bit0, ExternalMemoryHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryHandleTypeFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlags operator^( ExternalMemoryHandleTypeFlagBits bit0, ExternalMemoryHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryHandleTypeFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlags operator~( ExternalMemoryHandleTypeFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalMemoryHandleTypeFlags( bits ) ); + } + + using ExternalMemoryHandleTypeFlagsKHR = ExternalMemoryHandleTypeFlags; + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryHandleTypeFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalMemoryHandleTypeFlagBits::eOpaqueFd ) result += "OpaqueFd | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eOpaqueWin32 ) result += "OpaqueWin32 | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eOpaqueWin32Kmt ) result += 
"OpaqueWin32Kmt | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eD3D11Texture ) result += "D3D11Texture | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eD3D11TextureKmt ) result += "D3D11TextureKmt | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eD3D12Heap ) result += "D3D12Heap | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eD3D12Resource ) result += "D3D12Resource | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eDmaBufEXT ) result += "DmaBufEXT | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eAndroidHardwareBufferANDROID ) result += "AndroidHardwareBufferANDROID | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eHostAllocationEXT ) result += "HostAllocationEXT | "; + if ( value & ExternalMemoryHandleTypeFlagBits::eHostMappedForeignMemoryEXT ) result += "HostMappedForeignMemoryEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalMemoryHandleTypeFlagsNV = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image) | VkFlags(ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlagsNV operator|( ExternalMemoryHandleTypeFlagBitsNV bit0, ExternalMemoryHandleTypeFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryHandleTypeFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlagsNV operator&( ExternalMemoryHandleTypeFlagBitsNV bit0, ExternalMemoryHandleTypeFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryHandleTypeFlagsNV( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlagsNV operator^( ExternalMemoryHandleTypeFlagBitsNV bit0, ExternalMemoryHandleTypeFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalMemoryHandleTypeFlagsNV( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalMemoryHandleTypeFlagsNV operator~( ExternalMemoryHandleTypeFlagBitsNV bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalMemoryHandleTypeFlagsNV( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ExternalMemoryHandleTypeFlagsNV value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32 ) result += "OpaqueWin32 | "; + if ( value & ExternalMemoryHandleTypeFlagBitsNV::eOpaqueWin32Kmt ) result += "OpaqueWin32Kmt | "; + if ( value & ExternalMemoryHandleTypeFlagBitsNV::eD3D11Image ) result += "D3D11Image | "; + if ( value & ExternalMemoryHandleTypeFlagBitsNV::eD3D11ImageKmt ) result += "D3D11ImageKmt | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalSemaphoreFeatureFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalSemaphoreFeatureFlagBits::eExportable) | VkFlags(ExternalSemaphoreFeatureFlagBits::eImportable) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreFeatureFlags operator|( ExternalSemaphoreFeatureFlagBits bit0, ExternalSemaphoreFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalSemaphoreFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreFeatureFlags operator&( ExternalSemaphoreFeatureFlagBits bit0, ExternalSemaphoreFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return 
ExternalSemaphoreFeatureFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreFeatureFlags operator^( ExternalSemaphoreFeatureFlagBits bit0, ExternalSemaphoreFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalSemaphoreFeatureFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreFeatureFlags operator~( ExternalSemaphoreFeatureFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalSemaphoreFeatureFlags( bits ) ); + } + + using ExternalSemaphoreFeatureFlagsKHR = ExternalSemaphoreFeatureFlags; + + VULKAN_HPP_INLINE std::string to_string( ExternalSemaphoreFeatureFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalSemaphoreFeatureFlagBits::eExportable ) result += "Exportable | "; + if ( value & ExternalSemaphoreFeatureFlagBits::eImportable ) result += "Importable | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ExternalSemaphoreHandleTypeFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence) | VkFlags(ExternalSemaphoreHandleTypeFlagBits::eSyncFd) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreHandleTypeFlags operator|( ExternalSemaphoreHandleTypeFlagBits bit0, ExternalSemaphoreHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalSemaphoreHandleTypeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreHandleTypeFlags operator&( ExternalSemaphoreHandleTypeFlagBits bit0, ExternalSemaphoreHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalSemaphoreHandleTypeFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreHandleTypeFlags operator^( ExternalSemaphoreHandleTypeFlagBits bit0, ExternalSemaphoreHandleTypeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ExternalSemaphoreHandleTypeFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ExternalSemaphoreHandleTypeFlags operator~( ExternalSemaphoreHandleTypeFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ExternalSemaphoreHandleTypeFlags( bits ) ); + } + + using ExternalSemaphoreHandleTypeFlagsKHR = ExternalSemaphoreHandleTypeFlags; + + VULKAN_HPP_INLINE std::string to_string( ExternalSemaphoreHandleTypeFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd ) result += "OpaqueFd | "; + if ( value & ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32 ) result += "OpaqueWin32 | "; + if ( value & ExternalSemaphoreHandleTypeFlagBits::eOpaqueWin32Kmt ) result += "OpaqueWin32Kmt | "; + if ( value & ExternalSemaphoreHandleTypeFlagBits::eD3D12Fence ) result += "D3D12Fence | "; + if ( value & ExternalSemaphoreHandleTypeFlagBits::eSyncFd ) result += "SyncFd | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using FenceCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(FenceCreateFlagBits::eSignaled) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceCreateFlags operator|( FenceCreateFlagBits bit0, FenceCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FenceCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceCreateFlags 
operator&( FenceCreateFlagBits bit0, FenceCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FenceCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceCreateFlags operator^( FenceCreateFlagBits bit0, FenceCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FenceCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceCreateFlags operator~( FenceCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( FenceCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( FenceCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & FenceCreateFlagBits::eSignaled ) result += "Signaled | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using FenceImportFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(FenceImportFlagBits::eTemporary) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceImportFlags operator|( FenceImportFlagBits bit0, FenceImportFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FenceImportFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceImportFlags operator&( FenceImportFlagBits bit0, FenceImportFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FenceImportFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceImportFlags operator^( FenceImportFlagBits bit0, FenceImportFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FenceImportFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FenceImportFlags operator~( FenceImportFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( FenceImportFlags( bits ) ); + } + + using FenceImportFlagsKHR = FenceImportFlags; + + VULKAN_HPP_INLINE std::string to_string( FenceImportFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & FenceImportFlagBits::eTemporary ) result += "Temporary | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using FormatFeatureFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(FormatFeatureFlagBits::eSampledImage) | VkFlags(FormatFeatureFlagBits::eStorageImage) | VkFlags(FormatFeatureFlagBits::eStorageImageAtomic) | VkFlags(FormatFeatureFlagBits::eUniformTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBuffer) | VkFlags(FormatFeatureFlagBits::eStorageTexelBufferAtomic) | VkFlags(FormatFeatureFlagBits::eVertexBuffer) | VkFlags(FormatFeatureFlagBits::eColorAttachment) | VkFlags(FormatFeatureFlagBits::eColorAttachmentBlend) | VkFlags(FormatFeatureFlagBits::eDepthStencilAttachment) | VkFlags(FormatFeatureFlagBits::eBlitSrc) | VkFlags(FormatFeatureFlagBits::eBlitDst) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterLinear) | VkFlags(FormatFeatureFlagBits::eTransferSrc) | VkFlags(FormatFeatureFlagBits::eTransferDst) | VkFlags(FormatFeatureFlagBits::eMidpointChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit) | VkFlags(FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable) | VkFlags(FormatFeatureFlagBits::eDisjoint) | VkFlags(FormatFeatureFlagBits::eCositedChromaSamples) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterMinmax) | VkFlags(FormatFeatureFlagBits::eSampledImageFilterCubicIMG) | 
VkFlags(FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR) | VkFlags(FormatFeatureFlagBits::eFragmentDensityMapEXT) | VkFlags(FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FormatFeatureFlags operator|( FormatFeatureFlagBits bit0, FormatFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FormatFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FormatFeatureFlags operator&( FormatFeatureFlagBits bit0, FormatFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FormatFeatureFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FormatFeatureFlags operator^( FormatFeatureFlagBits bit0, FormatFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FormatFeatureFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FormatFeatureFlags operator~( FormatFeatureFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( FormatFeatureFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( FormatFeatureFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & FormatFeatureFlagBits::eSampledImage ) result += "SampledImage | "; + if ( value & FormatFeatureFlagBits::eStorageImage ) result += "StorageImage | "; + if ( value & FormatFeatureFlagBits::eStorageImageAtomic ) result += "StorageImageAtomic | "; + if ( value & FormatFeatureFlagBits::eUniformTexelBuffer ) result += "UniformTexelBuffer | "; + if ( value & FormatFeatureFlagBits::eStorageTexelBuffer ) result += "StorageTexelBuffer | "; + if ( value & FormatFeatureFlagBits::eStorageTexelBufferAtomic ) result += "StorageTexelBufferAtomic | "; + if ( value & FormatFeatureFlagBits::eVertexBuffer ) result += "VertexBuffer | "; + if ( value & FormatFeatureFlagBits::eColorAttachment ) result += "ColorAttachment | "; + if ( value & FormatFeatureFlagBits::eColorAttachmentBlend ) result += "ColorAttachmentBlend | "; + if ( value & FormatFeatureFlagBits::eDepthStencilAttachment ) result += "DepthStencilAttachment | "; + if ( value & FormatFeatureFlagBits::eBlitSrc ) result += "BlitSrc | "; + if ( value & FormatFeatureFlagBits::eBlitDst ) result += "BlitDst | "; + if ( value & FormatFeatureFlagBits::eSampledImageFilterLinear ) result += "SampledImageFilterLinear | "; + if ( value & FormatFeatureFlagBits::eTransferSrc ) result += "TransferSrc | "; + if ( value & FormatFeatureFlagBits::eTransferDst ) result += "TransferDst | "; + if ( value & FormatFeatureFlagBits::eMidpointChromaSamples ) result += "MidpointChromaSamples | "; + if ( value & FormatFeatureFlagBits::eSampledImageYcbcrConversionLinearFilter ) result += "SampledImageYcbcrConversionLinearFilter | "; + if ( value & FormatFeatureFlagBits::eSampledImageYcbcrConversionSeparateReconstructionFilter ) result += "SampledImageYcbcrConversionSeparateReconstructionFilter | "; + if ( value & FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicit ) result += "SampledImageYcbcrConversionChromaReconstructionExplicit | "; + if ( value & FormatFeatureFlagBits::eSampledImageYcbcrConversionChromaReconstructionExplicitForceable ) result += "SampledImageYcbcrConversionChromaReconstructionExplicitForceable | "; + if ( value & FormatFeatureFlagBits::eDisjoint ) result += "Disjoint | "; + if ( value & FormatFeatureFlagBits::eCositedChromaSamples ) result += "CositedChromaSamples | "; + if ( value & FormatFeatureFlagBits::eSampledImageFilterMinmax ) result += "SampledImageFilterMinmax | "; + if ( value & 
FormatFeatureFlagBits::eSampledImageFilterCubicIMG ) result += "SampledImageFilterCubicIMG | "; + if ( value & FormatFeatureFlagBits::eAccelerationStructureVertexBufferKHR ) result += "AccelerationStructureVertexBufferKHR | "; + if ( value & FormatFeatureFlagBits::eFragmentDensityMapEXT ) result += "FragmentDensityMapEXT | "; + if ( value & FormatFeatureFlagBits::eFragmentShadingRateAttachmentKHR ) result += "FragmentShadingRateAttachmentKHR | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using FramebufferCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(FramebufferCreateFlagBits::eImageless) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FramebufferCreateFlags operator|( FramebufferCreateFlagBits bit0, FramebufferCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FramebufferCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FramebufferCreateFlags operator&( FramebufferCreateFlagBits bit0, FramebufferCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FramebufferCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FramebufferCreateFlags operator^( FramebufferCreateFlagBits bit0, FramebufferCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return FramebufferCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR FramebufferCreateFlags operator~( FramebufferCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( FramebufferCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( FramebufferCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & FramebufferCreateFlagBits::eImageless ) result += "Imageless | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using GeometryFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(GeometryFlagBitsKHR::eOpaque) | VkFlags(GeometryFlagBitsKHR::eNoDuplicateAnyHitInvocation) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryFlagsKHR operator|( GeometryFlagBitsKHR bit0, GeometryFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return GeometryFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryFlagsKHR operator&( GeometryFlagBitsKHR bit0, GeometryFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return GeometryFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryFlagsKHR operator^( GeometryFlagBitsKHR bit0, GeometryFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return GeometryFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryFlagsKHR operator~( GeometryFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( GeometryFlagsKHR( bits ) ); + } + + using GeometryFlagsNV = GeometryFlagsKHR; + + VULKAN_HPP_INLINE std::string to_string( GeometryFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & GeometryFlagBitsKHR::eOpaque ) result += "Opaque | "; + if ( value & GeometryFlagBitsKHR::eNoDuplicateAnyHitInvocation ) result += "NoDuplicateAnyHitInvocation | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using GeometryInstanceFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(GeometryInstanceFlagBitsKHR::eTriangleFacingCullDisable) | VkFlags(GeometryInstanceFlagBitsKHR::eTriangleFrontCounterclockwise) | VkFlags(GeometryInstanceFlagBitsKHR::eForceOpaque) | VkFlags(GeometryInstanceFlagBitsKHR::eForceNoOpaque) 
+ }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryInstanceFlagsKHR operator|( GeometryInstanceFlagBitsKHR bit0, GeometryInstanceFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return GeometryInstanceFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryInstanceFlagsKHR operator&( GeometryInstanceFlagBitsKHR bit0, GeometryInstanceFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return GeometryInstanceFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryInstanceFlagsKHR operator^( GeometryInstanceFlagBitsKHR bit0, GeometryInstanceFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return GeometryInstanceFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR GeometryInstanceFlagsKHR operator~( GeometryInstanceFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( GeometryInstanceFlagsKHR( bits ) ); + } + + using GeometryInstanceFlagsNV = GeometryInstanceFlagsKHR; + + VULKAN_HPP_INLINE std::string to_string( GeometryInstanceFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & GeometryInstanceFlagBitsKHR::eTriangleFacingCullDisable ) result += "TriangleFacingCullDisable | "; + if ( value & GeometryInstanceFlagBitsKHR::eTriangleFrontCounterclockwise ) result += "TriangleFrontCounterclockwise | "; + if ( value & GeometryInstanceFlagBitsKHR::eForceOpaque ) result += "ForceOpaque | "; + if ( value & GeometryInstanceFlagBitsKHR::eForceNoOpaque ) result += "ForceNoOpaque | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class HeadlessSurfaceCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( HeadlessSurfaceCreateFlagBitsEXT ) + { + return "(void)"; + } + + using HeadlessSurfaceCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( HeadlessSurfaceCreateFlagsEXT ) + { + + return "{}"; + } + +#ifdef VK_USE_PLATFORM_IOS_MVK + enum class IOSSurfaceCreateFlagBitsMVK : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( IOSSurfaceCreateFlagBitsMVK ) + { + return "(void)"; + } + + using IOSSurfaceCreateFlagsMVK = Flags; + + VULKAN_HPP_INLINE std::string to_string( IOSSurfaceCreateFlagsMVK ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + + + using ImageAspectFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ImageAspectFlagBits::eColor) | VkFlags(ImageAspectFlagBits::eDepth) | VkFlags(ImageAspectFlagBits::eStencil) | VkFlags(ImageAspectFlagBits::eMetadata) | VkFlags(ImageAspectFlagBits::ePlane0) | VkFlags(ImageAspectFlagBits::ePlane1) | VkFlags(ImageAspectFlagBits::ePlane2) | VkFlags(ImageAspectFlagBits::eMemoryPlane0EXT) | VkFlags(ImageAspectFlagBits::eMemoryPlane1EXT) | VkFlags(ImageAspectFlagBits::eMemoryPlane2EXT) | VkFlags(ImageAspectFlagBits::eMemoryPlane3EXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageAspectFlags operator|( ImageAspectFlagBits bit0, ImageAspectFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageAspectFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageAspectFlags operator&( ImageAspectFlagBits bit0, ImageAspectFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageAspectFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageAspectFlags operator^( ImageAspectFlagBits bit0, ImageAspectFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageAspectFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageAspectFlags operator~( ImageAspectFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + 
return ~( ImageAspectFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ImageAspectFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ImageAspectFlagBits::eColor ) result += "Color | "; + if ( value & ImageAspectFlagBits::eDepth ) result += "Depth | "; + if ( value & ImageAspectFlagBits::eStencil ) result += "Stencil | "; + if ( value & ImageAspectFlagBits::eMetadata ) result += "Metadata | "; + if ( value & ImageAspectFlagBits::ePlane0 ) result += "Plane0 | "; + if ( value & ImageAspectFlagBits::ePlane1 ) result += "Plane1 | "; + if ( value & ImageAspectFlagBits::ePlane2 ) result += "Plane2 | "; + if ( value & ImageAspectFlagBits::eMemoryPlane0EXT ) result += "MemoryPlane0EXT | "; + if ( value & ImageAspectFlagBits::eMemoryPlane1EXT ) result += "MemoryPlane1EXT | "; + if ( value & ImageAspectFlagBits::eMemoryPlane2EXT ) result += "MemoryPlane2EXT | "; + if ( value & ImageAspectFlagBits::eMemoryPlane3EXT ) result += "MemoryPlane3EXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ImageCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ImageCreateFlagBits::eSparseBinding) | VkFlags(ImageCreateFlagBits::eSparseResidency) | VkFlags(ImageCreateFlagBits::eSparseAliased) | VkFlags(ImageCreateFlagBits::eMutableFormat) | VkFlags(ImageCreateFlagBits::eCubeCompatible) | VkFlags(ImageCreateFlagBits::eAlias) | VkFlags(ImageCreateFlagBits::eSplitInstanceBindRegions) | VkFlags(ImageCreateFlagBits::e2DArrayCompatible) | VkFlags(ImageCreateFlagBits::eBlockTexelViewCompatible) | VkFlags(ImageCreateFlagBits::eExtendedUsage) | VkFlags(ImageCreateFlagBits::eProtected) | VkFlags(ImageCreateFlagBits::eDisjoint) | VkFlags(ImageCreateFlagBits::eCornerSampledNV) | VkFlags(ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT) | VkFlags(ImageCreateFlagBits::eSubsampledEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageCreateFlags operator|( ImageCreateFlagBits bit0, ImageCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageCreateFlags operator&( ImageCreateFlagBits bit0, ImageCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageCreateFlags operator^( ImageCreateFlagBits bit0, ImageCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageCreateFlags operator~( ImageCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ImageCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ImageCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ImageCreateFlagBits::eSparseBinding ) result += "SparseBinding | "; + if ( value & ImageCreateFlagBits::eSparseResidency ) result += "SparseResidency | "; + if ( value & ImageCreateFlagBits::eSparseAliased ) result += "SparseAliased | "; + if ( value & ImageCreateFlagBits::eMutableFormat ) result += "MutableFormat | "; + if ( value & ImageCreateFlagBits::eCubeCompatible ) result += "CubeCompatible | "; + if ( value & ImageCreateFlagBits::eAlias ) result += "Alias | "; + if ( value & ImageCreateFlagBits::eSplitInstanceBindRegions ) result += "SplitInstanceBindRegions | "; + if ( value & ImageCreateFlagBits::e2DArrayCompatible ) result += "2DArrayCompatible | "; + if ( value & ImageCreateFlagBits::eBlockTexelViewCompatible ) 
result += "BlockTexelViewCompatible | "; + if ( value & ImageCreateFlagBits::eExtendedUsage ) result += "ExtendedUsage | "; + if ( value & ImageCreateFlagBits::eProtected ) result += "Protected | "; + if ( value & ImageCreateFlagBits::eDisjoint ) result += "Disjoint | "; + if ( value & ImageCreateFlagBits::eCornerSampledNV ) result += "CornerSampledNV | "; + if ( value & ImageCreateFlagBits::eSampleLocationsCompatibleDepthEXT ) result += "SampleLocationsCompatibleDepthEXT | "; + if ( value & ImageCreateFlagBits::eSubsampledEXT ) result += "SubsampledEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + +#ifdef VK_USE_PLATFORM_FUCHSIA + enum class ImagePipeSurfaceCreateFlagBitsFUCHSIA : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( ImagePipeSurfaceCreateFlagBitsFUCHSIA ) + { + return "(void)"; + } + + using ImagePipeSurfaceCreateFlagsFUCHSIA = Flags; + + VULKAN_HPP_INLINE std::string to_string( ImagePipeSurfaceCreateFlagsFUCHSIA ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + + using ImageUsageFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ImageUsageFlagBits::eTransferSrc) | VkFlags(ImageUsageFlagBits::eTransferDst) | VkFlags(ImageUsageFlagBits::eSampled) | VkFlags(ImageUsageFlagBits::eStorage) | VkFlags(ImageUsageFlagBits::eColorAttachment) | VkFlags(ImageUsageFlagBits::eDepthStencilAttachment) | VkFlags(ImageUsageFlagBits::eTransientAttachment) | VkFlags(ImageUsageFlagBits::eInputAttachment) | VkFlags(ImageUsageFlagBits::eShadingRateImageNV) | VkFlags(ImageUsageFlagBits::eFragmentDensityMapEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageUsageFlags operator|( ImageUsageFlagBits bit0, ImageUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageUsageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageUsageFlags operator&( ImageUsageFlagBits bit0, ImageUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageUsageFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageUsageFlags operator^( ImageUsageFlagBits bit0, ImageUsageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageUsageFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageUsageFlags operator~( ImageUsageFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ImageUsageFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ImageUsageFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ImageUsageFlagBits::eTransferSrc ) result += "TransferSrc | "; + if ( value & ImageUsageFlagBits::eTransferDst ) result += "TransferDst | "; + if ( value & ImageUsageFlagBits::eSampled ) result += "Sampled | "; + if ( value & ImageUsageFlagBits::eStorage ) result += "Storage | "; + if ( value & ImageUsageFlagBits::eColorAttachment ) result += "ColorAttachment | "; + if ( value & ImageUsageFlagBits::eDepthStencilAttachment ) result += "DepthStencilAttachment | "; + if ( value & ImageUsageFlagBits::eTransientAttachment ) result += "TransientAttachment | "; + if ( value & ImageUsageFlagBits::eInputAttachment ) result += "InputAttachment | "; + if ( value & ImageUsageFlagBits::eShadingRateImageNV ) result += "ShadingRateImageNV | "; + if ( value & ImageUsageFlagBits::eFragmentDensityMapEXT ) result += "FragmentDensityMapEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ImageViewCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = 
VkFlags(ImageViewCreateFlagBits::eFragmentDensityMapDynamicEXT) | VkFlags(ImageViewCreateFlagBits::eFragmentDensityMapDeferredEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageViewCreateFlags operator|( ImageViewCreateFlagBits bit0, ImageViewCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageViewCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageViewCreateFlags operator&( ImageViewCreateFlagBits bit0, ImageViewCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageViewCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageViewCreateFlags operator^( ImageViewCreateFlagBits bit0, ImageViewCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ImageViewCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ImageViewCreateFlags operator~( ImageViewCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ImageViewCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ImageViewCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ImageViewCreateFlagBits::eFragmentDensityMapDynamicEXT ) result += "FragmentDensityMapDynamicEXT | "; + if ( value & ImageViewCreateFlagBits::eFragmentDensityMapDeferredEXT ) result += "FragmentDensityMapDeferredEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using IndirectCommandsLayoutUsageFlagsNV = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(IndirectCommandsLayoutUsageFlagBitsNV::eExplicitPreprocess) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNV::eIndexedSequences) | VkFlags(IndirectCommandsLayoutUsageFlagBitsNV::eUnorderedSequences) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutUsageFlagsNV operator|( IndirectCommandsLayoutUsageFlagBitsNV bit0, IndirectCommandsLayoutUsageFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return IndirectCommandsLayoutUsageFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutUsageFlagsNV operator&( IndirectCommandsLayoutUsageFlagBitsNV bit0, IndirectCommandsLayoutUsageFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return IndirectCommandsLayoutUsageFlagsNV( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutUsageFlagsNV operator^( IndirectCommandsLayoutUsageFlagBitsNV bit0, IndirectCommandsLayoutUsageFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return IndirectCommandsLayoutUsageFlagsNV( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutUsageFlagsNV operator~( IndirectCommandsLayoutUsageFlagBitsNV bits ) VULKAN_HPP_NOEXCEPT + { + return ~( IndirectCommandsLayoutUsageFlagsNV( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( IndirectCommandsLayoutUsageFlagsNV value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & IndirectCommandsLayoutUsageFlagBitsNV::eExplicitPreprocess ) result += "ExplicitPreprocess | "; + if ( value & IndirectCommandsLayoutUsageFlagBitsNV::eIndexedSequences ) result += "IndexedSequences | "; + if ( value & IndirectCommandsLayoutUsageFlagBitsNV::eUnorderedSequences ) result += "UnorderedSequences | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using IndirectStateFlagsNV = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(IndirectStateFlagBitsNV::eFlagFrontface) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectStateFlagsNV operator|( 
IndirectStateFlagBitsNV bit0, IndirectStateFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return IndirectStateFlagsNV( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectStateFlagsNV operator&( IndirectStateFlagBitsNV bit0, IndirectStateFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return IndirectStateFlagsNV( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectStateFlagsNV operator^( IndirectStateFlagBitsNV bit0, IndirectStateFlagBitsNV bit1 ) VULKAN_HPP_NOEXCEPT + { + return IndirectStateFlagsNV( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR IndirectStateFlagsNV operator~( IndirectStateFlagBitsNV bits ) VULKAN_HPP_NOEXCEPT + { + return ~( IndirectStateFlagsNV( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( IndirectStateFlagsNV value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & IndirectStateFlagBitsNV::eFlagFrontface ) result += "FlagFrontface | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using InstanceCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( InstanceCreateFlags ) + { + + return "{}"; + } + +#ifdef VK_USE_PLATFORM_MACOS_MVK + enum class MacOSSurfaceCreateFlagBitsMVK : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( MacOSSurfaceCreateFlagBitsMVK ) + { + return "(void)"; + } + + using MacOSSurfaceCreateFlagsMVK = Flags; + + VULKAN_HPP_INLINE std::string to_string( MacOSSurfaceCreateFlagsMVK ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + + using MemoryAllocateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(MemoryAllocateFlagBits::eDeviceMask) | VkFlags(MemoryAllocateFlagBits::eDeviceAddress) | VkFlags(MemoryAllocateFlagBits::eDeviceAddressCaptureReplay) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryAllocateFlags operator|( MemoryAllocateFlagBits bit0, MemoryAllocateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryAllocateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryAllocateFlags operator&( MemoryAllocateFlagBits bit0, MemoryAllocateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryAllocateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryAllocateFlags operator^( MemoryAllocateFlagBits bit0, MemoryAllocateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryAllocateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryAllocateFlags operator~( MemoryAllocateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( MemoryAllocateFlags( bits ) ); + } + + using MemoryAllocateFlagsKHR = MemoryAllocateFlags; + + VULKAN_HPP_INLINE std::string to_string( MemoryAllocateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & MemoryAllocateFlagBits::eDeviceMask ) result += "DeviceMask | "; + if ( value & MemoryAllocateFlagBits::eDeviceAddress ) result += "DeviceAddress | "; + if ( value & MemoryAllocateFlagBits::eDeviceAddressCaptureReplay ) result += "DeviceAddressCaptureReplay | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using MemoryHeapFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(MemoryHeapFlagBits::eDeviceLocal) | VkFlags(MemoryHeapFlagBits::eMultiInstance) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryHeapFlags operator|( MemoryHeapFlagBits bit0, MemoryHeapFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryHeapFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE 
VULKAN_HPP_CONSTEXPR MemoryHeapFlags operator&( MemoryHeapFlagBits bit0, MemoryHeapFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryHeapFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryHeapFlags operator^( MemoryHeapFlagBits bit0, MemoryHeapFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryHeapFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryHeapFlags operator~( MemoryHeapFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( MemoryHeapFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( MemoryHeapFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & MemoryHeapFlagBits::eDeviceLocal ) result += "DeviceLocal | "; + if ( value & MemoryHeapFlagBits::eMultiInstance ) result += "MultiInstance | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class MemoryMapFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( MemoryMapFlagBits ) + { + return "(void)"; + } + + using MemoryMapFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( MemoryMapFlags ) + { + + return "{}"; + } + + + using MemoryPropertyFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(MemoryPropertyFlagBits::eDeviceLocal) | VkFlags(MemoryPropertyFlagBits::eHostVisible) | VkFlags(MemoryPropertyFlagBits::eHostCoherent) | VkFlags(MemoryPropertyFlagBits::eHostCached) | VkFlags(MemoryPropertyFlagBits::eLazilyAllocated) | VkFlags(MemoryPropertyFlagBits::eProtected) | VkFlags(MemoryPropertyFlagBits::eDeviceCoherentAMD) | VkFlags(MemoryPropertyFlagBits::eDeviceUncachedAMD) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryPropertyFlags operator|( MemoryPropertyFlagBits bit0, MemoryPropertyFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryPropertyFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryPropertyFlags operator&( MemoryPropertyFlagBits bit0, MemoryPropertyFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryPropertyFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryPropertyFlags operator^( MemoryPropertyFlagBits bit0, MemoryPropertyFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return MemoryPropertyFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR MemoryPropertyFlags operator~( MemoryPropertyFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( MemoryPropertyFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( MemoryPropertyFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & MemoryPropertyFlagBits::eDeviceLocal ) result += "DeviceLocal | "; + if ( value & MemoryPropertyFlagBits::eHostVisible ) result += "HostVisible | "; + if ( value & MemoryPropertyFlagBits::eHostCoherent ) result += "HostCoherent | "; + if ( value & MemoryPropertyFlagBits::eHostCached ) result += "HostCached | "; + if ( value & MemoryPropertyFlagBits::eLazilyAllocated ) result += "LazilyAllocated | "; + if ( value & MemoryPropertyFlagBits::eProtected ) result += "Protected | "; + if ( value & MemoryPropertyFlagBits::eDeviceCoherentAMD ) result += "DeviceCoherentAMD | "; + if ( value & MemoryPropertyFlagBits::eDeviceUncachedAMD ) result += "DeviceUncachedAMD | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + +#ifdef VK_USE_PLATFORM_METAL_EXT + enum class MetalSurfaceCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( MetalSurfaceCreateFlagBitsEXT ) + { + return "(void)"; + } + + using 
MetalSurfaceCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( MetalSurfaceCreateFlagsEXT ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + + using PeerMemoryFeatureFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PeerMemoryFeatureFlagBits::eCopySrc) | VkFlags(PeerMemoryFeatureFlagBits::eCopyDst) | VkFlags(PeerMemoryFeatureFlagBits::eGenericSrc) | VkFlags(PeerMemoryFeatureFlagBits::eGenericDst) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PeerMemoryFeatureFlags operator|( PeerMemoryFeatureFlagBits bit0, PeerMemoryFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PeerMemoryFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PeerMemoryFeatureFlags operator&( PeerMemoryFeatureFlagBits bit0, PeerMemoryFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PeerMemoryFeatureFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PeerMemoryFeatureFlags operator^( PeerMemoryFeatureFlagBits bit0, PeerMemoryFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PeerMemoryFeatureFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PeerMemoryFeatureFlags operator~( PeerMemoryFeatureFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( PeerMemoryFeatureFlags( bits ) ); + } + + using PeerMemoryFeatureFlagsKHR = PeerMemoryFeatureFlags; + + VULKAN_HPP_INLINE std::string to_string( PeerMemoryFeatureFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PeerMemoryFeatureFlagBits::eCopySrc ) result += "CopySrc | "; + if ( value & PeerMemoryFeatureFlagBits::eCopyDst ) result += "CopyDst | "; + if ( value & PeerMemoryFeatureFlagBits::eGenericSrc ) result += "GenericSrc | "; + if ( value & PeerMemoryFeatureFlagBits::eGenericDst ) result += "GenericDst | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using PerformanceCounterDescriptionFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PerformanceCounterDescriptionFlagBitsKHR::ePerformanceImpacting) | VkFlags(PerformanceCounterDescriptionFlagBitsKHR::eConcurrentlyImpacted) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PerformanceCounterDescriptionFlagsKHR operator|( PerformanceCounterDescriptionFlagBitsKHR bit0, PerformanceCounterDescriptionFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return PerformanceCounterDescriptionFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PerformanceCounterDescriptionFlagsKHR operator&( PerformanceCounterDescriptionFlagBitsKHR bit0, PerformanceCounterDescriptionFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return PerformanceCounterDescriptionFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PerformanceCounterDescriptionFlagsKHR operator^( PerformanceCounterDescriptionFlagBitsKHR bit0, PerformanceCounterDescriptionFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return PerformanceCounterDescriptionFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PerformanceCounterDescriptionFlagsKHR operator~( PerformanceCounterDescriptionFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( PerformanceCounterDescriptionFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( PerformanceCounterDescriptionFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PerformanceCounterDescriptionFlagBitsKHR::ePerformanceImpacting ) result += "PerformanceImpacting | "; + if ( value & 
PerformanceCounterDescriptionFlagBitsKHR::eConcurrentlyImpacted ) result += "ConcurrentlyImpacted | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using PipelineCacheCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PipelineCacheCreateFlagBits::eExternallySynchronizedEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCacheCreateFlags operator|( PipelineCacheCreateFlagBits bit0, PipelineCacheCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCacheCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCacheCreateFlags operator&( PipelineCacheCreateFlagBits bit0, PipelineCacheCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCacheCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCacheCreateFlags operator^( PipelineCacheCreateFlagBits bit0, PipelineCacheCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCacheCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCacheCreateFlags operator~( PipelineCacheCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( PipelineCacheCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( PipelineCacheCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PipelineCacheCreateFlagBits::eExternallySynchronizedEXT ) result += "ExternallySynchronizedEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class PipelineColorBlendStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineColorBlendStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineColorBlendStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineColorBlendStateCreateFlags ) + { + + return "{}"; + } + + + using PipelineCompilerControlFlagsAMD = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineCompilerControlFlagsAMD ) + { + + return "{}"; + } + + enum class PipelineCoverageModulationStateCreateFlagBitsNV : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineCoverageModulationStateCreateFlagBitsNV ) + { + return "(void)"; + } + + using PipelineCoverageModulationStateCreateFlagsNV = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineCoverageModulationStateCreateFlagsNV ) + { + + return "{}"; + } + + enum class PipelineCoverageReductionStateCreateFlagBitsNV : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineCoverageReductionStateCreateFlagBitsNV ) + { + return "(void)"; + } + + using PipelineCoverageReductionStateCreateFlagsNV = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineCoverageReductionStateCreateFlagsNV ) + { + + return "{}"; + } + + enum class PipelineCoverageToColorStateCreateFlagBitsNV : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineCoverageToColorStateCreateFlagBitsNV ) + { + return "(void)"; + } + + using PipelineCoverageToColorStateCreateFlagsNV = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineCoverageToColorStateCreateFlagsNV ) + { + + return "{}"; + } + + + using PipelineCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PipelineCreateFlagBits::eDisableOptimization) | VkFlags(PipelineCreateFlagBits::eAllowDerivatives) | VkFlags(PipelineCreateFlagBits::eDerivative) | VkFlags(PipelineCreateFlagBits::eViewIndexFromDeviceIndex) | VkFlags(PipelineCreateFlagBits::eDispatchBase) | 
VkFlags(PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingSkipAabbsKHR) | VkFlags(PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR) | VkFlags(PipelineCreateFlagBits::eDeferCompileNV) | VkFlags(PipelineCreateFlagBits::eCaptureStatisticsKHR) | VkFlags(PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR) | VkFlags(PipelineCreateFlagBits::eIndirectBindableNV) | VkFlags(PipelineCreateFlagBits::eLibraryKHR) | VkFlags(PipelineCreateFlagBits::eFailOnPipelineCompileRequiredEXT) | VkFlags(PipelineCreateFlagBits::eEarlyReturnOnFailureEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreateFlags operator|( PipelineCreateFlagBits bit0, PipelineCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreateFlags operator&( PipelineCreateFlagBits bit0, PipelineCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreateFlags operator^( PipelineCreateFlagBits bit0, PipelineCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreateFlags operator~( PipelineCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( PipelineCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( PipelineCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PipelineCreateFlagBits::eDisableOptimization ) result += "DisableOptimization | "; + if ( value & PipelineCreateFlagBits::eAllowDerivatives ) result += "AllowDerivatives | "; + if ( value & PipelineCreateFlagBits::eDerivative ) result += "Derivative | "; + if ( value & PipelineCreateFlagBits::eViewIndexFromDeviceIndex ) result += "ViewIndexFromDeviceIndex | "; + if ( value & PipelineCreateFlagBits::eDispatchBase ) result += "DispatchBase | "; + if ( value & PipelineCreateFlagBits::eRayTracingNoNullAnyHitShadersKHR ) result += "RayTracingNoNullAnyHitShadersKHR | "; + if ( value & PipelineCreateFlagBits::eRayTracingNoNullClosestHitShadersKHR ) result += "RayTracingNoNullClosestHitShadersKHR | "; + if ( value & PipelineCreateFlagBits::eRayTracingNoNullMissShadersKHR ) result += "RayTracingNoNullMissShadersKHR | "; + if ( value & PipelineCreateFlagBits::eRayTracingNoNullIntersectionShadersKHR ) result += "RayTracingNoNullIntersectionShadersKHR | "; + if ( value & PipelineCreateFlagBits::eRayTracingSkipTrianglesKHR ) result += "RayTracingSkipTrianglesKHR | "; + if ( value & PipelineCreateFlagBits::eRayTracingSkipAabbsKHR ) result += "RayTracingSkipAabbsKHR | "; + if ( value & PipelineCreateFlagBits::eRayTracingShaderGroupHandleCaptureReplayKHR ) result += "RayTracingShaderGroupHandleCaptureReplayKHR | "; + if ( value & PipelineCreateFlagBits::eDeferCompileNV ) result += "DeferCompileNV | "; + if ( value & PipelineCreateFlagBits::eCaptureStatisticsKHR ) result += "CaptureStatisticsKHR | "; + if ( value & PipelineCreateFlagBits::eCaptureInternalRepresentationsKHR ) result += "CaptureInternalRepresentationsKHR | "; + if ( value & PipelineCreateFlagBits::eIndirectBindableNV ) result += 
"IndirectBindableNV | "; + if ( value & PipelineCreateFlagBits::eLibraryKHR ) result += "LibraryKHR | "; + if ( value & PipelineCreateFlagBits::eFailOnPipelineCompileRequiredEXT ) result += "FailOnPipelineCompileRequiredEXT | "; + if ( value & PipelineCreateFlagBits::eEarlyReturnOnFailureEXT ) result += "EarlyReturnOnFailureEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using PipelineCreationFeedbackFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PipelineCreationFeedbackFlagBitsEXT::eValid) | VkFlags(PipelineCreationFeedbackFlagBitsEXT::eApplicationPipelineCacheHit) | VkFlags(PipelineCreationFeedbackFlagBitsEXT::eBasePipelineAcceleration) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackFlagsEXT operator|( PipelineCreationFeedbackFlagBitsEXT bit0, PipelineCreationFeedbackFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCreationFeedbackFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackFlagsEXT operator&( PipelineCreationFeedbackFlagBitsEXT bit0, PipelineCreationFeedbackFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCreationFeedbackFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackFlagsEXT operator^( PipelineCreationFeedbackFlagBitsEXT bit0, PipelineCreationFeedbackFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineCreationFeedbackFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackFlagsEXT operator~( PipelineCreationFeedbackFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( PipelineCreationFeedbackFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( PipelineCreationFeedbackFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PipelineCreationFeedbackFlagBitsEXT::eValid ) result += "Valid | "; + if ( value & PipelineCreationFeedbackFlagBitsEXT::eApplicationPipelineCacheHit ) result += "ApplicationPipelineCacheHit | "; + if ( value & PipelineCreationFeedbackFlagBitsEXT::eBasePipelineAcceleration ) result += "BasePipelineAcceleration | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class PipelineDepthStencilStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineDepthStencilStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineDepthStencilStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineDepthStencilStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineDiscardRectangleStateCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineDiscardRectangleStateCreateFlagBitsEXT ) + { + return "(void)"; + } + + using PipelineDiscardRectangleStateCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineDiscardRectangleStateCreateFlagsEXT ) + { + + return "{}"; + } + + enum class PipelineDynamicStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineDynamicStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineDynamicStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineDynamicStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineInputAssemblyStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineInputAssemblyStateCreateFlagBits ) + { + return "(void)"; + } + + using 
PipelineInputAssemblyStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineInputAssemblyStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineLayoutCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineLayoutCreateFlagBits ) + { + return "(void)"; + } + + using PipelineLayoutCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineLayoutCreateFlags ) + { + + return "{}"; + } + + enum class PipelineMultisampleStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineMultisampleStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineMultisampleStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineMultisampleStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineRasterizationConservativeStateCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationConservativeStateCreateFlagBitsEXT ) + { + return "(void)"; + } + + using PipelineRasterizationConservativeStateCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationConservativeStateCreateFlagsEXT ) + { + + return "{}"; + } + + enum class PipelineRasterizationDepthClipStateCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationDepthClipStateCreateFlagBitsEXT ) + { + return "(void)"; + } + + using PipelineRasterizationDepthClipStateCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationDepthClipStateCreateFlagsEXT ) + { + + return "{}"; + } + + enum class PipelineRasterizationStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineRasterizationStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineRasterizationStateStreamCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationStateStreamCreateFlagBitsEXT ) + { + return "(void)"; + } + + using PipelineRasterizationStateStreamCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineRasterizationStateStreamCreateFlagsEXT ) + { + + return "{}"; + } + + + using PipelineShaderStageCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PipelineShaderStageCreateFlagBits::eAllowVaryingSubgroupSizeEXT) | VkFlags(PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineShaderStageCreateFlags operator|( PipelineShaderStageCreateFlagBits bit0, PipelineShaderStageCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineShaderStageCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineShaderStageCreateFlags operator&( PipelineShaderStageCreateFlagBits bit0, PipelineShaderStageCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineShaderStageCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineShaderStageCreateFlags operator^( PipelineShaderStageCreateFlagBits bit0, PipelineShaderStageCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineShaderStageCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineShaderStageCreateFlags operator~( PipelineShaderStageCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + 
return ~( PipelineShaderStageCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( PipelineShaderStageCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PipelineShaderStageCreateFlagBits::eAllowVaryingSubgroupSizeEXT ) result += "AllowVaryingSubgroupSizeEXT | "; + if ( value & PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT ) result += "RequireFullSubgroupsEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using PipelineStageFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(PipelineStageFlagBits::eTopOfPipe) | VkFlags(PipelineStageFlagBits::eDrawIndirect) | VkFlags(PipelineStageFlagBits::eVertexInput) | VkFlags(PipelineStageFlagBits::eVertexShader) | VkFlags(PipelineStageFlagBits::eTessellationControlShader) | VkFlags(PipelineStageFlagBits::eTessellationEvaluationShader) | VkFlags(PipelineStageFlagBits::eGeometryShader) | VkFlags(PipelineStageFlagBits::eFragmentShader) | VkFlags(PipelineStageFlagBits::eEarlyFragmentTests) | VkFlags(PipelineStageFlagBits::eLateFragmentTests) | VkFlags(PipelineStageFlagBits::eColorAttachmentOutput) | VkFlags(PipelineStageFlagBits::eComputeShader) | VkFlags(PipelineStageFlagBits::eTransfer) | VkFlags(PipelineStageFlagBits::eBottomOfPipe) | VkFlags(PipelineStageFlagBits::eHost) | VkFlags(PipelineStageFlagBits::eAllGraphics) | VkFlags(PipelineStageFlagBits::eAllCommands) | VkFlags(PipelineStageFlagBits::eTransformFeedbackEXT) | VkFlags(PipelineStageFlagBits::eConditionalRenderingEXT) | VkFlags(PipelineStageFlagBits::eAccelerationStructureBuildKHR) | VkFlags(PipelineStageFlagBits::eRayTracingShaderKHR) | VkFlags(PipelineStageFlagBits::eShadingRateImageNV) | VkFlags(PipelineStageFlagBits::eTaskShaderNV) | VkFlags(PipelineStageFlagBits::eMeshShaderNV) | VkFlags(PipelineStageFlagBits::eFragmentDensityProcessEXT) | VkFlags(PipelineStageFlagBits::eCommandPreprocessNV) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineStageFlags operator|( PipelineStageFlagBits bit0, PipelineStageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineStageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineStageFlags operator&( PipelineStageFlagBits bit0, PipelineStageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineStageFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineStageFlags operator^( PipelineStageFlagBits bit0, PipelineStageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return PipelineStageFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PipelineStageFlags operator~( PipelineStageFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( PipelineStageFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( PipelineStageFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & PipelineStageFlagBits::eTopOfPipe ) result += "TopOfPipe | "; + if ( value & PipelineStageFlagBits::eDrawIndirect ) result += "DrawIndirect | "; + if ( value & PipelineStageFlagBits::eVertexInput ) result += "VertexInput | "; + if ( value & PipelineStageFlagBits::eVertexShader ) result += "VertexShader | "; + if ( value & PipelineStageFlagBits::eTessellationControlShader ) result += "TessellationControlShader | "; + if ( value & PipelineStageFlagBits::eTessellationEvaluationShader ) result += "TessellationEvaluationShader | "; + if ( value & PipelineStageFlagBits::eGeometryShader ) result += "GeometryShader | "; + if ( value & 
PipelineStageFlagBits::eFragmentShader ) result += "FragmentShader | "; + if ( value & PipelineStageFlagBits::eEarlyFragmentTests ) result += "EarlyFragmentTests | "; + if ( value & PipelineStageFlagBits::eLateFragmentTests ) result += "LateFragmentTests | "; + if ( value & PipelineStageFlagBits::eColorAttachmentOutput ) result += "ColorAttachmentOutput | "; + if ( value & PipelineStageFlagBits::eComputeShader ) result += "ComputeShader | "; + if ( value & PipelineStageFlagBits::eTransfer ) result += "Transfer | "; + if ( value & PipelineStageFlagBits::eBottomOfPipe ) result += "BottomOfPipe | "; + if ( value & PipelineStageFlagBits::eHost ) result += "Host | "; + if ( value & PipelineStageFlagBits::eAllGraphics ) result += "AllGraphics | "; + if ( value & PipelineStageFlagBits::eAllCommands ) result += "AllCommands | "; + if ( value & PipelineStageFlagBits::eTransformFeedbackEXT ) result += "TransformFeedbackEXT | "; + if ( value & PipelineStageFlagBits::eConditionalRenderingEXT ) result += "ConditionalRenderingEXT | "; + if ( value & PipelineStageFlagBits::eAccelerationStructureBuildKHR ) result += "AccelerationStructureBuildKHR | "; + if ( value & PipelineStageFlagBits::eRayTracingShaderKHR ) result += "RayTracingShaderKHR | "; + if ( value & PipelineStageFlagBits::eShadingRateImageNV ) result += "ShadingRateImageNV | "; + if ( value & PipelineStageFlagBits::eTaskShaderNV ) result += "TaskShaderNV | "; + if ( value & PipelineStageFlagBits::eMeshShaderNV ) result += "MeshShaderNV | "; + if ( value & PipelineStageFlagBits::eFragmentDensityProcessEXT ) result += "FragmentDensityProcessEXT | "; + if ( value & PipelineStageFlagBits::eCommandPreprocessNV ) result += "CommandPreprocessNV | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class PipelineTessellationStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineTessellationStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineTessellationStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineTessellationStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineVertexInputStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineVertexInputStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineVertexInputStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineVertexInputStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineViewportStateCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineViewportStateCreateFlagBits ) + { + return "(void)"; + } + + using PipelineViewportStateCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineViewportStateCreateFlags ) + { + + return "{}"; + } + + enum class PipelineViewportSwizzleStateCreateFlagBitsNV : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( PipelineViewportSwizzleStateCreateFlagBitsNV ) + { + return "(void)"; + } + + using PipelineViewportSwizzleStateCreateFlagsNV = Flags; + + VULKAN_HPP_INLINE std::string to_string( PipelineViewportSwizzleStateCreateFlagsNV ) + { + + return "{}"; + } + + + using PrivateDataSlotCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( PrivateDataSlotCreateFlagsEXT ) + { + + return "{}"; + } + + + using QueryControlFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(QueryControlFlagBits::ePrecise) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR 
QueryControlFlags operator|( QueryControlFlagBits bit0, QueryControlFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryControlFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryControlFlags operator&( QueryControlFlagBits bit0, QueryControlFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryControlFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryControlFlags operator^( QueryControlFlagBits bit0, QueryControlFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryControlFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryControlFlags operator~( QueryControlFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( QueryControlFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( QueryControlFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & QueryControlFlagBits::ePrecise ) result += "Precise | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using QueryPipelineStatisticFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(QueryPipelineStatisticFlagBits::eInputAssemblyVertices) | VkFlags(QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eVertexShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eGeometryShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eClippingInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eClippingPrimitives) | VkFlags(QueryPipelineStatisticFlagBits::eFragmentShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches) | VkFlags(QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations) | VkFlags(QueryPipelineStatisticFlagBits::eComputeShaderInvocations) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryPipelineStatisticFlags operator|( QueryPipelineStatisticFlagBits bit0, QueryPipelineStatisticFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryPipelineStatisticFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryPipelineStatisticFlags operator&( QueryPipelineStatisticFlagBits bit0, QueryPipelineStatisticFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryPipelineStatisticFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryPipelineStatisticFlags operator^( QueryPipelineStatisticFlagBits bit0, QueryPipelineStatisticFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryPipelineStatisticFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryPipelineStatisticFlags operator~( QueryPipelineStatisticFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( QueryPipelineStatisticFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( QueryPipelineStatisticFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & QueryPipelineStatisticFlagBits::eInputAssemblyVertices ) result += "InputAssemblyVertices | "; + if ( value & QueryPipelineStatisticFlagBits::eInputAssemblyPrimitives ) result += "InputAssemblyPrimitives | "; + if ( value & QueryPipelineStatisticFlagBits::eVertexShaderInvocations ) result += "VertexShaderInvocations | "; + if ( value & QueryPipelineStatisticFlagBits::eGeometryShaderInvocations ) result += "GeometryShaderInvocations | "; + if ( value & QueryPipelineStatisticFlagBits::eGeometryShaderPrimitives ) result += "GeometryShaderPrimitives | "; + if ( value & 
QueryPipelineStatisticFlagBits::eClippingInvocations ) result += "ClippingInvocations | "; + if ( value & QueryPipelineStatisticFlagBits::eClippingPrimitives ) result += "ClippingPrimitives | "; + if ( value & QueryPipelineStatisticFlagBits::eFragmentShaderInvocations ) result += "FragmentShaderInvocations | "; + if ( value & QueryPipelineStatisticFlagBits::eTessellationControlShaderPatches ) result += "TessellationControlShaderPatches | "; + if ( value & QueryPipelineStatisticFlagBits::eTessellationEvaluationShaderInvocations ) result += "TessellationEvaluationShaderInvocations | "; + if ( value & QueryPipelineStatisticFlagBits::eComputeShaderInvocations ) result += "ComputeShaderInvocations | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using QueryPoolCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( QueryPoolCreateFlags ) + { + + return "{}"; + } + + + using QueryResultFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(QueryResultFlagBits::e64) | VkFlags(QueryResultFlagBits::eWait) | VkFlags(QueryResultFlagBits::eWithAvailability) | VkFlags(QueryResultFlagBits::ePartial) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryResultFlags operator|( QueryResultFlagBits bit0, QueryResultFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryResultFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryResultFlags operator&( QueryResultFlagBits bit0, QueryResultFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryResultFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryResultFlags operator^( QueryResultFlagBits bit0, QueryResultFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueryResultFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueryResultFlags operator~( QueryResultFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( QueryResultFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( QueryResultFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & QueryResultFlagBits::e64 ) result += "64 | "; + if ( value & QueryResultFlagBits::eWait ) result += "Wait | "; + if ( value & QueryResultFlagBits::eWithAvailability ) result += "WithAvailability | "; + if ( value & QueryResultFlagBits::ePartial ) result += "Partial | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using QueueFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(QueueFlagBits::eGraphics) | VkFlags(QueueFlagBits::eCompute) | VkFlags(QueueFlagBits::eTransfer) | VkFlags(QueueFlagBits::eSparseBinding) | VkFlags(QueueFlagBits::eProtected) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueueFlags operator|( QueueFlagBits bit0, QueueFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueueFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueueFlags operator&( QueueFlagBits bit0, QueueFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueueFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueueFlags operator^( QueueFlagBits bit0, QueueFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return QueueFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR QueueFlags operator~( QueueFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( QueueFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( QueueFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & QueueFlagBits::eGraphics ) result += 
"Graphics | "; + if ( value & QueueFlagBits::eCompute ) result += "Compute | "; + if ( value & QueueFlagBits::eTransfer ) result += "Transfer | "; + if ( value & QueueFlagBits::eSparseBinding ) result += "SparseBinding | "; + if ( value & QueueFlagBits::eProtected ) result += "Protected | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using RenderPassCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(RenderPassCreateFlagBits::eTransformQCOM) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR RenderPassCreateFlags operator|( RenderPassCreateFlagBits bit0, RenderPassCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return RenderPassCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR RenderPassCreateFlags operator&( RenderPassCreateFlagBits bit0, RenderPassCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return RenderPassCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR RenderPassCreateFlags operator^( RenderPassCreateFlagBits bit0, RenderPassCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return RenderPassCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR RenderPassCreateFlags operator~( RenderPassCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( RenderPassCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( RenderPassCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & RenderPassCreateFlagBits::eTransformQCOM ) result += "TransformQCOM | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ResolveModeFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ResolveModeFlagBits::eNone) | VkFlags(ResolveModeFlagBits::eSampleZero) | VkFlags(ResolveModeFlagBits::eAverage) | VkFlags(ResolveModeFlagBits::eMin) | VkFlags(ResolveModeFlagBits::eMax) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ResolveModeFlags operator|( ResolveModeFlagBits bit0, ResolveModeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ResolveModeFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ResolveModeFlags operator&( ResolveModeFlagBits bit0, ResolveModeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ResolveModeFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ResolveModeFlags operator^( ResolveModeFlagBits bit0, ResolveModeFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ResolveModeFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ResolveModeFlags operator~( ResolveModeFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ResolveModeFlags( bits ) ); + } + + using ResolveModeFlagsKHR = ResolveModeFlags; + + VULKAN_HPP_INLINE std::string to_string( ResolveModeFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ResolveModeFlagBits::eSampleZero ) result += "SampleZero | "; + if ( value & ResolveModeFlagBits::eAverage ) result += "Average | "; + if ( value & ResolveModeFlagBits::eMin ) result += "Min | "; + if ( value & ResolveModeFlagBits::eMax ) result += "Max | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SampleCountFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SampleCountFlagBits::e1) | VkFlags(SampleCountFlagBits::e2) | VkFlags(SampleCountFlagBits::e4) | VkFlags(SampleCountFlagBits::e8) | VkFlags(SampleCountFlagBits::e16) | VkFlags(SampleCountFlagBits::e32) | 
VkFlags(SampleCountFlagBits::e64) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SampleCountFlags operator|( SampleCountFlagBits bit0, SampleCountFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SampleCountFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SampleCountFlags operator&( SampleCountFlagBits bit0, SampleCountFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SampleCountFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SampleCountFlags operator^( SampleCountFlagBits bit0, SampleCountFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SampleCountFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SampleCountFlags operator~( SampleCountFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SampleCountFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SampleCountFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SampleCountFlagBits::e1 ) result += "1 | "; + if ( value & SampleCountFlagBits::e2 ) result += "2 | "; + if ( value & SampleCountFlagBits::e4 ) result += "4 | "; + if ( value & SampleCountFlagBits::e8 ) result += "8 | "; + if ( value & SampleCountFlagBits::e16 ) result += "16 | "; + if ( value & SampleCountFlagBits::e32 ) result += "32 | "; + if ( value & SampleCountFlagBits::e64 ) result += "64 | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SamplerCreateFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SamplerCreateFlagBits::eSubsampledEXT) | VkFlags(SamplerCreateFlagBits::eSubsampledCoarseReconstructionEXT) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SamplerCreateFlags operator|( SamplerCreateFlagBits bit0, SamplerCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SamplerCreateFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SamplerCreateFlags operator&( SamplerCreateFlagBits bit0, SamplerCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SamplerCreateFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SamplerCreateFlags operator^( SamplerCreateFlagBits bit0, SamplerCreateFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SamplerCreateFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SamplerCreateFlags operator~( SamplerCreateFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SamplerCreateFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SamplerCreateFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SamplerCreateFlagBits::eSubsampledEXT ) result += "SubsampledEXT | "; + if ( value & SamplerCreateFlagBits::eSubsampledCoarseReconstructionEXT ) result += "SubsampledCoarseReconstructionEXT | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class SemaphoreCreateFlagBits : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreCreateFlagBits ) + { + return "(void)"; + } + + using SemaphoreCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreCreateFlags ) + { + + return "{}"; + } + + + using SemaphoreImportFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SemaphoreImportFlagBits::eTemporary) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreImportFlags operator|( SemaphoreImportFlagBits bit0, SemaphoreImportFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SemaphoreImportFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreImportFlags 
operator&( SemaphoreImportFlagBits bit0, SemaphoreImportFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SemaphoreImportFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreImportFlags operator^( SemaphoreImportFlagBits bit0, SemaphoreImportFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SemaphoreImportFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreImportFlags operator~( SemaphoreImportFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SemaphoreImportFlags( bits ) ); + } + + using SemaphoreImportFlagsKHR = SemaphoreImportFlags; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreImportFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SemaphoreImportFlagBits::eTemporary ) result += "Temporary | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SemaphoreWaitFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SemaphoreWaitFlagBits::eAny) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreWaitFlags operator|( SemaphoreWaitFlagBits bit0, SemaphoreWaitFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SemaphoreWaitFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreWaitFlags operator&( SemaphoreWaitFlagBits bit0, SemaphoreWaitFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SemaphoreWaitFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreWaitFlags operator^( SemaphoreWaitFlagBits bit0, SemaphoreWaitFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SemaphoreWaitFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SemaphoreWaitFlags operator~( SemaphoreWaitFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SemaphoreWaitFlags( bits ) ); + } + + using SemaphoreWaitFlagsKHR = SemaphoreWaitFlags; + + VULKAN_HPP_INLINE std::string to_string( SemaphoreWaitFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SemaphoreWaitFlagBits::eAny ) result += "Any | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ShaderCorePropertiesFlagsAMD = Flags; + + VULKAN_HPP_INLINE std::string to_string( ShaderCorePropertiesFlagsAMD ) + { + + return "{}"; + } + + + using ShaderModuleCreateFlags = Flags; + + VULKAN_HPP_INLINE std::string to_string( ShaderModuleCreateFlags ) + { + + return "{}"; + } + + + using ShaderStageFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ShaderStageFlagBits::eVertex) | VkFlags(ShaderStageFlagBits::eTessellationControl) | VkFlags(ShaderStageFlagBits::eTessellationEvaluation) | VkFlags(ShaderStageFlagBits::eGeometry) | VkFlags(ShaderStageFlagBits::eFragment) | VkFlags(ShaderStageFlagBits::eCompute) | VkFlags(ShaderStageFlagBits::eAllGraphics) | VkFlags(ShaderStageFlagBits::eAll) | VkFlags(ShaderStageFlagBits::eRaygenKHR) | VkFlags(ShaderStageFlagBits::eAnyHitKHR) | VkFlags(ShaderStageFlagBits::eClosestHitKHR) | VkFlags(ShaderStageFlagBits::eMissKHR) | VkFlags(ShaderStageFlagBits::eIntersectionKHR) | VkFlags(ShaderStageFlagBits::eCallableKHR) | VkFlags(ShaderStageFlagBits::eTaskNV) | VkFlags(ShaderStageFlagBits::eMeshNV) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ShaderStageFlags operator|( ShaderStageFlagBits bit0, ShaderStageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ShaderStageFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ShaderStageFlags operator&( ShaderStageFlagBits bit0, ShaderStageFlagBits bit1 ) 
VULKAN_HPP_NOEXCEPT + { + return ShaderStageFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ShaderStageFlags operator^( ShaderStageFlagBits bit0, ShaderStageFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return ShaderStageFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ShaderStageFlags operator~( ShaderStageFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ShaderStageFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ShaderStageFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ShaderStageFlagBits::eVertex ) result += "Vertex | "; + if ( value & ShaderStageFlagBits::eTessellationControl ) result += "TessellationControl | "; + if ( value & ShaderStageFlagBits::eTessellationEvaluation ) result += "TessellationEvaluation | "; + if ( value & ShaderStageFlagBits::eGeometry ) result += "Geometry | "; + if ( value & ShaderStageFlagBits::eFragment ) result += "Fragment | "; + if ( value & ShaderStageFlagBits::eCompute ) result += "Compute | "; + if ( value & ShaderStageFlagBits::eRaygenKHR ) result += "RaygenKHR | "; + if ( value & ShaderStageFlagBits::eAnyHitKHR ) result += "AnyHitKHR | "; + if ( value & ShaderStageFlagBits::eClosestHitKHR ) result += "ClosestHitKHR | "; + if ( value & ShaderStageFlagBits::eMissKHR ) result += "MissKHR | "; + if ( value & ShaderStageFlagBits::eIntersectionKHR ) result += "IntersectionKHR | "; + if ( value & ShaderStageFlagBits::eCallableKHR ) result += "CallableKHR | "; + if ( value & ShaderStageFlagBits::eTaskNV ) result += "TaskNV | "; + if ( value & ShaderStageFlagBits::eMeshNV ) result += "MeshNV | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SparseImageFormatFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SparseImageFormatFlagBits::eSingleMiptail) | VkFlags(SparseImageFormatFlagBits::eAlignedMipSize) | VkFlags(SparseImageFormatFlagBits::eNonstandardBlockSize) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseImageFormatFlags operator|( SparseImageFormatFlagBits bit0, SparseImageFormatFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SparseImageFormatFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseImageFormatFlags operator&( SparseImageFormatFlagBits bit0, SparseImageFormatFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SparseImageFormatFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseImageFormatFlags operator^( SparseImageFormatFlagBits bit0, SparseImageFormatFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SparseImageFormatFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseImageFormatFlags operator~( SparseImageFormatFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SparseImageFormatFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SparseImageFormatFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SparseImageFormatFlagBits::eSingleMiptail ) result += "SingleMiptail | "; + if ( value & SparseImageFormatFlagBits::eAlignedMipSize ) result += "AlignedMipSize | "; + if ( value & SparseImageFormatFlagBits::eNonstandardBlockSize ) result += "NonstandardBlockSize | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SparseMemoryBindFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SparseMemoryBindFlagBits::eMetadata) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR 
SparseMemoryBindFlags operator|( SparseMemoryBindFlagBits bit0, SparseMemoryBindFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SparseMemoryBindFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseMemoryBindFlags operator&( SparseMemoryBindFlagBits bit0, SparseMemoryBindFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SparseMemoryBindFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseMemoryBindFlags operator^( SparseMemoryBindFlagBits bit0, SparseMemoryBindFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SparseMemoryBindFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SparseMemoryBindFlags operator~( SparseMemoryBindFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SparseMemoryBindFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SparseMemoryBindFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SparseMemoryBindFlagBits::eMetadata ) result += "Metadata | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using StencilFaceFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(StencilFaceFlagBits::eFront) | VkFlags(StencilFaceFlagBits::eBack) | VkFlags(StencilFaceFlagBits::eFrontAndBack) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR StencilFaceFlags operator|( StencilFaceFlagBits bit0, StencilFaceFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return StencilFaceFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR StencilFaceFlags operator&( StencilFaceFlagBits bit0, StencilFaceFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return StencilFaceFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR StencilFaceFlags operator^( StencilFaceFlagBits bit0, StencilFaceFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return StencilFaceFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR StencilFaceFlags operator~( StencilFaceFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( StencilFaceFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( StencilFaceFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & StencilFaceFlagBits::eFront ) result += "Front | "; + if ( value & StencilFaceFlagBits::eBack ) result += "Back | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + +#ifdef VK_USE_PLATFORM_GGP + enum class StreamDescriptorSurfaceCreateFlagBitsGGP : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( StreamDescriptorSurfaceCreateFlagBitsGGP ) + { + return "(void)"; + } + + using StreamDescriptorSurfaceCreateFlagsGGP = Flags; + + VULKAN_HPP_INLINE std::string to_string( StreamDescriptorSurfaceCreateFlagsGGP ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_GGP*/ + + + using SubgroupFeatureFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SubgroupFeatureFlagBits::eBasic) | VkFlags(SubgroupFeatureFlagBits::eVote) | VkFlags(SubgroupFeatureFlagBits::eArithmetic) | VkFlags(SubgroupFeatureFlagBits::eBallot) | VkFlags(SubgroupFeatureFlagBits::eShuffle) | VkFlags(SubgroupFeatureFlagBits::eShuffleRelative) | VkFlags(SubgroupFeatureFlagBits::eClustered) | VkFlags(SubgroupFeatureFlagBits::eQuad) | VkFlags(SubgroupFeatureFlagBits::ePartitionedNV) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubgroupFeatureFlags operator|( SubgroupFeatureFlagBits bit0, SubgroupFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SubgroupFeatureFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE 
VULKAN_HPP_CONSTEXPR SubgroupFeatureFlags operator&( SubgroupFeatureFlagBits bit0, SubgroupFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SubgroupFeatureFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubgroupFeatureFlags operator^( SubgroupFeatureFlagBits bit0, SubgroupFeatureFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SubgroupFeatureFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubgroupFeatureFlags operator~( SubgroupFeatureFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SubgroupFeatureFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SubgroupFeatureFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SubgroupFeatureFlagBits::eBasic ) result += "Basic | "; + if ( value & SubgroupFeatureFlagBits::eVote ) result += "Vote | "; + if ( value & SubgroupFeatureFlagBits::eArithmetic ) result += "Arithmetic | "; + if ( value & SubgroupFeatureFlagBits::eBallot ) result += "Ballot | "; + if ( value & SubgroupFeatureFlagBits::eShuffle ) result += "Shuffle | "; + if ( value & SubgroupFeatureFlagBits::eShuffleRelative ) result += "ShuffleRelative | "; + if ( value & SubgroupFeatureFlagBits::eClustered ) result += "Clustered | "; + if ( value & SubgroupFeatureFlagBits::eQuad ) result += "Quad | "; + if ( value & SubgroupFeatureFlagBits::ePartitionedNV ) result += "PartitionedNV | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SubpassDescriptionFlags = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SubpassDescriptionFlagBits::ePerViewAttributesNVX) | VkFlags(SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX) | VkFlags(SubpassDescriptionFlagBits::eFragmentRegionQCOM) | VkFlags(SubpassDescriptionFlagBits::eShaderResolveQCOM) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubpassDescriptionFlags operator|( SubpassDescriptionFlagBits bit0, SubpassDescriptionFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SubpassDescriptionFlags( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubpassDescriptionFlags operator&( SubpassDescriptionFlagBits bit0, SubpassDescriptionFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SubpassDescriptionFlags( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubpassDescriptionFlags operator^( SubpassDescriptionFlagBits bit0, SubpassDescriptionFlagBits bit1 ) VULKAN_HPP_NOEXCEPT + { + return SubpassDescriptionFlags( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SubpassDescriptionFlags operator~( SubpassDescriptionFlagBits bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SubpassDescriptionFlags( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SubpassDescriptionFlags value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SubpassDescriptionFlagBits::ePerViewAttributesNVX ) result += "PerViewAttributesNVX | "; + if ( value & SubpassDescriptionFlagBits::ePerViewPositionXOnlyNVX ) result += "PerViewPositionXOnlyNVX | "; + if ( value & SubpassDescriptionFlagBits::eFragmentRegionQCOM ) result += "FragmentRegionQCOM | "; + if ( value & SubpassDescriptionFlagBits::eShaderResolveQCOM ) result += "ShaderResolveQCOM | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SurfaceCounterFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SurfaceCounterFlagBitsEXT::eVblank) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceCounterFlagsEXT operator|( 
SurfaceCounterFlagBitsEXT bit0, SurfaceCounterFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return SurfaceCounterFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceCounterFlagsEXT operator&( SurfaceCounterFlagBitsEXT bit0, SurfaceCounterFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return SurfaceCounterFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceCounterFlagsEXT operator^( SurfaceCounterFlagBitsEXT bit0, SurfaceCounterFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return SurfaceCounterFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceCounterFlagsEXT operator~( SurfaceCounterFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SurfaceCounterFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SurfaceCounterFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SurfaceCounterFlagBitsEXT::eVblank ) result += "Vblank | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using SurfaceTransformFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SurfaceTransformFlagBitsKHR::eIdentity) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate90) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate180) | VkFlags(SurfaceTransformFlagBitsKHR::eRotate270) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirror) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180) | VkFlags(SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270) | VkFlags(SurfaceTransformFlagBitsKHR::eInherit) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceTransformFlagsKHR operator|( SurfaceTransformFlagBitsKHR bit0, SurfaceTransformFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return SurfaceTransformFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceTransformFlagsKHR operator&( SurfaceTransformFlagBitsKHR bit0, SurfaceTransformFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return SurfaceTransformFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceTransformFlagsKHR operator^( SurfaceTransformFlagBitsKHR bit0, SurfaceTransformFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return SurfaceTransformFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SurfaceTransformFlagsKHR operator~( SurfaceTransformFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SurfaceTransformFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SurfaceTransformFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SurfaceTransformFlagBitsKHR::eIdentity ) result += "Identity | "; + if ( value & SurfaceTransformFlagBitsKHR::eRotate90 ) result += "Rotate90 | "; + if ( value & SurfaceTransformFlagBitsKHR::eRotate180 ) result += "Rotate180 | "; + if ( value & SurfaceTransformFlagBitsKHR::eRotate270 ) result += "Rotate270 | "; + if ( value & SurfaceTransformFlagBitsKHR::eHorizontalMirror ) result += "HorizontalMirror | "; + if ( value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate90 ) result += "HorizontalMirrorRotate90 | "; + if ( value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate180 ) result += "HorizontalMirrorRotate180 | "; + if ( value & SurfaceTransformFlagBitsKHR::eHorizontalMirrorRotate270 ) result += "HorizontalMirrorRotate270 | "; + if ( value & SurfaceTransformFlagBitsKHR::eInherit ) result += "Inherit | "; + return "{ " + result.substr(0, 
result.size() - 3) + " }"; + } + + + using SwapchainCreateFlagsKHR = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions) | VkFlags(SwapchainCreateFlagBitsKHR::eProtected) | VkFlags(SwapchainCreateFlagBitsKHR::eMutableFormat) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SwapchainCreateFlagsKHR operator|( SwapchainCreateFlagBitsKHR bit0, SwapchainCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return SwapchainCreateFlagsKHR( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SwapchainCreateFlagsKHR operator&( SwapchainCreateFlagBitsKHR bit0, SwapchainCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return SwapchainCreateFlagsKHR( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SwapchainCreateFlagsKHR operator^( SwapchainCreateFlagBitsKHR bit0, SwapchainCreateFlagBitsKHR bit1 ) VULKAN_HPP_NOEXCEPT + { + return SwapchainCreateFlagsKHR( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR SwapchainCreateFlagsKHR operator~( SwapchainCreateFlagBitsKHR bits ) VULKAN_HPP_NOEXCEPT + { + return ~( SwapchainCreateFlagsKHR( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( SwapchainCreateFlagsKHR value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & SwapchainCreateFlagBitsKHR::eSplitInstanceBindRegions ) result += "SplitInstanceBindRegions | "; + if ( value & SwapchainCreateFlagBitsKHR::eProtected ) result += "Protected | "; + if ( value & SwapchainCreateFlagBitsKHR::eMutableFormat ) result += "MutableFormat | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + + using ToolPurposeFlagsEXT = Flags; + + template <> struct FlagTraits + { + enum : VkFlags + { + allFlags = VkFlags(ToolPurposeFlagBitsEXT::eValidation) | VkFlags(ToolPurposeFlagBitsEXT::eProfiling) | VkFlags(ToolPurposeFlagBitsEXT::eTracing) | VkFlags(ToolPurposeFlagBitsEXT::eAdditionalFeatures) | VkFlags(ToolPurposeFlagBitsEXT::eModifyingFeatures) | VkFlags(ToolPurposeFlagBitsEXT::eDebugReporting) | VkFlags(ToolPurposeFlagBitsEXT::eDebugMarkers) + }; + }; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ToolPurposeFlagsEXT operator|( ToolPurposeFlagBitsEXT bit0, ToolPurposeFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return ToolPurposeFlagsEXT( bit0 ) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ToolPurposeFlagsEXT operator&( ToolPurposeFlagBitsEXT bit0, ToolPurposeFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return ToolPurposeFlagsEXT( bit0 ) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ToolPurposeFlagsEXT operator^( ToolPurposeFlagBitsEXT bit0, ToolPurposeFlagBitsEXT bit1 ) VULKAN_HPP_NOEXCEPT + { + return ToolPurposeFlagsEXT( bit0 ) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR ToolPurposeFlagsEXT operator~( ToolPurposeFlagBitsEXT bits ) VULKAN_HPP_NOEXCEPT + { + return ~( ToolPurposeFlagsEXT( bits ) ); + } + + VULKAN_HPP_INLINE std::string to_string( ToolPurposeFlagsEXT value ) + { + + if ( !value ) return "{}"; + std::string result; + + if ( value & ToolPurposeFlagBitsEXT::eValidation ) result += "Validation | "; + if ( value & ToolPurposeFlagBitsEXT::eProfiling ) result += "Profiling | "; + if ( value & ToolPurposeFlagBitsEXT::eTracing ) result += "Tracing | "; + if ( value & ToolPurposeFlagBitsEXT::eAdditionalFeatures ) result += "AdditionalFeatures | "; + if ( value & ToolPurposeFlagBitsEXT::eModifyingFeatures ) result += "ModifyingFeatures | "; + if ( value & ToolPurposeFlagBitsEXT::eDebugReporting ) result += 
"DebugReporting | "; + if ( value & ToolPurposeFlagBitsEXT::eDebugMarkers ) result += "DebugMarkers | "; + return "{ " + result.substr(0, result.size() - 3) + " }"; + } + + enum class ValidationCacheCreateFlagBitsEXT : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( ValidationCacheCreateFlagBitsEXT ) + { + return "(void)"; + } + + using ValidationCacheCreateFlagsEXT = Flags; + + VULKAN_HPP_INLINE std::string to_string( ValidationCacheCreateFlagsEXT ) + { + + return "{}"; + } + +#ifdef VK_USE_PLATFORM_VI_NN + enum class ViSurfaceCreateFlagBitsNN : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( ViSurfaceCreateFlagBitsNN ) + { + return "(void)"; + } + + using ViSurfaceCreateFlagsNN = Flags; + + VULKAN_HPP_INLINE std::string to_string( ViSurfaceCreateFlagsNN ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + enum class WaylandSurfaceCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( WaylandSurfaceCreateFlagBitsKHR ) + { + return "(void)"; + } + + using WaylandSurfaceCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( WaylandSurfaceCreateFlagsKHR ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + enum class Win32SurfaceCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( Win32SurfaceCreateFlagBitsKHR ) + { + return "(void)"; + } + + using Win32SurfaceCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( Win32SurfaceCreateFlagsKHR ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_XCB_KHR + enum class XcbSurfaceCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( XcbSurfaceCreateFlagBitsKHR ) + { + return "(void)"; + } + + using XcbSurfaceCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( XcbSurfaceCreateFlagsKHR ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + enum class XlibSurfaceCreateFlagBitsKHR : VkFlags + {}; + + VULKAN_HPP_INLINE std::string to_string( XlibSurfaceCreateFlagBitsKHR ) + { + return "(void)"; + } + + using XlibSurfaceCreateFlagsKHR = Flags; + + VULKAN_HPP_INLINE std::string to_string( XlibSurfaceCreateFlagsKHR ) + { + + return "{}"; + } +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ +} // namespace VULKAN_HPP_NAMESPACE + +#ifndef VULKAN_HPP_NO_EXCEPTIONS +namespace std +{ + template <> + struct is_error_code_enum : public true_type + {}; +} +#endif + +namespace VULKAN_HPP_NAMESPACE +{ +#ifndef VULKAN_HPP_NO_EXCEPTIONS + class ErrorCategoryImpl : public std::error_category + { + public: + virtual const char* name() const VULKAN_HPP_NOEXCEPT override { return VULKAN_HPP_NAMESPACE_STRING"::Result"; } + virtual std::string message(int ev) const override { return to_string(static_cast(ev)); } + }; + + class Error + { + public: + Error() VULKAN_HPP_NOEXCEPT = default; + Error(const Error&) VULKAN_HPP_NOEXCEPT = default; + virtual ~Error() VULKAN_HPP_NOEXCEPT = default; + + virtual const char* what() const VULKAN_HPP_NOEXCEPT = 0; + }; + + class LogicError : public Error, public std::logic_error + { + public: + explicit LogicError( const std::string& what ) + : Error(), std::logic_error(what) {} + explicit LogicError( char const * what ) + : Error(), std::logic_error(what) {} + + virtual const char* what() const VULKAN_HPP_NOEXCEPT { return std::logic_error::what(); } + }; + + class SystemError : public Error, public std::system_error + { + public: + 
+    SystemError( std::error_code ec )
+      : Error(), std::system_error(ec) {}
+    SystemError( std::error_code ec, std::string const& what )
+      : Error(), std::system_error(ec, what) {}
+    SystemError( std::error_code ec, char const * what )
+      : Error(), std::system_error(ec, what) {}
+    SystemError( int ev, std::error_category const& ecat )
+      : Error(), std::system_error(ev, ecat) {}
+    SystemError( int ev, std::error_category const& ecat, std::string const& what)
+      : Error(), std::system_error(ev, ecat, what) {}
+    SystemError( int ev, std::error_category const& ecat, char const * what)
+      : Error(), std::system_error(ev, ecat, what) {}
+
+    virtual const char* what() const VULKAN_HPP_NOEXCEPT { return std::system_error::what(); }
+  };
+
+  VULKAN_HPP_INLINE const std::error_category& errorCategory() VULKAN_HPP_NOEXCEPT
+  {
+    static ErrorCategoryImpl instance;
+    return instance;
+  }
+
+  VULKAN_HPP_INLINE std::error_code make_error_code(Result e) VULKAN_HPP_NOEXCEPT
+  {
+    return std::error_code(static_cast<int>(e), errorCategory());
+  }
+
+  VULKAN_HPP_INLINE std::error_condition make_error_condition(Result e) VULKAN_HPP_NOEXCEPT
+  {
+    return std::error_condition(static_cast<int>(e), errorCategory());
+  }
+
+  class OutOfHostMemoryError : public SystemError
+  {
+  public:
+    OutOfHostMemoryError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorOutOfHostMemory ), message ) {}
+    OutOfHostMemoryError( char const * message )
+      : SystemError( make_error_code( Result::eErrorOutOfHostMemory ), message ) {}
+  };
+
+  class OutOfDeviceMemoryError : public SystemError
+  {
+  public:
+    OutOfDeviceMemoryError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorOutOfDeviceMemory ), message ) {}
+    OutOfDeviceMemoryError( char const * message )
+      : SystemError( make_error_code( Result::eErrorOutOfDeviceMemory ), message ) {}
+  };
+
+  class InitializationFailedError : public SystemError
+  {
+  public:
+    InitializationFailedError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorInitializationFailed ), message ) {}
+    InitializationFailedError( char const * message )
+      : SystemError( make_error_code( Result::eErrorInitializationFailed ), message ) {}
+  };
+
+  class DeviceLostError : public SystemError
+  {
+  public:
+    DeviceLostError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorDeviceLost ), message ) {}
+    DeviceLostError( char const * message )
+      : SystemError( make_error_code( Result::eErrorDeviceLost ), message ) {}
+  };
+
+  class MemoryMapFailedError : public SystemError
+  {
+  public:
+    MemoryMapFailedError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorMemoryMapFailed ), message ) {}
+    MemoryMapFailedError( char const * message )
+      : SystemError( make_error_code( Result::eErrorMemoryMapFailed ), message ) {}
+  };
+
+  class LayerNotPresentError : public SystemError
+  {
+  public:
+    LayerNotPresentError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorLayerNotPresent ), message ) {}
+    LayerNotPresentError( char const * message )
+      : SystemError( make_error_code( Result::eErrorLayerNotPresent ), message ) {}
+  };
+
+  class ExtensionNotPresentError : public SystemError
+  {
+  public:
+    ExtensionNotPresentError( std::string const& message )
+      : SystemError( make_error_code( Result::eErrorExtensionNotPresent ), message ) {}
+    ExtensionNotPresentError( char const * message )
+      : SystemError( make_error_code( Result::eErrorExtensionNotPresent ), message ) {}
+  };
+
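Illustrative usage sketch (not part of the patch hunk; it assumes the vk::createInstanceUnique helper declared later in this same header and the default VULKAN_HPP_NAMESPACE of "vk"): each VK_ERROR_* result above maps to an exception type derived from vk::SystemError, so a caller can catch one specific failure or the common base.

// Sketch only: handling Vulkan-Hpp's exception hierarchy when exceptions are enabled.
#include <cstdlib>
#include <iostream>
#include <vulkan/vulkan.hpp>

vk::UniqueInstance createInstanceOrExit()
{
  try
  {
    // Throws a Result-specific subclass of vk::SystemError on any VK_ERROR_* code.
    return vk::createInstanceUnique( vk::InstanceCreateInfo{} );
  }
  catch ( vk::LayerNotPresentError const & e )   // one specific error from the list above
  {
    std::cerr << "requested layer missing: " << e.what() << '\n';
    std::exit( EXIT_FAILURE );
  }
  catch ( vk::SystemError const & e )            // any other Vulkan error result
  {
    std::cerr << "vkCreateInstance failed: " << e.what() << '\n';
    std::exit( EXIT_FAILURE );
  }
}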
class FeatureNotPresentError : public SystemError + { + public: + FeatureNotPresentError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFeatureNotPresent ), message ) {} + FeatureNotPresentError( char const * message ) + : SystemError( make_error_code( Result::eErrorFeatureNotPresent ), message ) {} + }; + + class IncompatibleDriverError : public SystemError + { + public: + IncompatibleDriverError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDriver ), message ) {} + IncompatibleDriverError( char const * message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDriver ), message ) {} + }; + + class TooManyObjectsError : public SystemError + { + public: + TooManyObjectsError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorTooManyObjects ), message ) {} + TooManyObjectsError( char const * message ) + : SystemError( make_error_code( Result::eErrorTooManyObjects ), message ) {} + }; + + class FormatNotSupportedError : public SystemError + { + public: + FormatNotSupportedError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFormatNotSupported ), message ) {} + FormatNotSupportedError( char const * message ) + : SystemError( make_error_code( Result::eErrorFormatNotSupported ), message ) {} + }; + + class FragmentedPoolError : public SystemError + { + public: + FragmentedPoolError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFragmentedPool ), message ) {} + FragmentedPoolError( char const * message ) + : SystemError( make_error_code( Result::eErrorFragmentedPool ), message ) {} + }; + + class UnknownError : public SystemError + { + public: + UnknownError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorUnknown ), message ) {} + UnknownError( char const * message ) + : SystemError( make_error_code( Result::eErrorUnknown ), message ) {} + }; + + class OutOfPoolMemoryError : public SystemError + { + public: + OutOfPoolMemoryError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorOutOfPoolMemory ), message ) {} + OutOfPoolMemoryError( char const * message ) + : SystemError( make_error_code( Result::eErrorOutOfPoolMemory ), message ) {} + }; + + class InvalidExternalHandleError : public SystemError + { + public: + InvalidExternalHandleError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorInvalidExternalHandle ), message ) {} + InvalidExternalHandleError( char const * message ) + : SystemError( make_error_code( Result::eErrorInvalidExternalHandle ), message ) {} + }; + + class FragmentationError : public SystemError + { + public: + FragmentationError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFragmentation ), message ) {} + FragmentationError( char const * message ) + : SystemError( make_error_code( Result::eErrorFragmentation ), message ) {} + }; + + class InvalidOpaqueCaptureAddressError : public SystemError + { + public: + InvalidOpaqueCaptureAddressError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorInvalidOpaqueCaptureAddress ), message ) {} + InvalidOpaqueCaptureAddressError( char const * message ) + : SystemError( make_error_code( Result::eErrorInvalidOpaqueCaptureAddress ), message ) {} + }; + + class SurfaceLostKHRError : public SystemError + { + public: + SurfaceLostKHRError( std::string const& message ) + : SystemError( make_error_code( 
Result::eErrorSurfaceLostKHR ), message ) {} + SurfaceLostKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorSurfaceLostKHR ), message ) {} + }; + + class NativeWindowInUseKHRError : public SystemError + { + public: + NativeWindowInUseKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorNativeWindowInUseKHR ), message ) {} + NativeWindowInUseKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorNativeWindowInUseKHR ), message ) {} + }; + + class OutOfDateKHRError : public SystemError + { + public: + OutOfDateKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorOutOfDateKHR ), message ) {} + OutOfDateKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorOutOfDateKHR ), message ) {} + }; + + class IncompatibleDisplayKHRError : public SystemError + { + public: + IncompatibleDisplayKHRError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDisplayKHR ), message ) {} + IncompatibleDisplayKHRError( char const * message ) + : SystemError( make_error_code( Result::eErrorIncompatibleDisplayKHR ), message ) {} + }; + + class ValidationFailedEXTError : public SystemError + { + public: + ValidationFailedEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorValidationFailedEXT ), message ) {} + ValidationFailedEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorValidationFailedEXT ), message ) {} + }; + + class InvalidShaderNVError : public SystemError + { + public: + InvalidShaderNVError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorInvalidShaderNV ), message ) {} + InvalidShaderNVError( char const * message ) + : SystemError( make_error_code( Result::eErrorInvalidShaderNV ), message ) {} + }; + + class InvalidDrmFormatModifierPlaneLayoutEXTError : public SystemError + { + public: + InvalidDrmFormatModifierPlaneLayoutEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT ), message ) {} + InvalidDrmFormatModifierPlaneLayoutEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT ), message ) {} + }; + + class NotPermittedEXTError : public SystemError + { + public: + NotPermittedEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorNotPermittedEXT ), message ) {} + NotPermittedEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorNotPermittedEXT ), message ) {} + }; + + class FullScreenExclusiveModeLostEXTError : public SystemError + { + public: + FullScreenExclusiveModeLostEXTError( std::string const& message ) + : SystemError( make_error_code( Result::eErrorFullScreenExclusiveModeLostEXT ), message ) {} + FullScreenExclusiveModeLostEXTError( char const * message ) + : SystemError( make_error_code( Result::eErrorFullScreenExclusiveModeLostEXT ), message ) {} + }; + + + [[noreturn]] static void throwResultException( Result result, char const * message ) + { + switch ( result ) + { + case Result::eErrorOutOfHostMemory: throw OutOfHostMemoryError( message ); + case Result::eErrorOutOfDeviceMemory: throw OutOfDeviceMemoryError( message ); + case Result::eErrorInitializationFailed: throw InitializationFailedError( message ); + case Result::eErrorDeviceLost: throw DeviceLostError( message ); + case Result::eErrorMemoryMapFailed: throw 
MemoryMapFailedError( message ); + case Result::eErrorLayerNotPresent: throw LayerNotPresentError( message ); + case Result::eErrorExtensionNotPresent: throw ExtensionNotPresentError( message ); + case Result::eErrorFeatureNotPresent: throw FeatureNotPresentError( message ); + case Result::eErrorIncompatibleDriver: throw IncompatibleDriverError( message ); + case Result::eErrorTooManyObjects: throw TooManyObjectsError( message ); + case Result::eErrorFormatNotSupported: throw FormatNotSupportedError( message ); + case Result::eErrorFragmentedPool: throw FragmentedPoolError( message ); + case Result::eErrorUnknown: throw UnknownError( message ); + case Result::eErrorOutOfPoolMemory: throw OutOfPoolMemoryError( message ); + case Result::eErrorInvalidExternalHandle: throw InvalidExternalHandleError( message ); + case Result::eErrorFragmentation: throw FragmentationError( message ); + case Result::eErrorInvalidOpaqueCaptureAddress: throw InvalidOpaqueCaptureAddressError( message ); + case Result::eErrorSurfaceLostKHR: throw SurfaceLostKHRError( message ); + case Result::eErrorNativeWindowInUseKHR: throw NativeWindowInUseKHRError( message ); + case Result::eErrorOutOfDateKHR: throw OutOfDateKHRError( message ); + case Result::eErrorIncompatibleDisplayKHR: throw IncompatibleDisplayKHRError( message ); + case Result::eErrorValidationFailedEXT: throw ValidationFailedEXTError( message ); + case Result::eErrorInvalidShaderNV: throw InvalidShaderNVError( message ); + case Result::eErrorInvalidDrmFormatModifierPlaneLayoutEXT: throw InvalidDrmFormatModifierPlaneLayoutEXTError( message ); + case Result::eErrorNotPermittedEXT: throw NotPermittedEXTError( message ); + case Result::eErrorFullScreenExclusiveModeLostEXT: throw FullScreenExclusiveModeLostEXTError( message ); + default: throw SystemError( make_error_code( result ) ); + } + } +#endif + + template void ignore(T const&) VULKAN_HPP_NOEXCEPT {} + + template + struct ResultValue + { +#ifdef VULKAN_HPP_HAS_NOEXCEPT + ResultValue( Result r, T & v ) VULKAN_HPP_NOEXCEPT(VULKAN_HPP_NOEXCEPT(T(v))) +#else + ResultValue( Result r, T & v ) +#endif + : result( r ) + , value( v ) + {} + +#ifdef VULKAN_HPP_HAS_NOEXCEPT + ResultValue( Result r, T && v ) VULKAN_HPP_NOEXCEPT(VULKAN_HPP_NOEXCEPT(T(std::move(v)))) +#else + ResultValue( Result r, T && v ) +#endif + : result( r ) + , value( std::move( v ) ) + {} + + Result result; + T value; + + operator std::tuple() VULKAN_HPP_NOEXCEPT { return std::tuple(result, value); } + +#if !defined(VULKAN_HPP_DISABLE_IMPLICIT_RESULT_VALUE_CAST) + VULKAN_HPP_DEPRECATED("Implicit-cast operators on vk::ResultValue are deprecated. Explicitly access the value as member of ResultValue.") + operator T const& () const & VULKAN_HPP_NOEXCEPT + { + return value; + } + + VULKAN_HPP_DEPRECATED("Implicit-cast operators on vk::ResultValue are deprecated. Explicitly access the value as member of ResultValue.") + operator T& () & VULKAN_HPP_NOEXCEPT + { + return value; + } + + VULKAN_HPP_DEPRECATED("Implicit-cast operators on vk::ResultValue are deprecated. Explicitly access the value as member of ResultValue.") + operator T const&& () const && VULKAN_HPP_NOEXCEPT + { + return std::move( value ); + } + + VULKAN_HPP_DEPRECATED("Implicit-cast operators on vk::ResultValue are deprecated. 
Explicitly access the value as member of ResultValue.") + operator T&& () && VULKAN_HPP_NOEXCEPT + { + return std::move( value ); + } +#endif + }; + +#if !defined(VULKAN_HPP_NO_SMART_HANDLE) + template + struct ResultValue> + { +#ifdef VULKAN_HPP_HAS_NOEXCEPT + ResultValue(Result r, UniqueHandle && v) VULKAN_HPP_NOEXCEPT +#else + ResultValue(Result r, UniqueHandle && v) +#endif + : result(r) + , value(std::move(v)) + {} + + std::tuple> asTuple() + { + return std::make_tuple( result, std::move( value ) ); + } + +# if !defined(VULKAN_HPP_DISABLE_IMPLICIT_RESULT_VALUE_CAST) + VULKAN_HPP_DEPRECATED("Implicit-cast operators on vk::ResultValue are deprecated. Explicitly access the value as member of ResultValue.") + operator UniqueHandle& () & VULKAN_HPP_NOEXCEPT + { + return value; + } + + VULKAN_HPP_DEPRECATED("Implicit-cast operators on vk::ResultValue are deprecated. Explicitly access the value as member of ResultValue.") + operator UniqueHandle() VULKAN_HPP_NOEXCEPT + { + return std::move(value); + } +# endif + + Result result; + UniqueHandle value; + }; + + template + struct ResultValue>> + { +# ifdef VULKAN_HPP_HAS_NOEXCEPT + ResultValue( Result r, std::vector> && v ) VULKAN_HPP_NOEXCEPT +# else + ResultValue( Result r, std::vector> && v ) +# endif + : result( r ) + , value( std::move( v ) ) + {} + + Result result; + std::vector> value; + + operator std::tuple> &>() VULKAN_HPP_NOEXCEPT + { + return std::tuple> &>( result, value ); + } + }; +#endif + + template + struct ResultValueType + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + typedef ResultValue type; +#else + typedef T type; +#endif + }; + + template <> + struct ResultValueType + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + typedef Result type; +#else + typedef void type; +#endif + }; + + VULKAN_HPP_INLINE ResultValueType::type createResultValue( Result result, char const * message ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore(message); + VULKAN_HPP_ASSERT_ON_RESULT( result == Result::eSuccess ); + return result; +#else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } +#endif + } + + template + VULKAN_HPP_INLINE typename ResultValueType::type createResultValue( Result result, T & data, char const * message ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore(message); + VULKAN_HPP_ASSERT_ON_RESULT( result == Result::eSuccess ); + return ResultValue( result, std::move( data ) ); +#else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } + return std::move( data ); +#endif + } + + VULKAN_HPP_INLINE Result createResultValue( Result result, char const * message, std::initializer_list successCodes ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore(message); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); +#else + if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + { + throwResultException( result, message ); + } +#endif + return result; + } + + template + VULKAN_HPP_INLINE ResultValue createResultValue( Result result, T & data, char const * message, std::initializer_list successCodes ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore(message); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); +#else + if ( std::find( successCodes.begin(), successCodes.end(), 
result ) == successCodes.end() ) + { + throwResultException( result, message ); + } +#endif + return ResultValue( result, data ); + } + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createResultValue( Result result, T & data, char const * message, typename UniqueHandleTraits::deleter const& deleter ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore(message); + VULKAN_HPP_ASSERT_ON_RESULT( result == Result::eSuccess ); + return ResultValue>( result, UniqueHandle(data, deleter) ); +#else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } + return UniqueHandle(data, deleter); +#endif + } + + template + VULKAN_HPP_INLINE ResultValue> + createResultValue( Result result, + T & data, + char const * message, + std::initializer_list successCodes, + typename UniqueHandleTraits::deleter const & deleter ) + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore( message ); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); +# else + if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + { + throwResultException( result, message ); + } +# endif + return ResultValue>( result, UniqueHandle( data, deleter ) ); + } + + template + VULKAN_HPP_INLINE typename ResultValueType>>::type + createResultValue( Result result, std::vector> && data, char const * message ) + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore( message ); + VULKAN_HPP_ASSERT_ON_RESULT( result == Result::eSuccess ); + return ResultValue>>( result, std::move( data ) ); +# else + if ( result != Result::eSuccess ) + { + throwResultException( result, message ); + } + return std::move( data ); +# endif + } + + template + VULKAN_HPP_INLINE ResultValue>> + createResultValue( Result result, + std::vector> && data, + char const * message, + std::initializer_list successCodes ) + { +# ifdef VULKAN_HPP_NO_EXCEPTIONS + ignore( message ); + ignore(successCodes); // just in case VULKAN_HPP_ASSERT_ON_RESULT is empty + VULKAN_HPP_ASSERT_ON_RESULT( std::find( successCodes.begin(), successCodes.end(), result ) != successCodes.end() ); +# else + if ( std::find( successCodes.begin(), successCodes.end(), result ) == successCodes.end() ) + { + throwResultException( result, message ); + } +# endif + return ResultValue>>( result, std::move( data ) ); + } +#endif + + struct AabbPositionsKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AabbPositionsKHR(float minX_ = {}, float minY_ = {}, float minZ_ = {}, float maxX_ = {}, float maxY_ = {}, float maxZ_ = {}) VULKAN_HPP_NOEXCEPT + : minX( minX_ ), minY( minY_ ), minZ( minZ_ ), maxX( maxX_ ), maxY( maxY_ ), maxZ( maxZ_ ) + {} + + VULKAN_HPP_CONSTEXPR AabbPositionsKHR( AabbPositionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AabbPositionsKHR( VkAabbPositionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AabbPositionsKHR & operator=( VkAabbPositionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AabbPositionsKHR & operator=( AabbPositionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AabbPositionsKHR ) ); + return *this; + } + + AabbPositionsKHR & setMinX( float minX_ ) VULKAN_HPP_NOEXCEPT + { + minX = minX_; + return *this; + } + + AabbPositionsKHR & setMinY( 
float minY_ ) VULKAN_HPP_NOEXCEPT + { + minY = minY_; + return *this; + } + + AabbPositionsKHR & setMinZ( float minZ_ ) VULKAN_HPP_NOEXCEPT + { + minZ = minZ_; + return *this; + } + + AabbPositionsKHR & setMaxX( float maxX_ ) VULKAN_HPP_NOEXCEPT + { + maxX = maxX_; + return *this; + } + + AabbPositionsKHR & setMaxY( float maxY_ ) VULKAN_HPP_NOEXCEPT + { + maxY = maxY_; + return *this; + } + + AabbPositionsKHR & setMaxZ( float maxZ_ ) VULKAN_HPP_NOEXCEPT + { + maxZ = maxZ_; + return *this; + } + + + operator VkAabbPositionsKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAabbPositionsKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AabbPositionsKHR const& ) const = default; +#else + bool operator==( AabbPositionsKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( minX == rhs.minX ) + && ( minY == rhs.minY ) + && ( minZ == rhs.minZ ) + && ( maxX == rhs.maxX ) + && ( maxY == rhs.maxY ) + && ( maxZ == rhs.maxZ ); + } + + bool operator!=( AabbPositionsKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + float minX = {}; + float minY = {}; + float minZ = {}; + float maxX = {}; + float maxY = {}; + float maxZ = {}; + + }; + static_assert( sizeof( AabbPositionsKHR ) == sizeof( VkAabbPositionsKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + using AabbPositionsNV = AabbPositionsKHR; + + class AccelerationStructureKHR + { + public: + using CType = VkAccelerationStructureKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eAccelerationStructureKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureKHR; + + public: + VULKAN_HPP_CONSTEXPR AccelerationStructureKHR() VULKAN_HPP_NOEXCEPT + : m_accelerationStructureKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT AccelerationStructureKHR( VkAccelerationStructureKHR accelerationStructureKHR ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureKHR( accelerationStructureKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + AccelerationStructureKHR & operator=(VkAccelerationStructureKHR accelerationStructureKHR) VULKAN_HPP_NOEXCEPT + { + m_accelerationStructureKHR = accelerationStructureKHR; + return *this; + } +#endif + + AccelerationStructureKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_accelerationStructureKHR = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureKHR const& ) const = default; +#else + bool operator==( AccelerationStructureKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR == rhs.m_accelerationStructureKHR; + } + + bool operator!=(AccelerationStructureKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR != rhs.m_accelerationStructureKHR; + } + + bool operator<(AccelerationStructureKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR < rhs.m_accelerationStructureKHR; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT 
operator VkAccelerationStructureKHR() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureKHR == VK_NULL_HANDLE; + } + + private: + VkAccelerationStructureKHR m_accelerationStructureKHR; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR ) == sizeof( VkAccelerationStructureKHR ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::AccelerationStructureKHR; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureKHR; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureKHR; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + union DeviceOrHostAddressConstKHR + { + DeviceOrHostAddressConstKHR( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const& rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR ) ); + } + + DeviceOrHostAddressConstKHR( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {} ) + : deviceAddress( deviceAddress_ ) + {} + + DeviceOrHostAddressConstKHR( const void* hostAddress_ ) + : hostAddress( hostAddress_ ) + {} + + DeviceOrHostAddressConstKHR & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + deviceAddress = deviceAddress_; + return *this; + } + + DeviceOrHostAddressConstKHR & setHostAddress( const void* hostAddress_ ) VULKAN_HPP_NOEXCEPT + { + hostAddress = hostAddress_; + return *this; + } + + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR & operator=( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR ) ); + return *this; + } + + operator VkDeviceOrHostAddressConstKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceOrHostAddressConstKHR &() + { + return *reinterpret_cast(this); + } + +#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress; + const void* hostAddress; +#else + VkDeviceAddress deviceAddress; + const void* hostAddress; +#endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ + }; + + struct AccelerationStructureGeometryTrianglesDataKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureGeometryTrianglesDataKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + AccelerationStructureGeometryTrianglesDataKHR(VULKAN_HPP_NAMESPACE::Format vertexFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR vertexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize vertexStride_ = {}, uint32_t maxVertex_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR indexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR transformData_ = {}) VULKAN_HPP_NOEXCEPT + : vertexFormat( vertexFormat_ ), vertexData( vertexData_ ), vertexStride( vertexStride_ ), maxVertex( maxVertex_ ), 
indexType( indexType_ ), indexData( indexData_ ), transformData( transformData_ ) + {} + + AccelerationStructureGeometryTrianglesDataKHR( AccelerationStructureGeometryTrianglesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureGeometryTrianglesDataKHR( VkAccelerationStructureGeometryTrianglesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureGeometryTrianglesDataKHR & operator=( VkAccelerationStructureGeometryTrianglesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & operator=( AccelerationStructureGeometryTrianglesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureGeometryTrianglesDataKHR ) ); + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setVertexFormat( VULKAN_HPP_NAMESPACE::Format vertexFormat_ ) VULKAN_HPP_NOEXCEPT + { + vertexFormat = vertexFormat_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setVertexData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & vertexData_ ) VULKAN_HPP_NOEXCEPT + { + vertexData = vertexData_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setVertexStride( VULKAN_HPP_NAMESPACE::DeviceSize vertexStride_ ) VULKAN_HPP_NOEXCEPT + { + vertexStride = vertexStride_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setMaxVertex( uint32_t maxVertex_ ) VULKAN_HPP_NOEXCEPT + { + maxVertex = maxVertex_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setIndexType( VULKAN_HPP_NAMESPACE::IndexType indexType_ ) VULKAN_HPP_NOEXCEPT + { + indexType = indexType_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setIndexData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & indexData_ ) VULKAN_HPP_NOEXCEPT + { + indexData = indexData_; + return *this; + } + + AccelerationStructureGeometryTrianglesDataKHR & setTransformData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & transformData_ ) VULKAN_HPP_NOEXCEPT + { + transformData = transformData_; + return *this; + } + + + operator VkAccelerationStructureGeometryTrianglesDataKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureGeometryTrianglesDataKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureGeometryTrianglesDataKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Format vertexFormat = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR vertexData = {}; + VULKAN_HPP_NAMESPACE::DeviceSize vertexStride = {}; + uint32_t maxVertex = {}; + VULKAN_HPP_NAMESPACE::IndexType indexType = VULKAN_HPP_NAMESPACE::IndexType::eUint16; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR indexData = {}; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR transformData = {}; + + }; + static_assert( sizeof( AccelerationStructureGeometryTrianglesDataKHR ) == sizeof( VkAccelerationStructureGeometryTrianglesDataKHR ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AccelerationStructureGeometryTrianglesDataKHR; + }; + + struct AccelerationStructureGeometryAabbsDataKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureGeometryAabbsDataKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + AccelerationStructureGeometryAabbsDataKHR(VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR data_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {}) VULKAN_HPP_NOEXCEPT + : data( data_ ), stride( stride_ ) + {} + + AccelerationStructureGeometryAabbsDataKHR( AccelerationStructureGeometryAabbsDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureGeometryAabbsDataKHR( VkAccelerationStructureGeometryAabbsDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureGeometryAabbsDataKHR & operator=( VkAccelerationStructureGeometryAabbsDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureGeometryAabbsDataKHR & operator=( AccelerationStructureGeometryAabbsDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureGeometryAabbsDataKHR ) ); + return *this; + } + + AccelerationStructureGeometryAabbsDataKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureGeometryAabbsDataKHR & setData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & data_ ) VULKAN_HPP_NOEXCEPT + { + data = data_; + return *this; + } + + AccelerationStructureGeometryAabbsDataKHR & setStride( VULKAN_HPP_NAMESPACE::DeviceSize stride_ ) VULKAN_HPP_NOEXCEPT + { + stride = stride_; + return *this; + } + + + operator VkAccelerationStructureGeometryAabbsDataKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureGeometryAabbsDataKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureGeometryAabbsDataKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR data = {}; + VULKAN_HPP_NAMESPACE::DeviceSize stride = {}; + + }; + static_assert( sizeof( AccelerationStructureGeometryAabbsDataKHR ) == sizeof( VkAccelerationStructureGeometryAabbsDataKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AccelerationStructureGeometryAabbsDataKHR; + }; + + struct AccelerationStructureGeometryInstancesDataKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureGeometryInstancesDataKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + AccelerationStructureGeometryInstancesDataKHR(VULKAN_HPP_NAMESPACE::Bool32 arrayOfPointers_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR data_ = {}) VULKAN_HPP_NOEXCEPT + : arrayOfPointers( arrayOfPointers_ ), data( data_ ) + {} + + AccelerationStructureGeometryInstancesDataKHR( AccelerationStructureGeometryInstancesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureGeometryInstancesDataKHR( VkAccelerationStructureGeometryInstancesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureGeometryInstancesDataKHR & operator=( VkAccelerationStructureGeometryInstancesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureGeometryInstancesDataKHR & operator=( AccelerationStructureGeometryInstancesDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureGeometryInstancesDataKHR ) ); + return *this; + } + + AccelerationStructureGeometryInstancesDataKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureGeometryInstancesDataKHR & setArrayOfPointers( VULKAN_HPP_NAMESPACE::Bool32 arrayOfPointers_ ) VULKAN_HPP_NOEXCEPT + { + arrayOfPointers = arrayOfPointers_; + return *this; + } + + AccelerationStructureGeometryInstancesDataKHR & setData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & data_ ) VULKAN_HPP_NOEXCEPT + { + data = data_; + return *this; + } + + + operator VkAccelerationStructureGeometryInstancesDataKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureGeometryInstancesDataKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureGeometryInstancesDataKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 arrayOfPointers = {}; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR data = {}; + + }; + static_assert( sizeof( AccelerationStructureGeometryInstancesDataKHR ) == sizeof( VkAccelerationStructureGeometryInstancesDataKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AccelerationStructureGeometryInstancesDataKHR; + }; + + union AccelerationStructureGeometryDataKHR + { + AccelerationStructureGeometryDataKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR const& rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR ) ); + } + + AccelerationStructureGeometryDataKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryTrianglesDataKHR triangles_ = {} ) + : triangles( triangles_ ) + {} + + AccelerationStructureGeometryDataKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryAabbsDataKHR aabbs_ ) + : aabbs( aabbs_ ) + {} + + AccelerationStructureGeometryDataKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryInstancesDataKHR instances_ ) + : instances( instances_ ) + {} + + AccelerationStructureGeometryDataKHR & setTriangles( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryTrianglesDataKHR const & triangles_ ) VULKAN_HPP_NOEXCEPT + { + triangles = triangles_; + return *this; + } + + AccelerationStructureGeometryDataKHR & setAabbs( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryAabbsDataKHR const & aabbs_ ) VULKAN_HPP_NOEXCEPT + { + aabbs = aabbs_; + return *this; + } + + AccelerationStructureGeometryDataKHR & setInstances( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryInstancesDataKHR const & instances_ ) VULKAN_HPP_NOEXCEPT + { + instances = instances_; + return *this; + } + + VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR & operator=( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR ) ); + return *this; + } + + operator VkAccelerationStructureGeometryDataKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkAccelerationStructureGeometryDataKHR &() + { + return *reinterpret_cast(this); + } + +#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryTrianglesDataKHR triangles; + VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryAabbsDataKHR aabbs; + VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryInstancesDataKHR instances; +#else + VkAccelerationStructureGeometryTrianglesDataKHR triangles; + VkAccelerationStructureGeometryAabbsDataKHR aabbs; + VkAccelerationStructureGeometryInstancesDataKHR instances; +#endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ + }; + + struct AccelerationStructureGeometryKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureGeometryKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + AccelerationStructureGeometryKHR(VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType_ = VULKAN_HPP_NAMESPACE::GeometryTypeKHR::eTriangles, VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR geometry_ = {}, VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags_ = {}) VULKAN_HPP_NOEXCEPT + : geometryType( geometryType_ ), geometry( geometry_ ), flags( flags_ ) + {} + + AccelerationStructureGeometryKHR( AccelerationStructureGeometryKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureGeometryKHR( VkAccelerationStructureGeometryKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureGeometryKHR & operator=( VkAccelerationStructureGeometryKHR const & rhs ) 
VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureGeometryKHR & operator=( AccelerationStructureGeometryKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureGeometryKHR ) ); + return *this; + } + + AccelerationStructureGeometryKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureGeometryKHR & setGeometryType( VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType_ ) VULKAN_HPP_NOEXCEPT + { + geometryType = geometryType_; + return *this; + } + + AccelerationStructureGeometryKHR & setGeometry( VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR const & geometry_ ) VULKAN_HPP_NOEXCEPT + { + geometry = geometry_; + return *this; + } + + AccelerationStructureGeometryKHR & setFlags( VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkAccelerationStructureGeometryKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureGeometryKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureGeometryKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType = VULKAN_HPP_NAMESPACE::GeometryTypeKHR::eTriangles; + VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryDataKHR geometry = {}; + VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags = {}; + + }; + static_assert( sizeof( AccelerationStructureGeometryKHR ) == sizeof( VkAccelerationStructureGeometryKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AccelerationStructureGeometryKHR; + }; + + union DeviceOrHostAddressKHR + { + DeviceOrHostAddressKHR( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR const& rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR ) ); + } + + DeviceOrHostAddressKHR( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {} ) + : deviceAddress( deviceAddress_ ) + {} + + DeviceOrHostAddressKHR( void* hostAddress_ ) + : hostAddress( hostAddress_ ) + {} + + DeviceOrHostAddressKHR & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + deviceAddress = deviceAddress_; + return *this; + } + + DeviceOrHostAddressKHR & setHostAddress( void* hostAddress_ ) VULKAN_HPP_NOEXCEPT + { + hostAddress = hostAddress_; + return *this; + } + + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR & operator=( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR ) ); + return *this; + } + + operator VkDeviceOrHostAddressKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkDeviceOrHostAddressKHR &() + { + return *reinterpret_cast(this); + } + +#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress; + void* hostAddress; +#else + VkDeviceAddress deviceAddress; + void* hostAddress; +#endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ + }; + + struct AccelerationStructureBuildGeometryInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureBuildGeometryInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + AccelerationStructureBuildGeometryInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR::eBuild, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure_ = {}, uint32_t geometryCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* pGeometries_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), flags( flags_ ), mode( mode_ ), srcAccelerationStructure( srcAccelerationStructure_ ), dstAccelerationStructure( dstAccelerationStructure_ ), geometryCount( geometryCount_ ), pGeometries( pGeometries_ ), ppGeometries( ppGeometries_ ), scratchData( scratchData_ ) + {} + + AccelerationStructureBuildGeometryInfoKHR( AccelerationStructureBuildGeometryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureBuildGeometryInfoKHR( VkAccelerationStructureBuildGeometryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode_, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure_, 
VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & geometries_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pGeometries_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData_ = {} ) + : type( type_ ), flags( flags_ ), mode( mode_ ), srcAccelerationStructure( srcAccelerationStructure_ ), dstAccelerationStructure( dstAccelerationStructure_ ), geometryCount( static_cast( geometries_.size() ) ), pGeometries( geometries_.data() ), ppGeometries( pGeometries_.data() ), scratchData( scratchData_ ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( geometries_.size() == pGeometries_.size() ); +#else + if ( geometries_.size() != pGeometries_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::AccelerationStructureBuildGeometryInfoKHR::AccelerationStructureBuildGeometryInfoKHR: geometries_.size() != pGeometries_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureBuildGeometryInfoKHR & operator=( VkAccelerationStructureBuildGeometryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & operator=( AccelerationStructureBuildGeometryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureBuildGeometryInfoKHR ) ); + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setMode( VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode_ ) VULKAN_HPP_NOEXCEPT + { + mode = mode_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setSrcAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + srcAccelerationStructure = srcAccelerationStructure_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setDstAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + dstAccelerationStructure = dstAccelerationStructure_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setGeometryCount( uint32_t geometryCount_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = geometryCount_; + return *this; + } + + AccelerationStructureBuildGeometryInfoKHR & setPGeometries( const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* pGeometries_ ) VULKAN_HPP_NOEXCEPT + { + pGeometries = pGeometries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR & setGeometries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & geometries_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = static_cast( geometries_.size() ); + pGeometries = geometries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + AccelerationStructureBuildGeometryInfoKHR & setPpGeometries( const 
VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries_ ) VULKAN_HPP_NOEXCEPT + { + ppGeometries = ppGeometries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureBuildGeometryInfoKHR & setPGeometries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pGeometries_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = static_cast( pGeometries_.size() ); + ppGeometries = pGeometries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + AccelerationStructureBuildGeometryInfoKHR & setScratchData( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR const & scratchData_ ) VULKAN_HPP_NOEXCEPT + { + scratchData = scratchData_; + return *this; + } + + + operator VkAccelerationStructureBuildGeometryInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureBuildGeometryInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureBuildGeometryInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel; + VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsKHR flags = {}; + VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR mode = VULKAN_HPP_NAMESPACE::BuildAccelerationStructureModeKHR::eBuild; + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR srcAccelerationStructure = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dstAccelerationStructure = {}; + uint32_t geometryCount = {}; + const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* pGeometries = {}; + const VULKAN_HPP_NAMESPACE::AccelerationStructureGeometryKHR* const * ppGeometries = {}; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR scratchData = {}; + + }; + static_assert( sizeof( AccelerationStructureBuildGeometryInfoKHR ) == sizeof( VkAccelerationStructureBuildGeometryInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eAccelerationStructureBuildGeometryInfoKHR>
+  {
+    using Type = AccelerationStructureBuildGeometryInfoKHR;
+  };
+
+  struct AccelerationStructureBuildRangeInfoKHR
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR AccelerationStructureBuildRangeInfoKHR(uint32_t primitiveCount_ = {}, uint32_t primitiveOffset_ = {}, uint32_t firstVertex_ = {}, uint32_t transformOffset_ = {}) VULKAN_HPP_NOEXCEPT
+    : primitiveCount( primitiveCount_ ), primitiveOffset( primitiveOffset_ ), firstVertex( firstVertex_ ), transformOffset( transformOffset_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR AccelerationStructureBuildRangeInfoKHR( AccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    AccelerationStructureBuildRangeInfoKHR( VkAccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    AccelerationStructureBuildRangeInfoKHR & operator=( VkAccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR const *>( &rhs );
+      return *this;
+    }
+
+    AccelerationStructureBuildRangeInfoKHR & operator=( AccelerationStructureBuildRangeInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( AccelerationStructureBuildRangeInfoKHR ) );
+      return *this;
+    }
+
+    AccelerationStructureBuildRangeInfoKHR & setPrimitiveCount( uint32_t primitiveCount_ ) VULKAN_HPP_NOEXCEPT
+    {
+      primitiveCount = primitiveCount_;
+      return *this;
+    }
+
+    AccelerationStructureBuildRangeInfoKHR & setPrimitiveOffset( uint32_t primitiveOffset_ ) VULKAN_HPP_NOEXCEPT
+    {
+      primitiveOffset = primitiveOffset_;
+      return *this;
+    }
+
+    AccelerationStructureBuildRangeInfoKHR & setFirstVertex( uint32_t firstVertex_ ) VULKAN_HPP_NOEXCEPT
+    {
+      firstVertex = firstVertex_;
+      return *this;
+    }
+
+    AccelerationStructureBuildRangeInfoKHR & setTransformOffset( uint32_t transformOffset_ ) VULKAN_HPP_NOEXCEPT
+    {
+      transformOffset = transformOffset_;
+      return *this;
+    }
+
+
+    operator VkAccelerationStructureBuildRangeInfoKHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkAccelerationStructureBuildRangeInfoKHR*>( this );
+    }
+
+    operator VkAccelerationStructureBuildRangeInfoKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkAccelerationStructureBuildRangeInfoKHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( AccelerationStructureBuildRangeInfoKHR const& ) const = default;
+#else
+    bool operator==( AccelerationStructureBuildRangeInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( primitiveCount == rhs.primitiveCount )
+          && ( primitiveOffset == rhs.primitiveOffset )
+          && ( firstVertex == rhs.firstVertex )
+          && ( transformOffset == rhs.transformOffset );
+    }
+
+    bool operator!=( AccelerationStructureBuildRangeInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t primitiveCount = {};
+    uint32_t primitiveOffset = {};
+    uint32_t firstVertex = {};
+    uint32_t transformOffset = {};
+
+  };
+  static_assert( sizeof( AccelerationStructureBuildRangeInfoKHR ) == sizeof( VkAccelerationStructureBuildRangeInfoKHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<AccelerationStructureBuildRangeInfoKHR>::value, "struct wrapper is not a standard layout!"
); + + struct AccelerationStructureBuildSizesInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureBuildSizesInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureBuildSizesInfoKHR(VULKAN_HPP_NAMESPACE::DeviceSize accelerationStructureSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructureSize( accelerationStructureSize_ ), updateScratchSize( updateScratchSize_ ), buildScratchSize( buildScratchSize_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureBuildSizesInfoKHR( AccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureBuildSizesInfoKHR( VkAccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureBuildSizesInfoKHR & operator=( VkAccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureBuildSizesInfoKHR & operator=( AccelerationStructureBuildSizesInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureBuildSizesInfoKHR ) ); + return *this; + } + + AccelerationStructureBuildSizesInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureBuildSizesInfoKHR & setAccelerationStructureSize( VULKAN_HPP_NAMESPACE::DeviceSize accelerationStructureSize_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureSize = accelerationStructureSize_; + return *this; + } + + AccelerationStructureBuildSizesInfoKHR & setUpdateScratchSize( VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize_ ) VULKAN_HPP_NOEXCEPT + { + updateScratchSize = updateScratchSize_; + return *this; + } + + AccelerationStructureBuildSizesInfoKHR & setBuildScratchSize( VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize_ ) VULKAN_HPP_NOEXCEPT + { + buildScratchSize = buildScratchSize_; + return *this; + } + + + operator VkAccelerationStructureBuildSizesInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureBuildSizesInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureBuildSizesInfoKHR const& ) const = default; +#else + bool operator==( AccelerationStructureBuildSizesInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructureSize == rhs.accelerationStructureSize ) + && ( updateScratchSize == rhs.updateScratchSize ) + && ( buildScratchSize == rhs.buildScratchSize ); + } + + bool operator!=( AccelerationStructureBuildSizesInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureBuildSizesInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize accelerationStructureSize = {}; + VULKAN_HPP_NAMESPACE::DeviceSize updateScratchSize = {}; + VULKAN_HPP_NAMESPACE::DeviceSize buildScratchSize = {}; + + }; + static_assert( sizeof( AccelerationStructureBuildSizesInfoKHR ) == sizeof( 
VkAccelerationStructureBuildSizesInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AccelerationStructureBuildSizesInfoKHR; + }; + + class Buffer + { + public: + using CType = VkBuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eBuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBuffer; + + public: + VULKAN_HPP_CONSTEXPR Buffer() VULKAN_HPP_NOEXCEPT + : m_buffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Buffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_buffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Buffer( VkBuffer buffer ) VULKAN_HPP_NOEXCEPT + : m_buffer( buffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Buffer & operator=(VkBuffer buffer) VULKAN_HPP_NOEXCEPT + { + m_buffer = buffer; + return *this; + } +#endif + + Buffer & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_buffer = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Buffer const& ) const = default; +#else + bool operator==( Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_buffer == rhs.m_buffer; + } + + bool operator!=(Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_buffer != rhs.m_buffer; + } + + bool operator<(Buffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_buffer < rhs.m_buffer; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBuffer() const VULKAN_HPP_NOEXCEPT + { + return m_buffer; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_buffer != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_buffer == VK_NULL_HANDLE; + } + + private: + VkBuffer m_buffer; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Buffer ) == sizeof( VkBuffer ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Buffer; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Buffer; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Buffer; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct AccelerationStructureCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureCreateFlagsKHR createFlags_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel, VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}) VULKAN_HPP_NOEXCEPT + : createFlags( createFlags_ ), buffer( buffer_ ), offset( offset_ ), size( size_ ), type( type_ ), deviceAddress( deviceAddress_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoKHR( AccelerationStructureCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureCreateInfoKHR( VkAccelerationStructureCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureCreateInfoKHR & operator=( VkAccelerationStructureCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureCreateInfoKHR & operator=( AccelerationStructureCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureCreateInfoKHR ) ); + return *this; + } + + AccelerationStructureCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureCreateInfoKHR & setCreateFlags( VULKAN_HPP_NAMESPACE::AccelerationStructureCreateFlagsKHR createFlags_ ) VULKAN_HPP_NOEXCEPT + { + createFlags = createFlags_; + return *this; + } + + AccelerationStructureCreateInfoKHR & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + AccelerationStructureCreateInfoKHR & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + AccelerationStructureCreateInfoKHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + AccelerationStructureCreateInfoKHR & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + AccelerationStructureCreateInfoKHR & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + deviceAddress = deviceAddress_; + return *this; + } + + + operator VkAccelerationStructureCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureCreateInfoKHR const& ) const = default; +#else + 
bool operator==( AccelerationStructureCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( createFlags == rhs.createFlags ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( size == rhs.size ) + && ( type == rhs.type ) + && ( deviceAddress == rhs.deviceAddress ); + } + + bool operator!=( AccelerationStructureCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureCreateFlagsKHR createFlags = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR type = VULKAN_HPP_NAMESPACE::AccelerationStructureTypeKHR::eTopLevel; + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress = {}; + + }; + static_assert( sizeof( AccelerationStructureCreateInfoKHR ) == sizeof( VkAccelerationStructureCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AccelerationStructureCreateInfoKHR; + }; + + struct GeometryTrianglesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGeometryTrianglesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GeometryTrianglesNV(VULKAN_HPP_NAMESPACE::Buffer vertexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize vertexOffset_ = {}, uint32_t vertexCount_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize vertexStride_ = {}, VULKAN_HPP_NAMESPACE::Format vertexFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::Buffer indexData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize indexOffset_ = {}, uint32_t indexCount_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16, VULKAN_HPP_NAMESPACE::Buffer transformData_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize transformOffset_ = {}) VULKAN_HPP_NOEXCEPT + : vertexData( vertexData_ ), vertexOffset( vertexOffset_ ), vertexCount( vertexCount_ ), vertexStride( vertexStride_ ), vertexFormat( vertexFormat_ ), indexData( indexData_ ), indexOffset( indexOffset_ ), indexCount( indexCount_ ), indexType( indexType_ ), transformData( transformData_ ), transformOffset( transformOffset_ ) + {} + + VULKAN_HPP_CONSTEXPR GeometryTrianglesNV( GeometryTrianglesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GeometryTrianglesNV( VkGeometryTrianglesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GeometryTrianglesNV & operator=( VkGeometryTrianglesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GeometryTrianglesNV & operator=( GeometryTrianglesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GeometryTrianglesNV ) ); + return *this; + } + + GeometryTrianglesNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GeometryTrianglesNV & setVertexData( VULKAN_HPP_NAMESPACE::Buffer vertexData_ ) VULKAN_HPP_NOEXCEPT + { + vertexData = vertexData_; + return *this; + } + + GeometryTrianglesNV & setVertexOffset( 
VULKAN_HPP_NAMESPACE::DeviceSize vertexOffset_ ) VULKAN_HPP_NOEXCEPT + { + vertexOffset = vertexOffset_; + return *this; + } + + GeometryTrianglesNV & setVertexCount( uint32_t vertexCount_ ) VULKAN_HPP_NOEXCEPT + { + vertexCount = vertexCount_; + return *this; + } + + GeometryTrianglesNV & setVertexStride( VULKAN_HPP_NAMESPACE::DeviceSize vertexStride_ ) VULKAN_HPP_NOEXCEPT + { + vertexStride = vertexStride_; + return *this; + } + + GeometryTrianglesNV & setVertexFormat( VULKAN_HPP_NAMESPACE::Format vertexFormat_ ) VULKAN_HPP_NOEXCEPT + { + vertexFormat = vertexFormat_; + return *this; + } + + GeometryTrianglesNV & setIndexData( VULKAN_HPP_NAMESPACE::Buffer indexData_ ) VULKAN_HPP_NOEXCEPT + { + indexData = indexData_; + return *this; + } + + GeometryTrianglesNV & setIndexOffset( VULKAN_HPP_NAMESPACE::DeviceSize indexOffset_ ) VULKAN_HPP_NOEXCEPT + { + indexOffset = indexOffset_; + return *this; + } + + GeometryTrianglesNV & setIndexCount( uint32_t indexCount_ ) VULKAN_HPP_NOEXCEPT + { + indexCount = indexCount_; + return *this; + } + + GeometryTrianglesNV & setIndexType( VULKAN_HPP_NAMESPACE::IndexType indexType_ ) VULKAN_HPP_NOEXCEPT + { + indexType = indexType_; + return *this; + } + + GeometryTrianglesNV & setTransformData( VULKAN_HPP_NAMESPACE::Buffer transformData_ ) VULKAN_HPP_NOEXCEPT + { + transformData = transformData_; + return *this; + } + + GeometryTrianglesNV & setTransformOffset( VULKAN_HPP_NAMESPACE::DeviceSize transformOffset_ ) VULKAN_HPP_NOEXCEPT + { + transformOffset = transformOffset_; + return *this; + } + + + operator VkGeometryTrianglesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGeometryTrianglesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GeometryTrianglesNV const& ) const = default; +#else + bool operator==( GeometryTrianglesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vertexData == rhs.vertexData ) + && ( vertexOffset == rhs.vertexOffset ) + && ( vertexCount == rhs.vertexCount ) + && ( vertexStride == rhs.vertexStride ) + && ( vertexFormat == rhs.vertexFormat ) + && ( indexData == rhs.indexData ) + && ( indexOffset == rhs.indexOffset ) + && ( indexCount == rhs.indexCount ) + && ( indexType == rhs.indexType ) + && ( transformData == rhs.transformData ) + && ( transformOffset == rhs.transformOffset ); + } + + bool operator!=( GeometryTrianglesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGeometryTrianglesNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer vertexData = {}; + VULKAN_HPP_NAMESPACE::DeviceSize vertexOffset = {}; + uint32_t vertexCount = {}; + VULKAN_HPP_NAMESPACE::DeviceSize vertexStride = {}; + VULKAN_HPP_NAMESPACE::Format vertexFormat = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::Buffer indexData = {}; + VULKAN_HPP_NAMESPACE::DeviceSize indexOffset = {}; + uint32_t indexCount = {}; + VULKAN_HPP_NAMESPACE::IndexType indexType = VULKAN_HPP_NAMESPACE::IndexType::eUint16; + VULKAN_HPP_NAMESPACE::Buffer transformData = {}; + VULKAN_HPP_NAMESPACE::DeviceSize transformOffset = {}; + + }; + static_assert( sizeof( GeometryTrianglesNV ) == sizeof( VkGeometryTrianglesNV ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = GeometryTrianglesNV; + }; + + struct GeometryAABBNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGeometryAabbNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GeometryAABBNV(VULKAN_HPP_NAMESPACE::Buffer aabbData_ = {}, uint32_t numAABBs_ = {}, uint32_t stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}) VULKAN_HPP_NOEXCEPT + : aabbData( aabbData_ ), numAABBs( numAABBs_ ), stride( stride_ ), offset( offset_ ) + {} + + VULKAN_HPP_CONSTEXPR GeometryAABBNV( GeometryAABBNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GeometryAABBNV( VkGeometryAABBNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GeometryAABBNV & operator=( VkGeometryAABBNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GeometryAABBNV & operator=( GeometryAABBNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GeometryAABBNV ) ); + return *this; + } + + GeometryAABBNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GeometryAABBNV & setAabbData( VULKAN_HPP_NAMESPACE::Buffer aabbData_ ) VULKAN_HPP_NOEXCEPT + { + aabbData = aabbData_; + return *this; + } + + GeometryAABBNV & setNumAABBs( uint32_t numAABBs_ ) VULKAN_HPP_NOEXCEPT + { + numAABBs = numAABBs_; + return *this; + } + + GeometryAABBNV & setStride( uint32_t stride_ ) VULKAN_HPP_NOEXCEPT + { + stride = stride_; + return *this; + } + + GeometryAABBNV & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + + operator VkGeometryAABBNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGeometryAABBNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GeometryAABBNV const& ) const = default; +#else + bool operator==( GeometryAABBNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( aabbData == rhs.aabbData ) + && ( numAABBs == rhs.numAABBs ) + && ( stride == rhs.stride ) + && ( offset == rhs.offset ); + } + + bool operator!=( GeometryAABBNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGeometryAabbNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer aabbData = {}; + uint32_t numAABBs = {}; + uint32_t stride = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + + }; + static_assert( sizeof( GeometryAABBNV ) == sizeof( VkGeometryAABBNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = GeometryAABBNV; + }; + + struct GeometryDataNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GeometryDataNV(VULKAN_HPP_NAMESPACE::GeometryTrianglesNV triangles_ = {}, VULKAN_HPP_NAMESPACE::GeometryAABBNV aabbs_ = {}) VULKAN_HPP_NOEXCEPT + : triangles( triangles_ ), aabbs( aabbs_ ) + {} + + VULKAN_HPP_CONSTEXPR GeometryDataNV( GeometryDataNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GeometryDataNV( VkGeometryDataNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GeometryDataNV & operator=( VkGeometryDataNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GeometryDataNV & operator=( GeometryDataNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GeometryDataNV ) ); + return *this; + } + + GeometryDataNV & setTriangles( VULKAN_HPP_NAMESPACE::GeometryTrianglesNV const & triangles_ ) VULKAN_HPP_NOEXCEPT + { + triangles = triangles_; + return *this; + } + + GeometryDataNV & setAabbs( VULKAN_HPP_NAMESPACE::GeometryAABBNV const & aabbs_ ) VULKAN_HPP_NOEXCEPT + { + aabbs = aabbs_; + return *this; + } + + + operator VkGeometryDataNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGeometryDataNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GeometryDataNV const& ) const = default; +#else + bool operator==( GeometryDataNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( triangles == rhs.triangles ) + && ( aabbs == rhs.aabbs ); + } + + bool operator!=( GeometryDataNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::GeometryTrianglesNV triangles = {}; + VULKAN_HPP_NAMESPACE::GeometryAABBNV aabbs = {}; + + }; + static_assert( sizeof( GeometryDataNV ) == sizeof( VkGeometryDataNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct GeometryNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGeometryNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GeometryNV(VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType_ = VULKAN_HPP_NAMESPACE::GeometryTypeKHR::eTriangles, VULKAN_HPP_NAMESPACE::GeometryDataNV geometry_ = {}, VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags_ = {}) VULKAN_HPP_NOEXCEPT + : geometryType( geometryType_ ), geometry( geometry_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR GeometryNV( GeometryNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GeometryNV( VkGeometryNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GeometryNV & operator=( VkGeometryNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GeometryNV & operator=( GeometryNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GeometryNV ) ); + return *this; + } + + GeometryNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GeometryNV & setGeometryType( VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType_ ) VULKAN_HPP_NOEXCEPT + { + geometryType = geometryType_; + return *this; + } + + GeometryNV & setGeometry( VULKAN_HPP_NAMESPACE::GeometryDataNV const & geometry_ ) VULKAN_HPP_NOEXCEPT + { + geometry = geometry_; + return *this; + } + + GeometryNV & setFlags( VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkGeometryNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGeometryNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GeometryNV const& ) const = default; +#else + bool operator==( GeometryNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( geometryType == rhs.geometryType ) + && ( geometry == rhs.geometry ) + && ( flags == rhs.flags ); + } + + bool operator!=( GeometryNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGeometryNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::GeometryTypeKHR geometryType = VULKAN_HPP_NAMESPACE::GeometryTypeKHR::eTriangles; + VULKAN_HPP_NAMESPACE::GeometryDataNV geometry = {}; + VULKAN_HPP_NAMESPACE::GeometryFlagsKHR flags = {}; + + }; + static_assert( sizeof( GeometryNV ) == sizeof( VkGeometryNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = GeometryNV; + }; + + struct AccelerationStructureInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureInfoNV(VULKAN_HPP_NAMESPACE::AccelerationStructureTypeNV type_ = {}, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsNV flags_ = {}, uint32_t instanceCount_ = {}, uint32_t geometryCount_ = {}, const VULKAN_HPP_NAMESPACE::GeometryNV* pGeometries_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), flags( flags_ ), instanceCount( instanceCount_ ), geometryCount( geometryCount_ ), pGeometries( pGeometries_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureInfoNV( AccelerationStructureInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureInfoNV( VkAccelerationStructureInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureInfoNV( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeNV type_, VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsNV flags_, uint32_t instanceCount_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & geometries_ ) + : type( type_ ), flags( flags_ ), instanceCount( instanceCount_ ), geometryCount( static_cast( geometries_.size() ) ), pGeometries( geometries_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureInfoNV & operator=( VkAccelerationStructureInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureInfoNV & operator=( AccelerationStructureInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureInfoNV ) ); + return *this; + } + + AccelerationStructureInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureInfoNV & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureTypeNV type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + AccelerationStructureInfoNV & setFlags( VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + AccelerationStructureInfoNV & setInstanceCount( uint32_t instanceCount_ ) VULKAN_HPP_NOEXCEPT + { + instanceCount = instanceCount_; + return *this; + } + + AccelerationStructureInfoNV & setGeometryCount( uint32_t geometryCount_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = geometryCount_; + return *this; + } + + AccelerationStructureInfoNV & setPGeometries( const VULKAN_HPP_NAMESPACE::GeometryNV* pGeometries_ ) VULKAN_HPP_NOEXCEPT + { + pGeometries = pGeometries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + AccelerationStructureInfoNV & setGeometries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & geometries_ ) VULKAN_HPP_NOEXCEPT + { + geometryCount = static_cast( geometries_.size() ); + pGeometries = geometries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkAccelerationStructureInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if 
defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureInfoNV const& ) const = default; +#else + bool operator==( AccelerationStructureInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( type == rhs.type ) + && ( flags == rhs.flags ) + && ( instanceCount == rhs.instanceCount ) + && ( geometryCount == rhs.geometryCount ) + && ( pGeometries == rhs.pGeometries ); + } + + bool operator!=( AccelerationStructureInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureTypeNV type = {}; + VULKAN_HPP_NAMESPACE::BuildAccelerationStructureFlagsNV flags = {}; + uint32_t instanceCount = {}; + uint32_t geometryCount = {}; + const VULKAN_HPP_NAMESPACE::GeometryNV* pGeometries = {}; + + }; + static_assert( sizeof( AccelerationStructureInfoNV ) == sizeof( VkAccelerationStructureInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AccelerationStructureInfoNV; + }; + + struct AccelerationStructureCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoNV(VULKAN_HPP_NAMESPACE::DeviceSize compactedSize_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV info_ = {}) VULKAN_HPP_NOEXCEPT + : compactedSize( compactedSize_ ), info( info_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureCreateInfoNV( AccelerationStructureCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureCreateInfoNV( VkAccelerationStructureCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureCreateInfoNV & operator=( VkAccelerationStructureCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureCreateInfoNV & operator=( AccelerationStructureCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureCreateInfoNV ) ); + return *this; + } + + AccelerationStructureCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureCreateInfoNV & setCompactedSize( VULKAN_HPP_NAMESPACE::DeviceSize compactedSize_ ) VULKAN_HPP_NOEXCEPT + { + compactedSize = compactedSize_; + return *this; + } + + AccelerationStructureCreateInfoNV & setInfo( VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV const & info_ ) VULKAN_HPP_NOEXCEPT + { + info = info_; + return *this; + } + + + operator VkAccelerationStructureCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureCreateInfoNV const& ) const = default; +#else + bool operator==( AccelerationStructureCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + 
return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( compactedSize == rhs.compactedSize ) + && ( info == rhs.info ); + } + + bool operator!=( AccelerationStructureCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize compactedSize = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV info = {}; + + }; + static_assert( sizeof( AccelerationStructureCreateInfoNV ) == sizeof( VkAccelerationStructureCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AccelerationStructureCreateInfoNV; + }; + + struct AccelerationStructureDeviceAddressInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureDeviceAddressInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureDeviceAddressInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructure( accelerationStructure_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureDeviceAddressInfoKHR( AccelerationStructureDeviceAddressInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureDeviceAddressInfoKHR( VkAccelerationStructureDeviceAddressInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureDeviceAddressInfoKHR & operator=( VkAccelerationStructureDeviceAddressInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureDeviceAddressInfoKHR & operator=( AccelerationStructureDeviceAddressInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureDeviceAddressInfoKHR ) ); + return *this; + } + + AccelerationStructureDeviceAddressInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureDeviceAddressInfoKHR & setAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructure = accelerationStructure_; + return *this; + } + + + operator VkAccelerationStructureDeviceAddressInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureDeviceAddressInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureDeviceAddressInfoKHR const& ) const = default; +#else + bool operator==( AccelerationStructureDeviceAddressInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructure == rhs.accelerationStructure ); + } + + bool operator!=( AccelerationStructureDeviceAddressInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureDeviceAddressInfoKHR; + const void* pNext = {}; + 
VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure = {};
+
+  };
+  static_assert( sizeof( AccelerationStructureDeviceAddressInfoKHR ) == sizeof( VkAccelerationStructureDeviceAddressInfoKHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<AccelerationStructureDeviceAddressInfoKHR>::value, "struct wrapper is not a standard layout!" );
+
+  template <>
+  struct CppType<StructureType, StructureType::eAccelerationStructureDeviceAddressInfoKHR>
+  {
+    using Type = AccelerationStructureDeviceAddressInfoKHR;
+  };
+
+  struct TransformMatrixKHR
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR_14 TransformMatrixKHR(std::array<std::array<float,4>,3> const& matrix_ = {}) VULKAN_HPP_NOEXCEPT
+    : matrix( matrix_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR_14 TransformMatrixKHR( TransformMatrixKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    TransformMatrixKHR( VkTransformMatrixKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    TransformMatrixKHR & operator=( VkTransformMatrixKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::TransformMatrixKHR const *>( &rhs );
+      return *this;
+    }
+
+    TransformMatrixKHR & operator=( TransformMatrixKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( TransformMatrixKHR ) );
+      return *this;
+    }
+
+    TransformMatrixKHR & setMatrix( std::array<std::array<float,4>,3> matrix_ ) VULKAN_HPP_NOEXCEPT
+    {
+      matrix = matrix_;
+      return *this;
+    }
+
+
+    operator VkTransformMatrixKHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkTransformMatrixKHR*>( this );
+    }
+
+    operator VkTransformMatrixKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkTransformMatrixKHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( TransformMatrixKHR const& ) const = default;
+#else
+    bool operator==( TransformMatrixKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( matrix == rhs.matrix );
+    }
+
+    bool operator!=( TransformMatrixKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::ArrayWrapper2D<float, 3, 4> matrix = {};
+
+  };
+  static_assert( sizeof( TransformMatrixKHR ) == sizeof( VkTransformMatrixKHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<TransformMatrixKHR>::value, "struct wrapper is not a standard layout!"
); + using TransformMatrixNV = TransformMatrixKHR; + + struct AccelerationStructureInstanceKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 AccelerationStructureInstanceKHR(VULKAN_HPP_NAMESPACE::TransformMatrixKHR transform_ = {}, uint32_t instanceCustomIndex_ = {}, uint32_t mask_ = {}, uint32_t instanceShaderBindingTableRecordOffset_ = {}, VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsKHR flags_ = {}, uint64_t accelerationStructureReference_ = {}) VULKAN_HPP_NOEXCEPT + : transform( transform_ ), instanceCustomIndex( instanceCustomIndex_ ), mask( mask_ ), instanceShaderBindingTableRecordOffset( instanceShaderBindingTableRecordOffset_ ), flags( flags_ ), accelerationStructureReference( accelerationStructureReference_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 AccelerationStructureInstanceKHR( AccelerationStructureInstanceKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureInstanceKHR( VkAccelerationStructureInstanceKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureInstanceKHR & operator=( VkAccelerationStructureInstanceKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureInstanceKHR & operator=( AccelerationStructureInstanceKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureInstanceKHR ) ); + return *this; + } + + AccelerationStructureInstanceKHR & setTransform( VULKAN_HPP_NAMESPACE::TransformMatrixKHR const & transform_ ) VULKAN_HPP_NOEXCEPT + { + transform = transform_; + return *this; + } + + AccelerationStructureInstanceKHR & setInstanceCustomIndex( uint32_t instanceCustomIndex_ ) VULKAN_HPP_NOEXCEPT + { + instanceCustomIndex = instanceCustomIndex_; + return *this; + } + + AccelerationStructureInstanceKHR & setMask( uint32_t mask_ ) VULKAN_HPP_NOEXCEPT + { + mask = mask_; + return *this; + } + + AccelerationStructureInstanceKHR & setInstanceShaderBindingTableRecordOffset( uint32_t instanceShaderBindingTableRecordOffset_ ) VULKAN_HPP_NOEXCEPT + { + instanceShaderBindingTableRecordOffset = instanceShaderBindingTableRecordOffset_; + return *this; + } + + AccelerationStructureInstanceKHR & setFlags( VULKAN_HPP_NAMESPACE::GeometryInstanceFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = *reinterpret_cast(&flags_); + return *this; + } + + AccelerationStructureInstanceKHR & setAccelerationStructureReference( uint64_t accelerationStructureReference_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureReference = accelerationStructureReference_; + return *this; + } + + + operator VkAccelerationStructureInstanceKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureInstanceKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureInstanceKHR const& ) const = default; +#else + bool operator==( AccelerationStructureInstanceKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( transform == rhs.transform ) + && ( instanceCustomIndex == rhs.instanceCustomIndex ) + && ( mask == rhs.mask ) + && ( instanceShaderBindingTableRecordOffset == rhs.instanceShaderBindingTableRecordOffset ) + && ( flags == rhs.flags ) + && ( accelerationStructureReference == rhs.accelerationStructureReference ); + } + + bool operator!=( AccelerationStructureInstanceKHR const& rhs ) const 
VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::TransformMatrixKHR transform = {}; + uint32_t instanceCustomIndex : 24; + uint32_t mask : 8; + uint32_t instanceShaderBindingTableRecordOffset : 24; + VkGeometryInstanceFlagsKHR flags : 8; + uint64_t accelerationStructureReference = {}; + + }; + static_assert( sizeof( AccelerationStructureInstanceKHR ) == sizeof( VkAccelerationStructureInstanceKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + using AccelerationStructureInstanceNV = AccelerationStructureInstanceKHR; + + class AccelerationStructureNV + { + public: + using CType = VkAccelerationStructureNV; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eAccelerationStructureNV; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eAccelerationStructureNV; + + public: + VULKAN_HPP_CONSTEXPR AccelerationStructureNV() VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT AccelerationStructureNV( VkAccelerationStructureNV accelerationStructureNV ) VULKAN_HPP_NOEXCEPT + : m_accelerationStructureNV( accelerationStructureNV ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + AccelerationStructureNV & operator=(VkAccelerationStructureNV accelerationStructureNV) VULKAN_HPP_NOEXCEPT + { + m_accelerationStructureNV = accelerationStructureNV; + return *this; + } +#endif + + AccelerationStructureNV & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_accelerationStructureNV = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureNV const& ) const = default; +#else + bool operator==( AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV == rhs.m_accelerationStructureNV; + } + + bool operator!=(AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV != rhs.m_accelerationStructureNV; + } + + bool operator<(AccelerationStructureNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV < rhs.m_accelerationStructureNV; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkAccelerationStructureNV() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_accelerationStructureNV == VK_NULL_HANDLE; + } + + private: + VkAccelerationStructureNV m_accelerationStructureNV; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::AccelerationStructureNV ) == sizeof( VkAccelerationStructureNV ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::AccelerationStructureNV; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct AccelerationStructureMemoryRequirementsInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureMemoryRequirementsInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureMemoryRequirementsInfoNV(VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type_ = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV::eObject, VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), accelerationStructure( accelerationStructure_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureMemoryRequirementsInfoNV( AccelerationStructureMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureMemoryRequirementsInfoNV( VkAccelerationStructureMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureMemoryRequirementsInfoNV & operator=( VkAccelerationStructureMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureMemoryRequirementsInfoNV & operator=( AccelerationStructureMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureMemoryRequirementsInfoNV ) ); + return *this; + } + + AccelerationStructureMemoryRequirementsInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureMemoryRequirementsInfoNV & setType( VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + AccelerationStructureMemoryRequirementsInfoNV & setAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructure = accelerationStructure_; + return *this; + } + + + operator VkAccelerationStructureMemoryRequirementsInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureMemoryRequirementsInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureMemoryRequirementsInfoNV const& ) const = default; +#else + bool operator==( AccelerationStructureMemoryRequirementsInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( type == rhs.type ) + && ( accelerationStructure == rhs.accelerationStructure ); + } + + bool operator!=( AccelerationStructureMemoryRequirementsInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureMemoryRequirementsInfoNV; + const void* pNext = {}; + 
VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV type = VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsTypeNV::eObject; + VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure = {}; + + }; + static_assert( sizeof( AccelerationStructureMemoryRequirementsInfoNV ) == sizeof( VkAccelerationStructureMemoryRequirementsInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AccelerationStructureMemoryRequirementsInfoNV; + }; + + struct AccelerationStructureVersionInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAccelerationStructureVersionInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AccelerationStructureVersionInfoKHR(const uint8_t* pVersionData_ = {}) VULKAN_HPP_NOEXCEPT + : pVersionData( pVersionData_ ) + {} + + VULKAN_HPP_CONSTEXPR AccelerationStructureVersionInfoKHR( AccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AccelerationStructureVersionInfoKHR( VkAccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AccelerationStructureVersionInfoKHR & operator=( VkAccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AccelerationStructureVersionInfoKHR & operator=( AccelerationStructureVersionInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AccelerationStructureVersionInfoKHR ) ); + return *this; + } + + AccelerationStructureVersionInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AccelerationStructureVersionInfoKHR & setPVersionData( const uint8_t* pVersionData_ ) VULKAN_HPP_NOEXCEPT + { + pVersionData = pVersionData_; + return *this; + } + + + operator VkAccelerationStructureVersionInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAccelerationStructureVersionInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AccelerationStructureVersionInfoKHR const& ) const = default; +#else + bool operator==( AccelerationStructureVersionInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pVersionData == rhs.pVersionData ); + } + + bool operator!=( AccelerationStructureVersionInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAccelerationStructureVersionInfoKHR; + const void* pNext = {}; + const uint8_t* pVersionData = {}; + + }; + static_assert( sizeof( AccelerationStructureVersionInfoKHR ) == sizeof( VkAccelerationStructureVersionInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AccelerationStructureVersionInfoKHR; + }; + + class SwapchainKHR + { + public: + using CType = VkSwapchainKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eSwapchainKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSwapchainKHR; + + public: + VULKAN_HPP_CONSTEXPR SwapchainKHR() VULKAN_HPP_NOEXCEPT + : m_swapchainKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR SwapchainKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_swapchainKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT SwapchainKHR( VkSwapchainKHR swapchainKHR ) VULKAN_HPP_NOEXCEPT + : m_swapchainKHR( swapchainKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + SwapchainKHR & operator=(VkSwapchainKHR swapchainKHR) VULKAN_HPP_NOEXCEPT + { + m_swapchainKHR = swapchainKHR; + return *this; + } +#endif + + SwapchainKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_swapchainKHR = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SwapchainKHR const& ) const = default; +#else + bool operator==( SwapchainKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR == rhs.m_swapchainKHR; + } + + bool operator!=(SwapchainKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR != rhs.m_swapchainKHR; + } + + bool operator<(SwapchainKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR < rhs.m_swapchainKHR; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSwapchainKHR() const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_swapchainKHR == VK_NULL_HANDLE; + } + + private: + VkSwapchainKHR m_swapchainKHR; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::SwapchainKHR ) == sizeof( VkSwapchainKHR ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::SwapchainKHR; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::SwapchainKHR; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::SwapchainKHR; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class Semaphore + { + public: + using CType = VkSemaphore; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eSemaphore; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSemaphore; + + public: + VULKAN_HPP_CONSTEXPR Semaphore() VULKAN_HPP_NOEXCEPT + : m_semaphore(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Semaphore( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_semaphore(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Semaphore( VkSemaphore semaphore ) VULKAN_HPP_NOEXCEPT + : m_semaphore( semaphore ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Semaphore & operator=(VkSemaphore semaphore) VULKAN_HPP_NOEXCEPT + { + m_semaphore = semaphore; + return *this; + } +#endif + + Semaphore & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_semaphore = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Semaphore const& ) const = default; +#else + bool operator==( Semaphore const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_semaphore == rhs.m_semaphore; + } + + bool operator!=(Semaphore const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_semaphore != rhs.m_semaphore; + } + + bool operator<(Semaphore const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_semaphore < rhs.m_semaphore; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSemaphore() const VULKAN_HPP_NOEXCEPT + { + return m_semaphore; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_semaphore != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_semaphore == VK_NULL_HANDLE; + } + + private: + VkSemaphore m_semaphore; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Semaphore ) == sizeof( VkSemaphore ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Semaphore; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Semaphore; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Semaphore; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class Fence + { + public: + using CType = VkFence; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eFence; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eFence; + + public: + VULKAN_HPP_CONSTEXPR Fence() VULKAN_HPP_NOEXCEPT + : m_fence(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Fence( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_fence(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Fence( VkFence fence ) VULKAN_HPP_NOEXCEPT + : m_fence( fence ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Fence & operator=(VkFence fence) VULKAN_HPP_NOEXCEPT + { + m_fence = fence; + return *this; + } +#endif + + Fence & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_fence = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Fence const& ) const = default; +#else + bool operator==( Fence const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_fence == rhs.m_fence; + } + + bool operator!=(Fence const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_fence != rhs.m_fence; + } + + bool operator<(Fence const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_fence < rhs.m_fence; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFence() const VULKAN_HPP_NOEXCEPT + { + return m_fence; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_fence != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_fence == VK_NULL_HANDLE; + } + + private: + VkFence m_fence; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Fence ) == sizeof( VkFence ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Fence; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Fence; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Fence; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct AcquireNextImageInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAcquireNextImageInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AcquireNextImageInfoKHR(VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ = {}, uint64_t timeout_ = {}, VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::Fence fence_ = {}, uint32_t deviceMask_ = {}) VULKAN_HPP_NOEXCEPT + : swapchain( swapchain_ ), timeout( timeout_ ), semaphore( semaphore_ ), fence( fence_ ), deviceMask( deviceMask_ ) + {} + + VULKAN_HPP_CONSTEXPR AcquireNextImageInfoKHR( AcquireNextImageInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AcquireNextImageInfoKHR( VkAcquireNextImageInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AcquireNextImageInfoKHR & operator=( VkAcquireNextImageInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AcquireNextImageInfoKHR & operator=( AcquireNextImageInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AcquireNextImageInfoKHR ) ); + return *this; + } + + AcquireNextImageInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AcquireNextImageInfoKHR & setSwapchain( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ ) VULKAN_HPP_NOEXCEPT + { + swapchain = swapchain_; + return *this; + } + + AcquireNextImageInfoKHR & setTimeout( uint64_t timeout_ ) VULKAN_HPP_NOEXCEPT + { + timeout = timeout_; + return *this; + } + + AcquireNextImageInfoKHR & setSemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ ) VULKAN_HPP_NOEXCEPT + { + semaphore = semaphore_; + return *this; + } + + AcquireNextImageInfoKHR & setFence( VULKAN_HPP_NAMESPACE::Fence fence_ ) VULKAN_HPP_NOEXCEPT + { + fence = fence_; + return *this; + } + + AcquireNextImageInfoKHR & setDeviceMask( uint32_t deviceMask_ ) VULKAN_HPP_NOEXCEPT + { + deviceMask = deviceMask_; + return *this; + } + + + operator VkAcquireNextImageInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAcquireNextImageInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AcquireNextImageInfoKHR const& ) const = default; +#else + bool operator==( AcquireNextImageInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchain == rhs.swapchain ) + && ( timeout == rhs.timeout ) + && ( semaphore == rhs.semaphore ) + && ( fence == rhs.fence ) + && ( deviceMask == rhs.deviceMask ); + } + + bool operator!=( AcquireNextImageInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAcquireNextImageInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain = {}; + uint64_t timeout = {}; + 
VULKAN_HPP_NAMESPACE::Semaphore semaphore = {}; + VULKAN_HPP_NAMESPACE::Fence fence = {}; + uint32_t deviceMask = {}; + + }; + static_assert( sizeof( AcquireNextImageInfoKHR ) == sizeof( VkAcquireNextImageInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AcquireNextImageInfoKHR; + }; + + struct AcquireProfilingLockInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAcquireProfilingLockInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AcquireProfilingLockInfoKHR(VULKAN_HPP_NAMESPACE::AcquireProfilingLockFlagsKHR flags_ = {}, uint64_t timeout_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), timeout( timeout_ ) + {} + + VULKAN_HPP_CONSTEXPR AcquireProfilingLockInfoKHR( AcquireProfilingLockInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AcquireProfilingLockInfoKHR( VkAcquireProfilingLockInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AcquireProfilingLockInfoKHR & operator=( VkAcquireProfilingLockInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AcquireProfilingLockInfoKHR & operator=( AcquireProfilingLockInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AcquireProfilingLockInfoKHR ) ); + return *this; + } + + AcquireProfilingLockInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AcquireProfilingLockInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::AcquireProfilingLockFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + AcquireProfilingLockInfoKHR & setTimeout( uint64_t timeout_ ) VULKAN_HPP_NOEXCEPT + { + timeout = timeout_; + return *this; + } + + + operator VkAcquireProfilingLockInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAcquireProfilingLockInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AcquireProfilingLockInfoKHR const& ) const = default; +#else + bool operator==( AcquireProfilingLockInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( timeout == rhs.timeout ); + } + + bool operator!=( AcquireProfilingLockInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAcquireProfilingLockInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AcquireProfilingLockFlagsKHR flags = {}; + uint64_t timeout = {}; + + }; + static_assert( sizeof( AcquireProfilingLockInfoKHR ) == sizeof( VkAcquireProfilingLockInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
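// Illustrative usage sketch (not part of the vendored vulkan.hpp): the chained set*() members of
// AcquireNextImageInfoKHR above let the struct be filled fluently before an acquireNextImage2KHR
// call. Assumes the default "vk" namespace; the swapchain and imageAvailable handles are placeholders.
//
//   vk::AcquireNextImageInfoKHR acquireInfo = vk::AcquireNextImageInfoKHR{}
//                                                 .setSwapchain( swapchain )
//                                                 .setTimeout( UINT64_MAX )
//                                                 .setSemaphore( imageAvailable )
//                                                 .setFence( vk::Fence{} )
//                                                 .setDeviceMask( 1 );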
); + + template <> + struct CppType + { + using Type = AcquireProfilingLockInfoKHR; + }; + + struct AllocationCallbacks + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AllocationCallbacks(void* pUserData_ = {}, PFN_vkAllocationFunction pfnAllocation_ = {}, PFN_vkReallocationFunction pfnReallocation_ = {}, PFN_vkFreeFunction pfnFree_ = {}, PFN_vkInternalAllocationNotification pfnInternalAllocation_ = {}, PFN_vkInternalFreeNotification pfnInternalFree_ = {}) VULKAN_HPP_NOEXCEPT + : pUserData( pUserData_ ), pfnAllocation( pfnAllocation_ ), pfnReallocation( pfnReallocation_ ), pfnFree( pfnFree_ ), pfnInternalAllocation( pfnInternalAllocation_ ), pfnInternalFree( pfnInternalFree_ ) + {} + + VULKAN_HPP_CONSTEXPR AllocationCallbacks( AllocationCallbacks const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AllocationCallbacks( VkAllocationCallbacks const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AllocationCallbacks & operator=( VkAllocationCallbacks const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AllocationCallbacks & operator=( AllocationCallbacks const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AllocationCallbacks ) ); + return *this; + } + + AllocationCallbacks & setPUserData( void* pUserData_ ) VULKAN_HPP_NOEXCEPT + { + pUserData = pUserData_; + return *this; + } + + AllocationCallbacks & setPfnAllocation( PFN_vkAllocationFunction pfnAllocation_ ) VULKAN_HPP_NOEXCEPT + { + pfnAllocation = pfnAllocation_; + return *this; + } + + AllocationCallbacks & setPfnReallocation( PFN_vkReallocationFunction pfnReallocation_ ) VULKAN_HPP_NOEXCEPT + { + pfnReallocation = pfnReallocation_; + return *this; + } + + AllocationCallbacks & setPfnFree( PFN_vkFreeFunction pfnFree_ ) VULKAN_HPP_NOEXCEPT + { + pfnFree = pfnFree_; + return *this; + } + + AllocationCallbacks & setPfnInternalAllocation( PFN_vkInternalAllocationNotification pfnInternalAllocation_ ) VULKAN_HPP_NOEXCEPT + { + pfnInternalAllocation = pfnInternalAllocation_; + return *this; + } + + AllocationCallbacks & setPfnInternalFree( PFN_vkInternalFreeNotification pfnInternalFree_ ) VULKAN_HPP_NOEXCEPT + { + pfnInternalFree = pfnInternalFree_; + return *this; + } + + + operator VkAllocationCallbacks const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAllocationCallbacks &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AllocationCallbacks const& ) const = default; +#else + bool operator==( AllocationCallbacks const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( pUserData == rhs.pUserData ) + && ( pfnAllocation == rhs.pfnAllocation ) + && ( pfnReallocation == rhs.pfnReallocation ) + && ( pfnFree == rhs.pfnFree ) + && ( pfnInternalAllocation == rhs.pfnInternalAllocation ) + && ( pfnInternalFree == rhs.pfnInternalFree ); + } + + bool operator!=( AllocationCallbacks const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + void* pUserData = {}; + PFN_vkAllocationFunction pfnAllocation = {}; + PFN_vkReallocationFunction pfnReallocation = {}; + PFN_vkFreeFunction pfnFree = {}; + PFN_vkInternalAllocationNotification pfnInternalAllocation = {}; + PFN_vkInternalFreeNotification pfnInternalFree = {}; + + }; + static_assert( sizeof( AllocationCallbacks ) == sizeof( VkAllocationCallbacks ), "struct and 
wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct ComponentMapping + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ComponentMapping(VULKAN_HPP_NAMESPACE::ComponentSwizzle r_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity, VULKAN_HPP_NAMESPACE::ComponentSwizzle g_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity, VULKAN_HPP_NAMESPACE::ComponentSwizzle b_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity, VULKAN_HPP_NAMESPACE::ComponentSwizzle a_ = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity) VULKAN_HPP_NOEXCEPT + : r( r_ ), g( g_ ), b( b_ ), a( a_ ) + {} + + VULKAN_HPP_CONSTEXPR ComponentMapping( ComponentMapping const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ComponentMapping( VkComponentMapping const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ComponentMapping & operator=( VkComponentMapping const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ComponentMapping & operator=( ComponentMapping const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ComponentMapping ) ); + return *this; + } + + ComponentMapping & setR( VULKAN_HPP_NAMESPACE::ComponentSwizzle r_ ) VULKAN_HPP_NOEXCEPT + { + r = r_; + return *this; + } + + ComponentMapping & setG( VULKAN_HPP_NAMESPACE::ComponentSwizzle g_ ) VULKAN_HPP_NOEXCEPT + { + g = g_; + return *this; + } + + ComponentMapping & setB( VULKAN_HPP_NAMESPACE::ComponentSwizzle b_ ) VULKAN_HPP_NOEXCEPT + { + b = b_; + return *this; + } + + ComponentMapping & setA( VULKAN_HPP_NAMESPACE::ComponentSwizzle a_ ) VULKAN_HPP_NOEXCEPT + { + a = a_; + return *this; + } + + + operator VkComponentMapping const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkComponentMapping &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ComponentMapping const& ) const = default; +#else + bool operator==( ComponentMapping const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( r == rhs.r ) + && ( g == rhs.g ) + && ( b == rhs.b ) + && ( a == rhs.a ); + } + + bool operator!=( ComponentMapping const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ComponentSwizzle r = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity; + VULKAN_HPP_NAMESPACE::ComponentSwizzle g = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity; + VULKAN_HPP_NAMESPACE::ComponentSwizzle b = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity; + VULKAN_HPP_NAMESPACE::ComponentSwizzle a = VULKAN_HPP_NAMESPACE::ComponentSwizzle::eIdentity; + + }; + static_assert( sizeof( ComponentMapping ) == sizeof( VkComponentMapping ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct AndroidHardwareBufferFormatPropertiesANDROID + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAndroidHardwareBufferFormatPropertiesANDROID; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AndroidHardwareBufferFormatPropertiesANDROID(VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, uint64_t externalFormat_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags formatFeatures_ = {}, VULKAN_HPP_NAMESPACE::ComponentMapping samplerYcbcrConversionComponents_ = {}, VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion suggestedYcbcrModel_ = VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion::eRgbIdentity, VULKAN_HPP_NAMESPACE::SamplerYcbcrRange suggestedYcbcrRange_ = VULKAN_HPP_NAMESPACE::SamplerYcbcrRange::eItuFull, VULKAN_HPP_NAMESPACE::ChromaLocation suggestedXChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation suggestedYChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven) VULKAN_HPP_NOEXCEPT + : format( format_ ), externalFormat( externalFormat_ ), formatFeatures( formatFeatures_ ), samplerYcbcrConversionComponents( samplerYcbcrConversionComponents_ ), suggestedYcbcrModel( suggestedYcbcrModel_ ), suggestedYcbcrRange( suggestedYcbcrRange_ ), suggestedXChromaOffset( suggestedXChromaOffset_ ), suggestedYChromaOffset( suggestedYChromaOffset_ ) + {} + + VULKAN_HPP_CONSTEXPR AndroidHardwareBufferFormatPropertiesANDROID( AndroidHardwareBufferFormatPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AndroidHardwareBufferFormatPropertiesANDROID( VkAndroidHardwareBufferFormatPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AndroidHardwareBufferFormatPropertiesANDROID & operator=( VkAndroidHardwareBufferFormatPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AndroidHardwareBufferFormatPropertiesANDROID & operator=( AndroidHardwareBufferFormatPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AndroidHardwareBufferFormatPropertiesANDROID ) ); + return *this; + } + + + operator VkAndroidHardwareBufferFormatPropertiesANDROID const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAndroidHardwareBufferFormatPropertiesANDROID &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AndroidHardwareBufferFormatPropertiesANDROID const& ) const = default; +#else + bool operator==( AndroidHardwareBufferFormatPropertiesANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( externalFormat == rhs.externalFormat ) + && ( formatFeatures == rhs.formatFeatures ) + && ( samplerYcbcrConversionComponents == rhs.samplerYcbcrConversionComponents ) + && ( suggestedYcbcrModel == rhs.suggestedYcbcrModel ) + && ( suggestedYcbcrRange == rhs.suggestedYcbcrRange ) + && ( suggestedXChromaOffset == rhs.suggestedXChromaOffset ) + && ( suggestedYChromaOffset == rhs.suggestedYChromaOffset ); + } + + bool operator!=( AndroidHardwareBufferFormatPropertiesANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const 
VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAndroidHardwareBufferFormatPropertiesANDROID; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + uint64_t externalFormat = {}; + VULKAN_HPP_NAMESPACE::FormatFeatureFlags formatFeatures = {}; + VULKAN_HPP_NAMESPACE::ComponentMapping samplerYcbcrConversionComponents = {}; + VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion suggestedYcbcrModel = VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion::eRgbIdentity; + VULKAN_HPP_NAMESPACE::SamplerYcbcrRange suggestedYcbcrRange = VULKAN_HPP_NAMESPACE::SamplerYcbcrRange::eItuFull; + VULKAN_HPP_NAMESPACE::ChromaLocation suggestedXChromaOffset = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven; + VULKAN_HPP_NAMESPACE::ChromaLocation suggestedYChromaOffset = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven; + + }; + static_assert( sizeof( AndroidHardwareBufferFormatPropertiesANDROID ) == sizeof( VkAndroidHardwareBufferFormatPropertiesANDROID ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AndroidHardwareBufferFormatPropertiesANDROID; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct AndroidHardwareBufferPropertiesANDROID + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAndroidHardwareBufferPropertiesANDROID; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AndroidHardwareBufferPropertiesANDROID(VULKAN_HPP_NAMESPACE::DeviceSize allocationSize_ = {}, uint32_t memoryTypeBits_ = {}) VULKAN_HPP_NOEXCEPT + : allocationSize( allocationSize_ ), memoryTypeBits( memoryTypeBits_ ) + {} + + VULKAN_HPP_CONSTEXPR AndroidHardwareBufferPropertiesANDROID( AndroidHardwareBufferPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AndroidHardwareBufferPropertiesANDROID( VkAndroidHardwareBufferPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AndroidHardwareBufferPropertiesANDROID & operator=( VkAndroidHardwareBufferPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AndroidHardwareBufferPropertiesANDROID & operator=( AndroidHardwareBufferPropertiesANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AndroidHardwareBufferPropertiesANDROID ) ); + return *this; + } + + + operator VkAndroidHardwareBufferPropertiesANDROID const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAndroidHardwareBufferPropertiesANDROID &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AndroidHardwareBufferPropertiesANDROID const& ) const = default; +#else + bool operator==( AndroidHardwareBufferPropertiesANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( allocationSize == rhs.allocationSize ) + && ( memoryTypeBits == rhs.memoryTypeBits ); + } + + bool operator!=( AndroidHardwareBufferPropertiesANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = 
StructureType::eAndroidHardwareBufferPropertiesANDROID; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize allocationSize = {}; + uint32_t memoryTypeBits = {}; + + }; + static_assert( sizeof( AndroidHardwareBufferPropertiesANDROID ) == sizeof( VkAndroidHardwareBufferPropertiesANDROID ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = AndroidHardwareBufferPropertiesANDROID; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct AndroidHardwareBufferUsageANDROID + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAndroidHardwareBufferUsageANDROID; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AndroidHardwareBufferUsageANDROID(uint64_t androidHardwareBufferUsage_ = {}) VULKAN_HPP_NOEXCEPT + : androidHardwareBufferUsage( androidHardwareBufferUsage_ ) + {} + + VULKAN_HPP_CONSTEXPR AndroidHardwareBufferUsageANDROID( AndroidHardwareBufferUsageANDROID const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AndroidHardwareBufferUsageANDROID( VkAndroidHardwareBufferUsageANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AndroidHardwareBufferUsageANDROID & operator=( VkAndroidHardwareBufferUsageANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AndroidHardwareBufferUsageANDROID & operator=( AndroidHardwareBufferUsageANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AndroidHardwareBufferUsageANDROID ) ); + return *this; + } + + + operator VkAndroidHardwareBufferUsageANDROID const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAndroidHardwareBufferUsageANDROID &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AndroidHardwareBufferUsageANDROID const& ) const = default; +#else + bool operator==( AndroidHardwareBufferUsageANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( androidHardwareBufferUsage == rhs.androidHardwareBufferUsage ); + } + + bool operator!=( AndroidHardwareBufferUsageANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAndroidHardwareBufferUsageANDROID; + void* pNext = {}; + uint64_t androidHardwareBufferUsage = {}; + + }; + static_assert( sizeof( AndroidHardwareBufferUsageANDROID ) == sizeof( VkAndroidHardwareBufferUsageANDROID ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AndroidHardwareBufferUsageANDROID; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct AndroidSurfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAndroidSurfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AndroidSurfaceCreateInfoKHR(VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateFlagsKHR flags_ = {}, struct ANativeWindow* window_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), window( window_ ) + {} + + VULKAN_HPP_CONSTEXPR AndroidSurfaceCreateInfoKHR( AndroidSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AndroidSurfaceCreateInfoKHR( VkAndroidSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AndroidSurfaceCreateInfoKHR & operator=( VkAndroidSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AndroidSurfaceCreateInfoKHR & operator=( AndroidSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AndroidSurfaceCreateInfoKHR ) ); + return *this; + } + + AndroidSurfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AndroidSurfaceCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + AndroidSurfaceCreateInfoKHR & setWindow( struct ANativeWindow* window_ ) VULKAN_HPP_NOEXCEPT + { + window = window_; + return *this; + } + + + operator VkAndroidSurfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAndroidSurfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AndroidSurfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( AndroidSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( window == rhs.window ); + } + + bool operator!=( AndroidSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAndroidSurfaceCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateFlagsKHR flags = {}; + struct ANativeWindow* window = {}; + + }; + static_assert( sizeof( AndroidSurfaceCreateInfoKHR ) == sizeof( VkAndroidSurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AndroidSurfaceCreateInfoKHR; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + struct ApplicationInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eApplicationInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ApplicationInfo(const char* pApplicationName_ = {}, uint32_t applicationVersion_ = {}, const char* pEngineName_ = {}, uint32_t engineVersion_ = {}, uint32_t apiVersion_ = {}) VULKAN_HPP_NOEXCEPT + : pApplicationName( pApplicationName_ ), applicationVersion( applicationVersion_ ), pEngineName( pEngineName_ ), engineVersion( engineVersion_ ), apiVersion( apiVersion_ ) + {} + + VULKAN_HPP_CONSTEXPR ApplicationInfo( ApplicationInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ApplicationInfo( VkApplicationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ApplicationInfo & operator=( VkApplicationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ApplicationInfo & operator=( ApplicationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ApplicationInfo ) ); + return *this; + } + + ApplicationInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ApplicationInfo & setPApplicationName( const char* pApplicationName_ ) VULKAN_HPP_NOEXCEPT + { + pApplicationName = pApplicationName_; + return *this; + } + + ApplicationInfo & setApplicationVersion( uint32_t applicationVersion_ ) VULKAN_HPP_NOEXCEPT + { + applicationVersion = applicationVersion_; + return *this; + } + + ApplicationInfo & setPEngineName( const char* pEngineName_ ) VULKAN_HPP_NOEXCEPT + { + pEngineName = pEngineName_; + return *this; + } + + ApplicationInfo & setEngineVersion( uint32_t engineVersion_ ) VULKAN_HPP_NOEXCEPT + { + engineVersion = engineVersion_; + return *this; + } + + ApplicationInfo & setApiVersion( uint32_t apiVersion_ ) VULKAN_HPP_NOEXCEPT + { + apiVersion = apiVersion_; + return *this; + } + + + operator VkApplicationInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkApplicationInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ApplicationInfo const& ) const = default; +#else + bool operator==( ApplicationInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pApplicationName == rhs.pApplicationName ) + && ( applicationVersion == rhs.applicationVersion ) + && ( pEngineName == rhs.pEngineName ) + && ( engineVersion == rhs.engineVersion ) + && ( apiVersion == rhs.apiVersion ); + } + + bool operator!=( ApplicationInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eApplicationInfo; + const void* pNext = {}; + const char* pApplicationName = {}; + uint32_t applicationVersion = {}; + const char* pEngineName = {}; + uint32_t engineVersion = {}; + uint32_t apiVersion = {}; + + }; + static_assert( sizeof( ApplicationInfo ) == sizeof( VkApplicationInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
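// Illustrative usage sketch (not part of the vendored vulkan.hpp): ApplicationInfo is typically the
// first structure filled when creating an instance. Assumes the default "vk" namespace; the name
// strings and version values are placeholders.
//
//   vk::ApplicationInfo appInfo = vk::ApplicationInfo{}
//                                     .setPApplicationName( "demo" )
//                                     .setApplicationVersion( VK_MAKE_VERSION( 1, 0, 0 ) )
//                                     .setPEngineName( "engine" )
//                                     .setEngineVersion( VK_MAKE_VERSION( 1, 0, 0 ) )
//                                     .setApiVersion( VK_API_VERSION_1_1 );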
); + + template <> + struct CppType + { + using Type = ApplicationInfo; + }; + + struct AttachmentDescription + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentDescription(VULKAN_HPP_NAMESPACE::AttachmentDescriptionFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::AttachmentLoadOp loadOp_ = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad, VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp_ = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore, VULKAN_HPP_NAMESPACE::AttachmentLoadOp stencilLoadOp_ = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad, VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp_ = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore, VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageLayout finalLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), format( format_ ), samples( samples_ ), loadOp( loadOp_ ), storeOp( storeOp_ ), stencilLoadOp( stencilLoadOp_ ), stencilStoreOp( stencilStoreOp_ ), initialLayout( initialLayout_ ), finalLayout( finalLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentDescription( AttachmentDescription const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentDescription( VkAttachmentDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentDescription & operator=( VkAttachmentDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentDescription & operator=( AttachmentDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentDescription ) ); + return *this; + } + + AttachmentDescription & setFlags( VULKAN_HPP_NAMESPACE::AttachmentDescriptionFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + AttachmentDescription & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + AttachmentDescription & setSamples( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ ) VULKAN_HPP_NOEXCEPT + { + samples = samples_; + return *this; + } + + AttachmentDescription & setLoadOp( VULKAN_HPP_NAMESPACE::AttachmentLoadOp loadOp_ ) VULKAN_HPP_NOEXCEPT + { + loadOp = loadOp_; + return *this; + } + + AttachmentDescription & setStoreOp( VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp_ ) VULKAN_HPP_NOEXCEPT + { + storeOp = storeOp_; + return *this; + } + + AttachmentDescription & setStencilLoadOp( VULKAN_HPP_NAMESPACE::AttachmentLoadOp stencilLoadOp_ ) VULKAN_HPP_NOEXCEPT + { + stencilLoadOp = stencilLoadOp_; + return *this; + } + + AttachmentDescription & setStencilStoreOp( VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp_ ) VULKAN_HPP_NOEXCEPT + { + stencilStoreOp = stencilStoreOp_; + return *this; + } + + AttachmentDescription & setInitialLayout( VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ ) VULKAN_HPP_NOEXCEPT + { + initialLayout = initialLayout_; + return *this; + } + + AttachmentDescription & setFinalLayout( VULKAN_HPP_NAMESPACE::ImageLayout finalLayout_ ) VULKAN_HPP_NOEXCEPT + { + finalLayout = finalLayout_; + return *this; + } + + + operator VkAttachmentDescription const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
VkAttachmentDescription &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentDescription const& ) const = default; +#else + bool operator==( AttachmentDescription const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( flags == rhs.flags ) + && ( format == rhs.format ) + && ( samples == rhs.samples ) + && ( loadOp == rhs.loadOp ) + && ( storeOp == rhs.storeOp ) + && ( stencilLoadOp == rhs.stencilLoadOp ) + && ( stencilStoreOp == rhs.stencilStoreOp ) + && ( initialLayout == rhs.initialLayout ) + && ( finalLayout == rhs.finalLayout ); + } + + bool operator!=( AttachmentDescription const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::AttachmentDescriptionFlags flags = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::AttachmentLoadOp loadOp = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad; + VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore; + VULKAN_HPP_NAMESPACE::AttachmentLoadOp stencilLoadOp = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad; + VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore; + VULKAN_HPP_NAMESPACE::ImageLayout initialLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::ImageLayout finalLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( AttachmentDescription ) == sizeof( VkAttachmentDescription ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
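// Illustrative usage sketch (not part of the vendored vulkan.hpp): a typical single-sample color
// attachment that is cleared on load and transitioned for presentation, built with the setters
// above. Assumes the default "vk" namespace; swapchainFormat is a placeholder.
//
//   vk::AttachmentDescription colorAttachment = vk::AttachmentDescription{}
//                                                   .setFormat( swapchainFormat )
//                                                   .setSamples( vk::SampleCountFlagBits::e1 )
//                                                   .setLoadOp( vk::AttachmentLoadOp::eClear )
//                                                   .setStoreOp( vk::AttachmentStoreOp::eStore )
//                                                   .setStencilLoadOp( vk::AttachmentLoadOp::eDontCare )
//                                                   .setStencilStoreOp( vk::AttachmentStoreOp::eDontCare )
//                                                   .setInitialLayout( vk::ImageLayout::eUndefined )
//                                                   .setFinalLayout( vk::ImageLayout::ePresentSrcKHR );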
); + + struct AttachmentDescription2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAttachmentDescription2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentDescription2(VULKAN_HPP_NAMESPACE::AttachmentDescriptionFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::AttachmentLoadOp loadOp_ = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad, VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp_ = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore, VULKAN_HPP_NAMESPACE::AttachmentLoadOp stencilLoadOp_ = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad, VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp_ = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore, VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageLayout finalLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), format( format_ ), samples( samples_ ), loadOp( loadOp_ ), storeOp( storeOp_ ), stencilLoadOp( stencilLoadOp_ ), stencilStoreOp( stencilStoreOp_ ), initialLayout( initialLayout_ ), finalLayout( finalLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentDescription2( AttachmentDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentDescription2( VkAttachmentDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentDescription2 & operator=( VkAttachmentDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentDescription2 & operator=( AttachmentDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentDescription2 ) ); + return *this; + } + + AttachmentDescription2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AttachmentDescription2 & setFlags( VULKAN_HPP_NAMESPACE::AttachmentDescriptionFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + AttachmentDescription2 & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + AttachmentDescription2 & setSamples( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ ) VULKAN_HPP_NOEXCEPT + { + samples = samples_; + return *this; + } + + AttachmentDescription2 & setLoadOp( VULKAN_HPP_NAMESPACE::AttachmentLoadOp loadOp_ ) VULKAN_HPP_NOEXCEPT + { + loadOp = loadOp_; + return *this; + } + + AttachmentDescription2 & setStoreOp( VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp_ ) VULKAN_HPP_NOEXCEPT + { + storeOp = storeOp_; + return *this; + } + + AttachmentDescription2 & setStencilLoadOp( VULKAN_HPP_NAMESPACE::AttachmentLoadOp stencilLoadOp_ ) VULKAN_HPP_NOEXCEPT + { + stencilLoadOp = stencilLoadOp_; + return *this; + } + + AttachmentDescription2 & setStencilStoreOp( VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp_ ) VULKAN_HPP_NOEXCEPT + { + stencilStoreOp = stencilStoreOp_; + return *this; + } + + AttachmentDescription2 & setInitialLayout( VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ ) VULKAN_HPP_NOEXCEPT + { + initialLayout = initialLayout_; + return *this; + } + + AttachmentDescription2 & setFinalLayout( VULKAN_HPP_NAMESPACE::ImageLayout 
finalLayout_ ) VULKAN_HPP_NOEXCEPT + { + finalLayout = finalLayout_; + return *this; + } + + + operator VkAttachmentDescription2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAttachmentDescription2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentDescription2 const& ) const = default; +#else + bool operator==( AttachmentDescription2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( format == rhs.format ) + && ( samples == rhs.samples ) + && ( loadOp == rhs.loadOp ) + && ( storeOp == rhs.storeOp ) + && ( stencilLoadOp == rhs.stencilLoadOp ) + && ( stencilStoreOp == rhs.stencilStoreOp ) + && ( initialLayout == rhs.initialLayout ) + && ( finalLayout == rhs.finalLayout ); + } + + bool operator!=( AttachmentDescription2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAttachmentDescription2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AttachmentDescriptionFlags flags = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::AttachmentLoadOp loadOp = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad; + VULKAN_HPP_NAMESPACE::AttachmentStoreOp storeOp = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore; + VULKAN_HPP_NAMESPACE::AttachmentLoadOp stencilLoadOp = VULKAN_HPP_NAMESPACE::AttachmentLoadOp::eLoad; + VULKAN_HPP_NAMESPACE::AttachmentStoreOp stencilStoreOp = VULKAN_HPP_NAMESPACE::AttachmentStoreOp::eStore; + VULKAN_HPP_NAMESPACE::ImageLayout initialLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::ImageLayout finalLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( AttachmentDescription2 ) == sizeof( VkAttachmentDescription2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AttachmentDescription2; + }; + using AttachmentDescription2KHR = AttachmentDescription2; + + struct AttachmentDescriptionStencilLayout + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAttachmentDescriptionStencilLayout; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentDescriptionStencilLayout(VULKAN_HPP_NAMESPACE::ImageLayout stencilInitialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageLayout stencilFinalLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : stencilInitialLayout( stencilInitialLayout_ ), stencilFinalLayout( stencilFinalLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentDescriptionStencilLayout( AttachmentDescriptionStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentDescriptionStencilLayout( VkAttachmentDescriptionStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentDescriptionStencilLayout & operator=( VkAttachmentDescriptionStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentDescriptionStencilLayout & operator=( AttachmentDescriptionStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentDescriptionStencilLayout ) ); + return *this; + } + + AttachmentDescriptionStencilLayout & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AttachmentDescriptionStencilLayout & setStencilInitialLayout( VULKAN_HPP_NAMESPACE::ImageLayout stencilInitialLayout_ ) VULKAN_HPP_NOEXCEPT + { + stencilInitialLayout = stencilInitialLayout_; + return *this; + } + + AttachmentDescriptionStencilLayout & setStencilFinalLayout( VULKAN_HPP_NAMESPACE::ImageLayout stencilFinalLayout_ ) VULKAN_HPP_NOEXCEPT + { + stencilFinalLayout = stencilFinalLayout_; + return *this; + } + + + operator VkAttachmentDescriptionStencilLayout const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAttachmentDescriptionStencilLayout &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentDescriptionStencilLayout const& ) const = default; +#else + bool operator==( AttachmentDescriptionStencilLayout const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( stencilInitialLayout == rhs.stencilInitialLayout ) + && ( stencilFinalLayout == rhs.stencilFinalLayout ); + } + + bool operator!=( AttachmentDescriptionStencilLayout const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAttachmentDescriptionStencilLayout; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageLayout stencilInitialLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::ImageLayout stencilFinalLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( AttachmentDescriptionStencilLayout ) == sizeof( VkAttachmentDescriptionStencilLayout ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = AttachmentDescriptionStencilLayout; + }; + using AttachmentDescriptionStencilLayoutKHR = AttachmentDescriptionStencilLayout; + + struct AttachmentReference + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentReference(uint32_t attachment_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout layout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : attachment( attachment_ ), layout( layout_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentReference( AttachmentReference const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentReference( VkAttachmentReference const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentReference & operator=( VkAttachmentReference const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentReference & operator=( AttachmentReference const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentReference ) ); + return *this; + } + + AttachmentReference & setAttachment( uint32_t attachment_ ) VULKAN_HPP_NOEXCEPT + { + attachment = attachment_; + return *this; + } + + AttachmentReference & setLayout( VULKAN_HPP_NAMESPACE::ImageLayout layout_ ) VULKAN_HPP_NOEXCEPT + { + layout = layout_; + return *this; + } + + + operator VkAttachmentReference const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAttachmentReference &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentReference const& ) const = default; +#else + bool operator==( AttachmentReference const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( attachment == rhs.attachment ) + && ( layout == rhs.layout ); + } + + bool operator!=( AttachmentReference const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t attachment = {}; + VULKAN_HPP_NAMESPACE::ImageLayout layout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( AttachmentReference ) == sizeof( VkAttachmentReference ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct AttachmentReference2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAttachmentReference2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentReference2(uint32_t attachment_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout layout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}) VULKAN_HPP_NOEXCEPT + : attachment( attachment_ ), layout( layout_ ), aspectMask( aspectMask_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentReference2( AttachmentReference2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentReference2( VkAttachmentReference2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentReference2 & operator=( VkAttachmentReference2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentReference2 & operator=( AttachmentReference2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentReference2 ) ); + return *this; + } + + AttachmentReference2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AttachmentReference2 & setAttachment( uint32_t attachment_ ) VULKAN_HPP_NOEXCEPT + { + attachment = attachment_; + return *this; + } + + AttachmentReference2 & setLayout( VULKAN_HPP_NAMESPACE::ImageLayout layout_ ) VULKAN_HPP_NOEXCEPT + { + layout = layout_; + return *this; + } + + AttachmentReference2 & setAspectMask( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ ) VULKAN_HPP_NOEXCEPT + { + aspectMask = aspectMask_; + return *this; + } + + + operator VkAttachmentReference2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAttachmentReference2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentReference2 const& ) const = default; +#else + bool operator==( AttachmentReference2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( attachment == rhs.attachment ) + && ( layout == rhs.layout ) + && ( aspectMask == rhs.aspectMask ); + } + + bool operator!=( AttachmentReference2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAttachmentReference2; + const void* pNext = {}; + uint32_t attachment = {}; + VULKAN_HPP_NAMESPACE::ImageLayout layout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {}; + + }; + static_assert( sizeof( AttachmentReference2 ) == sizeof( VkAttachmentReference2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
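// Illustrative usage sketch (not part of the vendored vulkan.hpp): AttachmentReference2 adds an
// sType/pNext chain and an explicit aspectMask to AttachmentReference, e.g. for a depth attachment
// in a createRenderPass2-style setup. Assumes the default "vk" namespace.
//
//   vk::AttachmentReference2 depthRef = vk::AttachmentReference2{}
//                                           .setAttachment( 1 )
//                                           .setLayout( vk::ImageLayout::eDepthStencilAttachmentOptimal )
//                                           .setAspectMask( vk::ImageAspectFlagBits::eDepth );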
); + + template <> + struct CppType + { + using Type = AttachmentReference2; + }; + using AttachmentReference2KHR = AttachmentReference2; + + struct AttachmentReferenceStencilLayout + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eAttachmentReferenceStencilLayout; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentReferenceStencilLayout(VULKAN_HPP_NAMESPACE::ImageLayout stencilLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : stencilLayout( stencilLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentReferenceStencilLayout( AttachmentReferenceStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentReferenceStencilLayout( VkAttachmentReferenceStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentReferenceStencilLayout & operator=( VkAttachmentReferenceStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentReferenceStencilLayout & operator=( AttachmentReferenceStencilLayout const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentReferenceStencilLayout ) ); + return *this; + } + + AttachmentReferenceStencilLayout & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + AttachmentReferenceStencilLayout & setStencilLayout( VULKAN_HPP_NAMESPACE::ImageLayout stencilLayout_ ) VULKAN_HPP_NOEXCEPT + { + stencilLayout = stencilLayout_; + return *this; + } + + + operator VkAttachmentReferenceStencilLayout const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAttachmentReferenceStencilLayout &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentReferenceStencilLayout const& ) const = default; +#else + bool operator==( AttachmentReferenceStencilLayout const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( stencilLayout == rhs.stencilLayout ); + } + + bool operator!=( AttachmentReferenceStencilLayout const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eAttachmentReferenceStencilLayout; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageLayout stencilLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( AttachmentReferenceStencilLayout ) == sizeof( VkAttachmentReferenceStencilLayout ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eAttachmentReferenceStencilLayout>
+  {
+    using Type = AttachmentReferenceStencilLayout;
+  };
+  using AttachmentReferenceStencilLayoutKHR = AttachmentReferenceStencilLayout;
+
+  struct Extent2D
+  {
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR Extent2D(uint32_t width_ = {}, uint32_t height_ = {}) VULKAN_HPP_NOEXCEPT
+      : width( width_ ), height( height_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR Extent2D( Extent2D const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    Extent2D( VkExtent2D const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    Extent2D & operator=( VkExtent2D const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<Extent2D const *>( &rhs );
+      return *this;
+    }
+
+    Extent2D & operator=( Extent2D const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( Extent2D ) );
+      return *this;
+    }
+
+    Extent2D & setWidth( uint32_t width_ ) VULKAN_HPP_NOEXCEPT
+    {
+      width = width_;
+      return *this;
+    }
+
+    Extent2D & setHeight( uint32_t height_ ) VULKAN_HPP_NOEXCEPT
+    {
+      height = height_;
+      return *this;
+    }
+
+    operator VkExtent2D const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkExtent2D*>( this );
+    }
+
+    operator VkExtent2D &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkExtent2D*>( this );
+    }
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( Extent2D const& ) const = default;
+#else
+    bool operator==( Extent2D const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( width == rhs.width )
+          && ( height == rhs.height );
+    }
+
+    bool operator!=( Extent2D const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    uint32_t width = {};
+    uint32_t height = {};
+
+  };
+  static_assert( sizeof( Extent2D ) == sizeof( VkExtent2D ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<Extent2D>::value, "struct wrapper is not a standard layout!"
); + + struct SampleLocationEXT + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SampleLocationEXT(float x_ = {}, float y_ = {}) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ) + {} + + VULKAN_HPP_CONSTEXPR SampleLocationEXT( SampleLocationEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SampleLocationEXT( VkSampleLocationEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SampleLocationEXT & operator=( VkSampleLocationEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SampleLocationEXT & operator=( SampleLocationEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SampleLocationEXT ) ); + return *this; + } + + SampleLocationEXT & setX( float x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + SampleLocationEXT & setY( float y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + + operator VkSampleLocationEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSampleLocationEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SampleLocationEXT const& ) const = default; +#else + bool operator==( SampleLocationEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ); + } + + bool operator!=( SampleLocationEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + float x = {}; + float y = {}; + + }; + static_assert( sizeof( SampleLocationEXT ) == sizeof( VkSampleLocationEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct SampleLocationsInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSampleLocationsInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SampleLocationsInfoEXT(VULKAN_HPP_NAMESPACE::SampleCountFlagBits sampleLocationsPerPixel_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::Extent2D sampleLocationGridSize_ = {}, uint32_t sampleLocationsCount_ = {}, const VULKAN_HPP_NAMESPACE::SampleLocationEXT* pSampleLocations_ = {}) VULKAN_HPP_NOEXCEPT + : sampleLocationsPerPixel( sampleLocationsPerPixel_ ), sampleLocationGridSize( sampleLocationGridSize_ ), sampleLocationsCount( sampleLocationsCount_ ), pSampleLocations( pSampleLocations_ ) + {} + + VULKAN_HPP_CONSTEXPR SampleLocationsInfoEXT( SampleLocationsInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SampleLocationsInfoEXT( VkSampleLocationsInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SampleLocationsInfoEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits sampleLocationsPerPixel_, VULKAN_HPP_NAMESPACE::Extent2D sampleLocationGridSize_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & sampleLocations_ ) + : sampleLocationsPerPixel( sampleLocationsPerPixel_ ), sampleLocationGridSize( sampleLocationGridSize_ ), sampleLocationsCount( static_cast( sampleLocations_.size() ) ), pSampleLocations( sampleLocations_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SampleLocationsInfoEXT & operator=( VkSampleLocationsInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SampleLocationsInfoEXT & operator=( SampleLocationsInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SampleLocationsInfoEXT ) ); + return *this; + } + + SampleLocationsInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SampleLocationsInfoEXT & setSampleLocationsPerPixel( VULKAN_HPP_NAMESPACE::SampleCountFlagBits sampleLocationsPerPixel_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsPerPixel = sampleLocationsPerPixel_; + return *this; + } + + SampleLocationsInfoEXT & setSampleLocationGridSize( VULKAN_HPP_NAMESPACE::Extent2D const & sampleLocationGridSize_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationGridSize = sampleLocationGridSize_; + return *this; + } + + SampleLocationsInfoEXT & setSampleLocationsCount( uint32_t sampleLocationsCount_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsCount = sampleLocationsCount_; + return *this; + } + + SampleLocationsInfoEXT & setPSampleLocations( const VULKAN_HPP_NAMESPACE::SampleLocationEXT* pSampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + pSampleLocations = pSampleLocations_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SampleLocationsInfoEXT & setSampleLocations( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & sampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsCount = static_cast( sampleLocations_.size() ); + pSampleLocations = sampleLocations_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSampleLocationsInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSampleLocationsInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if 
defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SampleLocationsInfoEXT const& ) const = default; +#else + bool operator==( SampleLocationsInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleLocationsPerPixel == rhs.sampleLocationsPerPixel ) + && ( sampleLocationGridSize == rhs.sampleLocationGridSize ) + && ( sampleLocationsCount == rhs.sampleLocationsCount ) + && ( pSampleLocations == rhs.pSampleLocations ); + } + + bool operator!=( SampleLocationsInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSampleLocationsInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits sampleLocationsPerPixel = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::Extent2D sampleLocationGridSize = {}; + uint32_t sampleLocationsCount = {}; + const VULKAN_HPP_NAMESPACE::SampleLocationEXT* pSampleLocations = {}; + + }; + static_assert( sizeof( SampleLocationsInfoEXT ) == sizeof( VkSampleLocationsInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SampleLocationsInfoEXT; + }; + + struct AttachmentSampleLocationsEXT + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AttachmentSampleLocationsEXT(uint32_t attachmentIndex_ = {}, VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo_ = {}) VULKAN_HPP_NOEXCEPT + : attachmentIndex( attachmentIndex_ ), sampleLocationsInfo( sampleLocationsInfo_ ) + {} + + VULKAN_HPP_CONSTEXPR AttachmentSampleLocationsEXT( AttachmentSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + AttachmentSampleLocationsEXT( VkAttachmentSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + AttachmentSampleLocationsEXT & operator=( VkAttachmentSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + AttachmentSampleLocationsEXT & operator=( AttachmentSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( AttachmentSampleLocationsEXT ) ); + return *this; + } + + AttachmentSampleLocationsEXT & setAttachmentIndex( uint32_t attachmentIndex_ ) VULKAN_HPP_NOEXCEPT + { + attachmentIndex = attachmentIndex_; + return *this; + } + + AttachmentSampleLocationsEXT & setSampleLocationsInfo( VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT const & sampleLocationsInfo_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsInfo = sampleLocationsInfo_; + return *this; + } + + + operator VkAttachmentSampleLocationsEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkAttachmentSampleLocationsEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( AttachmentSampleLocationsEXT const& ) const = default; +#else + bool operator==( AttachmentSampleLocationsEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( attachmentIndex == rhs.attachmentIndex ) + && ( sampleLocationsInfo == rhs.sampleLocationsInfo ); + } + + bool operator!=( AttachmentSampleLocationsEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t 
attachmentIndex = {}; + VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo = {}; + + }; + static_assert( sizeof( AttachmentSampleLocationsEXT ) == sizeof( VkAttachmentSampleLocationsEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct BaseInStructure + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + BaseInStructure(VULKAN_HPP_NAMESPACE::StructureType sType_ = VULKAN_HPP_NAMESPACE::StructureType::eApplicationInfo) VULKAN_HPP_NOEXCEPT + : sType( sType_ ) + {} + + BaseInStructure( BaseInStructure const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BaseInStructure( VkBaseInStructure const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BaseInStructure & operator=( VkBaseInStructure const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BaseInStructure & operator=( BaseInStructure const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BaseInStructure ) ); + return *this; + } + + BaseInStructure & setPNext( const struct VULKAN_HPP_NAMESPACE::BaseInStructure* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + + operator VkBaseInStructure const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBaseInStructure &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BaseInStructure const& ) const = default; +#else + bool operator==( BaseInStructure const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ); + } + + bool operator!=( BaseInStructure const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = VULKAN_HPP_NAMESPACE::StructureType::eApplicationInfo; + const struct VULKAN_HPP_NAMESPACE::BaseInStructure* pNext = {}; + + }; + static_assert( sizeof( BaseInStructure ) == sizeof( VkBaseInStructure ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
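// [Editor's illustration, not part of the vendored header] For the SampleLocationsInfoEXT /
// AttachmentSampleLocationsEXT pair defined a little earlier: when
// VULKAN_HPP_DISABLE_ENHANCED_MODE is not defined, the ArrayProxyNoTemporaries overloads fill
// the count/pointer pair from any contiguous lvalue container. A sketch (all names illustrative,
// <array> assumed):
//
//   std::array<vk::SampleLocationEXT, 4> locations{ {
//     { 0.25f, 0.25f }, { 0.75f, 0.25f }, { 0.25f, 0.75f }, { 0.75f, 0.75f } } };
//
//   vk::SampleLocationsInfoEXT info{};
//   info.setSampleLocationsPerPixel( vk::SampleCountFlagBits::e4 )
//       .setSampleLocationGridSize( vk::Extent2D{ 1, 1 } )
//       .setSampleLocations( locations );   // fills sampleLocationsCount and pSampleLocations
//
//   vk::AttachmentSampleLocationsEXT attachment{ /*attachmentIndex=*/0, info };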
); + + struct BaseOutStructure + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + BaseOutStructure(VULKAN_HPP_NAMESPACE::StructureType sType_ = VULKAN_HPP_NAMESPACE::StructureType::eApplicationInfo) VULKAN_HPP_NOEXCEPT + : sType( sType_ ) + {} + + BaseOutStructure( BaseOutStructure const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BaseOutStructure( VkBaseOutStructure const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BaseOutStructure & operator=( VkBaseOutStructure const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BaseOutStructure & operator=( BaseOutStructure const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BaseOutStructure ) ); + return *this; + } + + BaseOutStructure & setPNext( struct VULKAN_HPP_NAMESPACE::BaseOutStructure* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + + operator VkBaseOutStructure const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBaseOutStructure &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BaseOutStructure const& ) const = default; +#else + bool operator==( BaseOutStructure const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ); + } + + bool operator!=( BaseOutStructure const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::StructureType sType = VULKAN_HPP_NAMESPACE::StructureType::eApplicationInfo; + struct VULKAN_HPP_NAMESPACE::BaseOutStructure* pNext = {}; + + }; + static_assert( sizeof( BaseOutStructure ) == sizeof( VkBaseOutStructure ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
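// [Editor's illustration, not part of the vendored header] BaseInStructure / BaseOutStructure
// model the generic sType/pNext header shared by every extensible Vulkan struct; they exist so a
// pNext chain can be walked without knowing the concrete types. A hypothetical helper:
//
//   // Counts the structures hanging off a pNext chain (read-only traversal).
//   inline size_t chainLength( const void* head )
//   {
//     size_t n = 0;
//     for ( auto p = static_cast<const vk::BaseInStructure*>( head ); p != nullptr; p = p->pNext )
//       ++n;
//     return n;
//   }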
); + + class DeviceMemory + { + public: + using CType = VkDeviceMemory; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDeviceMemory; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDeviceMemory; + + public: + VULKAN_HPP_CONSTEXPR DeviceMemory() VULKAN_HPP_NOEXCEPT + : m_deviceMemory(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemory( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_deviceMemory(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DeviceMemory( VkDeviceMemory deviceMemory ) VULKAN_HPP_NOEXCEPT + : m_deviceMemory( deviceMemory ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DeviceMemory & operator=(VkDeviceMemory deviceMemory) VULKAN_HPP_NOEXCEPT + { + m_deviceMemory = deviceMemory; + return *this; + } +#endif + + DeviceMemory & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_deviceMemory = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceMemory const& ) const = default; +#else + bool operator==( DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory == rhs.m_deviceMemory; + } + + bool operator!=(DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory != rhs.m_deviceMemory; + } + + bool operator<(DeviceMemory const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory < rhs.m_deviceMemory; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeviceMemory() const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_deviceMemory == VK_NULL_HANDLE; + } + + private: + VkDeviceMemory m_deviceMemory; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DeviceMemory ) == sizeof( VkDeviceMemory ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DeviceMemory; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DeviceMemory; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DeviceMemory; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct BindAccelerationStructureMemoryInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindAccelerationStructureMemoryInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindAccelerationStructureMemoryInfoNV(VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, uint32_t deviceIndexCount_ = {}, const uint32_t* pDeviceIndices_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructure( accelerationStructure_ ), memory( memory_ ), memoryOffset( memoryOffset_ ), deviceIndexCount( deviceIndexCount_ ), pDeviceIndices( pDeviceIndices_ ) + {} + + VULKAN_HPP_CONSTEXPR BindAccelerationStructureMemoryInfoNV( BindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindAccelerationStructureMemoryInfoNV( VkBindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindAccelerationStructureMemoryInfoNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_, VULKAN_HPP_NAMESPACE::DeviceMemory memory_, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceIndices_ ) + : accelerationStructure( accelerationStructure_ ), memory( memory_ ), memoryOffset( memoryOffset_ ), deviceIndexCount( static_cast( deviceIndices_.size() ) ), pDeviceIndices( deviceIndices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindAccelerationStructureMemoryInfoNV & operator=( VkBindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindAccelerationStructureMemoryInfoNV & operator=( BindAccelerationStructureMemoryInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindAccelerationStructureMemoryInfoNV ) ); + return *this; + } + + BindAccelerationStructureMemoryInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindAccelerationStructureMemoryInfoNV & setAccelerationStructure( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructure = accelerationStructure_; + return *this; + } + + BindAccelerationStructureMemoryInfoNV & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + BindAccelerationStructureMemoryInfoNV & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT + { + memoryOffset = memoryOffset_; + return *this; + } + + BindAccelerationStructureMemoryInfoNV & setDeviceIndexCount( uint32_t deviceIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + deviceIndexCount = deviceIndexCount_; + return *this; + } + + BindAccelerationStructureMemoryInfoNV & setPDeviceIndices( const uint32_t* pDeviceIndices_ ) VULKAN_HPP_NOEXCEPT 
+ { + pDeviceIndices = pDeviceIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindAccelerationStructureMemoryInfoNV & setDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + deviceIndexCount = static_cast( deviceIndices_.size() ); + pDeviceIndices = deviceIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkBindAccelerationStructureMemoryInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindAccelerationStructureMemoryInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindAccelerationStructureMemoryInfoNV const& ) const = default; +#else + bool operator==( BindAccelerationStructureMemoryInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructure == rhs.accelerationStructure ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ) + && ( deviceIndexCount == rhs.deviceIndexCount ) + && ( pDeviceIndices == rhs.pDeviceIndices ); + } + + bool operator!=( BindAccelerationStructureMemoryInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindAccelerationStructureMemoryInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset = {}; + uint32_t deviceIndexCount = {}; + const uint32_t* pDeviceIndices = {}; + + }; + static_assert( sizeof( BindAccelerationStructureMemoryInfoNV ) == sizeof( VkBindAccelerationStructureMemoryInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
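// [Editor's illustration, not part of the vendored header] The DeviceMemory class defined above
// follows the header's handle-wrapper pattern: it has the size of the underlying VkDeviceMemory,
// default-constructs to VK_NULL_HANDLE, and converts to bool for validity checks. A short sketch
// (rawHandle is a hypothetical VkDeviceMemory obtained elsewhere):
//
//   vk::DeviceMemory memory;                                    // == VK_NULL_HANDLE
//   if ( !memory ) { /* nothing allocated yet */ }
//   memory = vk::DeviceMemory( rawHandle );                     // wrap an existing C handle
//   VkDeviceMemory c = static_cast<VkDeviceMemory>( memory );   // back to the C handle
//   memory = nullptr;                                           // reset to the null handle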
); + + template <> + struct CppType + { + using Type = BindAccelerationStructureMemoryInfoNV; + }; + + struct BindBufferMemoryDeviceGroupInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindBufferMemoryDeviceGroupInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindBufferMemoryDeviceGroupInfo(uint32_t deviceIndexCount_ = {}, const uint32_t* pDeviceIndices_ = {}) VULKAN_HPP_NOEXCEPT + : deviceIndexCount( deviceIndexCount_ ), pDeviceIndices( pDeviceIndices_ ) + {} + + VULKAN_HPP_CONSTEXPR BindBufferMemoryDeviceGroupInfo( BindBufferMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindBufferMemoryDeviceGroupInfo( VkBindBufferMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindBufferMemoryDeviceGroupInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceIndices_ ) + : deviceIndexCount( static_cast( deviceIndices_.size() ) ), pDeviceIndices( deviceIndices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindBufferMemoryDeviceGroupInfo & operator=( VkBindBufferMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindBufferMemoryDeviceGroupInfo & operator=( BindBufferMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindBufferMemoryDeviceGroupInfo ) ); + return *this; + } + + BindBufferMemoryDeviceGroupInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindBufferMemoryDeviceGroupInfo & setDeviceIndexCount( uint32_t deviceIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + deviceIndexCount = deviceIndexCount_; + return *this; + } + + BindBufferMemoryDeviceGroupInfo & setPDeviceIndices( const uint32_t* pDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + pDeviceIndices = pDeviceIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindBufferMemoryDeviceGroupInfo & setDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + deviceIndexCount = static_cast( deviceIndices_.size() ); + pDeviceIndices = deviceIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkBindBufferMemoryDeviceGroupInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindBufferMemoryDeviceGroupInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindBufferMemoryDeviceGroupInfo const& ) const = default; +#else + bool operator==( BindBufferMemoryDeviceGroupInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceIndexCount == rhs.deviceIndexCount ) + && ( pDeviceIndices == rhs.pDeviceIndices ); + } + + bool operator!=( BindBufferMemoryDeviceGroupInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindBufferMemoryDeviceGroupInfo; + const void* pNext = {}; + uint32_t deviceIndexCount = {}; + const uint32_t* pDeviceIndices = {}; + + }; + static_assert( sizeof( BindBufferMemoryDeviceGroupInfo ) == sizeof( 
VkBindBufferMemoryDeviceGroupInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BindBufferMemoryDeviceGroupInfo; + }; + using BindBufferMemoryDeviceGroupInfoKHR = BindBufferMemoryDeviceGroupInfo; + + struct BindBufferMemoryInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindBufferMemoryInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindBufferMemoryInfo(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ), memory( memory_ ), memoryOffset( memoryOffset_ ) + {} + + VULKAN_HPP_CONSTEXPR BindBufferMemoryInfo( BindBufferMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindBufferMemoryInfo( VkBindBufferMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindBufferMemoryInfo & operator=( VkBindBufferMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindBufferMemoryInfo & operator=( BindBufferMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindBufferMemoryInfo ) ); + return *this; + } + + BindBufferMemoryInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindBufferMemoryInfo & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + BindBufferMemoryInfo & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + BindBufferMemoryInfo & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT + { + memoryOffset = memoryOffset_; + return *this; + } + + + operator VkBindBufferMemoryInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindBufferMemoryInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindBufferMemoryInfo const& ) const = default; +#else + bool operator==( BindBufferMemoryInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ); + } + + bool operator!=( BindBufferMemoryInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindBufferMemoryInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset = {}; + + }; + static_assert( sizeof( BindBufferMemoryInfo ) == sizeof( VkBindBufferMemoryInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
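// [Editor's illustration, not part of the vendored header] BindBufferMemoryInfo is the
// VK_KHR_bind_memory2 / Vulkan 1.1 way of attaching memory to a buffer; the wrapper pre-sets
// sType, so only the handles and offset need filling. A sketch, assuming existing buffer, memory
// and device objects and the generated vk::Device::bindBufferMemory2 overload:
//
//   vk::BindBufferMemoryInfo bindInfo{};
//   bindInfo.setBuffer( buffer ).setMemory( memory ).setMemoryOffset( 0 );
//   device.bindBufferMemory2( bindInfo );   // several infos can be passed to bind in one call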
); + + template <> + struct CppType + { + using Type = BindBufferMemoryInfo; + }; + using BindBufferMemoryInfoKHR = BindBufferMemoryInfo; + + struct Offset2D + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Offset2D(int32_t x_ = {}, int32_t y_ = {}) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ) + {} + + VULKAN_HPP_CONSTEXPR Offset2D( Offset2D const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Offset2D( VkOffset2D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Offset2D & operator=( VkOffset2D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + Offset2D & operator=( Offset2D const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( Offset2D ) ); + return *this; + } + + Offset2D & setX( int32_t x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + Offset2D & setY( int32_t y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + + operator VkOffset2D const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkOffset2D &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Offset2D const& ) const = default; +#else + bool operator==( Offset2D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ); + } + + bool operator!=( Offset2D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + int32_t x = {}; + int32_t y = {}; + + }; + static_assert( sizeof( Offset2D ) == sizeof( VkOffset2D ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct Rect2D + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Rect2D(VULKAN_HPP_NAMESPACE::Offset2D offset_ = {}, VULKAN_HPP_NAMESPACE::Extent2D extent_ = {}) VULKAN_HPP_NOEXCEPT + : offset( offset_ ), extent( extent_ ) + {} + + VULKAN_HPP_CONSTEXPR Rect2D( Rect2D const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Rect2D( VkRect2D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Rect2D & operator=( VkRect2D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + Rect2D & operator=( Rect2D const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( Rect2D ) ); + return *this; + } + + Rect2D & setOffset( VULKAN_HPP_NAMESPACE::Offset2D const & offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + Rect2D & setExtent( VULKAN_HPP_NAMESPACE::Extent2D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + + operator VkRect2D const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRect2D &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Rect2D const& ) const = default; +#else + bool operator==( Rect2D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( offset == rhs.offset ) + && ( extent == rhs.extent ); + } + + bool operator!=( Rect2D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Offset2D offset = {}; + VULKAN_HPP_NAMESPACE::Extent2D extent = {}; + + }; + static_assert( sizeof( Rect2D ) == sizeof( VkRect2D ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
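// [Editor's illustration, not part of the vendored header] Offset2D, Extent2D and Rect2D compose
// in the obvious way, for example for a scissor rectangle:
//
//   vk::Rect2D scissor{ vk::Offset2D{ 0, 0 }, vk::Extent2D{ 1280, 720 } };
//   // cmd.setScissor( 0, scissor );   // assuming a vk::CommandBuffer named cmd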
); + + struct BindImageMemoryDeviceGroupInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindImageMemoryDeviceGroupInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindImageMemoryDeviceGroupInfo(uint32_t deviceIndexCount_ = {}, const uint32_t* pDeviceIndices_ = {}, uint32_t splitInstanceBindRegionCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D* pSplitInstanceBindRegions_ = {}) VULKAN_HPP_NOEXCEPT + : deviceIndexCount( deviceIndexCount_ ), pDeviceIndices( pDeviceIndices_ ), splitInstanceBindRegionCount( splitInstanceBindRegionCount_ ), pSplitInstanceBindRegions( pSplitInstanceBindRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR BindImageMemoryDeviceGroupInfo( BindImageMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindImageMemoryDeviceGroupInfo( VkBindImageMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindImageMemoryDeviceGroupInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceIndices_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & splitInstanceBindRegions_ = {} ) + : deviceIndexCount( static_cast( deviceIndices_.size() ) ), pDeviceIndices( deviceIndices_.data() ), splitInstanceBindRegionCount( static_cast( splitInstanceBindRegions_.size() ) ), pSplitInstanceBindRegions( splitInstanceBindRegions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindImageMemoryDeviceGroupInfo & operator=( VkBindImageMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindImageMemoryDeviceGroupInfo & operator=( BindImageMemoryDeviceGroupInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindImageMemoryDeviceGroupInfo ) ); + return *this; + } + + BindImageMemoryDeviceGroupInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindImageMemoryDeviceGroupInfo & setDeviceIndexCount( uint32_t deviceIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + deviceIndexCount = deviceIndexCount_; + return *this; + } + + BindImageMemoryDeviceGroupInfo & setPDeviceIndices( const uint32_t* pDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + pDeviceIndices = pDeviceIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindImageMemoryDeviceGroupInfo & setDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + deviceIndexCount = static_cast( deviceIndices_.size() ); + pDeviceIndices = deviceIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + BindImageMemoryDeviceGroupInfo & setSplitInstanceBindRegionCount( uint32_t splitInstanceBindRegionCount_ ) VULKAN_HPP_NOEXCEPT + { + splitInstanceBindRegionCount = splitInstanceBindRegionCount_; + return *this; + } + + BindImageMemoryDeviceGroupInfo & setPSplitInstanceBindRegions( const VULKAN_HPP_NAMESPACE::Rect2D* pSplitInstanceBindRegions_ ) VULKAN_HPP_NOEXCEPT + { + pSplitInstanceBindRegions = pSplitInstanceBindRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindImageMemoryDeviceGroupInfo & setSplitInstanceBindRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & splitInstanceBindRegions_ ) VULKAN_HPP_NOEXCEPT + { + splitInstanceBindRegionCount = static_cast( 
splitInstanceBindRegions_.size() ); + pSplitInstanceBindRegions = splitInstanceBindRegions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkBindImageMemoryDeviceGroupInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindImageMemoryDeviceGroupInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindImageMemoryDeviceGroupInfo const& ) const = default; +#else + bool operator==( BindImageMemoryDeviceGroupInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceIndexCount == rhs.deviceIndexCount ) + && ( pDeviceIndices == rhs.pDeviceIndices ) + && ( splitInstanceBindRegionCount == rhs.splitInstanceBindRegionCount ) + && ( pSplitInstanceBindRegions == rhs.pSplitInstanceBindRegions ); + } + + bool operator!=( BindImageMemoryDeviceGroupInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindImageMemoryDeviceGroupInfo; + const void* pNext = {}; + uint32_t deviceIndexCount = {}; + const uint32_t* pDeviceIndices = {}; + uint32_t splitInstanceBindRegionCount = {}; + const VULKAN_HPP_NAMESPACE::Rect2D* pSplitInstanceBindRegions = {}; + + }; + static_assert( sizeof( BindImageMemoryDeviceGroupInfo ) == sizeof( VkBindImageMemoryDeviceGroupInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BindImageMemoryDeviceGroupInfo; + }; + using BindImageMemoryDeviceGroupInfoKHR = BindImageMemoryDeviceGroupInfo; + + class Image + { + public: + using CType = VkImage; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eImage; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eImage; + + public: + VULKAN_HPP_CONSTEXPR Image() VULKAN_HPP_NOEXCEPT + : m_image(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Image( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_image(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Image( VkImage image ) VULKAN_HPP_NOEXCEPT + : m_image( image ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Image & operator=(VkImage image) VULKAN_HPP_NOEXCEPT + { + m_image = image; + return *this; + } +#endif + + Image & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_image = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Image const& ) const = default; +#else + bool operator==( Image const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_image == rhs.m_image; + } + + bool operator!=(Image const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_image != rhs.m_image; + } + + bool operator<(Image const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_image < rhs.m_image; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImage() const VULKAN_HPP_NOEXCEPT + { + return m_image; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_image != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_image == VK_NULL_HANDLE; + } + + private: + VkImage m_image; + }; + static_assert( sizeof( 
VULKAN_HPP_NAMESPACE::Image ) == sizeof( VkImage ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Image; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Image; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Image; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct BindImageMemoryInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindImageMemoryInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindImageMemoryInfo(VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ), memory( memory_ ), memoryOffset( memoryOffset_ ) + {} + + VULKAN_HPP_CONSTEXPR BindImageMemoryInfo( BindImageMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindImageMemoryInfo( VkBindImageMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindImageMemoryInfo & operator=( VkBindImageMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindImageMemoryInfo & operator=( BindImageMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindImageMemoryInfo ) ); + return *this; + } + + BindImageMemoryInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindImageMemoryInfo & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + BindImageMemoryInfo & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + BindImageMemoryInfo & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT + { + memoryOffset = memoryOffset_; + return *this; + } + + + operator VkBindImageMemoryInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindImageMemoryInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindImageMemoryInfo const& ) const = default; +#else + bool operator==( BindImageMemoryInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ); + } + + bool operator!=( BindImageMemoryInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindImageMemoryInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset = {}; + + }; + static_assert( sizeof( BindImageMemoryInfo ) == sizeof( VkBindImageMemoryInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = BindImageMemoryInfo; + }; + using BindImageMemoryInfoKHR = BindImageMemoryInfo; + + struct BindImageMemorySwapchainInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindImageMemorySwapchainInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindImageMemorySwapchainInfoKHR(VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ = {}, uint32_t imageIndex_ = {}) VULKAN_HPP_NOEXCEPT + : swapchain( swapchain_ ), imageIndex( imageIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR BindImageMemorySwapchainInfoKHR( BindImageMemorySwapchainInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindImageMemorySwapchainInfoKHR( VkBindImageMemorySwapchainInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindImageMemorySwapchainInfoKHR & operator=( VkBindImageMemorySwapchainInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindImageMemorySwapchainInfoKHR & operator=( BindImageMemorySwapchainInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindImageMemorySwapchainInfoKHR ) ); + return *this; + } + + BindImageMemorySwapchainInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindImageMemorySwapchainInfoKHR & setSwapchain( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ ) VULKAN_HPP_NOEXCEPT + { + swapchain = swapchain_; + return *this; + } + + BindImageMemorySwapchainInfoKHR & setImageIndex( uint32_t imageIndex_ ) VULKAN_HPP_NOEXCEPT + { + imageIndex = imageIndex_; + return *this; + } + + + operator VkBindImageMemorySwapchainInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindImageMemorySwapchainInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindImageMemorySwapchainInfoKHR const& ) const = default; +#else + bool operator==( BindImageMemorySwapchainInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchain == rhs.swapchain ) + && ( imageIndex == rhs.imageIndex ); + } + + bool operator!=( BindImageMemorySwapchainInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindImageMemorySwapchainInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain = {}; + uint32_t imageIndex = {}; + + }; + static_assert( sizeof( BindImageMemorySwapchainInfoKHR ) == sizeof( VkBindImageMemorySwapchainInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
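// [Editor's illustration, not part of the vendored header] BindImageMemorySwapchainInfoKHR is
// consumed through the pNext chain of BindImageMemoryInfo (VK_KHR_swapchain together with
// bind_memory2): the image is bound to swapchain-owned memory, so BindImageMemoryInfo::memory is
// left null. A sketch, with image, swapchain and device assumed to exist:
//
//   vk::BindImageMemorySwapchainInfoKHR swapchainBind{ swapchain, /*imageIndex=*/0 };
//   vk::BindImageMemoryInfo bindInfo{};
//   bindInfo.setImage( image ).setPNext( &swapchainBind );
//   device.bindImageMemory2( bindInfo );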
); + + template <> + struct CppType + { + using Type = BindImageMemorySwapchainInfoKHR; + }; + + struct BindImagePlaneMemoryInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindImagePlaneMemoryInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindImagePlaneMemoryInfo(VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor) VULKAN_HPP_NOEXCEPT + : planeAspect( planeAspect_ ) + {} + + VULKAN_HPP_CONSTEXPR BindImagePlaneMemoryInfo( BindImagePlaneMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindImagePlaneMemoryInfo( VkBindImagePlaneMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindImagePlaneMemoryInfo & operator=( VkBindImagePlaneMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindImagePlaneMemoryInfo & operator=( BindImagePlaneMemoryInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindImagePlaneMemoryInfo ) ); + return *this; + } + + BindImagePlaneMemoryInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindImagePlaneMemoryInfo & setPlaneAspect( VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ ) VULKAN_HPP_NOEXCEPT + { + planeAspect = planeAspect_; + return *this; + } + + + operator VkBindImagePlaneMemoryInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindImagePlaneMemoryInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindImagePlaneMemoryInfo const& ) const = default; +#else + bool operator==( BindImagePlaneMemoryInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( planeAspect == rhs.planeAspect ); + } + + bool operator!=( BindImagePlaneMemoryInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindImagePlaneMemoryInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor; + + }; + static_assert( sizeof( BindImagePlaneMemoryInfo ) == sizeof( VkBindImagePlaneMemoryInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = BindImagePlaneMemoryInfo; + }; + using BindImagePlaneMemoryInfoKHR = BindImagePlaneMemoryInfo; + + struct BindIndexBufferIndirectCommandNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindIndexBufferIndirectCommandNV(VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress_ = {}, uint32_t size_ = {}, VULKAN_HPP_NAMESPACE::IndexType indexType_ = VULKAN_HPP_NAMESPACE::IndexType::eUint16) VULKAN_HPP_NOEXCEPT + : bufferAddress( bufferAddress_ ), size( size_ ), indexType( indexType_ ) + {} + + VULKAN_HPP_CONSTEXPR BindIndexBufferIndirectCommandNV( BindIndexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindIndexBufferIndirectCommandNV( VkBindIndexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindIndexBufferIndirectCommandNV & operator=( VkBindIndexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindIndexBufferIndirectCommandNV & operator=( BindIndexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindIndexBufferIndirectCommandNV ) ); + return *this; + } + + BindIndexBufferIndirectCommandNV & setBufferAddress( VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress_ ) VULKAN_HPP_NOEXCEPT + { + bufferAddress = bufferAddress_; + return *this; + } + + BindIndexBufferIndirectCommandNV & setSize( uint32_t size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + BindIndexBufferIndirectCommandNV & setIndexType( VULKAN_HPP_NAMESPACE::IndexType indexType_ ) VULKAN_HPP_NOEXCEPT + { + indexType = indexType_; + return *this; + } + + + operator VkBindIndexBufferIndirectCommandNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindIndexBufferIndirectCommandNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindIndexBufferIndirectCommandNV const& ) const = default; +#else + bool operator==( BindIndexBufferIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( bufferAddress == rhs.bufferAddress ) + && ( size == rhs.size ) + && ( indexType == rhs.indexType ); + } + + bool operator!=( BindIndexBufferIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress = {}; + uint32_t size = {}; + VULKAN_HPP_NAMESPACE::IndexType indexType = VULKAN_HPP_NAMESPACE::IndexType::eUint16; + + }; + static_assert( sizeof( BindIndexBufferIndirectCommandNV ) == sizeof( VkBindIndexBufferIndirectCommandNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct BindShaderGroupIndirectCommandNV
+  {
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR BindShaderGroupIndirectCommandNV(uint32_t groupIndex_ = {}) VULKAN_HPP_NOEXCEPT
+      : groupIndex( groupIndex_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR BindShaderGroupIndirectCommandNV( BindShaderGroupIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    BindShaderGroupIndirectCommandNV( VkBindShaderGroupIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    BindShaderGroupIndirectCommandNV & operator=( VkBindShaderGroupIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::BindShaderGroupIndirectCommandNV const *>( &rhs );
+      return *this;
+    }
+
+    BindShaderGroupIndirectCommandNV & operator=( BindShaderGroupIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( BindShaderGroupIndirectCommandNV ) );
+      return *this;
+    }
+
+    BindShaderGroupIndirectCommandNV & setGroupIndex( uint32_t groupIndex_ ) VULKAN_HPP_NOEXCEPT
+    {
+      groupIndex = groupIndex_;
+      return *this;
+    }
+
+
+    operator VkBindShaderGroupIndirectCommandNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkBindShaderGroupIndirectCommandNV*>( this );
+    }
+
+    operator VkBindShaderGroupIndirectCommandNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkBindShaderGroupIndirectCommandNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( BindShaderGroupIndirectCommandNV const& ) const = default;
+#else
+    bool operator==( BindShaderGroupIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( groupIndex == rhs.groupIndex );
+    }
+
+    bool operator!=( BindShaderGroupIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t groupIndex = {};
+
+  };
+  static_assert( sizeof( BindShaderGroupIndirectCommandNV ) == sizeof( VkBindShaderGroupIndirectCommandNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<BindShaderGroupIndirectCommandNV>::value, "struct wrapper is not a standard layout!"
); + + struct SparseMemoryBind + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseMemoryBind(VULKAN_HPP_NAMESPACE::DeviceSize resourceOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : resourceOffset( resourceOffset_ ), size( size_ ), memory( memory_ ), memoryOffset( memoryOffset_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseMemoryBind( SparseMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseMemoryBind( VkSparseMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseMemoryBind & operator=( VkSparseMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseMemoryBind & operator=( SparseMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseMemoryBind ) ); + return *this; + } + + SparseMemoryBind & setResourceOffset( VULKAN_HPP_NAMESPACE::DeviceSize resourceOffset_ ) VULKAN_HPP_NOEXCEPT + { + resourceOffset = resourceOffset_; + return *this; + } + + SparseMemoryBind & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + SparseMemoryBind & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + SparseMemoryBind & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT + { + memoryOffset = memoryOffset_; + return *this; + } + + SparseMemoryBind & setFlags( VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkSparseMemoryBind const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseMemoryBind &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseMemoryBind const& ) const = default; +#else + bool operator==( SparseMemoryBind const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( resourceOffset == rhs.resourceOffset ) + && ( size == rhs.size ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ) + && ( flags == rhs.flags ); + } + + bool operator!=( SparseMemoryBind const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceSize resourceOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset = {}; + VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags = {}; + + }; + static_assert( sizeof( SparseMemoryBind ) == sizeof( VkSparseMemoryBind ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct SparseBufferMemoryBindInfo + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseBufferMemoryBindInfo(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, uint32_t bindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseMemoryBind* pBinds_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ), bindCount( bindCount_ ), pBinds( pBinds_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseBufferMemoryBindInfo( SparseBufferMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseBufferMemoryBindInfo( VkSparseBufferMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SparseBufferMemoryBindInfo( VULKAN_HPP_NAMESPACE::Buffer buffer_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & binds_ ) + : buffer( buffer_ ), bindCount( static_cast( binds_.size() ) ), pBinds( binds_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseBufferMemoryBindInfo & operator=( VkSparseBufferMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseBufferMemoryBindInfo & operator=( SparseBufferMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseBufferMemoryBindInfo ) ); + return *this; + } + + SparseBufferMemoryBindInfo & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + SparseBufferMemoryBindInfo & setBindCount( uint32_t bindCount_ ) VULKAN_HPP_NOEXCEPT + { + bindCount = bindCount_; + return *this; + } + + SparseBufferMemoryBindInfo & setPBinds( const VULKAN_HPP_NAMESPACE::SparseMemoryBind* pBinds_ ) VULKAN_HPP_NOEXCEPT + { + pBinds = pBinds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SparseBufferMemoryBindInfo & setBinds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & binds_ ) VULKAN_HPP_NOEXCEPT + { + bindCount = static_cast( binds_.size() ); + pBinds = binds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSparseBufferMemoryBindInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseBufferMemoryBindInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseBufferMemoryBindInfo const& ) const = default; +#else + bool operator==( SparseBufferMemoryBindInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( buffer == rhs.buffer ) + && ( bindCount == rhs.bindCount ) + && ( pBinds == rhs.pBinds ); + } + + bool operator!=( SparseBufferMemoryBindInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + uint32_t bindCount = {}; + const VULKAN_HPP_NAMESPACE::SparseMemoryBind* pBinds = {}; + + }; + static_assert( sizeof( SparseBufferMemoryBindInfo ) == sizeof( VkSparseBufferMemoryBindInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
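// [Editor's illustration, not part of the vendored header] SparseMemoryBind describes one
// range-to-memory mapping, and SparseBufferMemoryBindInfo groups such binds for a buffer; the
// enhanced-mode constructor takes the bind list as an ArrayProxyNoTemporaries. A sketch (buffer
// and memory assumed to exist; the result would normally be placed in a vk::BindSparseInfo for
// vk::Queue::bindSparse):
//
//   std::array<vk::SparseMemoryBind, 1> binds{ {
//     { /*resourceOffset=*/0, /*size=*/65536, memory, /*memoryOffset=*/0, {} } } };
//   vk::SparseBufferMemoryBindInfo bufferBinds{ buffer, binds };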
); + + struct SparseImageOpaqueMemoryBindInfo + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageOpaqueMemoryBindInfo(VULKAN_HPP_NAMESPACE::Image image_ = {}, uint32_t bindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseMemoryBind* pBinds_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ), bindCount( bindCount_ ), pBinds( pBinds_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageOpaqueMemoryBindInfo( SparseImageOpaqueMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageOpaqueMemoryBindInfo( VkSparseImageOpaqueMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SparseImageOpaqueMemoryBindInfo( VULKAN_HPP_NAMESPACE::Image image_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & binds_ ) + : image( image_ ), bindCount( static_cast( binds_.size() ) ), pBinds( binds_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageOpaqueMemoryBindInfo & operator=( VkSparseImageOpaqueMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageOpaqueMemoryBindInfo & operator=( SparseImageOpaqueMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageOpaqueMemoryBindInfo ) ); + return *this; + } + + SparseImageOpaqueMemoryBindInfo & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + SparseImageOpaqueMemoryBindInfo & setBindCount( uint32_t bindCount_ ) VULKAN_HPP_NOEXCEPT + { + bindCount = bindCount_; + return *this; + } + + SparseImageOpaqueMemoryBindInfo & setPBinds( const VULKAN_HPP_NAMESPACE::SparseMemoryBind* pBinds_ ) VULKAN_HPP_NOEXCEPT + { + pBinds = pBinds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SparseImageOpaqueMemoryBindInfo & setBinds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & binds_ ) VULKAN_HPP_NOEXCEPT + { + bindCount = static_cast( binds_.size() ); + pBinds = binds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSparseImageOpaqueMemoryBindInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageOpaqueMemoryBindInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageOpaqueMemoryBindInfo const& ) const = default; +#else + bool operator==( SparseImageOpaqueMemoryBindInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( image == rhs.image ) + && ( bindCount == rhs.bindCount ) + && ( pBinds == rhs.pBinds ); + } + + bool operator!=( SparseImageOpaqueMemoryBindInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Image image = {}; + uint32_t bindCount = {}; + const VULKAN_HPP_NAMESPACE::SparseMemoryBind* pBinds = {}; + + }; + static_assert( sizeof( SparseImageOpaqueMemoryBindInfo ) == sizeof( VkSparseImageOpaqueMemoryBindInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ImageSubresource + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageSubresource(VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, uint32_t mipLevel_ = {}, uint32_t arrayLayer_ = {}) VULKAN_HPP_NOEXCEPT + : aspectMask( aspectMask_ ), mipLevel( mipLevel_ ), arrayLayer( arrayLayer_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageSubresource( ImageSubresource const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageSubresource( VkImageSubresource const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageSubresource & operator=( VkImageSubresource const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ImageSubresource const *>( &rhs ); + return *this; + } + + ImageSubresource & operator=( ImageSubresource const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( ImageSubresource ) ); + return *this; + } + + ImageSubresource & setAspectMask( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ ) VULKAN_HPP_NOEXCEPT + { + aspectMask = aspectMask_; + return *this; + } + + ImageSubresource & setMipLevel( uint32_t mipLevel_ ) VULKAN_HPP_NOEXCEPT + { + mipLevel = mipLevel_; + return *this; + } + + ImageSubresource & setArrayLayer( uint32_t arrayLayer_ ) VULKAN_HPP_NOEXCEPT + { + arrayLayer = arrayLayer_; + return *this; + } + + + operator VkImageSubresource const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkImageSubresource*>( this ); + } + + operator VkImageSubresource &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkImageSubresource*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageSubresource const& ) const = default; +#else + bool operator==( ImageSubresource const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( aspectMask == rhs.aspectMask ) + && ( mipLevel == rhs.mipLevel ) + && ( arrayLayer == rhs.arrayLayer ); + } + + bool operator!=( ImageSubresource const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {}; + uint32_t mipLevel = {}; + uint32_t arrayLayer = {}; + + }; + static_assert( sizeof( ImageSubresource ) == sizeof( VkImageSubresource ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<ImageSubresource>::value, "struct wrapper is not a standard layout!"
); + + struct Offset3D + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Offset3D(int32_t x_ = {}, int32_t y_ = {}, int32_t z_ = {}) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ), z( z_ ) + {} + + VULKAN_HPP_CONSTEXPR Offset3D( Offset3D const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Offset3D( VkOffset3D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + + explicit Offset3D( Offset2D const& offset2D, int32_t z_ = {} ) + : x( offset2D.x ) + , y( offset2D.y ) + , z( z_ ) + {} +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Offset3D & operator=( VkOffset3D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::Offset3D const *>( &rhs ); + return *this; + } + + Offset3D & operator=( Offset3D const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( Offset3D ) ); + return *this; + } + + Offset3D & setX( int32_t x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + Offset3D & setY( int32_t y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + Offset3D & setZ( int32_t z_ ) VULKAN_HPP_NOEXCEPT + { + z = z_; + return *this; + } + + + operator VkOffset3D const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkOffset3D*>( this ); + } + + operator VkOffset3D &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkOffset3D*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Offset3D const& ) const = default; +#else + bool operator==( Offset3D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( z == rhs.z ); + } + + bool operator!=( Offset3D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + int32_t x = {}; + int32_t y = {}; + int32_t z = {}; + + }; + static_assert( sizeof( Offset3D ) == sizeof( VkOffset3D ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<Offset3D>::value, "struct wrapper is not a standard layout!"
); + + struct Extent3D + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Extent3D(uint32_t width_ = {}, uint32_t height_ = {}, uint32_t depth_ = {}) VULKAN_HPP_NOEXCEPT + : width( width_ ), height( height_ ), depth( depth_ ) + {} + + VULKAN_HPP_CONSTEXPR Extent3D( Extent3D const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Extent3D( VkExtent3D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + + explicit Extent3D( Extent2D const& extent2D, uint32_t depth_ = {} ) + : width( extent2D.width ) + , height( extent2D.height ) + , depth( depth_ ) + {} +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Extent3D & operator=( VkExtent3D const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::Extent3D const *>( &rhs ); + return *this; + } + + Extent3D & operator=( Extent3D const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( Extent3D ) ); + return *this; + } + + Extent3D & setWidth( uint32_t width_ ) VULKAN_HPP_NOEXCEPT + { + width = width_; + return *this; + } + + Extent3D & setHeight( uint32_t height_ ) VULKAN_HPP_NOEXCEPT + { + height = height_; + return *this; + } + + Extent3D & setDepth( uint32_t depth_ ) VULKAN_HPP_NOEXCEPT + { + depth = depth_; + return *this; + } + + + operator VkExtent3D const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkExtent3D*>( this ); + } + + operator VkExtent3D &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkExtent3D*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Extent3D const& ) const = default; +#else + bool operator==( Extent3D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( width == rhs.width ) + && ( height == rhs.height ) + && ( depth == rhs.depth ); + } + + bool operator!=( Extent3D const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t width = {}; + uint32_t height = {}; + uint32_t depth = {}; + + }; + static_assert( sizeof( Extent3D ) == sizeof( VkExtent3D ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<Extent3D>::value, "struct wrapper is not a standard layout!"
); + + struct SparseImageMemoryBind + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageMemoryBind(VULKAN_HPP_NAMESPACE::ImageSubresource subresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D offset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ = {}, VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : subresource( subresource_ ), offset( offset_ ), extent( extent_ ), memory( memory_ ), memoryOffset( memoryOffset_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageMemoryBind( SparseImageMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageMemoryBind( VkSparseImageMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageMemoryBind & operator=( VkSparseImageMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageMemoryBind & operator=( SparseImageMemoryBind const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageMemoryBind ) ); + return *this; + } + + SparseImageMemoryBind & setSubresource( VULKAN_HPP_NAMESPACE::ImageSubresource const & subresource_ ) VULKAN_HPP_NOEXCEPT + { + subresource = subresource_; + return *this; + } + + SparseImageMemoryBind & setOffset( VULKAN_HPP_NAMESPACE::Offset3D const & offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + SparseImageMemoryBind & setExtent( VULKAN_HPP_NAMESPACE::Extent3D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + SparseImageMemoryBind & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + SparseImageMemoryBind & setMemoryOffset( VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset_ ) VULKAN_HPP_NOEXCEPT + { + memoryOffset = memoryOffset_; + return *this; + } + + SparseImageMemoryBind & setFlags( VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkSparseImageMemoryBind const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageMemoryBind &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageMemoryBind const& ) const = default; +#else + bool operator==( SparseImageMemoryBind const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( subresource == rhs.subresource ) + && ( offset == rhs.offset ) + && ( extent == rhs.extent ) + && ( memory == rhs.memory ) + && ( memoryOffset == rhs.memoryOffset ) + && ( flags == rhs.flags ); + } + + bool operator!=( SparseImageMemoryBind const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageSubresource subresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D offset = {}; + VULKAN_HPP_NAMESPACE::Extent3D extent = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset = {}; + VULKAN_HPP_NAMESPACE::SparseMemoryBindFlags flags = {}; + + }; + static_assert( sizeof( SparseImageMemoryBind ) == sizeof( VkSparseImageMemoryBind ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct SparseImageMemoryBindInfo + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageMemoryBindInfo(VULKAN_HPP_NAMESPACE::Image image_ = {}, uint32_t bindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseImageMemoryBind* pBinds_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ), bindCount( bindCount_ ), pBinds( pBinds_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageMemoryBindInfo( SparseImageMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageMemoryBindInfo( VkSparseImageMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SparseImageMemoryBindInfo( VULKAN_HPP_NAMESPACE::Image image_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & binds_ ) + : image( image_ ), bindCount( static_cast( binds_.size() ) ), pBinds( binds_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageMemoryBindInfo & operator=( VkSparseImageMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageMemoryBindInfo & operator=( SparseImageMemoryBindInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageMemoryBindInfo ) ); + return *this; + } + + SparseImageMemoryBindInfo & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + SparseImageMemoryBindInfo & setBindCount( uint32_t bindCount_ ) VULKAN_HPP_NOEXCEPT + { + bindCount = bindCount_; + return *this; + } + + SparseImageMemoryBindInfo & setPBinds( const VULKAN_HPP_NAMESPACE::SparseImageMemoryBind* pBinds_ ) VULKAN_HPP_NOEXCEPT + { + pBinds = pBinds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SparseImageMemoryBindInfo & setBinds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & binds_ ) VULKAN_HPP_NOEXCEPT + { + bindCount = static_cast( binds_.size() ); + pBinds = binds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSparseImageMemoryBindInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageMemoryBindInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageMemoryBindInfo const& ) const = default; +#else + bool operator==( SparseImageMemoryBindInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( image == rhs.image ) + && ( bindCount == rhs.bindCount ) + && ( pBinds == rhs.pBinds ); + } + + bool operator!=( SparseImageMemoryBindInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Image image = {}; + uint32_t bindCount = {}; + const VULKAN_HPP_NAMESPACE::SparseImageMemoryBind* pBinds = {}; + + }; + static_assert( sizeof( SparseImageMemoryBindInfo ) == sizeof( VkSparseImageMemoryBindInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct BindSparseInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBindSparseInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindSparseInfo(uint32_t waitSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores_ = {}, uint32_t bufferBindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseBufferMemoryBindInfo* pBufferBinds_ = {}, uint32_t imageOpaqueBindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds_ = {}, uint32_t imageBindCount_ = {}, const VULKAN_HPP_NAMESPACE::SparseImageMemoryBindInfo* pImageBinds_ = {}, uint32_t signalSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore* pSignalSemaphores_ = {}) VULKAN_HPP_NOEXCEPT + : waitSemaphoreCount( waitSemaphoreCount_ ), pWaitSemaphores( pWaitSemaphores_ ), bufferBindCount( bufferBindCount_ ), pBufferBinds( pBufferBinds_ ), imageOpaqueBindCount( imageOpaqueBindCount_ ), pImageOpaqueBinds( pImageOpaqueBinds_ ), imageBindCount( imageBindCount_ ), pImageBinds( pImageBinds_ ), signalSemaphoreCount( signalSemaphoreCount_ ), pSignalSemaphores( pSignalSemaphores_ ) + {} + + VULKAN_HPP_CONSTEXPR BindSparseInfo( BindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindSparseInfo( VkBindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindSparseInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphores_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bufferBinds_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageOpaqueBinds_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageBinds_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphores_ = {} ) + : waitSemaphoreCount( static_cast( waitSemaphores_.size() ) ), pWaitSemaphores( waitSemaphores_.data() ), bufferBindCount( static_cast( bufferBinds_.size() ) ), pBufferBinds( bufferBinds_.data() ), imageOpaqueBindCount( static_cast( imageOpaqueBinds_.size() ) ), pImageOpaqueBinds( imageOpaqueBinds_.data() ), imageBindCount( static_cast( imageBinds_.size() ) ), pImageBinds( imageBinds_.data() ), signalSemaphoreCount( static_cast( signalSemaphores_.size() ) ), pSignalSemaphores( signalSemaphores_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindSparseInfo & operator=( VkBindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindSparseInfo & operator=( BindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindSparseInfo ) ); + return *this; + } + + BindSparseInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BindSparseInfo & setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + BindSparseInfo & setPWaitSemaphores( const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + pWaitSemaphores = pWaitSemaphores_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindSparseInfo & setWaitSemaphores( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = static_cast( waitSemaphores_.size() ); + pWaitSemaphores = waitSemaphores_.data(); + return 
*this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + BindSparseInfo & setBufferBindCount( uint32_t bufferBindCount_ ) VULKAN_HPP_NOEXCEPT + { + bufferBindCount = bufferBindCount_; + return *this; + } + + BindSparseInfo & setPBufferBinds( const VULKAN_HPP_NAMESPACE::SparseBufferMemoryBindInfo* pBufferBinds_ ) VULKAN_HPP_NOEXCEPT + { + pBufferBinds = pBufferBinds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindSparseInfo & setBufferBinds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bufferBinds_ ) VULKAN_HPP_NOEXCEPT + { + bufferBindCount = static_cast( bufferBinds_.size() ); + pBufferBinds = bufferBinds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + BindSparseInfo & setImageOpaqueBindCount( uint32_t imageOpaqueBindCount_ ) VULKAN_HPP_NOEXCEPT + { + imageOpaqueBindCount = imageOpaqueBindCount_; + return *this; + } + + BindSparseInfo & setPImageOpaqueBinds( const VULKAN_HPP_NAMESPACE::SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds_ ) VULKAN_HPP_NOEXCEPT + { + pImageOpaqueBinds = pImageOpaqueBinds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindSparseInfo & setImageOpaqueBinds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageOpaqueBinds_ ) VULKAN_HPP_NOEXCEPT + { + imageOpaqueBindCount = static_cast( imageOpaqueBinds_.size() ); + pImageOpaqueBinds = imageOpaqueBinds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + BindSparseInfo & setImageBindCount( uint32_t imageBindCount_ ) VULKAN_HPP_NOEXCEPT + { + imageBindCount = imageBindCount_; + return *this; + } + + BindSparseInfo & setPImageBinds( const VULKAN_HPP_NAMESPACE::SparseImageMemoryBindInfo* pImageBinds_ ) VULKAN_HPP_NOEXCEPT + { + pImageBinds = pImageBinds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindSparseInfo & setImageBinds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageBinds_ ) VULKAN_HPP_NOEXCEPT + { + imageBindCount = static_cast( imageBinds_.size() ); + pImageBinds = imageBinds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + BindSparseInfo & setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreCount = signalSemaphoreCount_; + return *this; + } + + BindSparseInfo & setPSignalSemaphores( const VULKAN_HPP_NAMESPACE::Semaphore* pSignalSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + pSignalSemaphores = pSignalSemaphores_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BindSparseInfo & setSignalSemaphores( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreCount = static_cast( signalSemaphores_.size() ); + pSignalSemaphores = signalSemaphores_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkBindSparseInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindSparseInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindSparseInfo const& ) const = default; +#else + bool operator==( BindSparseInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphores == rhs.pWaitSemaphores ) + && ( bufferBindCount == rhs.bufferBindCount ) + && ( pBufferBinds == 
rhs.pBufferBinds ) + && ( imageOpaqueBindCount == rhs.imageOpaqueBindCount ) + && ( pImageOpaqueBinds == rhs.pImageOpaqueBinds ) + && ( imageBindCount == rhs.imageBindCount ) + && ( pImageBinds == rhs.pImageBinds ) + && ( signalSemaphoreCount == rhs.signalSemaphoreCount ) + && ( pSignalSemaphores == rhs.pSignalSemaphores ); + } + + bool operator!=( BindSparseInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBindSparseInfo; + const void* pNext = {}; + uint32_t waitSemaphoreCount = {}; + const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores = {}; + uint32_t bufferBindCount = {}; + const VULKAN_HPP_NAMESPACE::SparseBufferMemoryBindInfo* pBufferBinds = {}; + uint32_t imageOpaqueBindCount = {}; + const VULKAN_HPP_NAMESPACE::SparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds = {}; + uint32_t imageBindCount = {}; + const VULKAN_HPP_NAMESPACE::SparseImageMemoryBindInfo* pImageBinds = {}; + uint32_t signalSemaphoreCount = {}; + const VULKAN_HPP_NAMESPACE::Semaphore* pSignalSemaphores = {}; + + }; + static_assert( sizeof( BindSparseInfo ) == sizeof( VkBindSparseInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BindSparseInfo; + }; + + struct BindVertexBufferIndirectCommandNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BindVertexBufferIndirectCommandNV(VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress_ = {}, uint32_t size_ = {}, uint32_t stride_ = {}) VULKAN_HPP_NOEXCEPT + : bufferAddress( bufferAddress_ ), size( size_ ), stride( stride_ ) + {} + + VULKAN_HPP_CONSTEXPR BindVertexBufferIndirectCommandNV( BindVertexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BindVertexBufferIndirectCommandNV( VkBindVertexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BindVertexBufferIndirectCommandNV & operator=( VkBindVertexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BindVertexBufferIndirectCommandNV & operator=( BindVertexBufferIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BindVertexBufferIndirectCommandNV ) ); + return *this; + } + + BindVertexBufferIndirectCommandNV & setBufferAddress( VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress_ ) VULKAN_HPP_NOEXCEPT + { + bufferAddress = bufferAddress_; + return *this; + } + + BindVertexBufferIndirectCommandNV & setSize( uint32_t size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + BindVertexBufferIndirectCommandNV & setStride( uint32_t stride_ ) VULKAN_HPP_NOEXCEPT + { + stride = stride_; + return *this; + } + + + operator VkBindVertexBufferIndirectCommandNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBindVertexBufferIndirectCommandNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BindVertexBufferIndirectCommandNV const& ) const = default; +#else + bool operator==( BindVertexBufferIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( bufferAddress == rhs.bufferAddress ) + && ( size == rhs.size ) + && ( stride == rhs.stride ); + } + + 
bool operator!=( BindVertexBufferIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceAddress bufferAddress = {}; + uint32_t size = {}; + uint32_t stride = {}; + + }; + static_assert( sizeof( BindVertexBufferIndirectCommandNV ) == sizeof( VkBindVertexBufferIndirectCommandNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<BindVertexBufferIndirectCommandNV>::value, "struct wrapper is not a standard layout!" ); + + struct ImageSubresourceLayers + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageSubresourceLayers(VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, uint32_t mipLevel_ = {}, uint32_t baseArrayLayer_ = {}, uint32_t layerCount_ = {}) VULKAN_HPP_NOEXCEPT + : aspectMask( aspectMask_ ), mipLevel( mipLevel_ ), baseArrayLayer( baseArrayLayer_ ), layerCount( layerCount_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageSubresourceLayers( ImageSubresourceLayers const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageSubresourceLayers( VkImageSubresourceLayers const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageSubresourceLayers & operator=( VkImageSubresourceLayers const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const *>( &rhs ); + return *this; + } + + ImageSubresourceLayers & operator=( ImageSubresourceLayers const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( ImageSubresourceLayers ) ); + return *this; + } + + ImageSubresourceLayers & setAspectMask( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ ) VULKAN_HPP_NOEXCEPT + { + aspectMask = aspectMask_; + return *this; + } + + ImageSubresourceLayers & setMipLevel( uint32_t mipLevel_ ) VULKAN_HPP_NOEXCEPT + { + mipLevel = mipLevel_; + return *this; + } + + ImageSubresourceLayers & setBaseArrayLayer( uint32_t baseArrayLayer_ ) VULKAN_HPP_NOEXCEPT + { + baseArrayLayer = baseArrayLayer_; + return *this; + } + + ImageSubresourceLayers & setLayerCount( uint32_t layerCount_ ) VULKAN_HPP_NOEXCEPT + { + layerCount = layerCount_; + return *this; + } + + + operator VkImageSubresourceLayers const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkImageSubresourceLayers*>( this ); + } + + operator VkImageSubresourceLayers &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkImageSubresourceLayers*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageSubresourceLayers const& ) const = default; +#else + bool operator==( ImageSubresourceLayers const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( aspectMask == rhs.aspectMask ) + && ( mipLevel == rhs.mipLevel ) + && ( baseArrayLayer == rhs.baseArrayLayer ) + && ( layerCount == rhs.layerCount ); + } + + bool operator!=( ImageSubresourceLayers const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {}; + uint32_t mipLevel = {}; + uint32_t baseArrayLayer = {}; + uint32_t layerCount = {}; + + }; + static_assert( sizeof( ImageSubresourceLayers ) == sizeof( VkImageSubresourceLayers ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<ImageSubresourceLayers>::value, "struct wrapper is not a standard layout!"
); + + struct ImageBlit2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageBlit2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 ImageBlit2KHR(VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource_ = {}, std::array const& srcOffsets_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, std::array const& dstOffsets_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubresource( srcSubresource_ ), srcOffsets( srcOffsets_ ), dstSubresource( dstSubresource_ ), dstOffsets( dstOffsets_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 ImageBlit2KHR( ImageBlit2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageBlit2KHR( VkImageBlit2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageBlit2KHR & operator=( VkImageBlit2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageBlit2KHR & operator=( ImageBlit2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageBlit2KHR ) ); + return *this; + } + + ImageBlit2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageBlit2KHR & setSrcSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & srcSubresource_ ) VULKAN_HPP_NOEXCEPT + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageBlit2KHR & setSrcOffsets( std::array const & srcOffsets_ ) VULKAN_HPP_NOEXCEPT + { + srcOffsets = srcOffsets_; + return *this; + } + + ImageBlit2KHR & setDstSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & dstSubresource_ ) VULKAN_HPP_NOEXCEPT + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageBlit2KHR & setDstOffsets( std::array const & dstOffsets_ ) VULKAN_HPP_NOEXCEPT + { + dstOffsets = dstOffsets_; + return *this; + } + + + operator VkImageBlit2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageBlit2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageBlit2KHR const& ) const = default; +#else + bool operator==( ImageBlit2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcSubresource == rhs.srcSubresource ) + && ( srcOffsets == rhs.srcOffsets ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffsets == rhs.dstOffsets ); + } + + bool operator!=( ImageBlit2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageBlit2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D srcOffsets = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D dstOffsets = {}; + + }; + static_assert( sizeof( ImageBlit2KHR ) == sizeof( VkImageBlit2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageBlit2KHR; + }; + + struct BlitImageInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBlitImageInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 BlitImageInfo2KHR(VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageBlit2KHR* pRegions_ = {}, VULKAN_HPP_NAMESPACE::Filter filter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest) VULKAN_HPP_NOEXCEPT + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( regionCount_ ), pRegions( pRegions_ ), filter( filter_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 BlitImageInfo2KHR( BlitImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BlitImageInfo2KHR( VkBlitImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BlitImageInfo2KHR( VULKAN_HPP_NAMESPACE::Image srcImage_, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, VULKAN_HPP_NAMESPACE::Image dstImage_, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_, VULKAN_HPP_NAMESPACE::Filter filter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest ) + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ), filter( filter_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BlitImageInfo2KHR & operator=( VkBlitImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BlitImageInfo2KHR & operator=( BlitImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BlitImageInfo2KHR ) ); + return *this; + } + + BlitImageInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BlitImageInfo2KHR & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT + { + srcImage = srcImage_; + return *this; + } + + BlitImageInfo2KHR & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + srcImageLayout = srcImageLayout_; + return *this; + } + + BlitImageInfo2KHR & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT + { + dstImage = dstImage_; + return *this; + } + + BlitImageInfo2KHR & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + dstImageLayout = dstImageLayout_; + return *this; + } + + BlitImageInfo2KHR & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = regionCount_; + return *this; + } + + BlitImageInfo2KHR & setPRegions( const VULKAN_HPP_NAMESPACE::ImageBlit2KHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BlitImageInfo2KHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = static_cast( 
regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + BlitImageInfo2KHR & setFilter( VULKAN_HPP_NAMESPACE::Filter filter_ ) VULKAN_HPP_NOEXCEPT + { + filter = filter_; + return *this; + } + + + operator VkBlitImageInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBlitImageInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BlitImageInfo2KHR const& ) const = default; +#else + bool operator==( BlitImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcImage == rhs.srcImage ) + && ( srcImageLayout == rhs.srcImageLayout ) + && ( dstImage == rhs.dstImage ) + && ( dstImageLayout == rhs.dstImageLayout ) + && ( regionCount == rhs.regionCount ) + && ( pRegions == rhs.pRegions ) + && ( filter == rhs.filter ); + } + + bool operator!=( BlitImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBlitImageInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image srcImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::Image dstImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::ImageBlit2KHR* pRegions = {}; + VULKAN_HPP_NAMESPACE::Filter filter = VULKAN_HPP_NAMESPACE::Filter::eNearest; + + }; + static_assert( sizeof( BlitImageInfo2KHR ) == sizeof( VkBlitImageInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eBlitImageInfo2KHR> + { + using Type = BlitImageInfo2KHR; + }; + + struct BufferCopy + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferCopy(VULKAN_HPP_NAMESPACE::DeviceSize srcOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : srcOffset( srcOffset_ ), dstOffset( dstOffset_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferCopy( BufferCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferCopy( VkBufferCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferCopy & operator=( VkBufferCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::BufferCopy const *>( &rhs ); + return *this; + } + + BufferCopy & operator=( BufferCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( BufferCopy ) ); + return *this; + } + + BufferCopy & setSrcOffset( VULKAN_HPP_NAMESPACE::DeviceSize srcOffset_ ) VULKAN_HPP_NOEXCEPT + { + srcOffset = srcOffset_; + return *this; + } + + BufferCopy & setDstOffset( VULKAN_HPP_NAMESPACE::DeviceSize dstOffset_ ) VULKAN_HPP_NOEXCEPT + { + dstOffset = dstOffset_; + return *this; + } + + BufferCopy & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator VkBufferCopy const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkBufferCopy*>( this ); + } + + operator VkBufferCopy &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkBufferCopy*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferCopy const& ) const = default; +#else + bool operator==( BufferCopy const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( srcOffset == rhs.srcOffset ) + && ( dstOffset == rhs.dstOffset ) + && ( size == rhs.size ); + } + + bool operator!=( BufferCopy const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceSize srcOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + + }; + static_assert( sizeof( BufferCopy ) == sizeof( VkBufferCopy ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<BufferCopy>::value, "struct wrapper is not a standard layout!"
); + + struct BufferCopy2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferCopy2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferCopy2KHR(VULKAN_HPP_NAMESPACE::DeviceSize srcOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : srcOffset( srcOffset_ ), dstOffset( dstOffset_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferCopy2KHR( BufferCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferCopy2KHR( VkBufferCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferCopy2KHR & operator=( VkBufferCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferCopy2KHR & operator=( BufferCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferCopy2KHR ) ); + return *this; + } + + BufferCopy2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferCopy2KHR & setSrcOffset( VULKAN_HPP_NAMESPACE::DeviceSize srcOffset_ ) VULKAN_HPP_NOEXCEPT + { + srcOffset = srcOffset_; + return *this; + } + + BufferCopy2KHR & setDstOffset( VULKAN_HPP_NAMESPACE::DeviceSize dstOffset_ ) VULKAN_HPP_NOEXCEPT + { + dstOffset = dstOffset_; + return *this; + } + + BufferCopy2KHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator VkBufferCopy2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferCopy2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferCopy2KHR const& ) const = default; +#else + bool operator==( BufferCopy2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcOffset == rhs.srcOffset ) + && ( dstOffset == rhs.dstOffset ) + && ( size == rhs.size ); + } + + bool operator!=( BufferCopy2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferCopy2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize srcOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize dstOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + + }; + static_assert( sizeof( BufferCopy2KHR ) == sizeof( VkBufferCopy2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = BufferCopy2KHR; + }; + + struct BufferCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferCreateInfo(VULKAN_HPP_NAMESPACE::BufferCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::SharingMode sharingMode_ = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = {}, const uint32_t* pQueueFamilyIndices_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), size( size_ ), usage( usage_ ), sharingMode( sharingMode_ ), queueFamilyIndexCount( queueFamilyIndexCount_ ), pQueueFamilyIndices( pQueueFamilyIndices_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferCreateInfo( BufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferCreateInfo( VkBufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BufferCreateInfo( VULKAN_HPP_NAMESPACE::BufferCreateFlags flags_, VULKAN_HPP_NAMESPACE::DeviceSize size_, VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_, VULKAN_HPP_NAMESPACE::SharingMode sharingMode_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_ ) + : flags( flags_ ), size( size_ ), usage( usage_ ), sharingMode( sharingMode_ ), queueFamilyIndexCount( static_cast( queueFamilyIndices_.size() ) ), pQueueFamilyIndices( queueFamilyIndices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferCreateInfo & operator=( VkBufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferCreateInfo & operator=( BufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferCreateInfo ) ); + return *this; + } + + BufferCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::BufferCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + BufferCreateInfo & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + BufferCreateInfo & setUsage( VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + BufferCreateInfo & setSharingMode( VULKAN_HPP_NAMESPACE::SharingMode sharingMode_ ) VULKAN_HPP_NOEXCEPT + { + sharingMode = sharingMode_; + return *this; + } + + BufferCreateInfo & setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + BufferCreateInfo & setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + BufferCreateInfo & setQueueFamilyIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = static_cast( queueFamilyIndices_.size() ); + pQueueFamilyIndices = queueFamilyIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkBufferCreateInfo const&() const 
VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferCreateInfo const& ) const = default; +#else + bool operator==( BufferCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( size == rhs.size ) + && ( usage == rhs.usage ) + && ( sharingMode == rhs.sharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices ); + } + + bool operator!=( BufferCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::BufferCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VULKAN_HPP_NAMESPACE::BufferUsageFlags usage = {}; + VULKAN_HPP_NAMESPACE::SharingMode sharingMode = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive; + uint32_t queueFamilyIndexCount = {}; + const uint32_t* pQueueFamilyIndices = {}; + + }; + static_assert( sizeof( BufferCreateInfo ) == sizeof( VkBufferCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BufferCreateInfo; + }; + + struct BufferDeviceAddressCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferDeviceAddressCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferDeviceAddressCreateInfoEXT(VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}) VULKAN_HPP_NOEXCEPT + : deviceAddress( deviceAddress_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferDeviceAddressCreateInfoEXT( BufferDeviceAddressCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferDeviceAddressCreateInfoEXT( VkBufferDeviceAddressCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferDeviceAddressCreateInfoEXT & operator=( VkBufferDeviceAddressCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferDeviceAddressCreateInfoEXT & operator=( BufferDeviceAddressCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferDeviceAddressCreateInfoEXT ) ); + return *this; + } + + BufferDeviceAddressCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferDeviceAddressCreateInfoEXT & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + deviceAddress = deviceAddress_; + return *this; + } + + + operator VkBufferDeviceAddressCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferDeviceAddressCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferDeviceAddressCreateInfoEXT const& ) const = default; +#else + bool operator==( BufferDeviceAddressCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( 
pNext == rhs.pNext ) + && ( deviceAddress == rhs.deviceAddress ); + } + + bool operator!=( BufferDeviceAddressCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferDeviceAddressCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress = {}; + + }; + static_assert( sizeof( BufferDeviceAddressCreateInfoEXT ) == sizeof( VkBufferDeviceAddressCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<BufferDeviceAddressCreateInfoEXT>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::eBufferDeviceAddressCreateInfoEXT> + { + using Type = BufferDeviceAddressCreateInfoEXT; + }; + + struct BufferDeviceAddressInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferDeviceAddressInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferDeviceAddressInfo(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferDeviceAddressInfo( BufferDeviceAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferDeviceAddressInfo( VkBufferDeviceAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferDeviceAddressInfo & operator=( VkBufferDeviceAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo const *>( &rhs ); + return *this; + } + + BufferDeviceAddressInfo & operator=( BufferDeviceAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( BufferDeviceAddressInfo ) ); + return *this; + } + + BufferDeviceAddressInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferDeviceAddressInfo & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + + operator VkBufferDeviceAddressInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkBufferDeviceAddressInfo*>( this ); + } + + operator VkBufferDeviceAddressInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkBufferDeviceAddressInfo*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferDeviceAddressInfo const& ) const = default; +#else + bool operator==( BufferDeviceAddressInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( BufferDeviceAddressInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferDeviceAddressInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + + }; + static_assert( sizeof( BufferDeviceAddressInfo ) == sizeof( VkBufferDeviceAddressInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<BufferDeviceAddressInfo>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = BufferDeviceAddressInfo; + }; + using BufferDeviceAddressInfoEXT = BufferDeviceAddressInfo; + using BufferDeviceAddressInfoKHR = BufferDeviceAddressInfo; + + struct BufferImageCopy + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferImageCopy(VULKAN_HPP_NAMESPACE::DeviceSize bufferOffset_ = {}, uint32_t bufferRowLength_ = {}, uint32_t bufferImageHeight_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}) VULKAN_HPP_NOEXCEPT + : bufferOffset( bufferOffset_ ), bufferRowLength( bufferRowLength_ ), bufferImageHeight( bufferImageHeight_ ), imageSubresource( imageSubresource_ ), imageOffset( imageOffset_ ), imageExtent( imageExtent_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferImageCopy( BufferImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferImageCopy( VkBufferImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferImageCopy & operator=( VkBufferImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferImageCopy & operator=( BufferImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferImageCopy ) ); + return *this; + } + + BufferImageCopy & setBufferOffset( VULKAN_HPP_NAMESPACE::DeviceSize bufferOffset_ ) VULKAN_HPP_NOEXCEPT + { + bufferOffset = bufferOffset_; + return *this; + } + + BufferImageCopy & setBufferRowLength( uint32_t bufferRowLength_ ) VULKAN_HPP_NOEXCEPT + { + bufferRowLength = bufferRowLength_; + return *this; + } + + BufferImageCopy & setBufferImageHeight( uint32_t bufferImageHeight_ ) VULKAN_HPP_NOEXCEPT + { + bufferImageHeight = bufferImageHeight_; + return *this; + } + + BufferImageCopy & setImageSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & imageSubresource_ ) VULKAN_HPP_NOEXCEPT + { + imageSubresource = imageSubresource_; + return *this; + } + + BufferImageCopy & setImageOffset( VULKAN_HPP_NAMESPACE::Offset3D const & imageOffset_ ) VULKAN_HPP_NOEXCEPT + { + imageOffset = imageOffset_; + return *this; + } + + BufferImageCopy & setImageExtent( VULKAN_HPP_NAMESPACE::Extent3D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT + { + imageExtent = imageExtent_; + return *this; + } + + + operator VkBufferImageCopy const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferImageCopy &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferImageCopy const& ) const = default; +#else + bool operator==( BufferImageCopy const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( bufferOffset == rhs.bufferOffset ) + && ( bufferRowLength == rhs.bufferRowLength ) + && ( bufferImageHeight == rhs.bufferImageHeight ) + && ( imageSubresource == rhs.imageSubresource ) + && ( imageOffset == rhs.imageOffset ) + && ( imageExtent == rhs.imageExtent ); + } + + bool operator!=( BufferImageCopy const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceSize bufferOffset = {}; + uint32_t bufferRowLength = {}; + uint32_t bufferImageHeight = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D imageOffset = {}; + VULKAN_HPP_NAMESPACE::Extent3D imageExtent 
= {}; + + }; + static_assert( sizeof( BufferImageCopy ) == sizeof( VkBufferImageCopy ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct BufferImageCopy2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferImageCopy2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferImageCopy2KHR(VULKAN_HPP_NAMESPACE::DeviceSize bufferOffset_ = {}, uint32_t bufferRowLength_ = {}, uint32_t bufferImageHeight_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D imageOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageExtent_ = {}) VULKAN_HPP_NOEXCEPT + : bufferOffset( bufferOffset_ ), bufferRowLength( bufferRowLength_ ), bufferImageHeight( bufferImageHeight_ ), imageSubresource( imageSubresource_ ), imageOffset( imageOffset_ ), imageExtent( imageExtent_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferImageCopy2KHR( BufferImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferImageCopy2KHR( VkBufferImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferImageCopy2KHR & operator=( VkBufferImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferImageCopy2KHR & operator=( BufferImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferImageCopy2KHR ) ); + return *this; + } + + BufferImageCopy2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferImageCopy2KHR & setBufferOffset( VULKAN_HPP_NAMESPACE::DeviceSize bufferOffset_ ) VULKAN_HPP_NOEXCEPT + { + bufferOffset = bufferOffset_; + return *this; + } + + BufferImageCopy2KHR & setBufferRowLength( uint32_t bufferRowLength_ ) VULKAN_HPP_NOEXCEPT + { + bufferRowLength = bufferRowLength_; + return *this; + } + + BufferImageCopy2KHR & setBufferImageHeight( uint32_t bufferImageHeight_ ) VULKAN_HPP_NOEXCEPT + { + bufferImageHeight = bufferImageHeight_; + return *this; + } + + BufferImageCopy2KHR & setImageSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & imageSubresource_ ) VULKAN_HPP_NOEXCEPT + { + imageSubresource = imageSubresource_; + return *this; + } + + BufferImageCopy2KHR & setImageOffset( VULKAN_HPP_NAMESPACE::Offset3D const & imageOffset_ ) VULKAN_HPP_NOEXCEPT + { + imageOffset = imageOffset_; + return *this; + } + + BufferImageCopy2KHR & setImageExtent( VULKAN_HPP_NAMESPACE::Extent3D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT + { + imageExtent = imageExtent_; + return *this; + } + + + operator VkBufferImageCopy2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferImageCopy2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferImageCopy2KHR const& ) const = default; +#else + bool operator==( BufferImageCopy2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( bufferOffset == rhs.bufferOffset ) + && ( bufferRowLength == rhs.bufferRowLength ) + && ( bufferImageHeight == rhs.bufferImageHeight ) + && ( imageSubresource == rhs.imageSubresource ) + && ( imageOffset == rhs.imageOffset ) + && ( imageExtent == rhs.imageExtent ); + 
} + + bool operator!=( BufferImageCopy2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferImageCopy2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize bufferOffset = {}; + uint32_t bufferRowLength = {}; + uint32_t bufferImageHeight = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers imageSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D imageOffset = {}; + VULKAN_HPP_NAMESPACE::Extent3D imageExtent = {}; + + }; + static_assert( sizeof( BufferImageCopy2KHR ) == sizeof( VkBufferImageCopy2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BufferImageCopy2KHR; + }; + + struct BufferMemoryBarrier + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferMemoryBarrier; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferMemoryBarrier(VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ = {}, uint32_t srcQueueFamilyIndex_ = {}, uint32_t dstQueueFamilyIndex_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : srcAccessMask( srcAccessMask_ ), dstAccessMask( dstAccessMask_ ), srcQueueFamilyIndex( srcQueueFamilyIndex_ ), dstQueueFamilyIndex( dstQueueFamilyIndex_ ), buffer( buffer_ ), offset( offset_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferMemoryBarrier( BufferMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferMemoryBarrier( VkBufferMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferMemoryBarrier & operator=( VkBufferMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferMemoryBarrier & operator=( BufferMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferMemoryBarrier ) ); + return *this; + } + + BufferMemoryBarrier & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferMemoryBarrier & setSrcAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + srcAccessMask = srcAccessMask_; + return *this; + } + + BufferMemoryBarrier & setDstAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + dstAccessMask = dstAccessMask_; + return *this; + } + + BufferMemoryBarrier & setSrcQueueFamilyIndex( uint32_t srcQueueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + srcQueueFamilyIndex = srcQueueFamilyIndex_; + return *this; + } + + BufferMemoryBarrier & setDstQueueFamilyIndex( uint32_t dstQueueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + dstQueueFamilyIndex = dstQueueFamilyIndex_; + return *this; + } + + BufferMemoryBarrier & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + BufferMemoryBarrier & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + BufferMemoryBarrier & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator 
VkBufferMemoryBarrier const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferMemoryBarrier &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferMemoryBarrier const& ) const = default; +#else + bool operator==( BufferMemoryBarrier const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( srcQueueFamilyIndex == rhs.srcQueueFamilyIndex ) + && ( dstQueueFamilyIndex == rhs.dstQueueFamilyIndex ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( BufferMemoryBarrier const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferMemoryBarrier; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask = {}; + VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask = {}; + uint32_t srcQueueFamilyIndex = {}; + uint32_t dstQueueFamilyIndex = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + + }; + static_assert( sizeof( BufferMemoryBarrier ) == sizeof( VkBufferMemoryBarrier ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BufferMemoryBarrier; + }; + + struct BufferMemoryRequirementsInfo2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferMemoryRequirementsInfo2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferMemoryRequirementsInfo2(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferMemoryRequirementsInfo2( BufferMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferMemoryRequirementsInfo2( VkBufferMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferMemoryRequirementsInfo2 & operator=( VkBufferMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferMemoryRequirementsInfo2 & operator=( BufferMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferMemoryRequirementsInfo2 ) ); + return *this; + } + + BufferMemoryRequirementsInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferMemoryRequirementsInfo2 & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + + operator VkBufferMemoryRequirementsInfo2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferMemoryRequirementsInfo2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferMemoryRequirementsInfo2 const& ) const = default; +#else + bool operator==( BufferMemoryRequirementsInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext 
) + && ( buffer == rhs.buffer ); + } + + bool operator!=( BufferMemoryRequirementsInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferMemoryRequirementsInfo2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + + }; + static_assert( sizeof( BufferMemoryRequirementsInfo2 ) == sizeof( VkBufferMemoryRequirementsInfo2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BufferMemoryRequirementsInfo2; + }; + using BufferMemoryRequirementsInfo2KHR = BufferMemoryRequirementsInfo2; + + struct BufferOpaqueCaptureAddressCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferOpaqueCaptureAddressCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferOpaqueCaptureAddressCreateInfo(uint64_t opaqueCaptureAddress_ = {}) VULKAN_HPP_NOEXCEPT + : opaqueCaptureAddress( opaqueCaptureAddress_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferOpaqueCaptureAddressCreateInfo( BufferOpaqueCaptureAddressCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferOpaqueCaptureAddressCreateInfo( VkBufferOpaqueCaptureAddressCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferOpaqueCaptureAddressCreateInfo & operator=( VkBufferOpaqueCaptureAddressCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferOpaqueCaptureAddressCreateInfo & operator=( BufferOpaqueCaptureAddressCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferOpaqueCaptureAddressCreateInfo ) ); + return *this; + } + + BufferOpaqueCaptureAddressCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferOpaqueCaptureAddressCreateInfo & setOpaqueCaptureAddress( uint64_t opaqueCaptureAddress_ ) VULKAN_HPP_NOEXCEPT + { + opaqueCaptureAddress = opaqueCaptureAddress_; + return *this; + } + + + operator VkBufferOpaqueCaptureAddressCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferOpaqueCaptureAddressCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferOpaqueCaptureAddressCreateInfo const& ) const = default; +#else + bool operator==( BufferOpaqueCaptureAddressCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( opaqueCaptureAddress == rhs.opaqueCaptureAddress ); + } + + bool operator!=( BufferOpaqueCaptureAddressCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferOpaqueCaptureAddressCreateInfo; + const void* pNext = {}; + uint64_t opaqueCaptureAddress = {}; + + }; + static_assert( sizeof( BufferOpaqueCaptureAddressCreateInfo ) == sizeof( VkBufferOpaqueCaptureAddressCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = BufferOpaqueCaptureAddressCreateInfo; + }; + using BufferOpaqueCaptureAddressCreateInfoKHR = BufferOpaqueCaptureAddressCreateInfo; + + struct BufferViewCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eBufferViewCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR BufferViewCreateInfo(VULKAN_HPP_NAMESPACE::BufferViewCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize range_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), buffer( buffer_ ), format( format_ ), offset( offset_ ), range( range_ ) + {} + + VULKAN_HPP_CONSTEXPR BufferViewCreateInfo( BufferViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + BufferViewCreateInfo( VkBufferViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + BufferViewCreateInfo & operator=( VkBufferViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + BufferViewCreateInfo & operator=( BufferViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( BufferViewCreateInfo ) ); + return *this; + } + + BufferViewCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + BufferViewCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::BufferViewCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + BufferViewCreateInfo & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + BufferViewCreateInfo & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + BufferViewCreateInfo & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + BufferViewCreateInfo & setRange( VULKAN_HPP_NAMESPACE::DeviceSize range_ ) VULKAN_HPP_NOEXCEPT + { + range = range_; + return *this; + } + + + operator VkBufferViewCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkBufferViewCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferViewCreateInfo const& ) const = default; +#else + bool operator==( BufferViewCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( buffer == rhs.buffer ) + && ( format == rhs.format ) + && ( offset == rhs.offset ) + && ( range == rhs.range ); + } + + bool operator!=( BufferViewCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eBufferViewCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::BufferViewCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize range = {}; + + }; + static_assert( sizeof( BufferViewCreateInfo 
) == sizeof( VkBufferViewCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = BufferViewCreateInfo; + }; + + struct CalibratedTimestampInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCalibratedTimestampInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CalibratedTimestampInfoEXT(VULKAN_HPP_NAMESPACE::TimeDomainEXT timeDomain_ = VULKAN_HPP_NAMESPACE::TimeDomainEXT::eDevice) VULKAN_HPP_NOEXCEPT + : timeDomain( timeDomain_ ) + {} + + VULKAN_HPP_CONSTEXPR CalibratedTimestampInfoEXT( CalibratedTimestampInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CalibratedTimestampInfoEXT( VkCalibratedTimestampInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CalibratedTimestampInfoEXT & operator=( VkCalibratedTimestampInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CalibratedTimestampInfoEXT & operator=( CalibratedTimestampInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CalibratedTimestampInfoEXT ) ); + return *this; + } + + CalibratedTimestampInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CalibratedTimestampInfoEXT & setTimeDomain( VULKAN_HPP_NAMESPACE::TimeDomainEXT timeDomain_ ) VULKAN_HPP_NOEXCEPT + { + timeDomain = timeDomain_; + return *this; + } + + + operator VkCalibratedTimestampInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCalibratedTimestampInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CalibratedTimestampInfoEXT const& ) const = default; +#else + bool operator==( CalibratedTimestampInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( timeDomain == rhs.timeDomain ); + } + + bool operator!=( CalibratedTimestampInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCalibratedTimestampInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::TimeDomainEXT timeDomain = VULKAN_HPP_NAMESPACE::TimeDomainEXT::eDevice; + + }; + static_assert( sizeof( CalibratedTimestampInfoEXT ) == sizeof( VkCalibratedTimestampInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eCalibratedTimestampInfoEXT>
+  {
+    using Type = CalibratedTimestampInfoEXT;
+  };
+
+  struct CheckpointDataNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCheckpointDataNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR CheckpointDataNV(VULKAN_HPP_NAMESPACE::PipelineStageFlagBits stage_ = VULKAN_HPP_NAMESPACE::PipelineStageFlagBits::eTopOfPipe, void* pCheckpointMarker_ = {}) VULKAN_HPP_NOEXCEPT
+    : stage( stage_ ), pCheckpointMarker( pCheckpointMarker_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR CheckpointDataNV( CheckpointDataNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    CheckpointDataNV( VkCheckpointDataNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    CheckpointDataNV & operator=( VkCheckpointDataNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::CheckpointDataNV const *>( &rhs );
+      return *this;
+    }
+
+    CheckpointDataNV & operator=( CheckpointDataNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( CheckpointDataNV ) );
+      return *this;
+    }
+
+
+    operator VkCheckpointDataNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkCheckpointDataNV*>( this );
+    }
+
+    operator VkCheckpointDataNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkCheckpointDataNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( CheckpointDataNV const& ) const = default;
+#else
+    bool operator==( CheckpointDataNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( stage == rhs.stage )
+          && ( pCheckpointMarker == rhs.pCheckpointMarker );
+    }
+
+    bool operator!=( CheckpointDataNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCheckpointDataNV;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::PipelineStageFlagBits stage = VULKAN_HPP_NAMESPACE::PipelineStageFlagBits::eTopOfPipe;
+    void* pCheckpointMarker = {};
+
+  };
+  static_assert( sizeof( CheckpointDataNV ) == sizeof( VkCheckpointDataNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<CheckpointDataNV>::value, "struct wrapper is not a standard layout!"
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eCheckpointDataNV>
+  {
+    using Type = CheckpointDataNV;
+  };
+
+  union ClearColorValue
+  {
+    ClearColorValue( VULKAN_HPP_NAMESPACE::ClearColorValue const& rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::ClearColorValue ) );
+    }
+
+    ClearColorValue( const std::array<float,4>& float32_ = {} )
+      : float32( float32_ )
+    {}
+
+    ClearColorValue( const std::array<int32_t,4>& int32_ )
+      : int32( int32_ )
+    {}
+
+    ClearColorValue( const std::array<uint32_t,4>& uint32_ )
+      : uint32( uint32_ )
+    {}
+
+    ClearColorValue & setFloat32( std::array<float,4> float32_ ) VULKAN_HPP_NOEXCEPT
+    {
+      float32 = float32_;
+      return *this;
+    }
+
+    ClearColorValue & setInt32( std::array<int32_t,4> int32_ ) VULKAN_HPP_NOEXCEPT
+    {
+      int32 = int32_;
+      return *this;
+    }
+
+    ClearColorValue & setUint32( std::array<uint32_t,4> uint32_ ) VULKAN_HPP_NOEXCEPT
+    {
+      uint32 = uint32_;
+      return *this;
+    }
+
+    VULKAN_HPP_NAMESPACE::ClearColorValue & operator=( VULKAN_HPP_NAMESPACE::ClearColorValue const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::ClearColorValue ) );
+      return *this;
+    }
+
+    operator VkClearColorValue const&() const
+    {
+      return *reinterpret_cast<const VkClearColorValue*>(this);
+    }
+
+    operator VkClearColorValue &()
+    {
+      return *reinterpret_cast<VkClearColorValue*>(this);
+    }
+
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<float, 4> float32;
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<int32_t, 4> int32;
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint32_t, 4> uint32;
+  };
+
+  struct ClearDepthStencilValue
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR ClearDepthStencilValue(float depth_ = {}, uint32_t stencil_ = {}) VULKAN_HPP_NOEXCEPT
+    : depth( depth_ ), stencil( stencil_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR ClearDepthStencilValue( ClearDepthStencilValue const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ClearDepthStencilValue( VkClearDepthStencilValue const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ClearDepthStencilValue & operator=( VkClearDepthStencilValue const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ClearDepthStencilValue const *>( &rhs );
+      return *this;
+    }
+
+    ClearDepthStencilValue & operator=( ClearDepthStencilValue const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ClearDepthStencilValue ) );
+      return *this;
+    }
+
+    ClearDepthStencilValue & setDepth( float depth_ ) VULKAN_HPP_NOEXCEPT
+    {
+      depth = depth_;
+      return *this;
+    }
+
+    ClearDepthStencilValue & setStencil( uint32_t stencil_ ) VULKAN_HPP_NOEXCEPT
+    {
+      stencil = stencil_;
+      return *this;
+    }
+
+
+    operator VkClearDepthStencilValue const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkClearDepthStencilValue*>( this );
+    }
+
+    operator VkClearDepthStencilValue &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkClearDepthStencilValue*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( ClearDepthStencilValue const& ) const = default;
+#else
+    bool operator==( ClearDepthStencilValue const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( depth == rhs.depth )
+          && ( stencil == rhs.stencil );
+    }
+
+    bool operator!=( ClearDepthStencilValue const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    float depth = {};
+    uint32_t stencil = {};
+
+  };
+  static_assert( sizeof( ClearDepthStencilValue ) == sizeof( VkClearDepthStencilValue ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ClearDepthStencilValue>::value, "struct wrapper is not a standard layout!"
);
+
+  union ClearValue
+  {
+    ClearValue( VULKAN_HPP_NAMESPACE::ClearValue const& rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::ClearValue ) );
+    }
+
+    ClearValue( VULKAN_HPP_NAMESPACE::ClearColorValue color_ = {} )
+      : color( color_ )
+    {}
+
+    ClearValue( VULKAN_HPP_NAMESPACE::ClearDepthStencilValue depthStencil_ )
+      : depthStencil( depthStencil_ )
+    {}
+
+    ClearValue & setColor( VULKAN_HPP_NAMESPACE::ClearColorValue const & color_ ) VULKAN_HPP_NOEXCEPT
+    {
+      color = color_;
+      return *this;
+    }
+
+    ClearValue & setDepthStencil( VULKAN_HPP_NAMESPACE::ClearDepthStencilValue const & depthStencil_ ) VULKAN_HPP_NOEXCEPT
+    {
+      depthStencil = depthStencil_;
+      return *this;
+    }
+
+    VULKAN_HPP_NAMESPACE::ClearValue & operator=( VULKAN_HPP_NAMESPACE::ClearValue const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::ClearValue ) );
+      return *this;
+    }
+
+    operator VkClearValue const&() const
+    {
+      return *reinterpret_cast<const VkClearValue*>(this);
+    }
+
+    operator VkClearValue &()
+    {
+      return *reinterpret_cast<VkClearValue*>(this);
+    }
+
+#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS
+    VULKAN_HPP_NAMESPACE::ClearColorValue color;
+    VULKAN_HPP_NAMESPACE::ClearDepthStencilValue depthStencil;
+#else
+    VkClearColorValue color;
+    VkClearDepthStencilValue depthStencil;
+#endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/
+  };
+
+  struct ClearAttachment
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    ClearAttachment(VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, uint32_t colorAttachment_ = {}, VULKAN_HPP_NAMESPACE::ClearValue clearValue_ = {}) VULKAN_HPP_NOEXCEPT
+    : aspectMask( aspectMask_ ), colorAttachment( colorAttachment_ ), clearValue( clearValue_ )
+    {}
+
+    ClearAttachment( ClearAttachment const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ClearAttachment( VkClearAttachment const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ClearAttachment & operator=( VkClearAttachment const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ClearAttachment const *>( &rhs );
+      return *this;
+    }
+
+    ClearAttachment & operator=( ClearAttachment const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ClearAttachment ) );
+      return *this;
+    }
+
+    ClearAttachment & setAspectMask( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ ) VULKAN_HPP_NOEXCEPT
+    {
+      aspectMask = aspectMask_;
+      return *this;
+    }
+
+    ClearAttachment & setColorAttachment( uint32_t colorAttachment_ ) VULKAN_HPP_NOEXCEPT
+    {
+      colorAttachment = colorAttachment_;
+      return *this;
+    }
+
+    ClearAttachment & setClearValue( VULKAN_HPP_NAMESPACE::ClearValue const & clearValue_ ) VULKAN_HPP_NOEXCEPT
+    {
+      clearValue = clearValue_;
+      return *this;
+    }
+
+
+    operator VkClearAttachment const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkClearAttachment*>( this );
+    }
+
+    operator VkClearAttachment &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkClearAttachment*>( this );
+    }
+
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {};
+    uint32_t colorAttachment = {};
+    VULKAN_HPP_NAMESPACE::ClearValue clearValue = {};
+
+  };
+  static_assert( sizeof( ClearAttachment ) == sizeof( VkClearAttachment ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ClearAttachment>::value, "struct wrapper is not a standard layout!"
);
+
+  struct ClearRect
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR ClearRect(VULKAN_HPP_NAMESPACE::Rect2D rect_ = {}, uint32_t baseArrayLayer_ = {}, uint32_t layerCount_ = {}) VULKAN_HPP_NOEXCEPT
+    : rect( rect_ ), baseArrayLayer( baseArrayLayer_ ), layerCount( layerCount_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR ClearRect( ClearRect const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ClearRect( VkClearRect const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ClearRect & operator=( VkClearRect const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ClearRect const *>( &rhs );
+      return *this;
+    }
+
+    ClearRect & operator=( ClearRect const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ClearRect ) );
+      return *this;
+    }
+
+    ClearRect & setRect( VULKAN_HPP_NAMESPACE::Rect2D const & rect_ ) VULKAN_HPP_NOEXCEPT
+    {
+      rect = rect_;
+      return *this;
+    }
+
+    ClearRect & setBaseArrayLayer( uint32_t baseArrayLayer_ ) VULKAN_HPP_NOEXCEPT
+    {
+      baseArrayLayer = baseArrayLayer_;
+      return *this;
+    }
+
+    ClearRect & setLayerCount( uint32_t layerCount_ ) VULKAN_HPP_NOEXCEPT
+    {
+      layerCount = layerCount_;
+      return *this;
+    }
+
+
+    operator VkClearRect const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkClearRect*>( this );
+    }
+
+    operator VkClearRect &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkClearRect*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( ClearRect const& ) const = default;
+#else
+    bool operator==( ClearRect const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( rect == rhs.rect )
+          && ( baseArrayLayer == rhs.baseArrayLayer )
+          && ( layerCount == rhs.layerCount );
+    }
+
+    bool operator!=( ClearRect const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::Rect2D rect = {};
+    uint32_t baseArrayLayer = {};
+    uint32_t layerCount = {};
+
+  };
+  static_assert( sizeof( ClearRect ) == sizeof( VkClearRect ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ClearRect>::value, "struct wrapper is not a standard layout!"
);
+
+  struct CoarseSampleLocationNV
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR CoarseSampleLocationNV(uint32_t pixelX_ = {}, uint32_t pixelY_ = {}, uint32_t sample_ = {}) VULKAN_HPP_NOEXCEPT
+    : pixelX( pixelX_ ), pixelY( pixelY_ ), sample( sample_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR CoarseSampleLocationNV( CoarseSampleLocationNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    CoarseSampleLocationNV( VkCoarseSampleLocationNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    CoarseSampleLocationNV & operator=( VkCoarseSampleLocationNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::CoarseSampleLocationNV const *>( &rhs );
+      return *this;
+    }
+
+    CoarseSampleLocationNV & operator=( CoarseSampleLocationNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( CoarseSampleLocationNV ) );
+      return *this;
+    }
+
+    CoarseSampleLocationNV & setPixelX( uint32_t pixelX_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pixelX = pixelX_;
+      return *this;
+    }
+
+    CoarseSampleLocationNV & setPixelY( uint32_t pixelY_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pixelY = pixelY_;
+      return *this;
+    }
+
+    CoarseSampleLocationNV & setSample( uint32_t sample_ ) VULKAN_HPP_NOEXCEPT
+    {
+      sample = sample_;
+      return *this;
+    }
+
+
+    operator VkCoarseSampleLocationNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkCoarseSampleLocationNV*>( this );
+    }
+
+    operator VkCoarseSampleLocationNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkCoarseSampleLocationNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( CoarseSampleLocationNV const& ) const = default;
+#else
+    bool operator==( CoarseSampleLocationNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( pixelX == rhs.pixelX )
+          && ( pixelY == rhs.pixelY )
+          && ( sample == rhs.sample );
+    }
+
+    bool operator!=( CoarseSampleLocationNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t pixelX = {};
+    uint32_t pixelY = {};
+    uint32_t sample = {};
+
+  };
+  static_assert( sizeof( CoarseSampleLocationNV ) == sizeof( VkCoarseSampleLocationNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<CoarseSampleLocationNV>::value, "struct wrapper is not a standard layout!"
); + + struct CoarseSampleOrderCustomNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CoarseSampleOrderCustomNV(VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV shadingRate_ = VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV::eNoInvocations, uint32_t sampleCount_ = {}, uint32_t sampleLocationCount_ = {}, const VULKAN_HPP_NAMESPACE::CoarseSampleLocationNV* pSampleLocations_ = {}) VULKAN_HPP_NOEXCEPT + : shadingRate( shadingRate_ ), sampleCount( sampleCount_ ), sampleLocationCount( sampleLocationCount_ ), pSampleLocations( pSampleLocations_ ) + {} + + VULKAN_HPP_CONSTEXPR CoarseSampleOrderCustomNV( CoarseSampleOrderCustomNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CoarseSampleOrderCustomNV( VkCoarseSampleOrderCustomNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CoarseSampleOrderCustomNV( VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV shadingRate_, uint32_t sampleCount_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & sampleLocations_ ) + : shadingRate( shadingRate_ ), sampleCount( sampleCount_ ), sampleLocationCount( static_cast( sampleLocations_.size() ) ), pSampleLocations( sampleLocations_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CoarseSampleOrderCustomNV & operator=( VkCoarseSampleOrderCustomNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CoarseSampleOrderCustomNV & operator=( CoarseSampleOrderCustomNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CoarseSampleOrderCustomNV ) ); + return *this; + } + + CoarseSampleOrderCustomNV & setShadingRate( VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV shadingRate_ ) VULKAN_HPP_NOEXCEPT + { + shadingRate = shadingRate_; + return *this; + } + + CoarseSampleOrderCustomNV & setSampleCount( uint32_t sampleCount_ ) VULKAN_HPP_NOEXCEPT + { + sampleCount = sampleCount_; + return *this; + } + + CoarseSampleOrderCustomNV & setSampleLocationCount( uint32_t sampleLocationCount_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationCount = sampleLocationCount_; + return *this; + } + + CoarseSampleOrderCustomNV & setPSampleLocations( const VULKAN_HPP_NAMESPACE::CoarseSampleLocationNV* pSampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + pSampleLocations = pSampleLocations_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CoarseSampleOrderCustomNV & setSampleLocations( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & sampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationCount = static_cast( sampleLocations_.size() ); + pSampleLocations = sampleLocations_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkCoarseSampleOrderCustomNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCoarseSampleOrderCustomNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CoarseSampleOrderCustomNV const& ) const = default; +#else + bool operator==( CoarseSampleOrderCustomNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( shadingRate == rhs.shadingRate ) + && ( sampleCount == rhs.sampleCount ) + && ( sampleLocationCount == rhs.sampleLocationCount ) + && ( pSampleLocations == rhs.pSampleLocations ); + } + + bool operator!=( CoarseSampleOrderCustomNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + 
return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV shadingRate = VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV::eNoInvocations; + uint32_t sampleCount = {}; + uint32_t sampleLocationCount = {}; + const VULKAN_HPP_NAMESPACE::CoarseSampleLocationNV* pSampleLocations = {}; + + }; + static_assert( sizeof( CoarseSampleOrderCustomNV ) == sizeof( VkCoarseSampleOrderCustomNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + class CommandPool + { + public: + using CType = VkCommandPool; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eCommandPool; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandPool; + + public: + VULKAN_HPP_CONSTEXPR CommandPool() VULKAN_HPP_NOEXCEPT + : m_commandPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR CommandPool( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_commandPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT CommandPool( VkCommandPool commandPool ) VULKAN_HPP_NOEXCEPT + : m_commandPool( commandPool ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + CommandPool & operator=(VkCommandPool commandPool) VULKAN_HPP_NOEXCEPT + { + m_commandPool = commandPool; + return *this; + } +#endif + + CommandPool & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_commandPool = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandPool const& ) const = default; +#else + bool operator==( CommandPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_commandPool == rhs.m_commandPool; + } + + bool operator!=(CommandPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_commandPool != rhs.m_commandPool; + } + + bool operator<(CommandPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_commandPool < rhs.m_commandPool; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandPool() const VULKAN_HPP_NOEXCEPT + { + return m_commandPool; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_commandPool != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_commandPool == VK_NULL_HANDLE; + } + + private: + VkCommandPool m_commandPool; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::CommandPool ) == sizeof( VkCommandPool ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::CommandPool; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::CommandPool; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::CommandPool; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct CommandBufferAllocateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCommandBufferAllocateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CommandBufferAllocateInfo(VULKAN_HPP_NAMESPACE::CommandPool commandPool_ = {}, VULKAN_HPP_NAMESPACE::CommandBufferLevel level_ = VULKAN_HPP_NAMESPACE::CommandBufferLevel::ePrimary, uint32_t commandBufferCount_ = {}) VULKAN_HPP_NOEXCEPT + : commandPool( commandPool_ ), level( level_ ), commandBufferCount( commandBufferCount_ ) + {} + + VULKAN_HPP_CONSTEXPR CommandBufferAllocateInfo( CommandBufferAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CommandBufferAllocateInfo( VkCommandBufferAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CommandBufferAllocateInfo & operator=( VkCommandBufferAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CommandBufferAllocateInfo & operator=( CommandBufferAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CommandBufferAllocateInfo ) ); + return *this; + } + + CommandBufferAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CommandBufferAllocateInfo & setCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool_ ) VULKAN_HPP_NOEXCEPT + { + commandPool = commandPool_; + return *this; + } + + CommandBufferAllocateInfo & setLevel( VULKAN_HPP_NAMESPACE::CommandBufferLevel level_ ) VULKAN_HPP_NOEXCEPT + { + level = level_; + return *this; + } + + CommandBufferAllocateInfo & setCommandBufferCount( uint32_t commandBufferCount_ ) VULKAN_HPP_NOEXCEPT + { + commandBufferCount = commandBufferCount_; + return *this; + } + + + operator VkCommandBufferAllocateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCommandBufferAllocateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandBufferAllocateInfo const& ) const = default; +#else + bool operator==( CommandBufferAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( commandPool == rhs.commandPool ) + && ( level == rhs.level ) + && ( commandBufferCount == rhs.commandBufferCount ); + } + + bool operator!=( CommandBufferAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCommandBufferAllocateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::CommandPool commandPool = {}; + VULKAN_HPP_NAMESPACE::CommandBufferLevel level = VULKAN_HPP_NAMESPACE::CommandBufferLevel::ePrimary; + uint32_t commandBufferCount = {}; + + }; + static_assert( sizeof( CommandBufferAllocateInfo ) == sizeof( VkCommandBufferAllocateInfo ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CommandBufferAllocateInfo; + }; + + class RenderPass + { + public: + using CType = VkRenderPass; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eRenderPass; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eRenderPass; + + public: + VULKAN_HPP_CONSTEXPR RenderPass() VULKAN_HPP_NOEXCEPT + : m_renderPass(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR RenderPass( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_renderPass(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT RenderPass( VkRenderPass renderPass ) VULKAN_HPP_NOEXCEPT + : m_renderPass( renderPass ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + RenderPass & operator=(VkRenderPass renderPass) VULKAN_HPP_NOEXCEPT + { + m_renderPass = renderPass; + return *this; + } +#endif + + RenderPass & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_renderPass = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPass const& ) const = default; +#else + bool operator==( RenderPass const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_renderPass == rhs.m_renderPass; + } + + bool operator!=(RenderPass const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_renderPass != rhs.m_renderPass; + } + + bool operator<(RenderPass const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_renderPass < rhs.m_renderPass; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkRenderPass() const VULKAN_HPP_NOEXCEPT + { + return m_renderPass; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_renderPass != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_renderPass == VK_NULL_HANDLE; + } + + private: + VkRenderPass m_renderPass; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::RenderPass ) == sizeof( VkRenderPass ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::RenderPass; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::RenderPass; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::RenderPass; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class Framebuffer + { + public: + using CType = VkFramebuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eFramebuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eFramebuffer; + + public: + VULKAN_HPP_CONSTEXPR Framebuffer() VULKAN_HPP_NOEXCEPT + : m_framebuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Framebuffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_framebuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Framebuffer( VkFramebuffer framebuffer ) VULKAN_HPP_NOEXCEPT + : m_framebuffer( framebuffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Framebuffer & operator=(VkFramebuffer framebuffer) VULKAN_HPP_NOEXCEPT + { + m_framebuffer = framebuffer; + return *this; + } +#endif + + Framebuffer & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_framebuffer = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Framebuffer const& ) const = default; +#else + bool operator==( Framebuffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer == rhs.m_framebuffer; + } + + bool operator!=(Framebuffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer != rhs.m_framebuffer; + } + + bool operator<(Framebuffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer < rhs.m_framebuffer; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkFramebuffer() const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_framebuffer == VK_NULL_HANDLE; + } + + private: + VkFramebuffer m_framebuffer; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Framebuffer ) == sizeof( VkFramebuffer ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Framebuffer; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Framebuffer; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Framebuffer; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct CommandBufferInheritanceInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCommandBufferInheritanceInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CommandBufferInheritanceInfo(VULKAN_HPP_NAMESPACE::RenderPass renderPass_ = {}, uint32_t subpass_ = {}, VULKAN_HPP_NAMESPACE::Framebuffer framebuffer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 occlusionQueryEnable_ = {}, VULKAN_HPP_NAMESPACE::QueryControlFlags queryFlags_ = {}, VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics_ = {}) VULKAN_HPP_NOEXCEPT + : renderPass( renderPass_ ), subpass( subpass_ ), framebuffer( framebuffer_ ), occlusionQueryEnable( occlusionQueryEnable_ ), queryFlags( queryFlags_ ), pipelineStatistics( pipelineStatistics_ ) + {} + + VULKAN_HPP_CONSTEXPR CommandBufferInheritanceInfo( CommandBufferInheritanceInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CommandBufferInheritanceInfo( VkCommandBufferInheritanceInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CommandBufferInheritanceInfo & operator=( VkCommandBufferInheritanceInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CommandBufferInheritanceInfo & operator=( CommandBufferInheritanceInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CommandBufferInheritanceInfo ) ); + return *this; + } + + CommandBufferInheritanceInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CommandBufferInheritanceInfo & setRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass_ ) VULKAN_HPP_NOEXCEPT + { + renderPass = renderPass_; + return *this; + } + + CommandBufferInheritanceInfo & setSubpass( uint32_t subpass_ ) VULKAN_HPP_NOEXCEPT + { + subpass = subpass_; + return *this; + } + + CommandBufferInheritanceInfo & setFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer_ ) VULKAN_HPP_NOEXCEPT + { + framebuffer = framebuffer_; + return *this; + } + + CommandBufferInheritanceInfo & setOcclusionQueryEnable( VULKAN_HPP_NAMESPACE::Bool32 occlusionQueryEnable_ ) VULKAN_HPP_NOEXCEPT + { + occlusionQueryEnable = occlusionQueryEnable_; + return *this; + } + + CommandBufferInheritanceInfo & setQueryFlags( VULKAN_HPP_NAMESPACE::QueryControlFlags queryFlags_ ) VULKAN_HPP_NOEXCEPT + { + queryFlags = queryFlags_; + return *this; + } + + CommandBufferInheritanceInfo & setPipelineStatistics( VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics_ ) VULKAN_HPP_NOEXCEPT + { + pipelineStatistics = pipelineStatistics_; + return *this; + } + + + operator VkCommandBufferInheritanceInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCommandBufferInheritanceInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandBufferInheritanceInfo const& ) const = default; +#else + bool operator==( 
CommandBufferInheritanceInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( renderPass == rhs.renderPass ) + && ( subpass == rhs.subpass ) + && ( framebuffer == rhs.framebuffer ) + && ( occlusionQueryEnable == rhs.occlusionQueryEnable ) + && ( queryFlags == rhs.queryFlags ) + && ( pipelineStatistics == rhs.pipelineStatistics ); + } + + bool operator!=( CommandBufferInheritanceInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCommandBufferInheritanceInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RenderPass renderPass = {}; + uint32_t subpass = {}; + VULKAN_HPP_NAMESPACE::Framebuffer framebuffer = {}; + VULKAN_HPP_NAMESPACE::Bool32 occlusionQueryEnable = {}; + VULKAN_HPP_NAMESPACE::QueryControlFlags queryFlags = {}; + VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics = {}; + + }; + static_assert( sizeof( CommandBufferInheritanceInfo ) == sizeof( VkCommandBufferInheritanceInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CommandBufferInheritanceInfo; + }; + + struct CommandBufferBeginInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCommandBufferBeginInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CommandBufferBeginInfo(VULKAN_HPP_NAMESPACE::CommandBufferUsageFlags flags_ = {}, const VULKAN_HPP_NAMESPACE::CommandBufferInheritanceInfo* pInheritanceInfo_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pInheritanceInfo( pInheritanceInfo_ ) + {} + + VULKAN_HPP_CONSTEXPR CommandBufferBeginInfo( CommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CommandBufferBeginInfo( VkCommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CommandBufferBeginInfo & operator=( VkCommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CommandBufferBeginInfo & operator=( CommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CommandBufferBeginInfo ) ); + return *this; + } + + CommandBufferBeginInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CommandBufferBeginInfo & setFlags( VULKAN_HPP_NAMESPACE::CommandBufferUsageFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + CommandBufferBeginInfo & setPInheritanceInfo( const VULKAN_HPP_NAMESPACE::CommandBufferInheritanceInfo* pInheritanceInfo_ ) VULKAN_HPP_NOEXCEPT + { + pInheritanceInfo = pInheritanceInfo_; + return *this; + } + + + operator VkCommandBufferBeginInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCommandBufferBeginInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandBufferBeginInfo const& ) const = default; +#else + bool operator==( CommandBufferBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pInheritanceInfo == rhs.pInheritanceInfo ); + } 
+ + bool operator!=( CommandBufferBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCommandBufferBeginInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::CommandBufferUsageFlags flags = {}; + const VULKAN_HPP_NAMESPACE::CommandBufferInheritanceInfo* pInheritanceInfo = {}; + + }; + static_assert( sizeof( CommandBufferBeginInfo ) == sizeof( VkCommandBufferBeginInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CommandBufferBeginInfo; + }; + + struct CommandBufferInheritanceConditionalRenderingInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCommandBufferInheritanceConditionalRenderingInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CommandBufferInheritanceConditionalRenderingInfoEXT(VULKAN_HPP_NAMESPACE::Bool32 conditionalRenderingEnable_ = {}) VULKAN_HPP_NOEXCEPT + : conditionalRenderingEnable( conditionalRenderingEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR CommandBufferInheritanceConditionalRenderingInfoEXT( CommandBufferInheritanceConditionalRenderingInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CommandBufferInheritanceConditionalRenderingInfoEXT( VkCommandBufferInheritanceConditionalRenderingInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CommandBufferInheritanceConditionalRenderingInfoEXT & operator=( VkCommandBufferInheritanceConditionalRenderingInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CommandBufferInheritanceConditionalRenderingInfoEXT & operator=( CommandBufferInheritanceConditionalRenderingInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CommandBufferInheritanceConditionalRenderingInfoEXT ) ); + return *this; + } + + CommandBufferInheritanceConditionalRenderingInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CommandBufferInheritanceConditionalRenderingInfoEXT & setConditionalRenderingEnable( VULKAN_HPP_NAMESPACE::Bool32 conditionalRenderingEnable_ ) VULKAN_HPP_NOEXCEPT + { + conditionalRenderingEnable = conditionalRenderingEnable_; + return *this; + } + + + operator VkCommandBufferInheritanceConditionalRenderingInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCommandBufferInheritanceConditionalRenderingInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandBufferInheritanceConditionalRenderingInfoEXT const& ) const = default; +#else + bool operator==( CommandBufferInheritanceConditionalRenderingInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( conditionalRenderingEnable == rhs.conditionalRenderingEnable ); + } + + bool operator!=( CommandBufferInheritanceConditionalRenderingInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCommandBufferInheritanceConditionalRenderingInfoEXT; + const void* pNext 
= {}; + VULKAN_HPP_NAMESPACE::Bool32 conditionalRenderingEnable = {}; + + }; + static_assert( sizeof( CommandBufferInheritanceConditionalRenderingInfoEXT ) == sizeof( VkCommandBufferInheritanceConditionalRenderingInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CommandBufferInheritanceConditionalRenderingInfoEXT; + }; + + struct CommandBufferInheritanceRenderPassTransformInfoQCOM + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCommandBufferInheritanceRenderPassTransformInfoQCOM; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CommandBufferInheritanceRenderPassTransformInfoQCOM(VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::Rect2D renderArea_ = {}) VULKAN_HPP_NOEXCEPT + : transform( transform_ ), renderArea( renderArea_ ) + {} + + VULKAN_HPP_CONSTEXPR CommandBufferInheritanceRenderPassTransformInfoQCOM( CommandBufferInheritanceRenderPassTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CommandBufferInheritanceRenderPassTransformInfoQCOM( VkCommandBufferInheritanceRenderPassTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CommandBufferInheritanceRenderPassTransformInfoQCOM & operator=( VkCommandBufferInheritanceRenderPassTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CommandBufferInheritanceRenderPassTransformInfoQCOM & operator=( CommandBufferInheritanceRenderPassTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CommandBufferInheritanceRenderPassTransformInfoQCOM ) ); + return *this; + } + + CommandBufferInheritanceRenderPassTransformInfoQCOM & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CommandBufferInheritanceRenderPassTransformInfoQCOM & setTransform( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ ) VULKAN_HPP_NOEXCEPT + { + transform = transform_; + return *this; + } + + CommandBufferInheritanceRenderPassTransformInfoQCOM & setRenderArea( VULKAN_HPP_NAMESPACE::Rect2D const & renderArea_ ) VULKAN_HPP_NOEXCEPT + { + renderArea = renderArea_; + return *this; + } + + + operator VkCommandBufferInheritanceRenderPassTransformInfoQCOM const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCommandBufferInheritanceRenderPassTransformInfoQCOM &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandBufferInheritanceRenderPassTransformInfoQCOM const& ) const = default; +#else + bool operator==( CommandBufferInheritanceRenderPassTransformInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( transform == rhs.transform ) + && ( renderArea == rhs.renderArea ); + } + + bool operator!=( CommandBufferInheritanceRenderPassTransformInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCommandBufferInheritanceRenderPassTransformInfoQCOM; + void* pNext = {}; 
+ VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + VULKAN_HPP_NAMESPACE::Rect2D renderArea = {}; + + }; + static_assert( sizeof( CommandBufferInheritanceRenderPassTransformInfoQCOM ) == sizeof( VkCommandBufferInheritanceRenderPassTransformInfoQCOM ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CommandBufferInheritanceRenderPassTransformInfoQCOM; + }; + + struct CommandPoolCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCommandPoolCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CommandPoolCreateInfo(VULKAN_HPP_NAMESPACE::CommandPoolCreateFlags flags_ = {}, uint32_t queueFamilyIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), queueFamilyIndex( queueFamilyIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR CommandPoolCreateInfo( CommandPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CommandPoolCreateInfo( VkCommandPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CommandPoolCreateInfo & operator=( VkCommandPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CommandPoolCreateInfo & operator=( CommandPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CommandPoolCreateInfo ) ); + return *this; + } + + CommandPoolCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CommandPoolCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::CommandPoolCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + CommandPoolCreateInfo & setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndex = queueFamilyIndex_; + return *this; + } + + + operator VkCommandPoolCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCommandPoolCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandPoolCreateInfo const& ) const = default; +#else + bool operator==( CommandPoolCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueFamilyIndex == rhs.queueFamilyIndex ); + } + + bool operator!=( CommandPoolCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCommandPoolCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::CommandPoolCreateFlags flags = {}; + uint32_t queueFamilyIndex = {}; + + }; + static_assert( sizeof( CommandPoolCreateInfo ) == sizeof( VkCommandPoolCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = CommandPoolCreateInfo; + }; + + class ShaderModule + { + public: + using CType = VkShaderModule; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eShaderModule; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eShaderModule; + + public: + VULKAN_HPP_CONSTEXPR ShaderModule() VULKAN_HPP_NOEXCEPT + : m_shaderModule(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ShaderModule( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_shaderModule(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ShaderModule( VkShaderModule shaderModule ) VULKAN_HPP_NOEXCEPT + : m_shaderModule( shaderModule ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ShaderModule & operator=(VkShaderModule shaderModule) VULKAN_HPP_NOEXCEPT + { + m_shaderModule = shaderModule; + return *this; + } +#endif + + ShaderModule & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_shaderModule = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ShaderModule const& ) const = default; +#else + bool operator==( ShaderModule const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule == rhs.m_shaderModule; + } + + bool operator!=(ShaderModule const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule != rhs.m_shaderModule; + } + + bool operator<(ShaderModule const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule < rhs.m_shaderModule; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkShaderModule() const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_shaderModule == VK_NULL_HANDLE; + } + + private: + VkShaderModule m_shaderModule; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::ShaderModule ) == sizeof( VkShaderModule ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type<ObjectType::eShaderModule> + { + using type = VULKAN_HPP_NAMESPACE::ShaderModule; + }; + + template <> + struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eShaderModule> + { + using Type = VULKAN_HPP_NAMESPACE::ShaderModule; + }; + + + template <> + struct CppType<VkShaderModule, VK_NULL_HANDLE> + { + using Type = VULKAN_HPP_NAMESPACE::ShaderModule; + }; + + + template <> + struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::ShaderModule> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct SpecializationMapEntry + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SpecializationMapEntry(uint32_t constantID_ = {}, uint32_t offset_ = {}, size_t size_ = {}) VULKAN_HPP_NOEXCEPT + : constantID( constantID_ ), offset( offset_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR SpecializationMapEntry( SpecializationMapEntry const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SpecializationMapEntry( VkSpecializationMapEntry const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SpecializationMapEntry & operator=( VkSpecializationMapEntry const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<SpecializationMapEntry const *>( &rhs ); + return *this; + } + + SpecializationMapEntry & operator=( SpecializationMapEntry const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( SpecializationMapEntry ) ); + return *this; + } + + SpecializationMapEntry & setConstantID( uint32_t constantID_ ) VULKAN_HPP_NOEXCEPT + { + constantID = constantID_; + return *this; + } + + SpecializationMapEntry & setOffset( uint32_t offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + SpecializationMapEntry & setSize( size_t size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator VkSpecializationMapEntry const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkSpecializationMapEntry*>( this ); + } + + operator VkSpecializationMapEntry &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkSpecializationMapEntry*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SpecializationMapEntry const& ) const = default; +#else + bool operator==( SpecializationMapEntry const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( constantID == rhs.constantID ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( SpecializationMapEntry const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t constantID = {}; + uint32_t offset = {}; + size_t size = {}; + + }; + static_assert( sizeof( SpecializationMapEntry ) == sizeof( VkSpecializationMapEntry ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<SpecializationMapEntry>::value, "struct wrapper is not a standard layout!"
); + + struct SpecializationInfo + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SpecializationInfo(uint32_t mapEntryCount_ = {}, const VULKAN_HPP_NAMESPACE::SpecializationMapEntry* pMapEntries_ = {}, size_t dataSize_ = {}, const void* pData_ = {}) VULKAN_HPP_NOEXCEPT + : mapEntryCount( mapEntryCount_ ), pMapEntries( pMapEntries_ ), dataSize( dataSize_ ), pData( pData_ ) + {} + + VULKAN_HPP_CONSTEXPR SpecializationInfo( SpecializationInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SpecializationInfo( VkSpecializationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template <typename T> + SpecializationInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::SpecializationMapEntry> const & mapEntries_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const T> const & data_ = {} ) + : mapEntryCount( static_cast<uint32_t>( mapEntries_.size() ) ), pMapEntries( mapEntries_.data() ), dataSize( data_.size() * sizeof(T) ), pData( data_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SpecializationInfo & operator=( VkSpecializationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<SpecializationInfo const *>( &rhs ); + return *this; + } + + SpecializationInfo & operator=( SpecializationInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( SpecializationInfo ) ); + return *this; + } + + SpecializationInfo & setMapEntryCount( uint32_t mapEntryCount_ ) VULKAN_HPP_NOEXCEPT + { + mapEntryCount = mapEntryCount_; + return *this; + } + + SpecializationInfo & setPMapEntries( const VULKAN_HPP_NAMESPACE::SpecializationMapEntry* pMapEntries_ ) VULKAN_HPP_NOEXCEPT + { + pMapEntries = pMapEntries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SpecializationInfo & setMapEntries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::SpecializationMapEntry> const & mapEntries_ ) VULKAN_HPP_NOEXCEPT + { + mapEntryCount = static_cast<uint32_t>( mapEntries_.size() ); + pMapEntries = mapEntries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SpecializationInfo & setDataSize( size_t dataSize_ ) VULKAN_HPP_NOEXCEPT + { + dataSize = dataSize_; + return *this; + } + + SpecializationInfo & setPData( const void* pData_ ) VULKAN_HPP_NOEXCEPT + { + pData = pData_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template <typename T> + SpecializationInfo & setData( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const T> const & data_ ) VULKAN_HPP_NOEXCEPT + { + dataSize = data_.size() * sizeof(T); + pData = data_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSpecializationInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkSpecializationInfo*>( this ); + } + + operator VkSpecializationInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkSpecializationInfo*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SpecializationInfo const& ) const = default; +#else + bool operator==( SpecializationInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( mapEntryCount == rhs.mapEntryCount ) + && ( pMapEntries == rhs.pMapEntries ) + && ( dataSize == rhs.dataSize ) + && ( pData == rhs.pData ); + } + + bool operator!=( SpecializationInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t mapEntryCount = {}; + const VULKAN_HPP_NAMESPACE::SpecializationMapEntry* pMapEntries = {}; + size_t dataSize = {}; + const void* pData = {}; + + }; + 
static_assert( sizeof( SpecializationInfo ) == sizeof( VkSpecializationInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct PipelineShaderStageCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineShaderStageCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineShaderStageCreateInfo(VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits stage_ = VULKAN_HPP_NAMESPACE::ShaderStageFlagBits::eVertex, VULKAN_HPP_NAMESPACE::ShaderModule module_ = {}, const char* pName_ = {}, const VULKAN_HPP_NAMESPACE::SpecializationInfo* pSpecializationInfo_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), stage( stage_ ), module( module_ ), pName( pName_ ), pSpecializationInfo( pSpecializationInfo_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineShaderStageCreateInfo( PipelineShaderStageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineShaderStageCreateInfo( VkPipelineShaderStageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineShaderStageCreateInfo & operator=( VkPipelineShaderStageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineShaderStageCreateInfo & operator=( PipelineShaderStageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineShaderStageCreateInfo ) ); + return *this; + } + + PipelineShaderStageCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineShaderStageCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineShaderStageCreateInfo & setStage( VULKAN_HPP_NAMESPACE::ShaderStageFlagBits stage_ ) VULKAN_HPP_NOEXCEPT + { + stage = stage_; + return *this; + } + + PipelineShaderStageCreateInfo & setModule( VULKAN_HPP_NAMESPACE::ShaderModule module_ ) VULKAN_HPP_NOEXCEPT + { + module = module_; + return *this; + } + + PipelineShaderStageCreateInfo & setPName( const char* pName_ ) VULKAN_HPP_NOEXCEPT + { + pName = pName_; + return *this; + } + + PipelineShaderStageCreateInfo & setPSpecializationInfo( const VULKAN_HPP_NAMESPACE::SpecializationInfo* pSpecializationInfo_ ) VULKAN_HPP_NOEXCEPT + { + pSpecializationInfo = pSpecializationInfo_; + return *this; + } + + + operator VkPipelineShaderStageCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineShaderStageCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineShaderStageCreateInfo const& ) const = default; +#else + bool operator==( PipelineShaderStageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stage == rhs.stage ) + && ( module == rhs.module ) + && ( pName == rhs.pName ) + && ( pSpecializationInfo == rhs.pSpecializationInfo ); + } + + bool operator!=( PipelineShaderStageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType 
sType = StructureType::ePipelineShaderStageCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlagBits stage = VULKAN_HPP_NAMESPACE::ShaderStageFlagBits::eVertex; + VULKAN_HPP_NAMESPACE::ShaderModule module = {}; + const char* pName = {}; + const VULKAN_HPP_NAMESPACE::SpecializationInfo* pSpecializationInfo = {}; + + }; + static_assert( sizeof( PipelineShaderStageCreateInfo ) == sizeof( VkPipelineShaderStageCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineShaderStageCreateInfo; + }; + + class PipelineLayout + { + public: + using CType = VkPipelineLayout; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePipelineLayout; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipelineLayout; + + public: + VULKAN_HPP_CONSTEXPR PipelineLayout() VULKAN_HPP_NOEXCEPT + : m_pipelineLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PipelineLayout( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_pipelineLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PipelineLayout( VkPipelineLayout pipelineLayout ) VULKAN_HPP_NOEXCEPT + : m_pipelineLayout( pipelineLayout ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PipelineLayout & operator=(VkPipelineLayout pipelineLayout) VULKAN_HPP_NOEXCEPT + { + m_pipelineLayout = pipelineLayout; + return *this; + } +#endif + + PipelineLayout & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_pipelineLayout = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineLayout const& ) const = default; +#else + bool operator==( PipelineLayout const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout == rhs.m_pipelineLayout; + } + + bool operator!=(PipelineLayout const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout != rhs.m_pipelineLayout; + } + + bool operator<(PipelineLayout const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout < rhs.m_pipelineLayout; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineLayout() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineLayout == VK_NULL_HANDLE; + } + + private: + VkPipelineLayout m_pipelineLayout; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::PipelineLayout ) == sizeof( VkPipelineLayout ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type<ObjectType::ePipelineLayout> + { + using type = VULKAN_HPP_NAMESPACE::PipelineLayout; + }; + + template <> + struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::ePipelineLayout> + { + using Type = VULKAN_HPP_NAMESPACE::PipelineLayout; + }; + + + template <> + struct CppType<VkPipelineLayout, VK_NULL_HANDLE> + { + using Type = VULKAN_HPP_NAMESPACE::PipelineLayout; + }; + + + template <> + struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::PipelineLayout> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class Pipeline + { + public: + using CType = VkPipeline; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePipeline; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipeline; + + public: + VULKAN_HPP_CONSTEXPR Pipeline() VULKAN_HPP_NOEXCEPT + : m_pipeline(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Pipeline( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_pipeline(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Pipeline( VkPipeline pipeline ) VULKAN_HPP_NOEXCEPT + : m_pipeline( pipeline ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Pipeline & operator=(VkPipeline pipeline) VULKAN_HPP_NOEXCEPT + { + m_pipeline = pipeline; + return *this; + } +#endif + + Pipeline & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_pipeline = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Pipeline const& ) const = default; +#else + bool operator==( Pipeline const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipeline == rhs.m_pipeline; + } + + bool operator!=(Pipeline const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipeline != rhs.m_pipeline; + } + + bool operator<(Pipeline const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipeline < rhs.m_pipeline; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipeline() const VULKAN_HPP_NOEXCEPT + { + return m_pipeline; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_pipeline != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_pipeline == VK_NULL_HANDLE; + } + + private: + VkPipeline m_pipeline; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Pipeline ) == sizeof( VkPipeline ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Pipeline; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Pipeline; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Pipeline; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct ComputePipelineCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eComputePipelineCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ComputePipelineCreateInfo(VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo stage_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), stage( stage_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR ComputePipelineCreateInfo( ComputePipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ComputePipelineCreateInfo( VkComputePipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ComputePipelineCreateInfo & operator=( VkComputePipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ComputePipelineCreateInfo & operator=( ComputePipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ComputePipelineCreateInfo ) ); + return *this; + } + + ComputePipelineCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ComputePipelineCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ComputePipelineCreateInfo & setStage( VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo const & stage_ ) VULKAN_HPP_NOEXCEPT + { + stage = stage_; + return *this; + } + + ComputePipelineCreateInfo & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + { + layout = layout_; + return *this; + } + + ComputePipelineCreateInfo & setBasePipelineHandle( VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + ComputePipelineCreateInfo & setBasePipelineIndex( int32_t basePipelineIndex_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + + operator VkComputePipelineCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkComputePipelineCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ComputePipelineCreateInfo const& ) const = default; +#else + bool operator==( ComputePipelineCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stage == rhs.stage ) + && ( layout == rhs.layout ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( ComputePipelineCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return 
!operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eComputePipelineCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo stage = {}; + VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; + VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle = {}; + int32_t basePipelineIndex = {}; + + }; + static_assert( sizeof( ComputePipelineCreateInfo ) == sizeof( VkComputePipelineCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ComputePipelineCreateInfo; + }; + + struct ConditionalRenderingBeginInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eConditionalRenderingBeginInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ConditionalRenderingBeginInfoEXT(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::ConditionalRenderingFlagsEXT flags_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ), offset( offset_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR ConditionalRenderingBeginInfoEXT( ConditionalRenderingBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ConditionalRenderingBeginInfoEXT( VkConditionalRenderingBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ConditionalRenderingBeginInfoEXT & operator=( VkConditionalRenderingBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ConditionalRenderingBeginInfoEXT & operator=( ConditionalRenderingBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ConditionalRenderingBeginInfoEXT ) ); + return *this; + } + + ConditionalRenderingBeginInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ConditionalRenderingBeginInfoEXT & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + ConditionalRenderingBeginInfoEXT & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + ConditionalRenderingBeginInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::ConditionalRenderingFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkConditionalRenderingBeginInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkConditionalRenderingBeginInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ConditionalRenderingBeginInfoEXT const& ) const = default; +#else + bool operator==( ConditionalRenderingBeginInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( flags == rhs.flags ); + } + + bool operator!=( ConditionalRenderingBeginInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eConditionalRenderingBeginInfoEXT; + const 
void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::ConditionalRenderingFlagsEXT flags = {}; + + }; + static_assert( sizeof( ConditionalRenderingBeginInfoEXT ) == sizeof( VkConditionalRenderingBeginInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ConditionalRenderingBeginInfoEXT; + }; + + struct ConformanceVersion + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ConformanceVersion(uint8_t major_ = {}, uint8_t minor_ = {}, uint8_t subminor_ = {}, uint8_t patch_ = {}) VULKAN_HPP_NOEXCEPT + : major( major_ ), minor( minor_ ), subminor( subminor_ ), patch( patch_ ) + {} + + VULKAN_HPP_CONSTEXPR ConformanceVersion( ConformanceVersion const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ConformanceVersion( VkConformanceVersion const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ConformanceVersion & operator=( VkConformanceVersion const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ConformanceVersion & operator=( ConformanceVersion const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ConformanceVersion ) ); + return *this; + } + + ConformanceVersion & setMajor( uint8_t major_ ) VULKAN_HPP_NOEXCEPT + { + major = major_; + return *this; + } + + ConformanceVersion & setMinor( uint8_t minor_ ) VULKAN_HPP_NOEXCEPT + { + minor = minor_; + return *this; + } + + ConformanceVersion & setSubminor( uint8_t subminor_ ) VULKAN_HPP_NOEXCEPT + { + subminor = subminor_; + return *this; + } + + ConformanceVersion & setPatch( uint8_t patch_ ) VULKAN_HPP_NOEXCEPT + { + patch = patch_; + return *this; + } + + + operator VkConformanceVersion const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkConformanceVersion &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ConformanceVersion const& ) const = default; +#else + bool operator==( ConformanceVersion const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( major == rhs.major ) + && ( minor == rhs.minor ) + && ( subminor == rhs.subminor ) + && ( patch == rhs.patch ); + } + + bool operator!=( ConformanceVersion const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint8_t major = {}; + uint8_t minor = {}; + uint8_t subminor = {}; + uint8_t patch = {}; + + }; + static_assert( sizeof( ConformanceVersion ) == sizeof( VkConformanceVersion ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + using ConformanceVersionKHR = ConformanceVersion; + + struct CooperativeMatrixPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCooperativeMatrixPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CooperativeMatrixPropertiesNV(uint32_t MSize_ = {}, uint32_t NSize_ = {}, uint32_t KSize_ = {}, VULKAN_HPP_NAMESPACE::ComponentTypeNV AType_ = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16, VULKAN_HPP_NAMESPACE::ComponentTypeNV BType_ = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16, VULKAN_HPP_NAMESPACE::ComponentTypeNV CType_ = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16, VULKAN_HPP_NAMESPACE::ComponentTypeNV DType_ = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16, VULKAN_HPP_NAMESPACE::ScopeNV scope_ = VULKAN_HPP_NAMESPACE::ScopeNV::eDevice) VULKAN_HPP_NOEXCEPT + : MSize( MSize_ ), NSize( NSize_ ), KSize( KSize_ ), AType( AType_ ), BType( BType_ ), CType( CType_ ), DType( DType_ ), scope( scope_ ) + {} + + VULKAN_HPP_CONSTEXPR CooperativeMatrixPropertiesNV( CooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CooperativeMatrixPropertiesNV( VkCooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CooperativeMatrixPropertiesNV & operator=( VkCooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CooperativeMatrixPropertiesNV & operator=( CooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CooperativeMatrixPropertiesNV ) ); + return *this; + } + + CooperativeMatrixPropertiesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CooperativeMatrixPropertiesNV & setMSize( uint32_t MSize_ ) VULKAN_HPP_NOEXCEPT + { + MSize = MSize_; + return *this; + } + + CooperativeMatrixPropertiesNV & setNSize( uint32_t NSize_ ) VULKAN_HPP_NOEXCEPT + { + NSize = NSize_; + return *this; + } + + CooperativeMatrixPropertiesNV & setKSize( uint32_t KSize_ ) VULKAN_HPP_NOEXCEPT + { + KSize = KSize_; + return *this; + } + + CooperativeMatrixPropertiesNV & setAType( VULKAN_HPP_NAMESPACE::ComponentTypeNV AType_ ) VULKAN_HPP_NOEXCEPT + { + AType = AType_; + return *this; + } + + CooperativeMatrixPropertiesNV & setBType( VULKAN_HPP_NAMESPACE::ComponentTypeNV BType_ ) VULKAN_HPP_NOEXCEPT + { + BType = BType_; + return *this; + } + + CooperativeMatrixPropertiesNV & setCType( VULKAN_HPP_NAMESPACE::ComponentTypeNV CType_ ) VULKAN_HPP_NOEXCEPT + { + CType = CType_; + return *this; + } + + CooperativeMatrixPropertiesNV & setDType( VULKAN_HPP_NAMESPACE::ComponentTypeNV DType_ ) VULKAN_HPP_NOEXCEPT + { + DType = DType_; + return *this; + } + + CooperativeMatrixPropertiesNV & setScope( VULKAN_HPP_NAMESPACE::ScopeNV scope_ ) VULKAN_HPP_NOEXCEPT + { + scope = scope_; + return *this; + } + + + operator VkCooperativeMatrixPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCooperativeMatrixPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CooperativeMatrixPropertiesNV const& ) const = default; +#else + bool operator==( CooperativeMatrixPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) 
+ && ( MSize == rhs.MSize ) + && ( NSize == rhs.NSize ) + && ( KSize == rhs.KSize ) + && ( AType == rhs.AType ) + && ( BType == rhs.BType ) + && ( CType == rhs.CType ) + && ( DType == rhs.DType ) + && ( scope == rhs.scope ); + } + + bool operator!=( CooperativeMatrixPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCooperativeMatrixPropertiesNV; + void* pNext = {}; + uint32_t MSize = {}; + uint32_t NSize = {}; + uint32_t KSize = {}; + VULKAN_HPP_NAMESPACE::ComponentTypeNV AType = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16; + VULKAN_HPP_NAMESPACE::ComponentTypeNV BType = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16; + VULKAN_HPP_NAMESPACE::ComponentTypeNV CType = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16; + VULKAN_HPP_NAMESPACE::ComponentTypeNV DType = VULKAN_HPP_NAMESPACE::ComponentTypeNV::eFloat16; + VULKAN_HPP_NAMESPACE::ScopeNV scope = VULKAN_HPP_NAMESPACE::ScopeNV::eDevice; + + }; + static_assert( sizeof( CooperativeMatrixPropertiesNV ) == sizeof( VkCooperativeMatrixPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CooperativeMatrixPropertiesNV; + }; + + struct CopyAccelerationStructureInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyAccelerationStructureInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyAccelerationStructureInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone) VULKAN_HPP_NOEXCEPT + : src( src_ ), dst( dst_ ), mode( mode_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyAccelerationStructureInfoKHR( CopyAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyAccelerationStructureInfoKHR( VkCopyAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyAccelerationStructureInfoKHR & operator=( VkCopyAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyAccelerationStructureInfoKHR & operator=( CopyAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyAccelerationStructureInfoKHR ) ); + return *this; + } + + CopyAccelerationStructureInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyAccelerationStructureInfoKHR & setSrc( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src_ ) VULKAN_HPP_NOEXCEPT + { + src = src_; + return *this; + } + + CopyAccelerationStructureInfoKHR & setDst( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst_ ) VULKAN_HPP_NOEXCEPT + { + dst = dst_; + return *this; + } + + CopyAccelerationStructureInfoKHR & setMode( VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ ) VULKAN_HPP_NOEXCEPT + { + mode = mode_; + return *this; + } + + + operator VkCopyAccelerationStructureInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyAccelerationStructureInfoKHR &() 
VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyAccelerationStructureInfoKHR const& ) const = default; +#else + bool operator==( CopyAccelerationStructureInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( src == rhs.src ) + && ( dst == rhs.dst ) + && ( mode == rhs.mode ); + } + + bool operator!=( CopyAccelerationStructureInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyAccelerationStructureInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst = {}; + VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone; + + }; + static_assert( sizeof( CopyAccelerationStructureInfoKHR ) == sizeof( VkCopyAccelerationStructureInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CopyAccelerationStructureInfoKHR; + }; + + struct CopyAccelerationStructureToMemoryInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyAccelerationStructureToMemoryInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + CopyAccelerationStructureToMemoryInfoKHR(VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src_ = {}, VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone) VULKAN_HPP_NOEXCEPT + : src( src_ ), dst( dst_ ), mode( mode_ ) + {} + + CopyAccelerationStructureToMemoryInfoKHR( CopyAccelerationStructureToMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyAccelerationStructureToMemoryInfoKHR( VkCopyAccelerationStructureToMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyAccelerationStructureToMemoryInfoKHR & operator=( VkCopyAccelerationStructureToMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyAccelerationStructureToMemoryInfoKHR & operator=( CopyAccelerationStructureToMemoryInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyAccelerationStructureToMemoryInfoKHR ) ); + return *this; + } + + CopyAccelerationStructureToMemoryInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyAccelerationStructureToMemoryInfoKHR & setSrc( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src_ ) VULKAN_HPP_NOEXCEPT + { + src = src_; + return *this; + } + + CopyAccelerationStructureToMemoryInfoKHR & setDst( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR const & dst_ ) VULKAN_HPP_NOEXCEPT + { + dst = dst_; + return *this; + } + + CopyAccelerationStructureToMemoryInfoKHR & setMode( VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ ) VULKAN_HPP_NOEXCEPT + { + mode = mode_; + return *this; + } + + + operator VkCopyAccelerationStructureToMemoryInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
VkCopyAccelerationStructureToMemoryInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyAccelerationStructureToMemoryInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR src = {}; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressKHR dst = {}; + VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone; + + }; + static_assert( sizeof( CopyAccelerationStructureToMemoryInfoKHR ) == sizeof( VkCopyAccelerationStructureToMemoryInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CopyAccelerationStructureToMemoryInfoKHR; + }; + + struct CopyBufferInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyBufferInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyBufferInfo2KHR(VULKAN_HPP_NAMESPACE::Buffer srcBuffer_ = {}, VULKAN_HPP_NAMESPACE::Buffer dstBuffer_ = {}, uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::BufferCopy2KHR* pRegions_ = {}) VULKAN_HPP_NOEXCEPT + : srcBuffer( srcBuffer_ ), dstBuffer( dstBuffer_ ), regionCount( regionCount_ ), pRegions( pRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyBufferInfo2KHR( CopyBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyBufferInfo2KHR( VkCopyBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyBufferInfo2KHR( VULKAN_HPP_NAMESPACE::Buffer srcBuffer_, VULKAN_HPP_NAMESPACE::Buffer dstBuffer_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) + : srcBuffer( srcBuffer_ ), dstBuffer( dstBuffer_ ), regionCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyBufferInfo2KHR & operator=( VkCopyBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyBufferInfo2KHR & operator=( CopyBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyBufferInfo2KHR ) ); + return *this; + } + + CopyBufferInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyBufferInfo2KHR & setSrcBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer_ ) VULKAN_HPP_NOEXCEPT + { + srcBuffer = srcBuffer_; + return *this; + } + + CopyBufferInfo2KHR & setDstBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer_ ) VULKAN_HPP_NOEXCEPT + { + dstBuffer = dstBuffer_; + return *this; + } + + CopyBufferInfo2KHR & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = regionCount_; + return *this; + } + + CopyBufferInfo2KHR & setPRegions( const VULKAN_HPP_NAMESPACE::BufferCopy2KHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyBufferInfo2KHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = static_cast( regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator 
VkCopyBufferInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyBufferInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyBufferInfo2KHR const& ) const = default; +#else + bool operator==( CopyBufferInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcBuffer == rhs.srcBuffer ) + && ( dstBuffer == rhs.dstBuffer ) + && ( regionCount == rhs.regionCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( CopyBufferInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyBufferInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer srcBuffer = {}; + VULKAN_HPP_NAMESPACE::Buffer dstBuffer = {}; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::BufferCopy2KHR* pRegions = {}; + + }; + static_assert( sizeof( CopyBufferInfo2KHR ) == sizeof( VkCopyBufferInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CopyBufferInfo2KHR; + }; + + struct CopyBufferToImageInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyBufferToImageInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyBufferToImageInfo2KHR(VULKAN_HPP_NAMESPACE::Buffer srcBuffer_ = {}, VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::BufferImageCopy2KHR* pRegions_ = {}) VULKAN_HPP_NOEXCEPT + : srcBuffer( srcBuffer_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( regionCount_ ), pRegions( pRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyBufferToImageInfo2KHR( CopyBufferToImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyBufferToImageInfo2KHR( VkCopyBufferToImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyBufferToImageInfo2KHR( VULKAN_HPP_NAMESPACE::Buffer srcBuffer_, VULKAN_HPP_NAMESPACE::Image dstImage_, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) + : srcBuffer( srcBuffer_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyBufferToImageInfo2KHR & operator=( VkCopyBufferToImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyBufferToImageInfo2KHR & operator=( CopyBufferToImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyBufferToImageInfo2KHR ) ); + return *this; + } + + CopyBufferToImageInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyBufferToImageInfo2KHR & setSrcBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer_ ) VULKAN_HPP_NOEXCEPT + { + srcBuffer = srcBuffer_; + return *this; + } + + 
CopyBufferToImageInfo2KHR & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT + { + dstImage = dstImage_; + return *this; + } + + CopyBufferToImageInfo2KHR & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + dstImageLayout = dstImageLayout_; + return *this; + } + + CopyBufferToImageInfo2KHR & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = regionCount_; + return *this; + } + + CopyBufferToImageInfo2KHR & setPRegions( const VULKAN_HPP_NAMESPACE::BufferImageCopy2KHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyBufferToImageInfo2KHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = static_cast( regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkCopyBufferToImageInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyBufferToImageInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyBufferToImageInfo2KHR const& ) const = default; +#else + bool operator==( CopyBufferToImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcBuffer == rhs.srcBuffer ) + && ( dstImage == rhs.dstImage ) + && ( dstImageLayout == rhs.dstImageLayout ) + && ( regionCount == rhs.regionCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( CopyBufferToImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyBufferToImageInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Buffer srcBuffer = {}; + VULKAN_HPP_NAMESPACE::Image dstImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::BufferImageCopy2KHR* pRegions = {}; + + }; + static_assert( sizeof( CopyBufferToImageInfo2KHR ) == sizeof( VkCopyBufferToImageInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = CopyBufferToImageInfo2KHR; + }; + + struct CopyCommandTransformInfoQCOM + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyCommandTransformInfoQCOM; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyCommandTransformInfoQCOM(VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity) VULKAN_HPP_NOEXCEPT + : transform( transform_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyCommandTransformInfoQCOM( CopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyCommandTransformInfoQCOM( VkCopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyCommandTransformInfoQCOM & operator=( VkCopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyCommandTransformInfoQCOM & operator=( CopyCommandTransformInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyCommandTransformInfoQCOM ) ); + return *this; + } + + CopyCommandTransformInfoQCOM & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyCommandTransformInfoQCOM & setTransform( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ ) VULKAN_HPP_NOEXCEPT + { + transform = transform_; + return *this; + } + + + operator VkCopyCommandTransformInfoQCOM const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyCommandTransformInfoQCOM &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyCommandTransformInfoQCOM const& ) const = default; +#else + bool operator==( CopyCommandTransformInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( transform == rhs.transform ); + } + + bool operator!=( CopyCommandTransformInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyCommandTransformInfoQCOM; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + + }; + static_assert( sizeof( CopyCommandTransformInfoQCOM ) == sizeof( VkCopyCommandTransformInfoQCOM ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = CopyCommandTransformInfoQCOM; + }; + + class DescriptorSet + { + public: + using CType = VkDescriptorSet; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorSet; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorSet; + + public: + VULKAN_HPP_CONSTEXPR DescriptorSet() VULKAN_HPP_NOEXCEPT + : m_descriptorSet(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSet( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_descriptorSet(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorSet( VkDescriptorSet descriptorSet ) VULKAN_HPP_NOEXCEPT + : m_descriptorSet( descriptorSet ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorSet & operator=(VkDescriptorSet descriptorSet) VULKAN_HPP_NOEXCEPT + { + m_descriptorSet = descriptorSet; + return *this; + } +#endif + + DescriptorSet & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_descriptorSet = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSet const& ) const = default; +#else + bool operator==( DescriptorSet const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet == rhs.m_descriptorSet; + } + + bool operator!=(DescriptorSet const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet != rhs.m_descriptorSet; + } + + bool operator<(DescriptorSet const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet < rhs.m_descriptorSet; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSet() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSet == VK_NULL_HANDLE; + } + + private: + VkDescriptorSet m_descriptorSet; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DescriptorSet ) == sizeof( VkDescriptorSet ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DescriptorSet; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorSet; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorSet; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct CopyDescriptorSet + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyDescriptorSet; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyDescriptorSet(VULKAN_HPP_NAMESPACE::DescriptorSet srcSet_ = {}, uint32_t srcBinding_ = {}, uint32_t srcArrayElement_ = {}, VULKAN_HPP_NAMESPACE::DescriptorSet dstSet_ = {}, uint32_t dstBinding_ = {}, uint32_t dstArrayElement_ = {}, uint32_t descriptorCount_ = {}) VULKAN_HPP_NOEXCEPT + : srcSet( srcSet_ ), srcBinding( srcBinding_ ), srcArrayElement( srcArrayElement_ ), dstSet( dstSet_ ), dstBinding( dstBinding_ ), dstArrayElement( dstArrayElement_ ), descriptorCount( descriptorCount_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyDescriptorSet( CopyDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyDescriptorSet( VkCopyDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyDescriptorSet & operator=( VkCopyDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyDescriptorSet & operator=( CopyDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyDescriptorSet ) ); + return *this; + } + + CopyDescriptorSet & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyDescriptorSet & setSrcSet( VULKAN_HPP_NAMESPACE::DescriptorSet srcSet_ ) VULKAN_HPP_NOEXCEPT + { + srcSet = srcSet_; + return *this; + } + + CopyDescriptorSet & setSrcBinding( uint32_t srcBinding_ ) VULKAN_HPP_NOEXCEPT + { + srcBinding = srcBinding_; + return *this; + } + + CopyDescriptorSet & setSrcArrayElement( uint32_t srcArrayElement_ ) VULKAN_HPP_NOEXCEPT + { + srcArrayElement = srcArrayElement_; + return *this; + } + + CopyDescriptorSet & setDstSet( VULKAN_HPP_NAMESPACE::DescriptorSet dstSet_ ) VULKAN_HPP_NOEXCEPT + { + dstSet = dstSet_; + return *this; + } + + CopyDescriptorSet & setDstBinding( uint32_t dstBinding_ ) VULKAN_HPP_NOEXCEPT + { + dstBinding = dstBinding_; + return *this; + } + + CopyDescriptorSet & setDstArrayElement( uint32_t dstArrayElement_ ) VULKAN_HPP_NOEXCEPT + { + dstArrayElement = dstArrayElement_; + return *this; + } + + CopyDescriptorSet & setDescriptorCount( uint32_t descriptorCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = descriptorCount_; + return *this; + } + + + operator VkCopyDescriptorSet const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyDescriptorSet &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyDescriptorSet const& ) const = default; +#else + bool operator==( CopyDescriptorSet const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcSet == rhs.srcSet ) + && ( srcBinding == rhs.srcBinding ) + && ( srcArrayElement == rhs.srcArrayElement ) + && ( dstSet == rhs.dstSet ) + && ( dstBinding == rhs.dstBinding ) + && ( 
dstArrayElement == rhs.dstArrayElement ) + && ( descriptorCount == rhs.descriptorCount ); + } + + bool operator!=( CopyDescriptorSet const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyDescriptorSet; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DescriptorSet srcSet = {}; + uint32_t srcBinding = {}; + uint32_t srcArrayElement = {}; + VULKAN_HPP_NAMESPACE::DescriptorSet dstSet = {}; + uint32_t dstBinding = {}; + uint32_t dstArrayElement = {}; + uint32_t descriptorCount = {}; + + }; + static_assert( sizeof( CopyDescriptorSet ) == sizeof( VkCopyDescriptorSet ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CopyDescriptorSet; + }; + + struct ImageCopy2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageCopy2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageCopy2KHR(VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D srcOffset_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubresource( srcSubresource_ ), srcOffset( srcOffset_ ), dstSubresource( dstSubresource_ ), dstOffset( dstOffset_ ), extent( extent_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageCopy2KHR( ImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageCopy2KHR( VkImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageCopy2KHR & operator=( VkImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageCopy2KHR & operator=( ImageCopy2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageCopy2KHR ) ); + return *this; + } + + ImageCopy2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageCopy2KHR & setSrcSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & srcSubresource_ ) VULKAN_HPP_NOEXCEPT + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageCopy2KHR & setSrcOffset( VULKAN_HPP_NAMESPACE::Offset3D const & srcOffset_ ) VULKAN_HPP_NOEXCEPT + { + srcOffset = srcOffset_; + return *this; + } + + ImageCopy2KHR & setDstSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & dstSubresource_ ) VULKAN_HPP_NOEXCEPT + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageCopy2KHR & setDstOffset( VULKAN_HPP_NAMESPACE::Offset3D const & dstOffset_ ) VULKAN_HPP_NOEXCEPT + { + dstOffset = dstOffset_; + return *this; + } + + ImageCopy2KHR & setExtent( VULKAN_HPP_NAMESPACE::Extent3D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + + operator VkImageCopy2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageCopy2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageCopy2KHR const& ) const = default; +#else + bool operator==( ImageCopy2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && 
( pNext == rhs.pNext ) + && ( srcSubresource == rhs.srcSubresource ) + && ( srcOffset == rhs.srcOffset ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffset == rhs.dstOffset ) + && ( extent == rhs.extent ); + } + + bool operator!=( ImageCopy2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageCopy2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D srcOffset = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D dstOffset = {}; + VULKAN_HPP_NAMESPACE::Extent3D extent = {}; + + }; + static_assert( sizeof( ImageCopy2KHR ) == sizeof( VkImageCopy2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImageCopy2KHR; + }; + + struct CopyImageInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyImageInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyImageInfo2KHR(VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageCopy2KHR* pRegions_ = {}) VULKAN_HPP_NOEXCEPT + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( regionCount_ ), pRegions( pRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyImageInfo2KHR( CopyImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyImageInfo2KHR( VkCopyImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyImageInfo2KHR( VULKAN_HPP_NAMESPACE::Image srcImage_, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, VULKAN_HPP_NAMESPACE::Image dstImage_, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyImageInfo2KHR & operator=( VkCopyImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyImageInfo2KHR & operator=( CopyImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyImageInfo2KHR ) ); + return *this; + } + + CopyImageInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyImageInfo2KHR & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT + { + srcImage = srcImage_; + return *this; + } + + CopyImageInfo2KHR & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + srcImageLayout = srcImageLayout_; + return *this; + } + + CopyImageInfo2KHR & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ 
) VULKAN_HPP_NOEXCEPT + { + dstImage = dstImage_; + return *this; + } + + CopyImageInfo2KHR & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + dstImageLayout = dstImageLayout_; + return *this; + } + + CopyImageInfo2KHR & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = regionCount_; + return *this; + } + + CopyImageInfo2KHR & setPRegions( const VULKAN_HPP_NAMESPACE::ImageCopy2KHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyImageInfo2KHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = static_cast( regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkCopyImageInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyImageInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CopyImageInfo2KHR const& ) const = default; +#else + bool operator==( CopyImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcImage == rhs.srcImage ) + && ( srcImageLayout == rhs.srcImageLayout ) + && ( dstImage == rhs.dstImage ) + && ( dstImageLayout == rhs.dstImageLayout ) + && ( regionCount == rhs.regionCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( CopyImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyImageInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image srcImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::Image dstImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::ImageCopy2KHR* pRegions = {}; + + }; + static_assert( sizeof( CopyImageInfo2KHR ) == sizeof( VkCopyImageInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = CopyImageInfo2KHR; + }; + + struct CopyImageToBufferInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyImageToBufferInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR CopyImageToBufferInfo2KHR(VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::Buffer dstBuffer_ = {}, uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::BufferImageCopy2KHR* pRegions_ = {}) VULKAN_HPP_NOEXCEPT + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstBuffer( dstBuffer_ ), regionCount( regionCount_ ), pRegions( pRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR CopyImageToBufferInfo2KHR( CopyImageToBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyImageToBufferInfo2KHR( VkCopyImageToBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyImageToBufferInfo2KHR( VULKAN_HPP_NAMESPACE::Image srcImage_, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, VULKAN_HPP_NAMESPACE::Buffer dstBuffer_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstBuffer( dstBuffer_ ), regionCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyImageToBufferInfo2KHR & operator=( VkCopyImageToBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyImageToBufferInfo2KHR & operator=( CopyImageToBufferInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyImageToBufferInfo2KHR ) ); + return *this; + } + + CopyImageToBufferInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyImageToBufferInfo2KHR & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT + { + srcImage = srcImage_; + return *this; + } + + CopyImageToBufferInfo2KHR & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + srcImageLayout = srcImageLayout_; + return *this; + } + + CopyImageToBufferInfo2KHR & setDstBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer_ ) VULKAN_HPP_NOEXCEPT + { + dstBuffer = dstBuffer_; + return *this; + } + + CopyImageToBufferInfo2KHR & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = regionCount_; + return *this; + } + + CopyImageToBufferInfo2KHR & setPRegions( const VULKAN_HPP_NAMESPACE::BufferImageCopy2KHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + CopyImageToBufferInfo2KHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = static_cast( regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkCopyImageToBufferInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkCopyImageToBufferInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto 
operator<=>( CopyImageToBufferInfo2KHR const& ) const = default; +#else + bool operator==( CopyImageToBufferInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcImage == rhs.srcImage ) + && ( srcImageLayout == rhs.srcImageLayout ) + && ( dstBuffer == rhs.dstBuffer ) + && ( regionCount == rhs.regionCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( CopyImageToBufferInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyImageToBufferInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image srcImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::Buffer dstBuffer = {}; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::BufferImageCopy2KHR* pRegions = {}; + + }; + static_assert( sizeof( CopyImageToBufferInfo2KHR ) == sizeof( VkCopyImageToBufferInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CopyImageToBufferInfo2KHR; + }; + + struct CopyMemoryToAccelerationStructureInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eCopyMemoryToAccelerationStructureInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + CopyMemoryToAccelerationStructureInfoKHR(VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR src_ = {}, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst_ = {}, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone) VULKAN_HPP_NOEXCEPT + : src( src_ ), dst( dst_ ), mode( mode_ ) + {} + + CopyMemoryToAccelerationStructureInfoKHR( CopyMemoryToAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + CopyMemoryToAccelerationStructureInfoKHR( VkCopyMemoryToAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + CopyMemoryToAccelerationStructureInfoKHR & operator=( VkCopyMemoryToAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + CopyMemoryToAccelerationStructureInfoKHR & operator=( CopyMemoryToAccelerationStructureInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( CopyMemoryToAccelerationStructureInfoKHR ) ); + return *this; + } + + CopyMemoryToAccelerationStructureInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + CopyMemoryToAccelerationStructureInfoKHR & setSrc( VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR const & src_ ) VULKAN_HPP_NOEXCEPT + { + src = src_; + return *this; + } + + CopyMemoryToAccelerationStructureInfoKHR & setDst( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst_ ) VULKAN_HPP_NOEXCEPT + { + dst = dst_; + return *this; + } + + CopyMemoryToAccelerationStructureInfoKHR & setMode( VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode_ ) VULKAN_HPP_NOEXCEPT + { + mode = mode_; + return *this; + } + + + operator VkCopyMemoryToAccelerationStructureInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
VkCopyMemoryToAccelerationStructureInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eCopyMemoryToAccelerationStructureInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceOrHostAddressConstKHR src = {}; + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR dst = {}; + VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode = VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR::eClone; + + }; + static_assert( sizeof( CopyMemoryToAccelerationStructureInfoKHR ) == sizeof( VkCopyMemoryToAccelerationStructureInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = CopyMemoryToAccelerationStructureInfoKHR; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct D3D12FenceSubmitInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eD3D12FenceSubmitInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR D3D12FenceSubmitInfoKHR(uint32_t waitSemaphoreValuesCount_ = {}, const uint64_t* pWaitSemaphoreValues_ = {}, uint32_t signalSemaphoreValuesCount_ = {}, const uint64_t* pSignalSemaphoreValues_ = {}) VULKAN_HPP_NOEXCEPT + : waitSemaphoreValuesCount( waitSemaphoreValuesCount_ ), pWaitSemaphoreValues( pWaitSemaphoreValues_ ), signalSemaphoreValuesCount( signalSemaphoreValuesCount_ ), pSignalSemaphoreValues( pSignalSemaphoreValues_ ) + {} + + VULKAN_HPP_CONSTEXPR D3D12FenceSubmitInfoKHR( D3D12FenceSubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + D3D12FenceSubmitInfoKHR( VkD3D12FenceSubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + D3D12FenceSubmitInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphoreValues_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphoreValues_ = {} ) + : waitSemaphoreValuesCount( static_cast( waitSemaphoreValues_.size() ) ), pWaitSemaphoreValues( waitSemaphoreValues_.data() ), signalSemaphoreValuesCount( static_cast( signalSemaphoreValues_.size() ) ), pSignalSemaphoreValues( signalSemaphoreValues_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + D3D12FenceSubmitInfoKHR & operator=( VkD3D12FenceSubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + D3D12FenceSubmitInfoKHR & operator=( D3D12FenceSubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( D3D12FenceSubmitInfoKHR ) ); + return *this; + } + + D3D12FenceSubmitInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + D3D12FenceSubmitInfoKHR & setWaitSemaphoreValuesCount( uint32_t waitSemaphoreValuesCount_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreValuesCount = waitSemaphoreValuesCount_; + return *this; + } + + D3D12FenceSubmitInfoKHR & setPWaitSemaphoreValues( const uint64_t* pWaitSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + pWaitSemaphoreValues = pWaitSemaphoreValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + D3D12FenceSubmitInfoKHR & setWaitSemaphoreValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + 
waitSemaphoreValuesCount = static_cast<uint32_t>( waitSemaphoreValues_.size() ); + pWaitSemaphoreValues = waitSemaphoreValues_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + D3D12FenceSubmitInfoKHR & setSignalSemaphoreValuesCount( uint32_t signalSemaphoreValuesCount_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreValuesCount = signalSemaphoreValuesCount_; + return *this; + } + + D3D12FenceSubmitInfoKHR & setPSignalSemaphoreValues( const uint64_t* pSignalSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + pSignalSemaphoreValues = pSignalSemaphoreValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + D3D12FenceSubmitInfoKHR & setSignalSemaphoreValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint64_t> const & signalSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreValuesCount = static_cast<uint32_t>( signalSemaphoreValues_.size() ); + pSignalSemaphoreValues = signalSemaphoreValues_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkD3D12FenceSubmitInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>( this ); + } + + operator VkD3D12FenceSubmitInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkD3D12FenceSubmitInfoKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( D3D12FenceSubmitInfoKHR const& ) const = default; +#else + bool operator==( D3D12FenceSubmitInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreValuesCount == rhs.waitSemaphoreValuesCount ) + && ( pWaitSemaphoreValues == rhs.pWaitSemaphoreValues ) + && ( signalSemaphoreValuesCount == rhs.signalSemaphoreValuesCount ) + && ( pSignalSemaphoreValues == rhs.pSignalSemaphoreValues ); + } + + bool operator!=( D3D12FenceSubmitInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eD3D12FenceSubmitInfoKHR; + const void* pNext = {}; + uint32_t waitSemaphoreValuesCount = {}; + const uint64_t* pWaitSemaphoreValues = {}; + uint32_t signalSemaphoreValuesCount = {}; + const uint64_t* pSignalSemaphoreValues = {}; + + }; + static_assert( sizeof( D3D12FenceSubmitInfoKHR ) == sizeof( VkD3D12FenceSubmitInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<D3D12FenceSubmitInfoKHR>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = D3D12FenceSubmitInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct DebugMarkerMarkerInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugMarkerMarkerInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 DebugMarkerMarkerInfoEXT(const char* pMarkerName_ = {}, std::array const& color_ = {}) VULKAN_HPP_NOEXCEPT + : pMarkerName( pMarkerName_ ), color( color_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 DebugMarkerMarkerInfoEXT( DebugMarkerMarkerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugMarkerMarkerInfoEXT( VkDebugMarkerMarkerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugMarkerMarkerInfoEXT & operator=( VkDebugMarkerMarkerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugMarkerMarkerInfoEXT & operator=( DebugMarkerMarkerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugMarkerMarkerInfoEXT ) ); + return *this; + } + + DebugMarkerMarkerInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugMarkerMarkerInfoEXT & setPMarkerName( const char* pMarkerName_ ) VULKAN_HPP_NOEXCEPT + { + pMarkerName = pMarkerName_; + return *this; + } + + DebugMarkerMarkerInfoEXT & setColor( std::array color_ ) VULKAN_HPP_NOEXCEPT + { + color = color_; + return *this; + } + + + operator VkDebugMarkerMarkerInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugMarkerMarkerInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugMarkerMarkerInfoEXT const& ) const = default; +#else + bool operator==( DebugMarkerMarkerInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pMarkerName == rhs.pMarkerName ) + && ( color == rhs.color ); + } + + bool operator!=( DebugMarkerMarkerInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugMarkerMarkerInfoEXT; + const void* pNext = {}; + const char* pMarkerName = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D color = {}; + + }; + static_assert( sizeof( DebugMarkerMarkerInfoEXT ) == sizeof( VkDebugMarkerMarkerInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DebugMarkerMarkerInfoEXT; + }; + + struct DebugMarkerObjectNameInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugMarkerObjectNameInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DebugMarkerObjectNameInfoEXT(VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_ = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown, uint64_t object_ = {}, const char* pObjectName_ = {}) VULKAN_HPP_NOEXCEPT + : objectType( objectType_ ), object( object_ ), pObjectName( pObjectName_ ) + {} + + VULKAN_HPP_CONSTEXPR DebugMarkerObjectNameInfoEXT( DebugMarkerObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugMarkerObjectNameInfoEXT( VkDebugMarkerObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugMarkerObjectNameInfoEXT & operator=( VkDebugMarkerObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugMarkerObjectNameInfoEXT & operator=( DebugMarkerObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugMarkerObjectNameInfoEXT ) ); + return *this; + } + + DebugMarkerObjectNameInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugMarkerObjectNameInfoEXT & setObjectType( VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_ ) VULKAN_HPP_NOEXCEPT + { + objectType = objectType_; + return *this; + } + + DebugMarkerObjectNameInfoEXT & setObject( uint64_t object_ ) VULKAN_HPP_NOEXCEPT + { + object = object_; + return *this; + } + + DebugMarkerObjectNameInfoEXT & setPObjectName( const char* pObjectName_ ) VULKAN_HPP_NOEXCEPT + { + pObjectName = pObjectName_; + return *this; + } + + + operator VkDebugMarkerObjectNameInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugMarkerObjectNameInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugMarkerObjectNameInfoEXT const& ) const = default; +#else + bool operator==( DebugMarkerObjectNameInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( object == rhs.object ) + && ( pObjectName == rhs.pObjectName ); + } + + bool operator!=( DebugMarkerObjectNameInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugMarkerObjectNameInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + uint64_t object = {}; + const char* pObjectName = {}; + + }; + static_assert( sizeof( DebugMarkerObjectNameInfoEXT ) == sizeof( VkDebugMarkerObjectNameInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DebugMarkerObjectNameInfoEXT; + }; + + struct DebugMarkerObjectTagInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugMarkerObjectTagInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DebugMarkerObjectTagInfoEXT(VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_ = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown, uint64_t object_ = {}, uint64_t tagName_ = {}, size_t tagSize_ = {}, const void* pTag_ = {}) VULKAN_HPP_NOEXCEPT + : objectType( objectType_ ), object( object_ ), tagName( tagName_ ), tagSize( tagSize_ ), pTag( pTag_ ) + {} + + VULKAN_HPP_CONSTEXPR DebugMarkerObjectTagInfoEXT( DebugMarkerObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugMarkerObjectTagInfoEXT( VkDebugMarkerObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + DebugMarkerObjectTagInfoEXT( VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_, uint64_t object_, uint64_t tagName_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & tag_ ) + : objectType( objectType_ ), object( object_ ), tagName( tagName_ ), tagSize( tag_.size() * sizeof(T) ), pTag( tag_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugMarkerObjectTagInfoEXT & operator=( VkDebugMarkerObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugMarkerObjectTagInfoEXT & operator=( DebugMarkerObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugMarkerObjectTagInfoEXT ) ); + return *this; + } + + DebugMarkerObjectTagInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugMarkerObjectTagInfoEXT & setObjectType( VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType_ ) VULKAN_HPP_NOEXCEPT + { + objectType = objectType_; + return *this; + } + + DebugMarkerObjectTagInfoEXT & setObject( uint64_t object_ ) VULKAN_HPP_NOEXCEPT + { + object = object_; + return *this; + } + + DebugMarkerObjectTagInfoEXT & setTagName( uint64_t tagName_ ) VULKAN_HPP_NOEXCEPT + { + tagName = tagName_; + return *this; + } + + DebugMarkerObjectTagInfoEXT & setTagSize( size_t tagSize_ ) VULKAN_HPP_NOEXCEPT + { + tagSize = tagSize_; + return *this; + } + + DebugMarkerObjectTagInfoEXT & setPTag( const void* pTag_ ) VULKAN_HPP_NOEXCEPT + { + pTag = pTag_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + DebugMarkerObjectTagInfoEXT & setTag( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & tag_ ) VULKAN_HPP_NOEXCEPT + { + tagSize = tag_.size() * sizeof(T); + pTag = tag_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDebugMarkerObjectTagInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugMarkerObjectTagInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugMarkerObjectTagInfoEXT const& ) const = default; +#else + bool operator==( DebugMarkerObjectTagInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) 
+ && ( object == rhs.object ) + && ( tagName == rhs.tagName ) + && ( tagSize == rhs.tagSize ) + && ( pTag == rhs.pTag ); + } + + bool operator!=( DebugMarkerObjectTagInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugMarkerObjectTagInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + uint64_t object = {}; + uint64_t tagName = {}; + size_t tagSize = {}; + const void* pTag = {}; + + }; + static_assert( sizeof( DebugMarkerObjectTagInfoEXT ) == sizeof( VkDebugMarkerObjectTagInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DebugMarkerObjectTagInfoEXT; + }; + + struct DebugReportCallbackCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugReportCallbackCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DebugReportCallbackCreateInfoEXT(VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags_ = {}, PFN_vkDebugReportCallbackEXT pfnCallback_ = {}, void* pUserData_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pfnCallback( pfnCallback_ ), pUserData( pUserData_ ) + {} + + VULKAN_HPP_CONSTEXPR DebugReportCallbackCreateInfoEXT( DebugReportCallbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugReportCallbackCreateInfoEXT( VkDebugReportCallbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugReportCallbackCreateInfoEXT & operator=( VkDebugReportCallbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugReportCallbackCreateInfoEXT & operator=( DebugReportCallbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugReportCallbackCreateInfoEXT ) ); + return *this; + } + + DebugReportCallbackCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugReportCallbackCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DebugReportCallbackCreateInfoEXT & setPfnCallback( PFN_vkDebugReportCallbackEXT pfnCallback_ ) VULKAN_HPP_NOEXCEPT + { + pfnCallback = pfnCallback_; + return *this; + } + + DebugReportCallbackCreateInfoEXT & setPUserData( void* pUserData_ ) VULKAN_HPP_NOEXCEPT + { + pUserData = pUserData_; + return *this; + } + + + operator VkDebugReportCallbackCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugReportCallbackCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugReportCallbackCreateInfoEXT const& ) const = default; +#else + bool operator==( DebugReportCallbackCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pfnCallback == rhs.pfnCallback ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( DebugReportCallbackCreateInfoEXT const& rhs ) const 
VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugReportCallbackCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags = {}; + PFN_vkDebugReportCallbackEXT pfnCallback = {}; + void* pUserData = {}; + + }; + static_assert( sizeof( DebugReportCallbackCreateInfoEXT ) == sizeof( VkDebugReportCallbackCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DebugReportCallbackCreateInfoEXT; + }; + + struct DebugUtilsLabelEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugUtilsLabelEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 DebugUtilsLabelEXT(const char* pLabelName_ = {}, std::array const& color_ = {}) VULKAN_HPP_NOEXCEPT + : pLabelName( pLabelName_ ), color( color_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 DebugUtilsLabelEXT( DebugUtilsLabelEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugUtilsLabelEXT( VkDebugUtilsLabelEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugUtilsLabelEXT & operator=( VkDebugUtilsLabelEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugUtilsLabelEXT & operator=( DebugUtilsLabelEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugUtilsLabelEXT ) ); + return *this; + } + + DebugUtilsLabelEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugUtilsLabelEXT & setPLabelName( const char* pLabelName_ ) VULKAN_HPP_NOEXCEPT + { + pLabelName = pLabelName_; + return *this; + } + + DebugUtilsLabelEXT & setColor( std::array color_ ) VULKAN_HPP_NOEXCEPT + { + color = color_; + return *this; + } + + + operator VkDebugUtilsLabelEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugUtilsLabelEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugUtilsLabelEXT const& ) const = default; +#else + bool operator==( DebugUtilsLabelEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pLabelName == rhs.pLabelName ) + && ( color == rhs.color ); + } + + bool operator!=( DebugUtilsLabelEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugUtilsLabelEXT; + const void* pNext = {}; + const char* pLabelName = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D color = {}; + + }; + static_assert( sizeof( DebugUtilsLabelEXT ) == sizeof( VkDebugUtilsLabelEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DebugUtilsLabelEXT; + }; + + struct DebugUtilsObjectNameInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugUtilsObjectNameInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DebugUtilsObjectNameInfoEXT(VULKAN_HPP_NAMESPACE::ObjectType objectType_ = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown, uint64_t objectHandle_ = {}, const char* pObjectName_ = {}) VULKAN_HPP_NOEXCEPT + : objectType( objectType_ ), objectHandle( objectHandle_ ), pObjectName( pObjectName_ ) + {} + + VULKAN_HPP_CONSTEXPR DebugUtilsObjectNameInfoEXT( DebugUtilsObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugUtilsObjectNameInfoEXT( VkDebugUtilsObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugUtilsObjectNameInfoEXT & operator=( VkDebugUtilsObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugUtilsObjectNameInfoEXT & operator=( DebugUtilsObjectNameInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugUtilsObjectNameInfoEXT ) ); + return *this; + } + + DebugUtilsObjectNameInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugUtilsObjectNameInfoEXT & setObjectType( VULKAN_HPP_NAMESPACE::ObjectType objectType_ ) VULKAN_HPP_NOEXCEPT + { + objectType = objectType_; + return *this; + } + + DebugUtilsObjectNameInfoEXT & setObjectHandle( uint64_t objectHandle_ ) VULKAN_HPP_NOEXCEPT + { + objectHandle = objectHandle_; + return *this; + } + + DebugUtilsObjectNameInfoEXT & setPObjectName( const char* pObjectName_ ) VULKAN_HPP_NOEXCEPT + { + pObjectName = pObjectName_; + return *this; + } + + + operator VkDebugUtilsObjectNameInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugUtilsObjectNameInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugUtilsObjectNameInfoEXT const& ) const = default; +#else + bool operator==( DebugUtilsObjectNameInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( objectHandle == rhs.objectHandle ) + && ( pObjectName == rhs.pObjectName ); + } + + bool operator!=( DebugUtilsObjectNameInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugUtilsObjectNameInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown; + uint64_t objectHandle = {}; + const char* pObjectName = {}; + + }; + static_assert( sizeof( DebugUtilsObjectNameInfoEXT ) == sizeof( VkDebugUtilsObjectNameInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DebugUtilsObjectNameInfoEXT; + }; + + struct DebugUtilsMessengerCallbackDataEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugUtilsMessengerCallbackDataEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 DebugUtilsMessengerCallbackDataEXT(VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataFlagsEXT flags_ = {}, const char* pMessageIdName_ = {}, int32_t messageIdNumber_ = {}, const char* pMessage_ = {}, uint32_t queueLabelCount_ = {}, const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pQueueLabels_ = {}, uint32_t cmdBufLabelCount_ = {}, const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pCmdBufLabels_ = {}, uint32_t objectCount_ = {}, const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pObjects_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pMessageIdName( pMessageIdName_ ), messageIdNumber( messageIdNumber_ ), pMessage( pMessage_ ), queueLabelCount( queueLabelCount_ ), pQueueLabels( pQueueLabels_ ), cmdBufLabelCount( cmdBufLabelCount_ ), pCmdBufLabels( pCmdBufLabels_ ), objectCount( objectCount_ ), pObjects( pObjects_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 DebugUtilsMessengerCallbackDataEXT( DebugUtilsMessengerCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugUtilsMessengerCallbackDataEXT( VkDebugUtilsMessengerCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DebugUtilsMessengerCallbackDataEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataFlagsEXT flags_, const char* pMessageIdName_, int32_t messageIdNumber_, const char* pMessage_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueLabels_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & cmdBufLabels_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & objects_ = {} ) + : flags( flags_ ), pMessageIdName( pMessageIdName_ ), messageIdNumber( messageIdNumber_ ), pMessage( pMessage_ ), queueLabelCount( static_cast( queueLabels_.size() ) ), pQueueLabels( queueLabels_.data() ), cmdBufLabelCount( static_cast( cmdBufLabels_.size() ) ), pCmdBufLabels( cmdBufLabels_.data() ), objectCount( static_cast( objects_.size() ) ), pObjects( objects_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugUtilsMessengerCallbackDataEXT & operator=( VkDebugUtilsMessengerCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & operator=( DebugUtilsMessengerCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugUtilsMessengerCallbackDataEXT ) ); + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setFlags( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setPMessageIdName( const char* pMessageIdName_ ) VULKAN_HPP_NOEXCEPT + { + pMessageIdName = pMessageIdName_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setMessageIdNumber( int32_t messageIdNumber_ ) VULKAN_HPP_NOEXCEPT + { + messageIdNumber = messageIdNumber_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & 
setPMessage( const char* pMessage_ ) VULKAN_HPP_NOEXCEPT + { + pMessage = pMessage_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setQueueLabelCount( uint32_t queueLabelCount_ ) VULKAN_HPP_NOEXCEPT + { + queueLabelCount = queueLabelCount_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setPQueueLabels( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pQueueLabels_ ) VULKAN_HPP_NOEXCEPT + { + pQueueLabels = pQueueLabels_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DebugUtilsMessengerCallbackDataEXT & setQueueLabels( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueLabels_ ) VULKAN_HPP_NOEXCEPT + { + queueLabelCount = static_cast( queueLabels_.size() ); + pQueueLabels = queueLabels_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DebugUtilsMessengerCallbackDataEXT & setCmdBufLabelCount( uint32_t cmdBufLabelCount_ ) VULKAN_HPP_NOEXCEPT + { + cmdBufLabelCount = cmdBufLabelCount_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setPCmdBufLabels( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pCmdBufLabels_ ) VULKAN_HPP_NOEXCEPT + { + pCmdBufLabels = pCmdBufLabels_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DebugUtilsMessengerCallbackDataEXT & setCmdBufLabels( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & cmdBufLabels_ ) VULKAN_HPP_NOEXCEPT + { + cmdBufLabelCount = static_cast( cmdBufLabels_.size() ); + pCmdBufLabels = cmdBufLabels_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DebugUtilsMessengerCallbackDataEXT & setObjectCount( uint32_t objectCount_ ) VULKAN_HPP_NOEXCEPT + { + objectCount = objectCount_; + return *this; + } + + DebugUtilsMessengerCallbackDataEXT & setPObjects( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pObjects_ ) VULKAN_HPP_NOEXCEPT + { + pObjects = pObjects_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DebugUtilsMessengerCallbackDataEXT & setObjects( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & objects_ ) VULKAN_HPP_NOEXCEPT + { + objectCount = static_cast( objects_.size() ); + pObjects = objects_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDebugUtilsMessengerCallbackDataEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugUtilsMessengerCallbackDataEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugUtilsMessengerCallbackDataEXT const& ) const = default; +#else + bool operator==( DebugUtilsMessengerCallbackDataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pMessageIdName == rhs.pMessageIdName ) + && ( messageIdNumber == rhs.messageIdNumber ) + && ( pMessage == rhs.pMessage ) + && ( queueLabelCount == rhs.queueLabelCount ) + && ( pQueueLabels == rhs.pQueueLabels ) + && ( cmdBufLabelCount == rhs.cmdBufLabelCount ) + && ( pCmdBufLabels == rhs.pCmdBufLabels ) + && ( objectCount == rhs.objectCount ) + && ( pObjects == rhs.pObjects ); + } + + bool operator!=( DebugUtilsMessengerCallbackDataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugUtilsMessengerCallbackDataEXT; + const void* pNext = {}; + 
VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataFlagsEXT flags = {}; + const char* pMessageIdName = {}; + int32_t messageIdNumber = {}; + const char* pMessage = {}; + uint32_t queueLabelCount = {}; + const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pQueueLabels = {}; + uint32_t cmdBufLabelCount = {}; + const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pCmdBufLabels = {}; + uint32_t objectCount = {}; + const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pObjects = {}; + + }; + static_assert( sizeof( DebugUtilsMessengerCallbackDataEXT ) == sizeof( VkDebugUtilsMessengerCallbackDataEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DebugUtilsMessengerCallbackDataEXT; + }; + + struct DebugUtilsMessengerCreateInfoEXT + { + static const bool allowDuplicate = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugUtilsMessengerCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DebugUtilsMessengerCreateInfoEXT(VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagsEXT messageSeverity_ = {}, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageType_ = {}, PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback_ = {}, void* pUserData_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), messageSeverity( messageSeverity_ ), messageType( messageType_ ), pfnUserCallback( pfnUserCallback_ ), pUserData( pUserData_ ) + {} + + VULKAN_HPP_CONSTEXPR DebugUtilsMessengerCreateInfoEXT( DebugUtilsMessengerCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugUtilsMessengerCreateInfoEXT( VkDebugUtilsMessengerCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugUtilsMessengerCreateInfoEXT & operator=( VkDebugUtilsMessengerCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & operator=( DebugUtilsMessengerCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugUtilsMessengerCreateInfoEXT ) ); + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & setMessageSeverity( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagsEXT messageSeverity_ ) VULKAN_HPP_NOEXCEPT + { + messageSeverity = messageSeverity_; + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & setMessageType( VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageType_ ) VULKAN_HPP_NOEXCEPT + { + messageType = messageType_; + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & setPfnUserCallback( PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback_ ) VULKAN_HPP_NOEXCEPT + { + pfnUserCallback = pfnUserCallback_; + return *this; + } + + DebugUtilsMessengerCreateInfoEXT & setPUserData( void* pUserData_ ) VULKAN_HPP_NOEXCEPT + { + pUserData = pUserData_; + return *this; + } + + + operator VkDebugUtilsMessengerCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
VkDebugUtilsMessengerCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugUtilsMessengerCreateInfoEXT const& ) const = default; +#else + bool operator==( DebugUtilsMessengerCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( messageSeverity == rhs.messageSeverity ) + && ( messageType == rhs.messageType ) + && ( pfnUserCallback == rhs.pfnUserCallback ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( DebugUtilsMessengerCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugUtilsMessengerCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateFlagsEXT flags = {}; + VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagsEXT messageSeverity = {}; + VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageType = {}; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback = {}; + void* pUserData = {}; + + }; + static_assert( sizeof( DebugUtilsMessengerCreateInfoEXT ) == sizeof( VkDebugUtilsMessengerCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DebugUtilsMessengerCreateInfoEXT; + }; + + struct DebugUtilsObjectTagInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDebugUtilsObjectTagInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DebugUtilsObjectTagInfoEXT(VULKAN_HPP_NAMESPACE::ObjectType objectType_ = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown, uint64_t objectHandle_ = {}, uint64_t tagName_ = {}, size_t tagSize_ = {}, const void* pTag_ = {}) VULKAN_HPP_NOEXCEPT + : objectType( objectType_ ), objectHandle( objectHandle_ ), tagName( tagName_ ), tagSize( tagSize_ ), pTag( pTag_ ) + {} + + VULKAN_HPP_CONSTEXPR DebugUtilsObjectTagInfoEXT( DebugUtilsObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DebugUtilsObjectTagInfoEXT( VkDebugUtilsObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + DebugUtilsObjectTagInfoEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType_, uint64_t objectHandle_, uint64_t tagName_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & tag_ ) + : objectType( objectType_ ), objectHandle( objectHandle_ ), tagName( tagName_ ), tagSize( tag_.size() * sizeof(T) ), pTag( tag_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DebugUtilsObjectTagInfoEXT & operator=( VkDebugUtilsObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DebugUtilsObjectTagInfoEXT & operator=( DebugUtilsObjectTagInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DebugUtilsObjectTagInfoEXT ) ); + return *this; + } + + DebugUtilsObjectTagInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DebugUtilsObjectTagInfoEXT & setObjectType( VULKAN_HPP_NAMESPACE::ObjectType objectType_ ) VULKAN_HPP_NOEXCEPT + { + objectType = 
objectType_; + return *this; + } + + DebugUtilsObjectTagInfoEXT & setObjectHandle( uint64_t objectHandle_ ) VULKAN_HPP_NOEXCEPT + { + objectHandle = objectHandle_; + return *this; + } + + DebugUtilsObjectTagInfoEXT & setTagName( uint64_t tagName_ ) VULKAN_HPP_NOEXCEPT + { + tagName = tagName_; + return *this; + } + + DebugUtilsObjectTagInfoEXT & setTagSize( size_t tagSize_ ) VULKAN_HPP_NOEXCEPT + { + tagSize = tagSize_; + return *this; + } + + DebugUtilsObjectTagInfoEXT & setPTag( const void* pTag_ ) VULKAN_HPP_NOEXCEPT + { + pTag = pTag_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + DebugUtilsObjectTagInfoEXT & setTag( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & tag_ ) VULKAN_HPP_NOEXCEPT + { + tagSize = tag_.size() * sizeof(T); + pTag = tag_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDebugUtilsObjectTagInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDebugUtilsObjectTagInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugUtilsObjectTagInfoEXT const& ) const = default; +#else + bool operator==( DebugUtilsObjectTagInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( objectType == rhs.objectType ) + && ( objectHandle == rhs.objectHandle ) + && ( tagName == rhs.tagName ) + && ( tagSize == rhs.tagSize ) + && ( pTag == rhs.pTag ); + } + + bool operator!=( DebugUtilsObjectTagInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDebugUtilsObjectTagInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown; + uint64_t objectHandle = {}; + uint64_t tagName = {}; + size_t tagSize = {}; + const void* pTag = {}; + + }; + static_assert( sizeof( DebugUtilsObjectTagInfoEXT ) == sizeof( VkDebugUtilsObjectTagInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DebugUtilsObjectTagInfoEXT; + }; + + struct DedicatedAllocationBufferCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDedicatedAllocationBufferCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DedicatedAllocationBufferCreateInfoNV(VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation_ = {}) VULKAN_HPP_NOEXCEPT + : dedicatedAllocation( dedicatedAllocation_ ) + {} + + VULKAN_HPP_CONSTEXPR DedicatedAllocationBufferCreateInfoNV( DedicatedAllocationBufferCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DedicatedAllocationBufferCreateInfoNV( VkDedicatedAllocationBufferCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DedicatedAllocationBufferCreateInfoNV & operator=( VkDedicatedAllocationBufferCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DedicatedAllocationBufferCreateInfoNV & operator=( DedicatedAllocationBufferCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DedicatedAllocationBufferCreateInfoNV ) ); + return *this; + } + + DedicatedAllocationBufferCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DedicatedAllocationBufferCreateInfoNV & setDedicatedAllocation( VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation_ ) VULKAN_HPP_NOEXCEPT + { + dedicatedAllocation = dedicatedAllocation_; + return *this; + } + + + operator VkDedicatedAllocationBufferCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDedicatedAllocationBufferCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DedicatedAllocationBufferCreateInfoNV const& ) const = default; +#else + bool operator==( DedicatedAllocationBufferCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dedicatedAllocation == rhs.dedicatedAllocation ); + } + + bool operator!=( DedicatedAllocationBufferCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDedicatedAllocationBufferCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation = {}; + + }; + static_assert( sizeof( DedicatedAllocationBufferCreateInfoNV ) == sizeof( VkDedicatedAllocationBufferCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DedicatedAllocationBufferCreateInfoNV; + }; + + struct DedicatedAllocationImageCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDedicatedAllocationImageCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DedicatedAllocationImageCreateInfoNV(VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation_ = {}) VULKAN_HPP_NOEXCEPT + : dedicatedAllocation( dedicatedAllocation_ ) + {} + + VULKAN_HPP_CONSTEXPR DedicatedAllocationImageCreateInfoNV( DedicatedAllocationImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DedicatedAllocationImageCreateInfoNV( VkDedicatedAllocationImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DedicatedAllocationImageCreateInfoNV & operator=( VkDedicatedAllocationImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DedicatedAllocationImageCreateInfoNV & operator=( DedicatedAllocationImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DedicatedAllocationImageCreateInfoNV ) ); + return *this; + } + + DedicatedAllocationImageCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DedicatedAllocationImageCreateInfoNV & setDedicatedAllocation( VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation_ ) VULKAN_HPP_NOEXCEPT + { + dedicatedAllocation = dedicatedAllocation_; + return *this; + } + + + operator VkDedicatedAllocationImageCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDedicatedAllocationImageCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DedicatedAllocationImageCreateInfoNV const& ) const = default; +#else + bool operator==( DedicatedAllocationImageCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dedicatedAllocation == rhs.dedicatedAllocation ); + } + + bool operator!=( DedicatedAllocationImageCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDedicatedAllocationImageCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocation = {}; + + }; + static_assert( sizeof( DedicatedAllocationImageCreateInfoNV ) == sizeof( VkDedicatedAllocationImageCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DedicatedAllocationImageCreateInfoNV; + }; + + struct DedicatedAllocationMemoryAllocateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDedicatedAllocationMemoryAllocateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DedicatedAllocationMemoryAllocateInfoNV(VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ), buffer( buffer_ ) + {} + + VULKAN_HPP_CONSTEXPR DedicatedAllocationMemoryAllocateInfoNV( DedicatedAllocationMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DedicatedAllocationMemoryAllocateInfoNV( VkDedicatedAllocationMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DedicatedAllocationMemoryAllocateInfoNV & operator=( VkDedicatedAllocationMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DedicatedAllocationMemoryAllocateInfoNV & operator=( DedicatedAllocationMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DedicatedAllocationMemoryAllocateInfoNV ) ); + return *this; + } + + DedicatedAllocationMemoryAllocateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DedicatedAllocationMemoryAllocateInfoNV & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + DedicatedAllocationMemoryAllocateInfoNV & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + + operator VkDedicatedAllocationMemoryAllocateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDedicatedAllocationMemoryAllocateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DedicatedAllocationMemoryAllocateInfoNV const& ) const = default; +#else + bool operator==( DedicatedAllocationMemoryAllocateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( DedicatedAllocationMemoryAllocateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDedicatedAllocationMemoryAllocateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + + }; + static_assert( sizeof( DedicatedAllocationMemoryAllocateInfoNV ) == sizeof( VkDedicatedAllocationMemoryAllocateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DedicatedAllocationMemoryAllocateInfoNV; + }; + + struct DescriptorBufferInfo + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorBufferInfo(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize range_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ), offset( offset_ ), range( range_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorBufferInfo( DescriptorBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorBufferInfo( VkDescriptorBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorBufferInfo & operator=( VkDescriptorBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorBufferInfo & operator=( DescriptorBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorBufferInfo ) ); + return *this; + } + + DescriptorBufferInfo & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + DescriptorBufferInfo & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + DescriptorBufferInfo & setRange( VULKAN_HPP_NAMESPACE::DeviceSize range_ ) VULKAN_HPP_NOEXCEPT + { + range = range_; + return *this; + } + + + operator VkDescriptorBufferInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorBufferInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorBufferInfo const& ) const = default; +#else + bool operator==( DescriptorBufferInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( buffer == rhs.buffer ) + && ( offset == rhs.offset ) + && ( range == rhs.range ); + } + + bool operator!=( DescriptorBufferInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize range = {}; + + }; + static_assert( sizeof( DescriptorBufferInfo ) == sizeof( VkDescriptorBufferInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + class Sampler + { + public: + using CType = VkSampler; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eSampler; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSampler; + + public: + VULKAN_HPP_CONSTEXPR Sampler() VULKAN_HPP_NOEXCEPT + : m_sampler(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Sampler( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_sampler(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Sampler( VkSampler sampler ) VULKAN_HPP_NOEXCEPT + : m_sampler( sampler ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Sampler & operator=(VkSampler sampler) VULKAN_HPP_NOEXCEPT + { + m_sampler = sampler; + return *this; + } +#endif + + Sampler & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_sampler = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Sampler const& ) const = default; +#else + bool operator==( Sampler const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_sampler == rhs.m_sampler; + } + + bool operator!=(Sampler const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_sampler != rhs.m_sampler; + } + + bool operator<(Sampler const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_sampler < rhs.m_sampler; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSampler() const VULKAN_HPP_NOEXCEPT + { + return m_sampler; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_sampler != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_sampler == VK_NULL_HANDLE; + } + + private: + VkSampler m_sampler; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Sampler ) == sizeof( VkSampler ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Sampler; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Sampler; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Sampler; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class ImageView + { + public: + using CType = VkImageView; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eImageView; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eImageView; + + public: + VULKAN_HPP_CONSTEXPR ImageView() VULKAN_HPP_NOEXCEPT + : m_imageView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ImageView( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_imageView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ImageView( VkImageView imageView ) VULKAN_HPP_NOEXCEPT + : m_imageView( imageView ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ImageView & operator=(VkImageView imageView) VULKAN_HPP_NOEXCEPT + { + m_imageView = imageView; + return *this; + } +#endif + + ImageView & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_imageView = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageView const& ) const = default; +#else + bool operator==( ImageView const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_imageView == rhs.m_imageView; + } + + bool operator!=(ImageView const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_imageView != rhs.m_imageView; + } + + bool operator<(ImageView const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_imageView < rhs.m_imageView; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkImageView() const VULKAN_HPP_NOEXCEPT + { + return m_imageView; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_imageView != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_imageView == VK_NULL_HANDLE; + } + + private: + VkImageView m_imageView; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::ImageView ) == sizeof( VkImageView ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::ImageView; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::ImageView; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::ImageView; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct DescriptorImageInfo + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorImageInfo(VULKAN_HPP_NAMESPACE::Sampler sampler_ = {}, VULKAN_HPP_NAMESPACE::ImageView imageView_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : sampler( sampler_ ), imageView( imageView_ ), imageLayout( imageLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorImageInfo( DescriptorImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorImageInfo( VkDescriptorImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorImageInfo & operator=( VkDescriptorImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorImageInfo & operator=( DescriptorImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorImageInfo ) ); + return *this; + } + + DescriptorImageInfo & setSampler( VULKAN_HPP_NAMESPACE::Sampler sampler_ ) VULKAN_HPP_NOEXCEPT + { + sampler = sampler_; + return *this; + } + + DescriptorImageInfo & setImageView( VULKAN_HPP_NAMESPACE::ImageView imageView_ ) VULKAN_HPP_NOEXCEPT + { + imageView = imageView_; + return *this; + } + + DescriptorImageInfo & setImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout imageLayout_ ) VULKAN_HPP_NOEXCEPT + { + imageLayout = imageLayout_; + return *this; + } + + + operator VkDescriptorImageInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorImageInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorImageInfo const& ) const = default; +#else + bool operator==( DescriptorImageInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sampler == rhs.sampler ) + && ( imageView == rhs.imageView ) + && ( imageLayout == rhs.imageLayout ); + } + + bool operator!=( DescriptorImageInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Sampler sampler = {}; + VULKAN_HPP_NAMESPACE::ImageView imageView = {}; + VULKAN_HPP_NAMESPACE::ImageLayout imageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( DescriptorImageInfo ) == sizeof( VkDescriptorImageInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct DescriptorPoolSize
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR DescriptorPoolSize(VULKAN_HPP_NAMESPACE::DescriptorType type_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, uint32_t descriptorCount_ = {}) VULKAN_HPP_NOEXCEPT
+      : type( type_ ), descriptorCount( descriptorCount_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR DescriptorPoolSize( DescriptorPoolSize const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    DescriptorPoolSize( VkDescriptorPoolSize const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    DescriptorPoolSize & operator=( VkDescriptorPoolSize const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DescriptorPoolSize const *>( &rhs );
+      return *this;
+    }
+
+    DescriptorPoolSize & operator=( DescriptorPoolSize const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( DescriptorPoolSize ) );
+      return *this;
+    }
+
+    DescriptorPoolSize & setType( VULKAN_HPP_NAMESPACE::DescriptorType type_ ) VULKAN_HPP_NOEXCEPT
+    {
+      type = type_;
+      return *this;
+    }
+
+    DescriptorPoolSize & setDescriptorCount( uint32_t descriptorCount_ ) VULKAN_HPP_NOEXCEPT
+    {
+      descriptorCount = descriptorCount_;
+      return *this;
+    }
+
+
+    operator VkDescriptorPoolSize const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkDescriptorPoolSize*>( this );
+    }
+
+    operator VkDescriptorPoolSize &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkDescriptorPoolSize*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DescriptorPoolSize const& ) const = default;
+#else
+    bool operator==( DescriptorPoolSize const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( type == rhs.type )
+          && ( descriptorCount == rhs.descriptorCount );
+    }
+
+    bool operator!=( DescriptorPoolSize const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::DescriptorType type = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler;
+    uint32_t descriptorCount = {};
+
+  };
+  static_assert( sizeof( DescriptorPoolSize ) == sizeof( VkDescriptorPoolSize ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<DescriptorPoolSize>::value, "struct wrapper is not a standard layout!"
); + + struct DescriptorPoolCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorPoolCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorPoolCreateInfo(VULKAN_HPP_NAMESPACE::DescriptorPoolCreateFlags flags_ = {}, uint32_t maxSets_ = {}, uint32_t poolSizeCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorPoolSize* pPoolSizes_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), maxSets( maxSets_ ), poolSizeCount( poolSizeCount_ ), pPoolSizes( pPoolSizes_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorPoolCreateInfo( DescriptorPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorPoolCreateInfo( VkDescriptorPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorPoolCreateInfo( VULKAN_HPP_NAMESPACE::DescriptorPoolCreateFlags flags_, uint32_t maxSets_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & poolSizes_ ) + : flags( flags_ ), maxSets( maxSets_ ), poolSizeCount( static_cast( poolSizes_.size() ) ), pPoolSizes( poolSizes_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorPoolCreateInfo & operator=( VkDescriptorPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorPoolCreateInfo & operator=( DescriptorPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorPoolCreateInfo ) ); + return *this; + } + + DescriptorPoolCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DescriptorPoolCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::DescriptorPoolCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DescriptorPoolCreateInfo & setMaxSets( uint32_t maxSets_ ) VULKAN_HPP_NOEXCEPT + { + maxSets = maxSets_; + return *this; + } + + DescriptorPoolCreateInfo & setPoolSizeCount( uint32_t poolSizeCount_ ) VULKAN_HPP_NOEXCEPT + { + poolSizeCount = poolSizeCount_; + return *this; + } + + DescriptorPoolCreateInfo & setPPoolSizes( const VULKAN_HPP_NAMESPACE::DescriptorPoolSize* pPoolSizes_ ) VULKAN_HPP_NOEXCEPT + { + pPoolSizes = pPoolSizes_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorPoolCreateInfo & setPoolSizes( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & poolSizes_ ) VULKAN_HPP_NOEXCEPT + { + poolSizeCount = static_cast( poolSizes_.size() ); + pPoolSizes = poolSizes_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDescriptorPoolCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorPoolCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorPoolCreateInfo const& ) const = default; +#else + bool operator==( DescriptorPoolCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( maxSets == rhs.maxSets ) + && ( poolSizeCount == rhs.poolSizeCount ) + && ( pPoolSizes == rhs.pPoolSizes ); + } + + bool operator!=( DescriptorPoolCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: 
+ const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorPoolCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DescriptorPoolCreateFlags flags = {}; + uint32_t maxSets = {}; + uint32_t poolSizeCount = {}; + const VULKAN_HPP_NAMESPACE::DescriptorPoolSize* pPoolSizes = {}; + + }; + static_assert( sizeof( DescriptorPoolCreateInfo ) == sizeof( VkDescriptorPoolCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorPoolCreateInfo; + }; + + struct DescriptorPoolInlineUniformBlockCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorPoolInlineUniformBlockCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorPoolInlineUniformBlockCreateInfoEXT(uint32_t maxInlineUniformBlockBindings_ = {}) VULKAN_HPP_NOEXCEPT + : maxInlineUniformBlockBindings( maxInlineUniformBlockBindings_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorPoolInlineUniformBlockCreateInfoEXT( DescriptorPoolInlineUniformBlockCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorPoolInlineUniformBlockCreateInfoEXT( VkDescriptorPoolInlineUniformBlockCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorPoolInlineUniformBlockCreateInfoEXT & operator=( VkDescriptorPoolInlineUniformBlockCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorPoolInlineUniformBlockCreateInfoEXT & operator=( DescriptorPoolInlineUniformBlockCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorPoolInlineUniformBlockCreateInfoEXT ) ); + return *this; + } + + DescriptorPoolInlineUniformBlockCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DescriptorPoolInlineUniformBlockCreateInfoEXT & setMaxInlineUniformBlockBindings( uint32_t maxInlineUniformBlockBindings_ ) VULKAN_HPP_NOEXCEPT + { + maxInlineUniformBlockBindings = maxInlineUniformBlockBindings_; + return *this; + } + + + operator VkDescriptorPoolInlineUniformBlockCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorPoolInlineUniformBlockCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorPoolInlineUniformBlockCreateInfoEXT const& ) const = default; +#else + bool operator==( DescriptorPoolInlineUniformBlockCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxInlineUniformBlockBindings == rhs.maxInlineUniformBlockBindings ); + } + + bool operator!=( DescriptorPoolInlineUniformBlockCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorPoolInlineUniformBlockCreateInfoEXT; + const void* pNext = {}; + uint32_t maxInlineUniformBlockBindings = {}; + + }; + static_assert( sizeof( DescriptorPoolInlineUniformBlockCreateInfoEXT ) == sizeof( VkDescriptorPoolInlineUniformBlockCreateInfoEXT ), "struct and wrapper have 
different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorPoolInlineUniformBlockCreateInfoEXT; + }; + + class DescriptorPool + { + public: + using CType = VkDescriptorPool; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorPool; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorPool; + + public: + VULKAN_HPP_CONSTEXPR DescriptorPool() VULKAN_HPP_NOEXCEPT + : m_descriptorPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorPool( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_descriptorPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorPool( VkDescriptorPool descriptorPool ) VULKAN_HPP_NOEXCEPT + : m_descriptorPool( descriptorPool ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorPool & operator=(VkDescriptorPool descriptorPool) VULKAN_HPP_NOEXCEPT + { + m_descriptorPool = descriptorPool; + return *this; + } +#endif + + DescriptorPool & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_descriptorPool = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorPool const& ) const = default; +#else + bool operator==( DescriptorPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool == rhs.m_descriptorPool; + } + + bool operator!=(DescriptorPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool != rhs.m_descriptorPool; + } + + bool operator<(DescriptorPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool < rhs.m_descriptorPool; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorPool() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorPool == VK_NULL_HANDLE; + } + + private: + VkDescriptorPool m_descriptorPool; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DescriptorPool ) == sizeof( VkDescriptorPool ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DescriptorPool; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorPool; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorPool; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class DescriptorSetLayout + { + public: + using CType = VkDescriptorSetLayout; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorSetLayout; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorSetLayout; + + public: + VULKAN_HPP_CONSTEXPR DescriptorSetLayout() VULKAN_HPP_NOEXCEPT + : m_descriptorSetLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetLayout( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_descriptorSetLayout(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorSetLayout( VkDescriptorSetLayout descriptorSetLayout ) VULKAN_HPP_NOEXCEPT + : m_descriptorSetLayout( descriptorSetLayout ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorSetLayout & operator=(VkDescriptorSetLayout descriptorSetLayout) VULKAN_HPP_NOEXCEPT + { + m_descriptorSetLayout = descriptorSetLayout; + return *this; + } +#endif + + DescriptorSetLayout & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_descriptorSetLayout = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetLayout const& ) const = default; +#else + bool operator==( DescriptorSetLayout const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout == rhs.m_descriptorSetLayout; + } + + bool operator!=(DescriptorSetLayout const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout != rhs.m_descriptorSetLayout; + } + + bool operator<(DescriptorSetLayout const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout < rhs.m_descriptorSetLayout; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorSetLayout() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorSetLayout == VK_NULL_HANDLE; + } + + private: + VkDescriptorSetLayout m_descriptorSetLayout; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DescriptorSetLayout ) == sizeof( VkDescriptorSetLayout ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DescriptorSetLayout; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorSetLayout; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorSetLayout; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct DescriptorSetAllocateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorSetAllocateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetAllocateInfo(VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool_ = {}, uint32_t descriptorSetCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayouts_ = {}) VULKAN_HPP_NOEXCEPT + : descriptorPool( descriptorPool_ ), descriptorSetCount( descriptorSetCount_ ), pSetLayouts( pSetLayouts_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetAllocateInfo( DescriptorSetAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetAllocateInfo( VkDescriptorSetAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetAllocateInfo( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & setLayouts_ ) + : descriptorPool( descriptorPool_ ), descriptorSetCount( static_cast( setLayouts_.size() ) ), pSetLayouts( setLayouts_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetAllocateInfo & operator=( VkDescriptorSetAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetAllocateInfo & operator=( DescriptorSetAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetAllocateInfo ) ); + return *this; + } + + DescriptorSetAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DescriptorSetAllocateInfo & setDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool_ ) VULKAN_HPP_NOEXCEPT + { + descriptorPool = descriptorPool_; + return *this; + } + + DescriptorSetAllocateInfo & setDescriptorSetCount( uint32_t descriptorSetCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorSetCount = descriptorSetCount_; + return *this; + } + + DescriptorSetAllocateInfo & setPSetLayouts( const VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayouts_ ) VULKAN_HPP_NOEXCEPT + { + pSetLayouts = pSetLayouts_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetAllocateInfo & setSetLayouts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & setLayouts_ ) VULKAN_HPP_NOEXCEPT + { + descriptorSetCount = static_cast( setLayouts_.size() ); + pSetLayouts = setLayouts_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDescriptorSetAllocateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorSetAllocateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetAllocateInfo const& ) const = default; +#else + bool operator==( DescriptorSetAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return 
( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( descriptorPool == rhs.descriptorPool ) + && ( descriptorSetCount == rhs.descriptorSetCount ) + && ( pSetLayouts == rhs.pSetLayouts ); + } + + bool operator!=( DescriptorSetAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorSetAllocateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool = {}; + uint32_t descriptorSetCount = {}; + const VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayouts = {}; + + }; + static_assert( sizeof( DescriptorSetAllocateInfo ) == sizeof( VkDescriptorSetAllocateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorSetAllocateInfo; + }; + + struct DescriptorSetLayoutBinding + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutBinding(uint32_t binding_ = {}, VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, uint32_t descriptorCount_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, const VULKAN_HPP_NAMESPACE::Sampler* pImmutableSamplers_ = {}) VULKAN_HPP_NOEXCEPT + : binding( binding_ ), descriptorType( descriptorType_ ), descriptorCount( descriptorCount_ ), stageFlags( stageFlags_ ), pImmutableSamplers( pImmutableSamplers_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutBinding( DescriptorSetLayoutBinding const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetLayoutBinding( VkDescriptorSetLayoutBinding const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetLayoutBinding( uint32_t binding_, VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & immutableSamplers_ ) + : binding( binding_ ), descriptorType( descriptorType_ ), descriptorCount( static_cast( immutableSamplers_.size() ) ), stageFlags( stageFlags_ ), pImmutableSamplers( immutableSamplers_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetLayoutBinding & operator=( VkDescriptorSetLayoutBinding const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetLayoutBinding & operator=( DescriptorSetLayoutBinding const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetLayoutBinding ) ); + return *this; + } + + DescriptorSetLayoutBinding & setBinding( uint32_t binding_ ) VULKAN_HPP_NOEXCEPT + { + binding = binding_; + return *this; + } + + DescriptorSetLayoutBinding & setDescriptorType( VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ ) VULKAN_HPP_NOEXCEPT + { + descriptorType = descriptorType_; + return *this; + } + + DescriptorSetLayoutBinding & setDescriptorCount( uint32_t descriptorCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = descriptorCount_; + return *this; + } + + DescriptorSetLayoutBinding & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT + { + stageFlags = stageFlags_; + return *this; + } + + DescriptorSetLayoutBinding & setPImmutableSamplers( const VULKAN_HPP_NAMESPACE::Sampler* pImmutableSamplers_ ) 
VULKAN_HPP_NOEXCEPT
+    {
+      pImmutableSamplers = pImmutableSamplers_;
+      return *this;
+    }
+
+#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE)
+    DescriptorSetLayoutBinding & setImmutableSamplers( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const VULKAN_HPP_NAMESPACE::Sampler> const & immutableSamplers_ ) VULKAN_HPP_NOEXCEPT
+    {
+      descriptorCount = static_cast<uint32_t>( immutableSamplers_.size() );
+      pImmutableSamplers = immutableSamplers_.data();
+      return *this;
+    }
+#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE)
+
+
+    operator VkDescriptorSetLayoutBinding const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkDescriptorSetLayoutBinding*>( this );
+    }
+
+    operator VkDescriptorSetLayoutBinding &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkDescriptorSetLayoutBinding*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DescriptorSetLayoutBinding const& ) const = default;
+#else
+    bool operator==( DescriptorSetLayoutBinding const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( binding == rhs.binding )
+          && ( descriptorType == rhs.descriptorType )
+          && ( descriptorCount == rhs.descriptorCount )
+          && ( stageFlags == rhs.stageFlags )
+          && ( pImmutableSamplers == rhs.pImmutableSamplers );
+    }
+
+    bool operator!=( DescriptorSetLayoutBinding const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t binding = {};
+    VULKAN_HPP_NAMESPACE::DescriptorType descriptorType = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler;
+    uint32_t descriptorCount = {};
+    VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags = {};
+    const VULKAN_HPP_NAMESPACE::Sampler* pImmutableSamplers = {};
+
+  };
+  static_assert( sizeof( DescriptorSetLayoutBinding ) == sizeof( VkDescriptorSetLayoutBinding ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<DescriptorSetLayoutBinding>::value, "struct wrapper is not a standard layout!"
); + + struct DescriptorSetLayoutBindingFlagsCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorSetLayoutBindingFlagsCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutBindingFlagsCreateInfo(uint32_t bindingCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorBindingFlags* pBindingFlags_ = {}) VULKAN_HPP_NOEXCEPT + : bindingCount( bindingCount_ ), pBindingFlags( pBindingFlags_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutBindingFlagsCreateInfo( DescriptorSetLayoutBindingFlagsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetLayoutBindingFlagsCreateInfo( VkDescriptorSetLayoutBindingFlagsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetLayoutBindingFlagsCreateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bindingFlags_ ) + : bindingCount( static_cast( bindingFlags_.size() ) ), pBindingFlags( bindingFlags_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetLayoutBindingFlagsCreateInfo & operator=( VkDescriptorSetLayoutBindingFlagsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetLayoutBindingFlagsCreateInfo & operator=( DescriptorSetLayoutBindingFlagsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetLayoutBindingFlagsCreateInfo ) ); + return *this; + } + + DescriptorSetLayoutBindingFlagsCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DescriptorSetLayoutBindingFlagsCreateInfo & setBindingCount( uint32_t bindingCount_ ) VULKAN_HPP_NOEXCEPT + { + bindingCount = bindingCount_; + return *this; + } + + DescriptorSetLayoutBindingFlagsCreateInfo & setPBindingFlags( const VULKAN_HPP_NAMESPACE::DescriptorBindingFlags* pBindingFlags_ ) VULKAN_HPP_NOEXCEPT + { + pBindingFlags = pBindingFlags_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetLayoutBindingFlagsCreateInfo & setBindingFlags( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bindingFlags_ ) VULKAN_HPP_NOEXCEPT + { + bindingCount = static_cast( bindingFlags_.size() ); + pBindingFlags = bindingFlags_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDescriptorSetLayoutBindingFlagsCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorSetLayoutBindingFlagsCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetLayoutBindingFlagsCreateInfo const& ) const = default; +#else + bool operator==( DescriptorSetLayoutBindingFlagsCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( bindingCount == rhs.bindingCount ) + && ( pBindingFlags == rhs.pBindingFlags ); + } + + bool operator!=( DescriptorSetLayoutBindingFlagsCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorSetLayoutBindingFlagsCreateInfo; + const void* pNext = {}; + uint32_t 
bindingCount = {}; + const VULKAN_HPP_NAMESPACE::DescriptorBindingFlags* pBindingFlags = {}; + + }; + static_assert( sizeof( DescriptorSetLayoutBindingFlagsCreateInfo ) == sizeof( VkDescriptorSetLayoutBindingFlagsCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorSetLayoutBindingFlagsCreateInfo; + }; + using DescriptorSetLayoutBindingFlagsCreateInfoEXT = DescriptorSetLayoutBindingFlagsCreateInfo; + + struct DescriptorSetLayoutCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorSetLayoutCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutCreateInfo(VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateFlags flags_ = {}, uint32_t bindingCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutBinding* pBindings_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), bindingCount( bindingCount_ ), pBindings( pBindings_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutCreateInfo( DescriptorSetLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetLayoutCreateInfo( VkDescriptorSetLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetLayoutCreateInfo( VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bindings_ ) + : flags( flags_ ), bindingCount( static_cast( bindings_.size() ) ), pBindings( bindings_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetLayoutCreateInfo & operator=( VkDescriptorSetLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetLayoutCreateInfo & operator=( DescriptorSetLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetLayoutCreateInfo ) ); + return *this; + } + + DescriptorSetLayoutCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DescriptorSetLayoutCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DescriptorSetLayoutCreateInfo & setBindingCount( uint32_t bindingCount_ ) VULKAN_HPP_NOEXCEPT + { + bindingCount = bindingCount_; + return *this; + } + + DescriptorSetLayoutCreateInfo & setPBindings( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutBinding* pBindings_ ) VULKAN_HPP_NOEXCEPT + { + pBindings = pBindings_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetLayoutCreateInfo & setBindings( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bindings_ ) VULKAN_HPP_NOEXCEPT + { + bindingCount = static_cast( bindings_.size() ); + pBindings = bindings_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDescriptorSetLayoutCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorSetLayoutCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetLayoutCreateInfo const& ) 
const = default; +#else + bool operator==( DescriptorSetLayoutCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( bindingCount == rhs.bindingCount ) + && ( pBindings == rhs.pBindings ); + } + + bool operator!=( DescriptorSetLayoutCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorSetLayoutCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateFlags flags = {}; + uint32_t bindingCount = {}; + const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutBinding* pBindings = {}; + + }; + static_assert( sizeof( DescriptorSetLayoutCreateInfo ) == sizeof( VkDescriptorSetLayoutCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorSetLayoutCreateInfo; + }; + + struct DescriptorSetLayoutSupport + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorSetLayoutSupport; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutSupport(VULKAN_HPP_NAMESPACE::Bool32 supported_ = {}) VULKAN_HPP_NOEXCEPT + : supported( supported_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetLayoutSupport( DescriptorSetLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetLayoutSupport( VkDescriptorSetLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetLayoutSupport & operator=( VkDescriptorSetLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetLayoutSupport & operator=( DescriptorSetLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetLayoutSupport ) ); + return *this; + } + + + operator VkDescriptorSetLayoutSupport const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorSetLayoutSupport &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetLayoutSupport const& ) const = default; +#else + bool operator==( DescriptorSetLayoutSupport const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( supported == rhs.supported ); + } + + bool operator!=( DescriptorSetLayoutSupport const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorSetLayoutSupport; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 supported = {}; + + }; + static_assert( sizeof( DescriptorSetLayoutSupport ) == sizeof( VkDescriptorSetLayoutSupport ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DescriptorSetLayoutSupport; + }; + using DescriptorSetLayoutSupportKHR = DescriptorSetLayoutSupport; + + struct DescriptorSetVariableDescriptorCountAllocateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorSetVariableDescriptorCountAllocateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetVariableDescriptorCountAllocateInfo(uint32_t descriptorSetCount_ = {}, const uint32_t* pDescriptorCounts_ = {}) VULKAN_HPP_NOEXCEPT + : descriptorSetCount( descriptorSetCount_ ), pDescriptorCounts( pDescriptorCounts_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetVariableDescriptorCountAllocateInfo( DescriptorSetVariableDescriptorCountAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetVariableDescriptorCountAllocateInfo( VkDescriptorSetVariableDescriptorCountAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetVariableDescriptorCountAllocateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorCounts_ ) + : descriptorSetCount( static_cast( descriptorCounts_.size() ) ), pDescriptorCounts( descriptorCounts_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetVariableDescriptorCountAllocateInfo & operator=( VkDescriptorSetVariableDescriptorCountAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetVariableDescriptorCountAllocateInfo & operator=( DescriptorSetVariableDescriptorCountAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetVariableDescriptorCountAllocateInfo ) ); + return *this; + } + + DescriptorSetVariableDescriptorCountAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DescriptorSetVariableDescriptorCountAllocateInfo & setDescriptorSetCount( uint32_t descriptorSetCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorSetCount = descriptorSetCount_; + return *this; + } + + DescriptorSetVariableDescriptorCountAllocateInfo & setPDescriptorCounts( const uint32_t* pDescriptorCounts_ ) VULKAN_HPP_NOEXCEPT + { + pDescriptorCounts = pDescriptorCounts_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorSetVariableDescriptorCountAllocateInfo & setDescriptorCounts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorCounts_ ) VULKAN_HPP_NOEXCEPT + { + descriptorSetCount = static_cast( descriptorCounts_.size() ); + pDescriptorCounts = descriptorCounts_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDescriptorSetVariableDescriptorCountAllocateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorSetVariableDescriptorCountAllocateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetVariableDescriptorCountAllocateInfo const& ) const = default; +#else + bool operator==( DescriptorSetVariableDescriptorCountAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( descriptorSetCount == rhs.descriptorSetCount ) + && ( 
pDescriptorCounts == rhs.pDescriptorCounts ); + } + + bool operator!=( DescriptorSetVariableDescriptorCountAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorSetVariableDescriptorCountAllocateInfo; + const void* pNext = {}; + uint32_t descriptorSetCount = {}; + const uint32_t* pDescriptorCounts = {}; + + }; + static_assert( sizeof( DescriptorSetVariableDescriptorCountAllocateInfo ) == sizeof( VkDescriptorSetVariableDescriptorCountAllocateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorSetVariableDescriptorCountAllocateInfo; + }; + using DescriptorSetVariableDescriptorCountAllocateInfoEXT = DescriptorSetVariableDescriptorCountAllocateInfo; + + struct DescriptorSetVariableDescriptorCountLayoutSupport + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorSetVariableDescriptorCountLayoutSupport; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorSetVariableDescriptorCountLayoutSupport(uint32_t maxVariableDescriptorCount_ = {}) VULKAN_HPP_NOEXCEPT + : maxVariableDescriptorCount( maxVariableDescriptorCount_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorSetVariableDescriptorCountLayoutSupport( DescriptorSetVariableDescriptorCountLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorSetVariableDescriptorCountLayoutSupport( VkDescriptorSetVariableDescriptorCountLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorSetVariableDescriptorCountLayoutSupport & operator=( VkDescriptorSetVariableDescriptorCountLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorSetVariableDescriptorCountLayoutSupport & operator=( DescriptorSetVariableDescriptorCountLayoutSupport const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorSetVariableDescriptorCountLayoutSupport ) ); + return *this; + } + + + operator VkDescriptorSetVariableDescriptorCountLayoutSupport const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorSetVariableDescriptorCountLayoutSupport &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorSetVariableDescriptorCountLayoutSupport const& ) const = default; +#else + bool operator==( DescriptorSetVariableDescriptorCountLayoutSupport const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxVariableDescriptorCount == rhs.maxVariableDescriptorCount ); + } + + bool operator!=( DescriptorSetVariableDescriptorCountLayoutSupport const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorSetVariableDescriptorCountLayoutSupport; + void* pNext = {}; + uint32_t maxVariableDescriptorCount = {}; + + }; + static_assert( sizeof( DescriptorSetVariableDescriptorCountLayoutSupport ) == sizeof( VkDescriptorSetVariableDescriptorCountLayoutSupport ), "struct and wrapper have different 
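// --- Illustrative usage sketch; not part of the vendored header. ---
// A sketch of how DescriptorSetVariableDescriptorCountAllocateInfo is chained
// into a descriptor-set allocation, and how the companion LayoutSupport query
// reports the maximum variable count. `device`, `descriptorPool`, `layout`
// and `layoutInfo` are assumed to exist; the chain-returning overload of
// getDescriptorSetLayoutSupport is the one from vulkan.hpp's enhanced mode.
//
//   uint32_t variableCount = 16;
//   vk::DescriptorSetVariableDescriptorCountAllocateInfo variableInfo( 1, &variableCount );
//
//   vk::DescriptorSetAllocateInfo allocInfo( descriptorPool, 1, &layout );
//   allocInfo.setPNext( &variableInfo );
//   std::vector<vk::DescriptorSet> sets = device.allocateDescriptorSets( allocInfo );
//
//   auto chain = device.getDescriptorSetLayoutSupport<
//       vk::DescriptorSetLayoutSupport,
//       vk::DescriptorSetVariableDescriptorCountLayoutSupport >( layoutInfo );
//   uint32_t maxVariable = chain.get<vk::DescriptorSetVariableDescriptorCountLayoutSupport>()
//                               .maxVariableDescriptorCount;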
size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorSetVariableDescriptorCountLayoutSupport; + }; + using DescriptorSetVariableDescriptorCountLayoutSupportEXT = DescriptorSetVariableDescriptorCountLayoutSupport; + + struct DescriptorUpdateTemplateEntry + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplateEntry(uint32_t dstBinding_ = {}, uint32_t dstArrayElement_ = {}, uint32_t descriptorCount_ = {}, VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, size_t offset_ = {}, size_t stride_ = {}) VULKAN_HPP_NOEXCEPT + : dstBinding( dstBinding_ ), dstArrayElement( dstArrayElement_ ), descriptorCount( descriptorCount_ ), descriptorType( descriptorType_ ), offset( offset_ ), stride( stride_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplateEntry( DescriptorUpdateTemplateEntry const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorUpdateTemplateEntry( VkDescriptorUpdateTemplateEntry const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorUpdateTemplateEntry & operator=( VkDescriptorUpdateTemplateEntry const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorUpdateTemplateEntry & operator=( DescriptorUpdateTemplateEntry const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorUpdateTemplateEntry ) ); + return *this; + } + + DescriptorUpdateTemplateEntry & setDstBinding( uint32_t dstBinding_ ) VULKAN_HPP_NOEXCEPT + { + dstBinding = dstBinding_; + return *this; + } + + DescriptorUpdateTemplateEntry & setDstArrayElement( uint32_t dstArrayElement_ ) VULKAN_HPP_NOEXCEPT + { + dstArrayElement = dstArrayElement_; + return *this; + } + + DescriptorUpdateTemplateEntry & setDescriptorCount( uint32_t descriptorCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = descriptorCount_; + return *this; + } + + DescriptorUpdateTemplateEntry & setDescriptorType( VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ ) VULKAN_HPP_NOEXCEPT + { + descriptorType = descriptorType_; + return *this; + } + + DescriptorUpdateTemplateEntry & setOffset( size_t offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + DescriptorUpdateTemplateEntry & setStride( size_t stride_ ) VULKAN_HPP_NOEXCEPT + { + stride = stride_; + return *this; + } + + + operator VkDescriptorUpdateTemplateEntry const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorUpdateTemplateEntry &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorUpdateTemplateEntry const& ) const = default; +#else + bool operator==( DescriptorUpdateTemplateEntry const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( dstBinding == rhs.dstBinding ) + && ( dstArrayElement == rhs.dstArrayElement ) + && ( descriptorCount == rhs.descriptorCount ) + && ( descriptorType == rhs.descriptorType ) + && ( offset == rhs.offset ) + && ( stride == rhs.stride ); + } + + bool operator!=( DescriptorUpdateTemplateEntry const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t dstBinding = {}; + uint32_t dstArrayElement = {}; + uint32_t descriptorCount = {}; + VULKAN_HPP_NAMESPACE::DescriptorType 
descriptorType = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler; + size_t offset = {}; + size_t stride = {}; + + }; + static_assert( sizeof( DescriptorUpdateTemplateEntry ) == sizeof( VkDescriptorUpdateTemplateEntry ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + using DescriptorUpdateTemplateEntryKHR = DescriptorUpdateTemplateEntry; + + struct DescriptorUpdateTemplateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDescriptorUpdateTemplateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplateCreateInfo(VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlags flags_ = {}, uint32_t descriptorUpdateEntryCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateEntry* pDescriptorUpdateEntries_ = {}, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType templateType_ = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType::eDescriptorSet, VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout_ = {}, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout_ = {}, uint32_t set_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), descriptorUpdateEntryCount( descriptorUpdateEntryCount_ ), pDescriptorUpdateEntries( pDescriptorUpdateEntries_ ), templateType( templateType_ ), descriptorSetLayout( descriptorSetLayout_ ), pipelineBindPoint( pipelineBindPoint_ ), pipelineLayout( pipelineLayout_ ), set( set_ ) + {} + + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplateCreateInfo( DescriptorUpdateTemplateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DescriptorUpdateTemplateCreateInfo( VkDescriptorUpdateTemplateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorUpdateTemplateCreateInfo( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorUpdateEntries_, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType templateType_ = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType::eDescriptorSet, VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout_ = {}, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout_ = {}, uint32_t set_ = {} ) + : flags( flags_ ), descriptorUpdateEntryCount( static_cast( descriptorUpdateEntries_.size() ) ), pDescriptorUpdateEntries( descriptorUpdateEntries_.data() ), templateType( templateType_ ), descriptorSetLayout( descriptorSetLayout_ ), pipelineBindPoint( pipelineBindPoint_ ), pipelineLayout( pipelineLayout_ ), set( set_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DescriptorUpdateTemplateCreateInfo & operator=( VkDescriptorUpdateTemplateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DescriptorUpdateTemplateCreateInfo & operator=( DescriptorUpdateTemplateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DescriptorUpdateTemplateCreateInfo ) ); + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = 
pNext_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setDescriptorUpdateEntryCount( uint32_t descriptorUpdateEntryCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorUpdateEntryCount = descriptorUpdateEntryCount_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setPDescriptorUpdateEntries( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateEntry* pDescriptorUpdateEntries_ ) VULKAN_HPP_NOEXCEPT + { + pDescriptorUpdateEntries = pDescriptorUpdateEntries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DescriptorUpdateTemplateCreateInfo & setDescriptorUpdateEntries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & descriptorUpdateEntries_ ) VULKAN_HPP_NOEXCEPT + { + descriptorUpdateEntryCount = static_cast( descriptorUpdateEntries_.size() ); + pDescriptorUpdateEntries = descriptorUpdateEntries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DescriptorUpdateTemplateCreateInfo & setTemplateType( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType templateType_ ) VULKAN_HPP_NOEXCEPT + { + templateType = templateType_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout_ ) VULKAN_HPP_NOEXCEPT + { + descriptorSetLayout = descriptorSetLayout_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setPipelineBindPoint( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout_ ) VULKAN_HPP_NOEXCEPT + { + pipelineLayout = pipelineLayout_; + return *this; + } + + DescriptorUpdateTemplateCreateInfo & setSet( uint32_t set_ ) VULKAN_HPP_NOEXCEPT + { + set = set_; + return *this; + } + + + operator VkDescriptorUpdateTemplateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDescriptorUpdateTemplateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorUpdateTemplateCreateInfo const& ) const = default; +#else + bool operator==( DescriptorUpdateTemplateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( descriptorUpdateEntryCount == rhs.descriptorUpdateEntryCount ) + && ( pDescriptorUpdateEntries == rhs.pDescriptorUpdateEntries ) + && ( templateType == rhs.templateType ) + && ( descriptorSetLayout == rhs.descriptorSetLayout ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( pipelineLayout == rhs.pipelineLayout ) + && ( set == rhs.set ); + } + + bool operator!=( DescriptorUpdateTemplateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDescriptorUpdateTemplateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateFlags flags = {}; + uint32_t descriptorUpdateEntryCount = {}; + const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateEntry* pDescriptorUpdateEntries = {}; + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType 
templateType = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateType::eDescriptorSet; + VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout = {}; + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics; + VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout = {}; + uint32_t set = {}; + + }; + static_assert( sizeof( DescriptorUpdateTemplateCreateInfo ) == sizeof( VkDescriptorUpdateTemplateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DescriptorUpdateTemplateCreateInfo; + }; + using DescriptorUpdateTemplateCreateInfoKHR = DescriptorUpdateTemplateCreateInfo; + + struct DeviceQueueCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceQueueCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceQueueCreateInfo(VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags_ = {}, uint32_t queueFamilyIndex_ = {}, uint32_t queueCount_ = {}, const float* pQueuePriorities_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), queueFamilyIndex( queueFamilyIndex_ ), queueCount( queueCount_ ), pQueuePriorities( pQueuePriorities_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceQueueCreateInfo( DeviceQueueCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceQueueCreateInfo( VkDeviceQueueCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceQueueCreateInfo( VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags_, uint32_t queueFamilyIndex_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queuePriorities_ ) + : flags( flags_ ), queueFamilyIndex( queueFamilyIndex_ ), queueCount( static_cast( queuePriorities_.size() ) ), pQueuePriorities( queuePriorities_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceQueueCreateInfo & operator=( VkDeviceQueueCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceQueueCreateInfo & operator=( DeviceQueueCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceQueueCreateInfo ) ); + return *this; + } + + DeviceQueueCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceQueueCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DeviceQueueCreateInfo & setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndex = queueFamilyIndex_; + return *this; + } + + DeviceQueueCreateInfo & setQueueCount( uint32_t queueCount_ ) VULKAN_HPP_NOEXCEPT + { + queueCount = queueCount_; + return *this; + } + + DeviceQueueCreateInfo & setPQueuePriorities( const float* pQueuePriorities_ ) VULKAN_HPP_NOEXCEPT + { + pQueuePriorities = pQueuePriorities_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceQueueCreateInfo & setQueuePriorities( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queuePriorities_ ) VULKAN_HPP_NOEXCEPT + { + queueCount = static_cast( queuePriorities_.size() ); + pQueuePriorities = queuePriorities_.data(); + return *this; + } +#endif // 
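// --- Illustrative usage sketch; not part of the vendored header. ---
// Shows the intended pairing of DescriptorUpdateTemplateEntry and
// DescriptorUpdateTemplateCreateInfo: describe once where descriptor data
// lives in a host-side struct, then update sets directly from that struct.
// `device`, `layout`, `set` and `uniformBuffer` are assumed handles; the
// HostData struct is hypothetical.
//
//   struct HostData { vk::DescriptorBufferInfo buffer; };
//
//   vk::DescriptorUpdateTemplateEntry entry(
//       /*dstBinding*/ 0, /*dstArrayElement*/ 0, /*descriptorCount*/ 1,
//       vk::DescriptorType::eUniformBuffer,
//       offsetof( HostData, buffer ), sizeof( HostData ) );
//
//   vk::DescriptorUpdateTemplateCreateInfo templateInfo(
//       {}, 1, &entry, vk::DescriptorUpdateTemplateType::eDescriptorSet, layout );
//   vk::DescriptorUpdateTemplate updateTemplate =
//       device.createDescriptorUpdateTemplate( templateInfo );
//
//   HostData data{ vk::DescriptorBufferInfo( uniformBuffer, 0, VK_WHOLE_SIZE ) };
//   device.updateDescriptorSetWithTemplate( set, updateTemplate, &data );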
!defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDeviceQueueCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceQueueCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceQueueCreateInfo const& ) const = default; +#else + bool operator==( DeviceQueueCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueFamilyIndex == rhs.queueFamilyIndex ) + && ( queueCount == rhs.queueCount ) + && ( pQueuePriorities == rhs.pQueuePriorities ); + } + + bool operator!=( DeviceQueueCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceQueueCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags = {}; + uint32_t queueFamilyIndex = {}; + uint32_t queueCount = {}; + const float* pQueuePriorities = {}; + + }; + static_assert( sizeof( DeviceQueueCreateInfo ) == sizeof( VkDeviceQueueCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DeviceQueueCreateInfo; + }; + + struct PhysicalDeviceFeatures + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFeatures(VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fullDrawIndexUint32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 imageCubeArray_ = {}, VULKAN_HPP_NAMESPACE::Bool32 independentBlend_ = {}, VULKAN_HPP_NAMESPACE::Bool32 geometryShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 tessellationShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sampleRateShading_ = {}, VULKAN_HPP_NAMESPACE::Bool32 dualSrcBlend_ = {}, VULKAN_HPP_NAMESPACE::Bool32 logicOp_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiDrawIndirect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 drawIndirectFirstInstance_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthClamp_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthBiasClamp_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fillModeNonSolid_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthBounds_ = {}, VULKAN_HPP_NAMESPACE::Bool32 wideLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 largePoints_ = {}, VULKAN_HPP_NAMESPACE::Bool32 alphaToOne_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiViewport_ = {}, VULKAN_HPP_NAMESPACE::Bool32 samplerAnisotropy_ = {}, VULKAN_HPP_NAMESPACE::Bool32 textureCompressionETC2_ = {}, VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_LDR_ = {}, VULKAN_HPP_NAMESPACE::Bool32 textureCompressionBC_ = {}, VULKAN_HPP_NAMESPACE::Bool32 occlusionQueryPrecise_ = {}, VULKAN_HPP_NAMESPACE::Bool32 pipelineStatisticsQuery_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vertexPipelineStoresAndAtomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentStoresAndAtomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderTessellationAndGeometryPointSize_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderImageGatherExtended_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageExtendedFormats_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageMultisample_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageReadWithoutFormat_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageWriteWithoutFormat_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayDynamicIndexing_ = {}, 
VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderClipDistance_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderCullDistance_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInt64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInt16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderResourceResidency_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderResourceMinLod_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseBinding_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyBuffer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyImage2D_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyImage3D_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidency2Samples_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidency4Samples_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidency8Samples_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidency16Samples_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyAliased_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variableMultisampleRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 inheritedQueries_ = {}) VULKAN_HPP_NOEXCEPT + : robustBufferAccess( robustBufferAccess_ ), fullDrawIndexUint32( fullDrawIndexUint32_ ), imageCubeArray( imageCubeArray_ ), independentBlend( independentBlend_ ), geometryShader( geometryShader_ ), tessellationShader( tessellationShader_ ), sampleRateShading( sampleRateShading_ ), dualSrcBlend( dualSrcBlend_ ), logicOp( logicOp_ ), multiDrawIndirect( multiDrawIndirect_ ), drawIndirectFirstInstance( drawIndirectFirstInstance_ ), depthClamp( depthClamp_ ), depthBiasClamp( depthBiasClamp_ ), fillModeNonSolid( fillModeNonSolid_ ), depthBounds( depthBounds_ ), wideLines( wideLines_ ), largePoints( largePoints_ ), alphaToOne( alphaToOne_ ), multiViewport( multiViewport_ ), samplerAnisotropy( samplerAnisotropy_ ), textureCompressionETC2( textureCompressionETC2_ ), textureCompressionASTC_LDR( textureCompressionASTC_LDR_ ), textureCompressionBC( textureCompressionBC_ ), occlusionQueryPrecise( occlusionQueryPrecise_ ), pipelineStatisticsQuery( pipelineStatisticsQuery_ ), vertexPipelineStoresAndAtomics( vertexPipelineStoresAndAtomics_ ), fragmentStoresAndAtomics( fragmentStoresAndAtomics_ ), shaderTessellationAndGeometryPointSize( shaderTessellationAndGeometryPointSize_ ), shaderImageGatherExtended( shaderImageGatherExtended_ ), shaderStorageImageExtendedFormats( shaderStorageImageExtendedFormats_ ), shaderStorageImageMultisample( shaderStorageImageMultisample_ ), shaderStorageImageReadWithoutFormat( shaderStorageImageReadWithoutFormat_ ), shaderStorageImageWriteWithoutFormat( shaderStorageImageWriteWithoutFormat_ ), shaderUniformBufferArrayDynamicIndexing( shaderUniformBufferArrayDynamicIndexing_ ), shaderSampledImageArrayDynamicIndexing( shaderSampledImageArrayDynamicIndexing_ ), shaderStorageBufferArrayDynamicIndexing( shaderStorageBufferArrayDynamicIndexing_ ), shaderStorageImageArrayDynamicIndexing( shaderStorageImageArrayDynamicIndexing_ ), shaderClipDistance( shaderClipDistance_ ), shaderCullDistance( shaderCullDistance_ ), shaderFloat64( shaderFloat64_ ), shaderInt64( shaderInt64_ ), shaderInt16( shaderInt16_ ), shaderResourceResidency( shaderResourceResidency_ ), shaderResourceMinLod( shaderResourceMinLod_ ), sparseBinding( sparseBinding_ ), sparseResidencyBuffer( sparseResidencyBuffer_ ), sparseResidencyImage2D( sparseResidencyImage2D_ ), 
sparseResidencyImage3D( sparseResidencyImage3D_ ), sparseResidency2Samples( sparseResidency2Samples_ ), sparseResidency4Samples( sparseResidency4Samples_ ), sparseResidency8Samples( sparseResidency8Samples_ ), sparseResidency16Samples( sparseResidency16Samples_ ), sparseResidencyAliased( sparseResidencyAliased_ ), variableMultisampleRate( variableMultisampleRate_ ), inheritedQueries( inheritedQueries_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFeatures( PhysicalDeviceFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFeatures( VkPhysicalDeviceFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFeatures & operator=( VkPhysicalDeviceFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFeatures & operator=( PhysicalDeviceFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFeatures ) ); + return *this; + } + + PhysicalDeviceFeatures & setRobustBufferAccess( VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccess_ ) VULKAN_HPP_NOEXCEPT + { + robustBufferAccess = robustBufferAccess_; + return *this; + } + + PhysicalDeviceFeatures & setFullDrawIndexUint32( VULKAN_HPP_NAMESPACE::Bool32 fullDrawIndexUint32_ ) VULKAN_HPP_NOEXCEPT + { + fullDrawIndexUint32 = fullDrawIndexUint32_; + return *this; + } + + PhysicalDeviceFeatures & setImageCubeArray( VULKAN_HPP_NAMESPACE::Bool32 imageCubeArray_ ) VULKAN_HPP_NOEXCEPT + { + imageCubeArray = imageCubeArray_; + return *this; + } + + PhysicalDeviceFeatures & setIndependentBlend( VULKAN_HPP_NAMESPACE::Bool32 independentBlend_ ) VULKAN_HPP_NOEXCEPT + { + independentBlend = independentBlend_; + return *this; + } + + PhysicalDeviceFeatures & setGeometryShader( VULKAN_HPP_NAMESPACE::Bool32 geometryShader_ ) VULKAN_HPP_NOEXCEPT + { + geometryShader = geometryShader_; + return *this; + } + + PhysicalDeviceFeatures & setTessellationShader( VULKAN_HPP_NAMESPACE::Bool32 tessellationShader_ ) VULKAN_HPP_NOEXCEPT + { + tessellationShader = tessellationShader_; + return *this; + } + + PhysicalDeviceFeatures & setSampleRateShading( VULKAN_HPP_NAMESPACE::Bool32 sampleRateShading_ ) VULKAN_HPP_NOEXCEPT + { + sampleRateShading = sampleRateShading_; + return *this; + } + + PhysicalDeviceFeatures & setDualSrcBlend( VULKAN_HPP_NAMESPACE::Bool32 dualSrcBlend_ ) VULKAN_HPP_NOEXCEPT + { + dualSrcBlend = dualSrcBlend_; + return *this; + } + + PhysicalDeviceFeatures & setLogicOp( VULKAN_HPP_NAMESPACE::Bool32 logicOp_ ) VULKAN_HPP_NOEXCEPT + { + logicOp = logicOp_; + return *this; + } + + PhysicalDeviceFeatures & setMultiDrawIndirect( VULKAN_HPP_NAMESPACE::Bool32 multiDrawIndirect_ ) VULKAN_HPP_NOEXCEPT + { + multiDrawIndirect = multiDrawIndirect_; + return *this; + } + + PhysicalDeviceFeatures & setDrawIndirectFirstInstance( VULKAN_HPP_NAMESPACE::Bool32 drawIndirectFirstInstance_ ) VULKAN_HPP_NOEXCEPT + { + drawIndirectFirstInstance = drawIndirectFirstInstance_; + return *this; + } + + PhysicalDeviceFeatures & setDepthClamp( VULKAN_HPP_NAMESPACE::Bool32 depthClamp_ ) VULKAN_HPP_NOEXCEPT + { + depthClamp = depthClamp_; + return *this; + } + + PhysicalDeviceFeatures & setDepthBiasClamp( VULKAN_HPP_NAMESPACE::Bool32 depthBiasClamp_ ) VULKAN_HPP_NOEXCEPT + { + depthBiasClamp = depthBiasClamp_; + return *this; + } + + PhysicalDeviceFeatures & setFillModeNonSolid( VULKAN_HPP_NAMESPACE::Bool32 fillModeNonSolid_ ) VULKAN_HPP_NOEXCEPT + { + fillModeNonSolid = 
fillModeNonSolid_; + return *this; + } + + PhysicalDeviceFeatures & setDepthBounds( VULKAN_HPP_NAMESPACE::Bool32 depthBounds_ ) VULKAN_HPP_NOEXCEPT + { + depthBounds = depthBounds_; + return *this; + } + + PhysicalDeviceFeatures & setWideLines( VULKAN_HPP_NAMESPACE::Bool32 wideLines_ ) VULKAN_HPP_NOEXCEPT + { + wideLines = wideLines_; + return *this; + } + + PhysicalDeviceFeatures & setLargePoints( VULKAN_HPP_NAMESPACE::Bool32 largePoints_ ) VULKAN_HPP_NOEXCEPT + { + largePoints = largePoints_; + return *this; + } + + PhysicalDeviceFeatures & setAlphaToOne( VULKAN_HPP_NAMESPACE::Bool32 alphaToOne_ ) VULKAN_HPP_NOEXCEPT + { + alphaToOne = alphaToOne_; + return *this; + } + + PhysicalDeviceFeatures & setMultiViewport( VULKAN_HPP_NAMESPACE::Bool32 multiViewport_ ) VULKAN_HPP_NOEXCEPT + { + multiViewport = multiViewport_; + return *this; + } + + PhysicalDeviceFeatures & setSamplerAnisotropy( VULKAN_HPP_NAMESPACE::Bool32 samplerAnisotropy_ ) VULKAN_HPP_NOEXCEPT + { + samplerAnisotropy = samplerAnisotropy_; + return *this; + } + + PhysicalDeviceFeatures & setTextureCompressionETC2( VULKAN_HPP_NAMESPACE::Bool32 textureCompressionETC2_ ) VULKAN_HPP_NOEXCEPT + { + textureCompressionETC2 = textureCompressionETC2_; + return *this; + } + + PhysicalDeviceFeatures & setTextureCompressionASTC_LDR( VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_LDR_ ) VULKAN_HPP_NOEXCEPT + { + textureCompressionASTC_LDR = textureCompressionASTC_LDR_; + return *this; + } + + PhysicalDeviceFeatures & setTextureCompressionBC( VULKAN_HPP_NAMESPACE::Bool32 textureCompressionBC_ ) VULKAN_HPP_NOEXCEPT + { + textureCompressionBC = textureCompressionBC_; + return *this; + } + + PhysicalDeviceFeatures & setOcclusionQueryPrecise( VULKAN_HPP_NAMESPACE::Bool32 occlusionQueryPrecise_ ) VULKAN_HPP_NOEXCEPT + { + occlusionQueryPrecise = occlusionQueryPrecise_; + return *this; + } + + PhysicalDeviceFeatures & setPipelineStatisticsQuery( VULKAN_HPP_NAMESPACE::Bool32 pipelineStatisticsQuery_ ) VULKAN_HPP_NOEXCEPT + { + pipelineStatisticsQuery = pipelineStatisticsQuery_; + return *this; + } + + PhysicalDeviceFeatures & setVertexPipelineStoresAndAtomics( VULKAN_HPP_NAMESPACE::Bool32 vertexPipelineStoresAndAtomics_ ) VULKAN_HPP_NOEXCEPT + { + vertexPipelineStoresAndAtomics = vertexPipelineStoresAndAtomics_; + return *this; + } + + PhysicalDeviceFeatures & setFragmentStoresAndAtomics( VULKAN_HPP_NAMESPACE::Bool32 fragmentStoresAndAtomics_ ) VULKAN_HPP_NOEXCEPT + { + fragmentStoresAndAtomics = fragmentStoresAndAtomics_; + return *this; + } + + PhysicalDeviceFeatures & setShaderTessellationAndGeometryPointSize( VULKAN_HPP_NAMESPACE::Bool32 shaderTessellationAndGeometryPointSize_ ) VULKAN_HPP_NOEXCEPT + { + shaderTessellationAndGeometryPointSize = shaderTessellationAndGeometryPointSize_; + return *this; + } + + PhysicalDeviceFeatures & setShaderImageGatherExtended( VULKAN_HPP_NAMESPACE::Bool32 shaderImageGatherExtended_ ) VULKAN_HPP_NOEXCEPT + { + shaderImageGatherExtended = shaderImageGatherExtended_; + return *this; + } + + PhysicalDeviceFeatures & setShaderStorageImageExtendedFormats( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageExtendedFormats_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageExtendedFormats = shaderStorageImageExtendedFormats_; + return *this; + } + + PhysicalDeviceFeatures & setShaderStorageImageMultisample( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageMultisample_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageMultisample = shaderStorageImageMultisample_; + return *this; + } + + PhysicalDeviceFeatures & 
setShaderStorageImageReadWithoutFormat( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageReadWithoutFormat_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageReadWithoutFormat = shaderStorageImageReadWithoutFormat_; + return *this; + } + + PhysicalDeviceFeatures & setShaderStorageImageWriteWithoutFormat( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageWriteWithoutFormat_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageWriteWithoutFormat = shaderStorageImageWriteWithoutFormat_; + return *this; + } + + PhysicalDeviceFeatures & setShaderUniformBufferArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformBufferArrayDynamicIndexing = shaderUniformBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures & setShaderSampledImageArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderSampledImageArrayDynamicIndexing = shaderSampledImageArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures & setShaderStorageBufferArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageBufferArrayDynamicIndexing = shaderStorageBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures & setShaderStorageImageArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageArrayDynamicIndexing = shaderStorageImageArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceFeatures & setShaderClipDistance( VULKAN_HPP_NAMESPACE::Bool32 shaderClipDistance_ ) VULKAN_HPP_NOEXCEPT + { + shaderClipDistance = shaderClipDistance_; + return *this; + } + + PhysicalDeviceFeatures & setShaderCullDistance( VULKAN_HPP_NAMESPACE::Bool32 shaderCullDistance_ ) VULKAN_HPP_NOEXCEPT + { + shaderCullDistance = shaderCullDistance_; + return *this; + } + + PhysicalDeviceFeatures & setShaderFloat64( VULKAN_HPP_NAMESPACE::Bool32 shaderFloat64_ ) VULKAN_HPP_NOEXCEPT + { + shaderFloat64 = shaderFloat64_; + return *this; + } + + PhysicalDeviceFeatures & setShaderInt64( VULKAN_HPP_NAMESPACE::Bool32 shaderInt64_ ) VULKAN_HPP_NOEXCEPT + { + shaderInt64 = shaderInt64_; + return *this; + } + + PhysicalDeviceFeatures & setShaderInt16( VULKAN_HPP_NAMESPACE::Bool32 shaderInt16_ ) VULKAN_HPP_NOEXCEPT + { + shaderInt16 = shaderInt16_; + return *this; + } + + PhysicalDeviceFeatures & setShaderResourceResidency( VULKAN_HPP_NAMESPACE::Bool32 shaderResourceResidency_ ) VULKAN_HPP_NOEXCEPT + { + shaderResourceResidency = shaderResourceResidency_; + return *this; + } + + PhysicalDeviceFeatures & setShaderResourceMinLod( VULKAN_HPP_NAMESPACE::Bool32 shaderResourceMinLod_ ) VULKAN_HPP_NOEXCEPT + { + shaderResourceMinLod = shaderResourceMinLod_; + return *this; + } + + PhysicalDeviceFeatures & setSparseBinding( VULKAN_HPP_NAMESPACE::Bool32 sparseBinding_ ) VULKAN_HPP_NOEXCEPT + { + sparseBinding = sparseBinding_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidencyBuffer( VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyBuffer_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidencyBuffer = sparseResidencyBuffer_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidencyImage2D( VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyImage2D_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidencyImage2D = sparseResidencyImage2D_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidencyImage3D( VULKAN_HPP_NAMESPACE::Bool32 
sparseResidencyImage3D_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidencyImage3D = sparseResidencyImage3D_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidency2Samples( VULKAN_HPP_NAMESPACE::Bool32 sparseResidency2Samples_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidency2Samples = sparseResidency2Samples_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidency4Samples( VULKAN_HPP_NAMESPACE::Bool32 sparseResidency4Samples_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidency4Samples = sparseResidency4Samples_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidency8Samples( VULKAN_HPP_NAMESPACE::Bool32 sparseResidency8Samples_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidency8Samples = sparseResidency8Samples_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidency16Samples( VULKAN_HPP_NAMESPACE::Bool32 sparseResidency16Samples_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidency16Samples = sparseResidency16Samples_; + return *this; + } + + PhysicalDeviceFeatures & setSparseResidencyAliased( VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyAliased_ ) VULKAN_HPP_NOEXCEPT + { + sparseResidencyAliased = sparseResidencyAliased_; + return *this; + } + + PhysicalDeviceFeatures & setVariableMultisampleRate( VULKAN_HPP_NAMESPACE::Bool32 variableMultisampleRate_ ) VULKAN_HPP_NOEXCEPT + { + variableMultisampleRate = variableMultisampleRate_; + return *this; + } + + PhysicalDeviceFeatures & setInheritedQueries( VULKAN_HPP_NAMESPACE::Bool32 inheritedQueries_ ) VULKAN_HPP_NOEXCEPT + { + inheritedQueries = inheritedQueries_; + return *this; + } + + + operator VkPhysicalDeviceFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( robustBufferAccess == rhs.robustBufferAccess ) + && ( fullDrawIndexUint32 == rhs.fullDrawIndexUint32 ) + && ( imageCubeArray == rhs.imageCubeArray ) + && ( independentBlend == rhs.independentBlend ) + && ( geometryShader == rhs.geometryShader ) + && ( tessellationShader == rhs.tessellationShader ) + && ( sampleRateShading == rhs.sampleRateShading ) + && ( dualSrcBlend == rhs.dualSrcBlend ) + && ( logicOp == rhs.logicOp ) + && ( multiDrawIndirect == rhs.multiDrawIndirect ) + && ( drawIndirectFirstInstance == rhs.drawIndirectFirstInstance ) + && ( depthClamp == rhs.depthClamp ) + && ( depthBiasClamp == rhs.depthBiasClamp ) + && ( fillModeNonSolid == rhs.fillModeNonSolid ) + && ( depthBounds == rhs.depthBounds ) + && ( wideLines == rhs.wideLines ) + && ( largePoints == rhs.largePoints ) + && ( alphaToOne == rhs.alphaToOne ) + && ( multiViewport == rhs.multiViewport ) + && ( samplerAnisotropy == rhs.samplerAnisotropy ) + && ( textureCompressionETC2 == rhs.textureCompressionETC2 ) + && ( textureCompressionASTC_LDR == rhs.textureCompressionASTC_LDR ) + && ( textureCompressionBC == rhs.textureCompressionBC ) + && ( occlusionQueryPrecise == rhs.occlusionQueryPrecise ) + && ( pipelineStatisticsQuery == rhs.pipelineStatisticsQuery ) + && ( vertexPipelineStoresAndAtomics == rhs.vertexPipelineStoresAndAtomics ) + && ( fragmentStoresAndAtomics == rhs.fragmentStoresAndAtomics ) + && ( shaderTessellationAndGeometryPointSize == rhs.shaderTessellationAndGeometryPointSize ) + && ( shaderImageGatherExtended == 
rhs.shaderImageGatherExtended ) + && ( shaderStorageImageExtendedFormats == rhs.shaderStorageImageExtendedFormats ) + && ( shaderStorageImageMultisample == rhs.shaderStorageImageMultisample ) + && ( shaderStorageImageReadWithoutFormat == rhs.shaderStorageImageReadWithoutFormat ) + && ( shaderStorageImageWriteWithoutFormat == rhs.shaderStorageImageWriteWithoutFormat ) + && ( shaderUniformBufferArrayDynamicIndexing == rhs.shaderUniformBufferArrayDynamicIndexing ) + && ( shaderSampledImageArrayDynamicIndexing == rhs.shaderSampledImageArrayDynamicIndexing ) + && ( shaderStorageBufferArrayDynamicIndexing == rhs.shaderStorageBufferArrayDynamicIndexing ) + && ( shaderStorageImageArrayDynamicIndexing == rhs.shaderStorageImageArrayDynamicIndexing ) + && ( shaderClipDistance == rhs.shaderClipDistance ) + && ( shaderCullDistance == rhs.shaderCullDistance ) + && ( shaderFloat64 == rhs.shaderFloat64 ) + && ( shaderInt64 == rhs.shaderInt64 ) + && ( shaderInt16 == rhs.shaderInt16 ) + && ( shaderResourceResidency == rhs.shaderResourceResidency ) + && ( shaderResourceMinLod == rhs.shaderResourceMinLod ) + && ( sparseBinding == rhs.sparseBinding ) + && ( sparseResidencyBuffer == rhs.sparseResidencyBuffer ) + && ( sparseResidencyImage2D == rhs.sparseResidencyImage2D ) + && ( sparseResidencyImage3D == rhs.sparseResidencyImage3D ) + && ( sparseResidency2Samples == rhs.sparseResidency2Samples ) + && ( sparseResidency4Samples == rhs.sparseResidency4Samples ) + && ( sparseResidency8Samples == rhs.sparseResidency8Samples ) + && ( sparseResidency16Samples == rhs.sparseResidency16Samples ) + && ( sparseResidencyAliased == rhs.sparseResidencyAliased ) + && ( variableMultisampleRate == rhs.variableMultisampleRate ) + && ( inheritedQueries == rhs.inheritedQueries ); + } + + bool operator!=( PhysicalDeviceFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 fullDrawIndexUint32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 imageCubeArray = {}; + VULKAN_HPP_NAMESPACE::Bool32 independentBlend = {}; + VULKAN_HPP_NAMESPACE::Bool32 geometryShader = {}; + VULKAN_HPP_NAMESPACE::Bool32 tessellationShader = {}; + VULKAN_HPP_NAMESPACE::Bool32 sampleRateShading = {}; + VULKAN_HPP_NAMESPACE::Bool32 dualSrcBlend = {}; + VULKAN_HPP_NAMESPACE::Bool32 logicOp = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiDrawIndirect = {}; + VULKAN_HPP_NAMESPACE::Bool32 drawIndirectFirstInstance = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthClamp = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthBiasClamp = {}; + VULKAN_HPP_NAMESPACE::Bool32 fillModeNonSolid = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthBounds = {}; + VULKAN_HPP_NAMESPACE::Bool32 wideLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 largePoints = {}; + VULKAN_HPP_NAMESPACE::Bool32 alphaToOne = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiViewport = {}; + VULKAN_HPP_NAMESPACE::Bool32 samplerAnisotropy = {}; + VULKAN_HPP_NAMESPACE::Bool32 textureCompressionETC2 = {}; + VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_LDR = {}; + VULKAN_HPP_NAMESPACE::Bool32 textureCompressionBC = {}; + VULKAN_HPP_NAMESPACE::Bool32 occlusionQueryPrecise = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineStatisticsQuery = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexPipelineStoresAndAtomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentStoresAndAtomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderTessellationAndGeometryPointSize = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderImageGatherExtended = {}; + 
VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageExtendedFormats = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageMultisample = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageReadWithoutFormat = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageWriteWithoutFormat = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderClipDistance = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderCullDistance = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInt64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInt16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderResourceResidency = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderResourceMinLod = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseBinding = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyBuffer = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyImage2D = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyImage3D = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidency2Samples = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidency4Samples = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidency8Samples = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidency16Samples = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseResidencyAliased = {}; + VULKAN_HPP_NAMESPACE::Bool32 variableMultisampleRate = {}; + VULKAN_HPP_NAMESPACE::Bool32 inheritedQueries = {}; + + }; + static_assert( sizeof( PhysicalDeviceFeatures ) == sizeof( VkPhysicalDeviceFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
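// --- Illustrative usage sketch; not part of the vendored header. ---
// PhysicalDeviceFeatures is usually filled by querying the physical device and
// then narrowed to just the features the application needs before device
// creation; `physicalDevice` is an assumed vk::PhysicalDevice handle.
//
//   vk::PhysicalDeviceFeatures supported = physicalDevice.getFeatures();
//
//   vk::PhysicalDeviceFeatures enabled;   // every member defaults to VK_FALSE
//   enabled.setSamplerAnisotropy( supported.samplerAnisotropy )
//          .setFillModeNonSolid( supported.fillModeNonSolid );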
); + + struct DeviceCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceCreateInfo(VULKAN_HPP_NAMESPACE::DeviceCreateFlags flags_ = {}, uint32_t queueCreateInfoCount_ = {}, const VULKAN_HPP_NAMESPACE::DeviceQueueCreateInfo* pQueueCreateInfos_ = {}, uint32_t enabledLayerCount_ = {}, const char* const * ppEnabledLayerNames_ = {}, uint32_t enabledExtensionCount_ = {}, const char* const * ppEnabledExtensionNames_ = {}, const VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pEnabledFeatures_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), queueCreateInfoCount( queueCreateInfoCount_ ), pQueueCreateInfos( pQueueCreateInfos_ ), enabledLayerCount( enabledLayerCount_ ), ppEnabledLayerNames( ppEnabledLayerNames_ ), enabledExtensionCount( enabledExtensionCount_ ), ppEnabledExtensionNames( ppEnabledExtensionNames_ ), pEnabledFeatures( pEnabledFeatures_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceCreateInfo( DeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceCreateInfo( VkDeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceCreateInfo( VULKAN_HPP_NAMESPACE::DeviceCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueCreateInfos_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledLayerNames_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledExtensionNames_ = {}, const VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pEnabledFeatures_ = {} ) + : flags( flags_ ), queueCreateInfoCount( static_cast( queueCreateInfos_.size() ) ), pQueueCreateInfos( queueCreateInfos_.data() ), enabledLayerCount( static_cast( pEnabledLayerNames_.size() ) ), ppEnabledLayerNames( pEnabledLayerNames_.data() ), enabledExtensionCount( static_cast( pEnabledExtensionNames_.size() ) ), ppEnabledExtensionNames( pEnabledExtensionNames_.data() ), pEnabledFeatures( pEnabledFeatures_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceCreateInfo & operator=( VkDeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceCreateInfo & operator=( DeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceCreateInfo ) ); + return *this; + } + + DeviceCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::DeviceCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DeviceCreateInfo & setQueueCreateInfoCount( uint32_t queueCreateInfoCount_ ) VULKAN_HPP_NOEXCEPT + { + queueCreateInfoCount = queueCreateInfoCount_; + return *this; + } + + DeviceCreateInfo & setPQueueCreateInfos( const VULKAN_HPP_NAMESPACE::DeviceQueueCreateInfo* pQueueCreateInfos_ ) VULKAN_HPP_NOEXCEPT + { + pQueueCreateInfos = pQueueCreateInfos_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceCreateInfo & setQueueCreateInfos( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueCreateInfos_ ) VULKAN_HPP_NOEXCEPT + { + queueCreateInfoCount = static_cast( queueCreateInfos_.size() ); + pQueueCreateInfos = queueCreateInfos_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + 
+ DeviceCreateInfo & setEnabledLayerCount( uint32_t enabledLayerCount_ ) VULKAN_HPP_NOEXCEPT + { + enabledLayerCount = enabledLayerCount_; + return *this; + } + + DeviceCreateInfo & setPpEnabledLayerNames( const char* const * ppEnabledLayerNames_ ) VULKAN_HPP_NOEXCEPT + { + ppEnabledLayerNames = ppEnabledLayerNames_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceCreateInfo & setPEnabledLayerNames( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledLayerNames_ ) VULKAN_HPP_NOEXCEPT + { + enabledLayerCount = static_cast( pEnabledLayerNames_.size() ); + ppEnabledLayerNames = pEnabledLayerNames_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DeviceCreateInfo & setEnabledExtensionCount( uint32_t enabledExtensionCount_ ) VULKAN_HPP_NOEXCEPT + { + enabledExtensionCount = enabledExtensionCount_; + return *this; + } + + DeviceCreateInfo & setPpEnabledExtensionNames( const char* const * ppEnabledExtensionNames_ ) VULKAN_HPP_NOEXCEPT + { + ppEnabledExtensionNames = ppEnabledExtensionNames_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceCreateInfo & setPEnabledExtensionNames( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledExtensionNames_ ) VULKAN_HPP_NOEXCEPT + { + enabledExtensionCount = static_cast( pEnabledExtensionNames_.size() ); + ppEnabledExtensionNames = pEnabledExtensionNames_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DeviceCreateInfo & setPEnabledFeatures( const VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pEnabledFeatures_ ) VULKAN_HPP_NOEXCEPT + { + pEnabledFeatures = pEnabledFeatures_; + return *this; + } + + + operator VkDeviceCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceCreateInfo const& ) const = default; +#else + bool operator==( DeviceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queueCreateInfoCount == rhs.queueCreateInfoCount ) + && ( pQueueCreateInfos == rhs.pQueueCreateInfos ) + && ( enabledLayerCount == rhs.enabledLayerCount ) + && ( ppEnabledLayerNames == rhs.ppEnabledLayerNames ) + && ( enabledExtensionCount == rhs.enabledExtensionCount ) + && ( ppEnabledExtensionNames == rhs.ppEnabledExtensionNames ) + && ( pEnabledFeatures == rhs.pEnabledFeatures ); + } + + bool operator!=( DeviceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceCreateFlags flags = {}; + uint32_t queueCreateInfoCount = {}; + const VULKAN_HPP_NAMESPACE::DeviceQueueCreateInfo* pQueueCreateInfos = {}; + uint32_t enabledLayerCount = {}; + const char* const * ppEnabledLayerNames = {}; + uint32_t enabledExtensionCount = {}; + const char* const * ppEnabledExtensionNames = {}; + const VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pEnabledFeatures = {}; + + }; + static_assert( sizeof( DeviceCreateInfo ) == sizeof( VkDeviceCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceCreateInfo; + }; + + struct DeviceDeviceMemoryReportCreateInfoEXT + { + static const bool allowDuplicate = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceDeviceMemoryReportCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceDeviceMemoryReportCreateInfoEXT(VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags_ = {}, PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback_ = {}, void* pUserData_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pfnUserCallback( pfnUserCallback_ ), pUserData( pUserData_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceDeviceMemoryReportCreateInfoEXT( DeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceDeviceMemoryReportCreateInfoEXT( VkDeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceDeviceMemoryReportCreateInfoEXT & operator=( VkDeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & operator=( DeviceDeviceMemoryReportCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceDeviceMemoryReportCreateInfoEXT ) ); + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setPfnUserCallback( PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback_ ) VULKAN_HPP_NOEXCEPT + { + pfnUserCallback = pfnUserCallback_; + return *this; + } + + DeviceDeviceMemoryReportCreateInfoEXT & setPUserData( void* pUserData_ ) VULKAN_HPP_NOEXCEPT + { + pUserData = pUserData_; + return *this; + } + + + operator VkDeviceDeviceMemoryReportCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceDeviceMemoryReportCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceDeviceMemoryReportCreateInfoEXT const& ) const = default; +#else + bool operator==( DeviceDeviceMemoryReportCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pfnUserCallback == rhs.pfnUserCallback ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( DeviceDeviceMemoryReportCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceDeviceMemoryReportCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags = {}; + PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback = {}; + void* pUserData = {}; + + }; + static_assert( sizeof( DeviceDeviceMemoryReportCreateInfoEXT ) == sizeof( VkDeviceDeviceMemoryReportCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
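// --- Illustrative usage sketch; not part of the vendored header. ---
// Ties DeviceQueueCreateInfo, PhysicalDeviceFeatures and DeviceCreateInfo
// together to create the logical device; `physicalDevice`, `queueFamilyIndex`
// and the `enabled` feature struct are assumed to come from selection logic
// elsewhere, and the extension list is only an example.
//
//   float queuePriority = 1.0f;
//   vk::DeviceQueueCreateInfo queueInfo( {}, queueFamilyIndex, 1, &queuePriority );
//
//   const char* extensions[] = { VK_KHR_SWAPCHAIN_EXTENSION_NAME };
//   vk::DeviceCreateInfo deviceInfo( {}, 1, &queueInfo,
//                                    0, nullptr,          // no device layers
//                                    1, extensions,
//                                    &enabled );
//
//   vk::UniqueDevice device = physicalDevice.createDeviceUnique( deviceInfo );
//   vk::Queue graphicsQueue = device->getQueue( queueFamilyIndex, 0 );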
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eDeviceDeviceMemoryReportCreateInfoEXT>
+  {
+    using Type = DeviceDeviceMemoryReportCreateInfoEXT;
+  };
+
+  struct DeviceDiagnosticsConfigCreateInfoNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceDiagnosticsConfigCreateInfoNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigCreateInfoNV(VULKAN_HPP_NAMESPACE::DeviceDiagnosticsConfigFlagsNV flags_ = {}) VULKAN_HPP_NOEXCEPT
+      : flags( flags_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR DeviceDiagnosticsConfigCreateInfoNV( DeviceDiagnosticsConfigCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    DeviceDiagnosticsConfigCreateInfoNV( VkDeviceDiagnosticsConfigCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    DeviceDiagnosticsConfigCreateInfoNV & operator=( VkDeviceDiagnosticsConfigCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DeviceDiagnosticsConfigCreateInfoNV const *>( &rhs );
+      return *this;
+    }
+
+    DeviceDiagnosticsConfigCreateInfoNV & operator=( DeviceDiagnosticsConfigCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( DeviceDiagnosticsConfigCreateInfoNV ) );
+      return *this;
+    }
+
+    DeviceDiagnosticsConfigCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    DeviceDiagnosticsConfigCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::DeviceDiagnosticsConfigFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT
+    {
+      flags = flags_;
+      return *this;
+    }
+
+
+    operator VkDeviceDiagnosticsConfigCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkDeviceDiagnosticsConfigCreateInfoNV*>( this );
+    }
+
+    operator VkDeviceDiagnosticsConfigCreateInfoNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkDeviceDiagnosticsConfigCreateInfoNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DeviceDiagnosticsConfigCreateInfoNV const& ) const = default;
+#else
+    bool operator==( DeviceDiagnosticsConfigCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( flags == rhs.flags );
+    }
+
+    bool operator!=( DeviceDiagnosticsConfigCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceDiagnosticsConfigCreateInfoNV;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::DeviceDiagnosticsConfigFlagsNV flags = {};
+
+  };
+  static_assert( sizeof( DeviceDiagnosticsConfigCreateInfoNV ) == sizeof( VkDeviceDiagnosticsConfigCreateInfoNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<DeviceDiagnosticsConfigCreateInfoNV>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = DeviceDiagnosticsConfigCreateInfoNV; + }; + + struct DeviceEventInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceEventInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceEventInfoEXT(VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT deviceEvent_ = VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT::eDisplayHotplug) VULKAN_HPP_NOEXCEPT + : deviceEvent( deviceEvent_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceEventInfoEXT( DeviceEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceEventInfoEXT( VkDeviceEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceEventInfoEXT & operator=( VkDeviceEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceEventInfoEXT & operator=( DeviceEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceEventInfoEXT ) ); + return *this; + } + + DeviceEventInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceEventInfoEXT & setDeviceEvent( VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT deviceEvent_ ) VULKAN_HPP_NOEXCEPT + { + deviceEvent = deviceEvent_; + return *this; + } + + + operator VkDeviceEventInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceEventInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceEventInfoEXT const& ) const = default; +#else + bool operator==( DeviceEventInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceEvent == rhs.deviceEvent ); + } + + bool operator!=( DeviceEventInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceEventInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT deviceEvent = VULKAN_HPP_NAMESPACE::DeviceEventTypeEXT::eDisplayHotplug; + + }; + static_assert( sizeof( DeviceEventInfoEXT ) == sizeof( VkDeviceEventInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceEventInfoEXT; + }; + + struct DeviceGroupBindSparseInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupBindSparseInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupBindSparseInfo(uint32_t resourceDeviceIndex_ = {}, uint32_t memoryDeviceIndex_ = {}) VULKAN_HPP_NOEXCEPT + : resourceDeviceIndex( resourceDeviceIndex_ ), memoryDeviceIndex( memoryDeviceIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupBindSparseInfo( DeviceGroupBindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupBindSparseInfo( VkDeviceGroupBindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupBindSparseInfo & operator=( VkDeviceGroupBindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupBindSparseInfo & operator=( DeviceGroupBindSparseInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupBindSparseInfo ) ); + return *this; + } + + DeviceGroupBindSparseInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupBindSparseInfo & setResourceDeviceIndex( uint32_t resourceDeviceIndex_ ) VULKAN_HPP_NOEXCEPT + { + resourceDeviceIndex = resourceDeviceIndex_; + return *this; + } + + DeviceGroupBindSparseInfo & setMemoryDeviceIndex( uint32_t memoryDeviceIndex_ ) VULKAN_HPP_NOEXCEPT + { + memoryDeviceIndex = memoryDeviceIndex_; + return *this; + } + + + operator VkDeviceGroupBindSparseInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupBindSparseInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupBindSparseInfo const& ) const = default; +#else + bool operator==( DeviceGroupBindSparseInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( resourceDeviceIndex == rhs.resourceDeviceIndex ) + && ( memoryDeviceIndex == rhs.memoryDeviceIndex ); + } + + bool operator!=( DeviceGroupBindSparseInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupBindSparseInfo; + const void* pNext = {}; + uint32_t resourceDeviceIndex = {}; + uint32_t memoryDeviceIndex = {}; + + }; + static_assert( sizeof( DeviceGroupBindSparseInfo ) == sizeof( VkDeviceGroupBindSparseInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceGroupBindSparseInfo; + }; + using DeviceGroupBindSparseInfoKHR = DeviceGroupBindSparseInfo; + + struct DeviceGroupCommandBufferBeginInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupCommandBufferBeginInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupCommandBufferBeginInfo(uint32_t deviceMask_ = {}) VULKAN_HPP_NOEXCEPT + : deviceMask( deviceMask_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupCommandBufferBeginInfo( DeviceGroupCommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupCommandBufferBeginInfo( VkDeviceGroupCommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupCommandBufferBeginInfo & operator=( VkDeviceGroupCommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupCommandBufferBeginInfo & operator=( DeviceGroupCommandBufferBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupCommandBufferBeginInfo ) ); + return *this; + } + + DeviceGroupCommandBufferBeginInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupCommandBufferBeginInfo & setDeviceMask( uint32_t deviceMask_ ) VULKAN_HPP_NOEXCEPT + { + deviceMask = deviceMask_; + return *this; + } + + + operator VkDeviceGroupCommandBufferBeginInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupCommandBufferBeginInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupCommandBufferBeginInfo const& ) const = default; +#else + bool operator==( DeviceGroupCommandBufferBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceMask == rhs.deviceMask ); + } + + bool operator!=( DeviceGroupCommandBufferBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupCommandBufferBeginInfo; + const void* pNext = {}; + uint32_t deviceMask = {}; + + }; + static_assert( sizeof( DeviceGroupCommandBufferBeginInfo ) == sizeof( VkDeviceGroupCommandBufferBeginInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eDeviceGroupCommandBufferBeginInfo>
+  {
+    using Type = DeviceGroupCommandBufferBeginInfo;
+  };
+  using DeviceGroupCommandBufferBeginInfoKHR = DeviceGroupCommandBufferBeginInfo;
+
+  class DisplayKHR
+  {
+  public:
+    using CType = VkDisplayKHR;
+
+    static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDisplayKHR;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDisplayKHR;
+
+  public:
+    VULKAN_HPP_CONSTEXPR DisplayKHR() VULKAN_HPP_NOEXCEPT
+      : m_displayKHR(VK_NULL_HANDLE)
+    {}
+
+    VULKAN_HPP_CONSTEXPR DisplayKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT
+      : m_displayKHR(VK_NULL_HANDLE)
+    {}
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT DisplayKHR( VkDisplayKHR displayKHR ) VULKAN_HPP_NOEXCEPT
+      : m_displayKHR( displayKHR )
+    {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+    DisplayKHR & operator=(VkDisplayKHR displayKHR) VULKAN_HPP_NOEXCEPT
+    {
+      m_displayKHR = displayKHR;
+      return *this;
+    }
+#endif
+
+    DisplayKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT
+    {
+      m_displayKHR = VK_NULL_HANDLE;
+      return *this;
+    }
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DisplayKHR const& ) const = default;
+#else
+    bool operator==( DisplayKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayKHR == rhs.m_displayKHR;
+    }
+
+    bool operator!=(DisplayKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayKHR != rhs.m_displayKHR;
+    }
+
+    bool operator<(DisplayKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayKHR < rhs.m_displayKHR;
+    }
+#endif
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayKHR() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayKHR;
+    }
+
+    explicit operator bool() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayKHR != VK_NULL_HANDLE;
+    }
+
+    bool operator!() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_displayKHR == VK_NULL_HANDLE;
+    }
+
+  private:
+    VkDisplayKHR m_displayKHR;
+  };
+  static_assert( sizeof( VULKAN_HPP_NAMESPACE::DisplayKHR ) == sizeof( VkDisplayKHR ), "handle and wrapper have different size!" );
+
+  template <>
+  struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DisplayKHR; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DisplayKHR; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DisplayKHR; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct PerformanceConfigurationAcquireInfoINTEL + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceConfigurationAcquireInfoINTEL; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PerformanceConfigurationAcquireInfoINTEL(VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL type_ = VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL::eCommandQueueMetricsDiscoveryActivated) VULKAN_HPP_NOEXCEPT + : type( type_ ) + {} + + VULKAN_HPP_CONSTEXPR PerformanceConfigurationAcquireInfoINTEL( PerformanceConfigurationAcquireInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceConfigurationAcquireInfoINTEL( VkPerformanceConfigurationAcquireInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceConfigurationAcquireInfoINTEL & operator=( VkPerformanceConfigurationAcquireInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceConfigurationAcquireInfoINTEL & operator=( PerformanceConfigurationAcquireInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceConfigurationAcquireInfoINTEL ) ); + return *this; + } + + PerformanceConfigurationAcquireInfoINTEL & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PerformanceConfigurationAcquireInfoINTEL & setType( VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + + operator VkPerformanceConfigurationAcquireInfoINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceConfigurationAcquireInfoINTEL &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceConfigurationAcquireInfoINTEL const& ) const = default; +#else + bool operator==( PerformanceConfigurationAcquireInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( type == rhs.type ); + } + + bool operator!=( PerformanceConfigurationAcquireInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceConfigurationAcquireInfoINTEL; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL type = VULKAN_HPP_NAMESPACE::PerformanceConfigurationTypeINTEL::eCommandQueueMetricsDiscoveryActivated; + + }; + static_assert( sizeof( PerformanceConfigurationAcquireInfoINTEL ) == sizeof( VkPerformanceConfigurationAcquireInfoINTEL ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PerformanceConfigurationAcquireInfoINTEL; + }; + + class PerformanceConfigurationINTEL + { + public: + using CType = VkPerformanceConfigurationINTEL; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePerformanceConfigurationINTEL; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + VULKAN_HPP_CONSTEXPR PerformanceConfigurationINTEL() VULKAN_HPP_NOEXCEPT + : m_performanceConfigurationINTEL(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PerformanceConfigurationINTEL( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_performanceConfigurationINTEL(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PerformanceConfigurationINTEL( VkPerformanceConfigurationINTEL performanceConfigurationINTEL ) VULKAN_HPP_NOEXCEPT + : m_performanceConfigurationINTEL( performanceConfigurationINTEL ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PerformanceConfigurationINTEL & operator=(VkPerformanceConfigurationINTEL performanceConfigurationINTEL) VULKAN_HPP_NOEXCEPT + { + m_performanceConfigurationINTEL = performanceConfigurationINTEL; + return *this; + } +#endif + + PerformanceConfigurationINTEL & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_performanceConfigurationINTEL = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceConfigurationINTEL const& ) const = default; +#else + bool operator==( PerformanceConfigurationINTEL const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL == rhs.m_performanceConfigurationINTEL; + } + + bool operator!=(PerformanceConfigurationINTEL const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL != rhs.m_performanceConfigurationINTEL; + } + + bool operator<(PerformanceConfigurationINTEL const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL < rhs.m_performanceConfigurationINTEL; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPerformanceConfigurationINTEL() const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_performanceConfigurationINTEL == VK_NULL_HANDLE; + } + + private: + VkPerformanceConfigurationINTEL m_performanceConfigurationINTEL; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL ) == sizeof( VkPerformanceConfigurationINTEL ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL; + }; + + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class QueryPool + { + public: + using CType = VkQueryPool; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eQueryPool; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueryPool; + + public: + VULKAN_HPP_CONSTEXPR QueryPool() VULKAN_HPP_NOEXCEPT + : m_queryPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR QueryPool( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_queryPool(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT QueryPool( VkQueryPool queryPool ) VULKAN_HPP_NOEXCEPT + : m_queryPool( queryPool ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + QueryPool & operator=(VkQueryPool queryPool) VULKAN_HPP_NOEXCEPT + { + m_queryPool = queryPool; + return *this; + } +#endif + + QueryPool & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_queryPool = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( QueryPool const& ) const = default; +#else + bool operator==( QueryPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_queryPool == rhs.m_queryPool; + } + + bool operator!=(QueryPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_queryPool != rhs.m_queryPool; + } + + bool operator<(QueryPool const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_queryPool < rhs.m_queryPool; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueryPool() const VULKAN_HPP_NOEXCEPT + { + return m_queryPool; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_queryPool != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_queryPool == VK_NULL_HANDLE; + } + + private: + VkQueryPool m_queryPool; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::QueryPool ) == sizeof( VkQueryPool ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::QueryPool; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::QueryPool; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::QueryPool; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct RenderPassBeginInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassBeginInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 RenderPassBeginInfo(VULKAN_HPP_NAMESPACE::RenderPass renderPass_ = {}, VULKAN_HPP_NAMESPACE::Framebuffer framebuffer_ = {}, VULKAN_HPP_NAMESPACE::Rect2D renderArea_ = {}, uint32_t clearValueCount_ = {}, const VULKAN_HPP_NAMESPACE::ClearValue* pClearValues_ = {}) VULKAN_HPP_NOEXCEPT + : renderPass( renderPass_ ), framebuffer( framebuffer_ ), renderArea( renderArea_ ), clearValueCount( clearValueCount_ ), pClearValues( pClearValues_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 RenderPassBeginInfo( RenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassBeginInfo( VkRenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassBeginInfo( VULKAN_HPP_NAMESPACE::RenderPass renderPass_, VULKAN_HPP_NAMESPACE::Framebuffer framebuffer_, VULKAN_HPP_NAMESPACE::Rect2D renderArea_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & clearValues_ ) + : renderPass( renderPass_ ), framebuffer( framebuffer_ ), renderArea( renderArea_ ), clearValueCount( static_cast( clearValues_.size() ) ), pClearValues( clearValues_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassBeginInfo & operator=( VkRenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassBeginInfo & operator=( RenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassBeginInfo ) ); + return *this; + } + + RenderPassBeginInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassBeginInfo & setRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass_ ) VULKAN_HPP_NOEXCEPT + { + renderPass = renderPass_; + return *this; + } + + RenderPassBeginInfo & setFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer_ ) VULKAN_HPP_NOEXCEPT + { + framebuffer = framebuffer_; + return *this; + } + + RenderPassBeginInfo & setRenderArea( VULKAN_HPP_NAMESPACE::Rect2D const & renderArea_ ) VULKAN_HPP_NOEXCEPT + { + renderArea = renderArea_; + return *this; + } + + RenderPassBeginInfo & setClearValueCount( uint32_t clearValueCount_ ) VULKAN_HPP_NOEXCEPT + { + clearValueCount = clearValueCount_; + return *this; + } + + RenderPassBeginInfo & setPClearValues( const VULKAN_HPP_NAMESPACE::ClearValue* pClearValues_ ) VULKAN_HPP_NOEXCEPT + { + pClearValues = pClearValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassBeginInfo & setClearValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & clearValues_ ) VULKAN_HPP_NOEXCEPT + { + clearValueCount = static_cast( clearValues_.size() ); + pClearValues = clearValues_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassBeginInfo 
const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassBeginInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassBeginInfo const& ) const = default; +#else + bool operator==( RenderPassBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( renderPass == rhs.renderPass ) + && ( framebuffer == rhs.framebuffer ) + && ( renderArea == rhs.renderArea ) + && ( clearValueCount == rhs.clearValueCount ) + && ( pClearValues == rhs.pClearValues ); + } + + bool operator!=( RenderPassBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassBeginInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RenderPass renderPass = {}; + VULKAN_HPP_NAMESPACE::Framebuffer framebuffer = {}; + VULKAN_HPP_NAMESPACE::Rect2D renderArea = {}; + uint32_t clearValueCount = {}; + const VULKAN_HPP_NAMESPACE::ClearValue* pClearValues = {}; + + }; + static_assert( sizeof( RenderPassBeginInfo ) == sizeof( VkRenderPassBeginInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RenderPassBeginInfo; + }; + + struct SubpassBeginInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubpassBeginInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubpassBeginInfo(VULKAN_HPP_NAMESPACE::SubpassContents contents_ = VULKAN_HPP_NAMESPACE::SubpassContents::eInline) VULKAN_HPP_NOEXCEPT + : contents( contents_ ) + {} + + VULKAN_HPP_CONSTEXPR SubpassBeginInfo( SubpassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubpassBeginInfo( VkSubpassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubpassBeginInfo & operator=( VkSubpassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SubpassBeginInfo & operator=( SubpassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SubpassBeginInfo ) ); + return *this; + } + + SubpassBeginInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SubpassBeginInfo & setContents( VULKAN_HPP_NAMESPACE::SubpassContents contents_ ) VULKAN_HPP_NOEXCEPT + { + contents = contents_; + return *this; + } + + + operator VkSubpassBeginInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSubpassBeginInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubpassBeginInfo const& ) const = default; +#else + bool operator==( SubpassBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( contents == rhs.contents ); + } + + bool operator!=( SubpassBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubpassBeginInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SubpassContents 
contents = VULKAN_HPP_NAMESPACE::SubpassContents::eInline; + + }; + static_assert( sizeof( SubpassBeginInfo ) == sizeof( VkSubpassBeginInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SubpassBeginInfo; + }; + using SubpassBeginInfoKHR = SubpassBeginInfo; + + struct ImageBlit + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 ImageBlit(VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource_ = {}, std::array const& srcOffsets_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, std::array const& dstOffsets_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubresource( srcSubresource_ ), srcOffsets( srcOffsets_ ), dstSubresource( dstSubresource_ ), dstOffsets( dstOffsets_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 ImageBlit( ImageBlit const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageBlit( VkImageBlit const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageBlit & operator=( VkImageBlit const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageBlit & operator=( ImageBlit const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageBlit ) ); + return *this; + } + + ImageBlit & setSrcSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & srcSubresource_ ) VULKAN_HPP_NOEXCEPT + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageBlit & setSrcOffsets( std::array const & srcOffsets_ ) VULKAN_HPP_NOEXCEPT + { + srcOffsets = srcOffsets_; + return *this; + } + + ImageBlit & setDstSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & dstSubresource_ ) VULKAN_HPP_NOEXCEPT + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageBlit & setDstOffsets( std::array const & dstOffsets_ ) VULKAN_HPP_NOEXCEPT + { + dstOffsets = dstOffsets_; + return *this; + } + + + operator VkImageBlit const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageBlit &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageBlit const& ) const = default; +#else + bool operator==( ImageBlit const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( srcSubresource == rhs.srcSubresource ) + && ( srcOffsets == rhs.srcOffsets ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffsets == rhs.dstOffsets ); + } + + bool operator!=( ImageBlit const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D srcOffsets = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D dstOffsets = {}; + + }; + static_assert( sizeof( ImageBlit ) == sizeof( VkImageBlit ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ImageSubresourceRange + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageSubresourceRange(VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, uint32_t baseMipLevel_ = {}, uint32_t levelCount_ = {}, uint32_t baseArrayLayer_ = {}, uint32_t layerCount_ = {}) VULKAN_HPP_NOEXCEPT + : aspectMask( aspectMask_ ), baseMipLevel( baseMipLevel_ ), levelCount( levelCount_ ), baseArrayLayer( baseArrayLayer_ ), layerCount( layerCount_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageSubresourceRange( ImageSubresourceRange const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageSubresourceRange( VkImageSubresourceRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageSubresourceRange & operator=( VkImageSubresourceRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageSubresourceRange & operator=( ImageSubresourceRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageSubresourceRange ) ); + return *this; + } + + ImageSubresourceRange & setAspectMask( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ ) VULKAN_HPP_NOEXCEPT + { + aspectMask = aspectMask_; + return *this; + } + + ImageSubresourceRange & setBaseMipLevel( uint32_t baseMipLevel_ ) VULKAN_HPP_NOEXCEPT + { + baseMipLevel = baseMipLevel_; + return *this; + } + + ImageSubresourceRange & setLevelCount( uint32_t levelCount_ ) VULKAN_HPP_NOEXCEPT + { + levelCount = levelCount_; + return *this; + } + + ImageSubresourceRange & setBaseArrayLayer( uint32_t baseArrayLayer_ ) VULKAN_HPP_NOEXCEPT + { + baseArrayLayer = baseArrayLayer_; + return *this; + } + + ImageSubresourceRange & setLayerCount( uint32_t layerCount_ ) VULKAN_HPP_NOEXCEPT + { + layerCount = layerCount_; + return *this; + } + + + operator VkImageSubresourceRange const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageSubresourceRange &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageSubresourceRange const& ) const = default; +#else + bool operator==( ImageSubresourceRange const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( aspectMask == rhs.aspectMask ) + && ( baseMipLevel == rhs.baseMipLevel ) + && ( levelCount == rhs.levelCount ) + && ( baseArrayLayer == rhs.baseArrayLayer ) + && ( layerCount == rhs.layerCount ); + } + + bool operator!=( ImageSubresourceRange const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {}; + uint32_t baseMipLevel = {}; + uint32_t levelCount = {}; + uint32_t baseArrayLayer = {}; + uint32_t layerCount = {}; + + }; + static_assert( sizeof( ImageSubresourceRange ) == sizeof( VkImageSubresourceRange ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ImageCopy + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageCopy(VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D srcOffset_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubresource( srcSubresource_ ), srcOffset( srcOffset_ ), dstSubresource( dstSubresource_ ), dstOffset( dstOffset_ ), extent( extent_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageCopy( ImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageCopy( VkImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageCopy & operator=( VkImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageCopy & operator=( ImageCopy const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageCopy ) ); + return *this; + } + + ImageCopy & setSrcSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & srcSubresource_ ) VULKAN_HPP_NOEXCEPT + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageCopy & setSrcOffset( VULKAN_HPP_NAMESPACE::Offset3D const & srcOffset_ ) VULKAN_HPP_NOEXCEPT + { + srcOffset = srcOffset_; + return *this; + } + + ImageCopy & setDstSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & dstSubresource_ ) VULKAN_HPP_NOEXCEPT + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageCopy & setDstOffset( VULKAN_HPP_NAMESPACE::Offset3D const & dstOffset_ ) VULKAN_HPP_NOEXCEPT + { + dstOffset = dstOffset_; + return *this; + } + + ImageCopy & setExtent( VULKAN_HPP_NAMESPACE::Extent3D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + + operator VkImageCopy const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageCopy &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageCopy const& ) const = default; +#else + bool operator==( ImageCopy const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( srcSubresource == rhs.srcSubresource ) + && ( srcOffset == rhs.srcOffset ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffset == rhs.dstOffset ) + && ( extent == rhs.extent ); + } + + bool operator!=( ImageCopy const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D srcOffset = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D dstOffset = {}; + VULKAN_HPP_NAMESPACE::Extent3D extent = {}; + + }; + static_assert( sizeof( ImageCopy ) == sizeof( VkImageCopy ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct SubpassEndInfo
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubpassEndInfo;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR SubpassEndInfo() VULKAN_HPP_NOEXCEPT
+
+    {}
+
+    VULKAN_HPP_CONSTEXPR SubpassEndInfo( SubpassEndInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SubpassEndInfo( VkSubpassEndInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SubpassEndInfo & operator=( VkSubpassEndInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SubpassEndInfo const *>( &rhs );
+      return *this;
+    }
+
+    SubpassEndInfo & operator=( SubpassEndInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( SubpassEndInfo ) );
+      return *this;
+    }
+
+    SubpassEndInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+
+    operator VkSubpassEndInfo const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSubpassEndInfo*>( this );
+    }
+
+    operator VkSubpassEndInfo &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSubpassEndInfo*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( SubpassEndInfo const& ) const = default;
+#else
+    bool operator==( SubpassEndInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext );
+    }
+
+    bool operator!=( SubpassEndInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubpassEndInfo;
+    const void* pNext = {};
+
+  };
+  static_assert( sizeof( SubpassEndInfo ) == sizeof( VkSubpassEndInfo ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<SubpassEndInfo>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = SubpassEndInfo; + }; + using SubpassEndInfoKHR = SubpassEndInfo; + + class IndirectCommandsLayoutNV + { + public: + using CType = VkIndirectCommandsLayoutNV; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eIndirectCommandsLayoutNV; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutNV() VULKAN_HPP_NOEXCEPT + : m_indirectCommandsLayoutNV(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutNV( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_indirectCommandsLayoutNV(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT IndirectCommandsLayoutNV( VkIndirectCommandsLayoutNV indirectCommandsLayoutNV ) VULKAN_HPP_NOEXCEPT + : m_indirectCommandsLayoutNV( indirectCommandsLayoutNV ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + IndirectCommandsLayoutNV & operator=(VkIndirectCommandsLayoutNV indirectCommandsLayoutNV) VULKAN_HPP_NOEXCEPT + { + m_indirectCommandsLayoutNV = indirectCommandsLayoutNV; + return *this; + } +#endif + + IndirectCommandsLayoutNV & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_indirectCommandsLayoutNV = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( IndirectCommandsLayoutNV const& ) const = default; +#else + bool operator==( IndirectCommandsLayoutNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV == rhs.m_indirectCommandsLayoutNV; + } + + bool operator!=(IndirectCommandsLayoutNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV != rhs.m_indirectCommandsLayoutNV; + } + + bool operator<(IndirectCommandsLayoutNV const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV < rhs.m_indirectCommandsLayoutNV; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkIndirectCommandsLayoutNV() const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_indirectCommandsLayoutNV == VK_NULL_HANDLE; + } + + private: + VkIndirectCommandsLayoutNV m_indirectCommandsLayoutNV; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV ) == sizeof( VkIndirectCommandsLayoutNV ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type<ObjectType::eIndirectCommandsLayoutNV>
+  {
+    using type = VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV;
+  };
+
+  template <>
+  struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eIndirectCommandsLayoutNV>
+  {
+    using Type = VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV;
+  };
+
+
+
+  template <>
+  struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV>
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true;
+  };
+
+  struct IndirectCommandsStreamNV
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR IndirectCommandsStreamNV(VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}) VULKAN_HPP_NOEXCEPT
+      : buffer( buffer_ ), offset( offset_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR IndirectCommandsStreamNV( IndirectCommandsStreamNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    IndirectCommandsStreamNV( VkIndirectCommandsStreamNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    IndirectCommandsStreamNV & operator=( VkIndirectCommandsStreamNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::IndirectCommandsStreamNV const *>( &rhs );
+      return *this;
+    }
+
+    IndirectCommandsStreamNV & operator=( IndirectCommandsStreamNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( IndirectCommandsStreamNV ) );
+      return *this;
+    }
+
+    IndirectCommandsStreamNV & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT
+    {
+      buffer = buffer_;
+      return *this;
+    }
+
+    IndirectCommandsStreamNV & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT
+    {
+      offset = offset_;
+      return *this;
+    }
+
+
+    operator VkIndirectCommandsStreamNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkIndirectCommandsStreamNV*>( this );
+    }
+
+    operator VkIndirectCommandsStreamNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkIndirectCommandsStreamNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( IndirectCommandsStreamNV const& ) const = default;
+#else
+    bool operator==( IndirectCommandsStreamNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( buffer == rhs.buffer )
+          && ( offset == rhs.offset );
+    }
+
+    bool operator!=( IndirectCommandsStreamNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::Buffer buffer = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize offset = {};
+
+  };
+  static_assert( sizeof( IndirectCommandsStreamNV ) == sizeof( VkIndirectCommandsStreamNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<IndirectCommandsStreamNV>::value, "struct wrapper is not a standard layout!"
); + + struct GeneratedCommandsInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGeneratedCommandsInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GeneratedCommandsInfoNV(VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout_ = {}, uint32_t streamCount_ = {}, const VULKAN_HPP_NAMESPACE::IndirectCommandsStreamNV* pStreams_ = {}, uint32_t sequencesCount_ = {}, VULKAN_HPP_NAMESPACE::Buffer preprocessBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize preprocessOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize preprocessSize_ = {}, VULKAN_HPP_NAMESPACE::Buffer sequencesCountBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize sequencesCountOffset_ = {}, VULKAN_HPP_NAMESPACE::Buffer sequencesIndexBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize sequencesIndexOffset_ = {}) VULKAN_HPP_NOEXCEPT + : pipelineBindPoint( pipelineBindPoint_ ), pipeline( pipeline_ ), indirectCommandsLayout( indirectCommandsLayout_ ), streamCount( streamCount_ ), pStreams( pStreams_ ), sequencesCount( sequencesCount_ ), preprocessBuffer( preprocessBuffer_ ), preprocessOffset( preprocessOffset_ ), preprocessSize( preprocessSize_ ), sequencesCountBuffer( sequencesCountBuffer_ ), sequencesCountOffset( sequencesCountOffset_ ), sequencesIndexBuffer( sequencesIndexBuffer_ ), sequencesIndexOffset( sequencesIndexOffset_ ) + {} + + VULKAN_HPP_CONSTEXPR GeneratedCommandsInfoNV( GeneratedCommandsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GeneratedCommandsInfoNV( VkGeneratedCommandsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GeneratedCommandsInfoNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_, VULKAN_HPP_NAMESPACE::Pipeline pipeline_, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & streams_, uint32_t sequencesCount_ = {}, VULKAN_HPP_NAMESPACE::Buffer preprocessBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize preprocessOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize preprocessSize_ = {}, VULKAN_HPP_NAMESPACE::Buffer sequencesCountBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize sequencesCountOffset_ = {}, VULKAN_HPP_NAMESPACE::Buffer sequencesIndexBuffer_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize sequencesIndexOffset_ = {} ) + : pipelineBindPoint( pipelineBindPoint_ ), pipeline( pipeline_ ), indirectCommandsLayout( indirectCommandsLayout_ ), streamCount( static_cast( streams_.size() ) ), pStreams( streams_.data() ), sequencesCount( sequencesCount_ ), preprocessBuffer( preprocessBuffer_ ), preprocessOffset( preprocessOffset_ ), preprocessSize( preprocessSize_ ), sequencesCountBuffer( sequencesCountBuffer_ ), sequencesCountOffset( sequencesCountOffset_ ), sequencesIndexBuffer( sequencesIndexBuffer_ ), sequencesIndexOffset( sequencesIndexOffset_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GeneratedCommandsInfoNV & operator=( VkGeneratedCommandsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GeneratedCommandsInfoNV & operator=( GeneratedCommandsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GeneratedCommandsInfoNV ) 
); + return *this; + } + + GeneratedCommandsInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GeneratedCommandsInfoNV & setPipelineBindPoint( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + GeneratedCommandsInfoNV & setPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ ) VULKAN_HPP_NOEXCEPT + { + pipeline = pipeline_; + return *this; + } + + GeneratedCommandsInfoNV & setIndirectCommandsLayout( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout_ ) VULKAN_HPP_NOEXCEPT + { + indirectCommandsLayout = indirectCommandsLayout_; + return *this; + } + + GeneratedCommandsInfoNV & setStreamCount( uint32_t streamCount_ ) VULKAN_HPP_NOEXCEPT + { + streamCount = streamCount_; + return *this; + } + + GeneratedCommandsInfoNV & setPStreams( const VULKAN_HPP_NAMESPACE::IndirectCommandsStreamNV* pStreams_ ) VULKAN_HPP_NOEXCEPT + { + pStreams = pStreams_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GeneratedCommandsInfoNV & setStreams( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & streams_ ) VULKAN_HPP_NOEXCEPT + { + streamCount = static_cast( streams_.size() ); + pStreams = streams_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + GeneratedCommandsInfoNV & setSequencesCount( uint32_t sequencesCount_ ) VULKAN_HPP_NOEXCEPT + { + sequencesCount = sequencesCount_; + return *this; + } + + GeneratedCommandsInfoNV & setPreprocessBuffer( VULKAN_HPP_NAMESPACE::Buffer preprocessBuffer_ ) VULKAN_HPP_NOEXCEPT + { + preprocessBuffer = preprocessBuffer_; + return *this; + } + + GeneratedCommandsInfoNV & setPreprocessOffset( VULKAN_HPP_NAMESPACE::DeviceSize preprocessOffset_ ) VULKAN_HPP_NOEXCEPT + { + preprocessOffset = preprocessOffset_; + return *this; + } + + GeneratedCommandsInfoNV & setPreprocessSize( VULKAN_HPP_NAMESPACE::DeviceSize preprocessSize_ ) VULKAN_HPP_NOEXCEPT + { + preprocessSize = preprocessSize_; + return *this; + } + + GeneratedCommandsInfoNV & setSequencesCountBuffer( VULKAN_HPP_NAMESPACE::Buffer sequencesCountBuffer_ ) VULKAN_HPP_NOEXCEPT + { + sequencesCountBuffer = sequencesCountBuffer_; + return *this; + } + + GeneratedCommandsInfoNV & setSequencesCountOffset( VULKAN_HPP_NAMESPACE::DeviceSize sequencesCountOffset_ ) VULKAN_HPP_NOEXCEPT + { + sequencesCountOffset = sequencesCountOffset_; + return *this; + } + + GeneratedCommandsInfoNV & setSequencesIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer sequencesIndexBuffer_ ) VULKAN_HPP_NOEXCEPT + { + sequencesIndexBuffer = sequencesIndexBuffer_; + return *this; + } + + GeneratedCommandsInfoNV & setSequencesIndexOffset( VULKAN_HPP_NAMESPACE::DeviceSize sequencesIndexOffset_ ) VULKAN_HPP_NOEXCEPT + { + sequencesIndexOffset = sequencesIndexOffset_; + return *this; + } + + + operator VkGeneratedCommandsInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGeneratedCommandsInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GeneratedCommandsInfoNV const& ) const = default; +#else + bool operator==( GeneratedCommandsInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( pipeline == rhs.pipeline ) + && ( indirectCommandsLayout == rhs.indirectCommandsLayout ) + && ( 
streamCount == rhs.streamCount ) + && ( pStreams == rhs.pStreams ) + && ( sequencesCount == rhs.sequencesCount ) + && ( preprocessBuffer == rhs.preprocessBuffer ) + && ( preprocessOffset == rhs.preprocessOffset ) + && ( preprocessSize == rhs.preprocessSize ) + && ( sequencesCountBuffer == rhs.sequencesCountBuffer ) + && ( sequencesCountOffset == rhs.sequencesCountOffset ) + && ( sequencesIndexBuffer == rhs.sequencesIndexBuffer ) + && ( sequencesIndexOffset == rhs.sequencesIndexOffset ); + } + + bool operator!=( GeneratedCommandsInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGeneratedCommandsInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics; + VULKAN_HPP_NAMESPACE::Pipeline pipeline = {}; + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout = {}; + uint32_t streamCount = {}; + const VULKAN_HPP_NAMESPACE::IndirectCommandsStreamNV* pStreams = {}; + uint32_t sequencesCount = {}; + VULKAN_HPP_NAMESPACE::Buffer preprocessBuffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize preprocessOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize preprocessSize = {}; + VULKAN_HPP_NAMESPACE::Buffer sequencesCountBuffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize sequencesCountOffset = {}; + VULKAN_HPP_NAMESPACE::Buffer sequencesIndexBuffer = {}; + VULKAN_HPP_NAMESPACE::DeviceSize sequencesIndexOffset = {}; + + }; + static_assert( sizeof( GeneratedCommandsInfoNV ) == sizeof( VkGeneratedCommandsInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = GeneratedCommandsInfoNV; + }; + + struct MemoryBarrier + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryBarrier; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryBarrier(VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ = {}) VULKAN_HPP_NOEXCEPT + : srcAccessMask( srcAccessMask_ ), dstAccessMask( dstAccessMask_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryBarrier( MemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryBarrier( VkMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryBarrier & operator=( VkMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryBarrier & operator=( MemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryBarrier ) ); + return *this; + } + + MemoryBarrier & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryBarrier & setSrcAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + srcAccessMask = srcAccessMask_; + return *this; + } + + MemoryBarrier & setDstAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + dstAccessMask = dstAccessMask_; + return *this; + } + + + operator VkMemoryBarrier const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryBarrier &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if 
defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryBarrier const& ) const = default; +#else + bool operator==( MemoryBarrier const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ); + } + + bool operator!=( MemoryBarrier const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryBarrier; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask = {}; + VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask = {}; + + }; + static_assert( sizeof( MemoryBarrier ) == sizeof( VkMemoryBarrier ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = MemoryBarrier; + }; + + struct ImageMemoryBarrier + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageMemoryBarrier; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageMemoryBarrier(VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout oldLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, uint32_t srcQueueFamilyIndex_ = {}, uint32_t dstQueueFamilyIndex_ = {}, VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}) VULKAN_HPP_NOEXCEPT + : srcAccessMask( srcAccessMask_ ), dstAccessMask( dstAccessMask_ ), oldLayout( oldLayout_ ), newLayout( newLayout_ ), srcQueueFamilyIndex( srcQueueFamilyIndex_ ), dstQueueFamilyIndex( dstQueueFamilyIndex_ ), image( image_ ), subresourceRange( subresourceRange_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageMemoryBarrier( ImageMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageMemoryBarrier( VkImageMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageMemoryBarrier & operator=( VkImageMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageMemoryBarrier & operator=( ImageMemoryBarrier const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageMemoryBarrier ) ); + return *this; + } + + ImageMemoryBarrier & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageMemoryBarrier & setSrcAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + srcAccessMask = srcAccessMask_; + return *this; + } + + ImageMemoryBarrier & setDstAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + dstAccessMask = dstAccessMask_; + return *this; + } + + ImageMemoryBarrier & setOldLayout( VULKAN_HPP_NAMESPACE::ImageLayout oldLayout_ ) VULKAN_HPP_NOEXCEPT + { + oldLayout = oldLayout_; + return *this; + } + + ImageMemoryBarrier & setNewLayout( VULKAN_HPP_NAMESPACE::ImageLayout newLayout_ ) VULKAN_HPP_NOEXCEPT + { + newLayout = newLayout_; + return *this; + } + + ImageMemoryBarrier & setSrcQueueFamilyIndex( uint32_t srcQueueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + srcQueueFamilyIndex = 
srcQueueFamilyIndex_; + return *this; + } + + ImageMemoryBarrier & setDstQueueFamilyIndex( uint32_t dstQueueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + dstQueueFamilyIndex = dstQueueFamilyIndex_; + return *this; + } + + ImageMemoryBarrier & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + ImageMemoryBarrier & setSubresourceRange( VULKAN_HPP_NAMESPACE::ImageSubresourceRange const & subresourceRange_ ) VULKAN_HPP_NOEXCEPT + { + subresourceRange = subresourceRange_; + return *this; + } + + + operator VkImageMemoryBarrier const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageMemoryBarrier &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageMemoryBarrier const& ) const = default; +#else + bool operator==( ImageMemoryBarrier const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( oldLayout == rhs.oldLayout ) + && ( newLayout == rhs.newLayout ) + && ( srcQueueFamilyIndex == rhs.srcQueueFamilyIndex ) + && ( dstQueueFamilyIndex == rhs.dstQueueFamilyIndex ) + && ( image == rhs.image ) + && ( subresourceRange == rhs.subresourceRange ); + } + + bool operator!=( ImageMemoryBarrier const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageMemoryBarrier; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask = {}; + VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask = {}; + VULKAN_HPP_NAMESPACE::ImageLayout oldLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::ImageLayout newLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t srcQueueFamilyIndex = {}; + uint32_t dstQueueFamilyIndex = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange = {}; + + }; + static_assert( sizeof( ImageMemoryBarrier ) == sizeof( VkImageMemoryBarrier ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageMemoryBarrier; + }; + + class BufferView + { + public: + using CType = VkBufferView; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eBufferView; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eBufferView; + + public: + VULKAN_HPP_CONSTEXPR BufferView() VULKAN_HPP_NOEXCEPT + : m_bufferView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR BufferView( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_bufferView(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT BufferView( VkBufferView bufferView ) VULKAN_HPP_NOEXCEPT + : m_bufferView( bufferView ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + BufferView & operator=(VkBufferView bufferView) VULKAN_HPP_NOEXCEPT + { + m_bufferView = bufferView; + return *this; + } +#endif + + BufferView & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_bufferView = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( BufferView const& ) const = default; +#else + bool operator==( BufferView const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_bufferView == rhs.m_bufferView; + } + + bool operator!=(BufferView const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_bufferView != rhs.m_bufferView; + } + + bool operator<(BufferView const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_bufferView < rhs.m_bufferView; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkBufferView() const VULKAN_HPP_NOEXCEPT + { + return m_bufferView; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_bufferView != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_bufferView == VK_NULL_HANDLE; + } + + private: + VkBufferView m_bufferView; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::BufferView ) == sizeof( VkBufferView ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::BufferView; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::BufferView; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::BufferView; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct WriteDescriptorSet + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWriteDescriptorSet; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR WriteDescriptorSet(VULKAN_HPP_NAMESPACE::DescriptorSet dstSet_ = {}, uint32_t dstBinding_ = {}, uint32_t dstArrayElement_ = {}, uint32_t descriptorCount_ = {}, VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, const VULKAN_HPP_NAMESPACE::DescriptorImageInfo* pImageInfo_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorBufferInfo* pBufferInfo_ = {}, const VULKAN_HPP_NAMESPACE::BufferView* pTexelBufferView_ = {}) VULKAN_HPP_NOEXCEPT + : dstSet( dstSet_ ), dstBinding( dstBinding_ ), dstArrayElement( dstArrayElement_ ), descriptorCount( descriptorCount_ ), descriptorType( descriptorType_ ), pImageInfo( pImageInfo_ ), pBufferInfo( pBufferInfo_ ), pTexelBufferView( pTexelBufferView_ ) + {} + + VULKAN_HPP_CONSTEXPR WriteDescriptorSet( WriteDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + WriteDescriptorSet( VkWriteDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSet( VULKAN_HPP_NAMESPACE::DescriptorSet dstSet_, uint32_t dstBinding_, uint32_t dstArrayElement_, VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageInfo_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bufferInfo_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & texelBufferView_ = {} ) + : dstSet( dstSet_ ), dstBinding( dstBinding_ ), dstArrayElement( dstArrayElement_ ), descriptorCount( static_cast( !imageInfo_.empty() ? imageInfo_.size() : !bufferInfo_.empty() ? 
bufferInfo_.size() : texelBufferView_.size() ) ), descriptorType( descriptorType_ ), pImageInfo( imageInfo_.data() ), pBufferInfo( bufferInfo_.data() ), pTexelBufferView( texelBufferView_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( ( !imageInfo_.empty() + !bufferInfo_.empty() + !texelBufferView_.empty() ) == 1 ); +#else + if ( ( !imageInfo_.empty() + !bufferInfo_.empty() + !texelBufferView_.empty() ) != 1 ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::WriteDescriptorSet::WriteDescriptorSet: ( !imageInfo_.empty() + !bufferInfo_.empty() + !texelBufferView_.empty() ) != 1" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + WriteDescriptorSet & operator=( VkWriteDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + WriteDescriptorSet & operator=( WriteDescriptorSet const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( WriteDescriptorSet ) ); + return *this; + } + + WriteDescriptorSet & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSet & setDstSet( VULKAN_HPP_NAMESPACE::DescriptorSet dstSet_ ) VULKAN_HPP_NOEXCEPT + { + dstSet = dstSet_; + return *this; + } + + WriteDescriptorSet & setDstBinding( uint32_t dstBinding_ ) VULKAN_HPP_NOEXCEPT + { + dstBinding = dstBinding_; + return *this; + } + + WriteDescriptorSet & setDstArrayElement( uint32_t dstArrayElement_ ) VULKAN_HPP_NOEXCEPT + { + dstArrayElement = dstArrayElement_; + return *this; + } + + WriteDescriptorSet & setDescriptorCount( uint32_t descriptorCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = descriptorCount_; + return *this; + } + + WriteDescriptorSet & setDescriptorType( VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ ) VULKAN_HPP_NOEXCEPT + { + descriptorType = descriptorType_; + return *this; + } + + WriteDescriptorSet & setPImageInfo( const VULKAN_HPP_NAMESPACE::DescriptorImageInfo* pImageInfo_ ) VULKAN_HPP_NOEXCEPT + { + pImageInfo = pImageInfo_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSet & setImageInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageInfo_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = static_cast( imageInfo_.size() ); + pImageInfo = imageInfo_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + WriteDescriptorSet & setPBufferInfo( const VULKAN_HPP_NAMESPACE::DescriptorBufferInfo* pBufferInfo_ ) VULKAN_HPP_NOEXCEPT + { + pBufferInfo = pBufferInfo_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSet & setBufferInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & bufferInfo_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = static_cast( bufferInfo_.size() ); + pBufferInfo = bufferInfo_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + WriteDescriptorSet & setPTexelBufferView( const VULKAN_HPP_NAMESPACE::BufferView* pTexelBufferView_ ) VULKAN_HPP_NOEXCEPT + { + pTexelBufferView = pTexelBufferView_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSet & setTexelBufferView( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & texelBufferView_ ) VULKAN_HPP_NOEXCEPT + { + descriptorCount = static_cast( texelBufferView_.size() ); + pTexelBufferView = texelBufferView_.data(); + return *this; + } +#endif // 
!defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkWriteDescriptorSet const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWriteDescriptorSet &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( WriteDescriptorSet const& ) const = default; +#else + bool operator==( WriteDescriptorSet const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dstSet == rhs.dstSet ) + && ( dstBinding == rhs.dstBinding ) + && ( dstArrayElement == rhs.dstArrayElement ) + && ( descriptorCount == rhs.descriptorCount ) + && ( descriptorType == rhs.descriptorType ) + && ( pImageInfo == rhs.pImageInfo ) + && ( pBufferInfo == rhs.pBufferInfo ) + && ( pTexelBufferView == rhs.pTexelBufferView ); + } + + bool operator!=( WriteDescriptorSet const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWriteDescriptorSet; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DescriptorSet dstSet = {}; + uint32_t dstBinding = {}; + uint32_t dstArrayElement = {}; + uint32_t descriptorCount = {}; + VULKAN_HPP_NAMESPACE::DescriptorType descriptorType = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler; + const VULKAN_HPP_NAMESPACE::DescriptorImageInfo* pImageInfo = {}; + const VULKAN_HPP_NAMESPACE::DescriptorBufferInfo* pBufferInfo = {}; + const VULKAN_HPP_NAMESPACE::BufferView* pTexelBufferView = {}; + + }; + static_assert( sizeof( WriteDescriptorSet ) == sizeof( VkWriteDescriptorSet ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = WriteDescriptorSet; + }; + + class DescriptorUpdateTemplate + { + public: + using CType = VkDescriptorUpdateTemplate; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDescriptorUpdateTemplate; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDescriptorUpdateTemplate; + + public: + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplate() VULKAN_HPP_NOEXCEPT + : m_descriptorUpdateTemplate(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DescriptorUpdateTemplate( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_descriptorUpdateTemplate(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DescriptorUpdateTemplate( VkDescriptorUpdateTemplate descriptorUpdateTemplate ) VULKAN_HPP_NOEXCEPT + : m_descriptorUpdateTemplate( descriptorUpdateTemplate ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DescriptorUpdateTemplate & operator=(VkDescriptorUpdateTemplate descriptorUpdateTemplate) VULKAN_HPP_NOEXCEPT + { + m_descriptorUpdateTemplate = descriptorUpdateTemplate; + return *this; + } +#endif + + DescriptorUpdateTemplate & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_descriptorUpdateTemplate = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DescriptorUpdateTemplate const& ) const = default; +#else + bool operator==( DescriptorUpdateTemplate const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate == rhs.m_descriptorUpdateTemplate; + } + + bool operator!=(DescriptorUpdateTemplate const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate != rhs.m_descriptorUpdateTemplate; + } + + bool operator<(DescriptorUpdateTemplate const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate < rhs.m_descriptorUpdateTemplate; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDescriptorUpdateTemplate() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_descriptorUpdateTemplate == VK_NULL_HANDLE; + } + + private: + VkDescriptorUpdateTemplate m_descriptorUpdateTemplate; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate ) == sizeof( VkDescriptorUpdateTemplate ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + using DescriptorUpdateTemplateKHR = DescriptorUpdateTemplate; + + class Event + { + public: + using CType = VkEvent; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eEvent; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eEvent; + + public: + VULKAN_HPP_CONSTEXPR Event() VULKAN_HPP_NOEXCEPT + : m_event(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Event( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_event(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Event( VkEvent event ) VULKAN_HPP_NOEXCEPT + : m_event( event ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Event & operator=(VkEvent event) VULKAN_HPP_NOEXCEPT + { + m_event = event; + return *this; + } +#endif + + Event & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_event = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Event const& ) const = default; +#else + bool operator==( Event const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_event == rhs.m_event; + } + + bool operator!=(Event const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_event != rhs.m_event; + } + + bool operator<(Event const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_event < rhs.m_event; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkEvent() const VULKAN_HPP_NOEXCEPT + { + return m_event; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_event != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_event == VK_NULL_HANDLE; + } + + private: + VkEvent m_event; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Event ) == sizeof( VkEvent ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Event; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Event; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Event; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct ImageResolve + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageResolve(VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D srcOffset_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubresource( srcSubresource_ ), srcOffset( srcOffset_ ), dstSubresource( dstSubresource_ ), dstOffset( dstOffset_ ), extent( extent_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageResolve( ImageResolve const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageResolve( VkImageResolve const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageResolve & operator=( VkImageResolve const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageResolve & operator=( ImageResolve const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageResolve ) ); + return *this; + } + + ImageResolve & setSrcSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & srcSubresource_ ) VULKAN_HPP_NOEXCEPT + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageResolve & setSrcOffset( VULKAN_HPP_NAMESPACE::Offset3D const & srcOffset_ ) VULKAN_HPP_NOEXCEPT + { + srcOffset = srcOffset_; + return *this; + } + + ImageResolve & setDstSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & dstSubresource_ ) VULKAN_HPP_NOEXCEPT + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageResolve & setDstOffset( VULKAN_HPP_NAMESPACE::Offset3D const & dstOffset_ ) VULKAN_HPP_NOEXCEPT + { + dstOffset = dstOffset_; + return *this; + } + + ImageResolve & setExtent( VULKAN_HPP_NAMESPACE::Extent3D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + + operator VkImageResolve const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageResolve &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageResolve const& ) const = default; +#else + bool operator==( ImageResolve const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( srcSubresource == rhs.srcSubresource ) + && ( srcOffset == rhs.srcOffset ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffset == rhs.dstOffset ) + && ( extent == rhs.extent ); + } + + bool operator!=( ImageResolve const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D srcOffset = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D dstOffset = {}; + VULKAN_HPP_NAMESPACE::Extent3D extent = {}; + + }; + static_assert( sizeof( ImageResolve ) == sizeof( VkImageResolve ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ImageResolve2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageResolve2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageResolve2KHR(VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D srcOffset_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource_ = {}, VULKAN_HPP_NAMESPACE::Offset3D dstOffset_ = {}, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubresource( srcSubresource_ ), srcOffset( srcOffset_ ), dstSubresource( dstSubresource_ ), dstOffset( dstOffset_ ), extent( extent_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageResolve2KHR( ImageResolve2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageResolve2KHR( VkImageResolve2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageResolve2KHR & operator=( VkImageResolve2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageResolve2KHR & operator=( ImageResolve2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageResolve2KHR ) ); + return *this; + } + + ImageResolve2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageResolve2KHR & setSrcSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & srcSubresource_ ) VULKAN_HPP_NOEXCEPT + { + srcSubresource = srcSubresource_; + return *this; + } + + ImageResolve2KHR & setSrcOffset( VULKAN_HPP_NAMESPACE::Offset3D const & srcOffset_ ) VULKAN_HPP_NOEXCEPT + { + srcOffset = srcOffset_; + return *this; + } + + ImageResolve2KHR & setDstSubresource( VULKAN_HPP_NAMESPACE::ImageSubresourceLayers const & dstSubresource_ ) VULKAN_HPP_NOEXCEPT + { + dstSubresource = dstSubresource_; + return *this; + } + + ImageResolve2KHR & setDstOffset( VULKAN_HPP_NAMESPACE::Offset3D const & dstOffset_ ) VULKAN_HPP_NOEXCEPT + { + dstOffset = dstOffset_; + return *this; + } + + ImageResolve2KHR & setExtent( VULKAN_HPP_NAMESPACE::Extent3D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + + operator VkImageResolve2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageResolve2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageResolve2KHR const& ) const = default; +#else + bool operator==( ImageResolve2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcSubresource == rhs.srcSubresource ) + && ( srcOffset == rhs.srcOffset ) + && ( dstSubresource == rhs.dstSubresource ) + && ( dstOffset == rhs.dstOffset ) + && ( extent == rhs.extent ); + } + + bool operator!=( ImageResolve2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageResolve2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers srcSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D srcOffset = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceLayers dstSubresource = {}; + VULKAN_HPP_NAMESPACE::Offset3D dstOffset = {}; + VULKAN_HPP_NAMESPACE::Extent3D extent = {}; + + }; + static_assert( sizeof( ImageResolve2KHR ) == sizeof( VkImageResolve2KHR ), 
"struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImageResolve2KHR; + }; + + struct ResolveImageInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eResolveImageInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ResolveImageInfo2KHR(VULKAN_HPP_NAMESPACE::Image srcImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, VULKAN_HPP_NAMESPACE::Image dstImage_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined, uint32_t regionCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageResolve2KHR* pRegions_ = {}) VULKAN_HPP_NOEXCEPT + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( regionCount_ ), pRegions( pRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR ResolveImageInfo2KHR( ResolveImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ResolveImageInfo2KHR( VkResolveImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ResolveImageInfo2KHR( VULKAN_HPP_NAMESPACE::Image srcImage_, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_, VULKAN_HPP_NAMESPACE::Image dstImage_, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) + : srcImage( srcImage_ ), srcImageLayout( srcImageLayout_ ), dstImage( dstImage_ ), dstImageLayout( dstImageLayout_ ), regionCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ResolveImageInfo2KHR & operator=( VkResolveImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ResolveImageInfo2KHR & operator=( ResolveImageInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ResolveImageInfo2KHR ) ); + return *this; + } + + ResolveImageInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ResolveImageInfo2KHR & setSrcImage( VULKAN_HPP_NAMESPACE::Image srcImage_ ) VULKAN_HPP_NOEXCEPT + { + srcImage = srcImage_; + return *this; + } + + ResolveImageInfo2KHR & setSrcImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + srcImageLayout = srcImageLayout_; + return *this; + } + + ResolveImageInfo2KHR & setDstImage( VULKAN_HPP_NAMESPACE::Image dstImage_ ) VULKAN_HPP_NOEXCEPT + { + dstImage = dstImage_; + return *this; + } + + ResolveImageInfo2KHR & setDstImageLayout( VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout_ ) VULKAN_HPP_NOEXCEPT + { + dstImageLayout = dstImageLayout_; + return *this; + } + + ResolveImageInfo2KHR & setRegionCount( uint32_t regionCount_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = regionCount_; + return *this; + } + + ResolveImageInfo2KHR & setPRegions( const VULKAN_HPP_NAMESPACE::ImageResolve2KHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ResolveImageInfo2KHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + regionCount = static_cast( 
regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkResolveImageInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkResolveImageInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ResolveImageInfo2KHR const& ) const = default; +#else + bool operator==( ResolveImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcImage == rhs.srcImage ) + && ( srcImageLayout == rhs.srcImageLayout ) + && ( dstImage == rhs.dstImage ) + && ( dstImageLayout == rhs.dstImageLayout ) + && ( regionCount == rhs.regionCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( ResolveImageInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eResolveImageInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image srcImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + VULKAN_HPP_NAMESPACE::Image dstImage = {}; + VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + uint32_t regionCount = {}; + const VULKAN_HPP_NAMESPACE::ImageResolve2KHR* pRegions = {}; + + }; + static_assert( sizeof( ResolveImageInfo2KHR ) == sizeof( VkResolveImageInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ResolveImageInfo2KHR; + }; + + struct PerformanceMarkerInfoINTEL + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceMarkerInfoINTEL; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PerformanceMarkerInfoINTEL(uint64_t marker_ = {}) VULKAN_HPP_NOEXCEPT + : marker( marker_ ) + {} + + VULKAN_HPP_CONSTEXPR PerformanceMarkerInfoINTEL( PerformanceMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceMarkerInfoINTEL( VkPerformanceMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceMarkerInfoINTEL & operator=( VkPerformanceMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceMarkerInfoINTEL & operator=( PerformanceMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceMarkerInfoINTEL ) ); + return *this; + } + + PerformanceMarkerInfoINTEL & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PerformanceMarkerInfoINTEL & setMarker( uint64_t marker_ ) VULKAN_HPP_NOEXCEPT + { + marker = marker_; + return *this; + } + + + operator VkPerformanceMarkerInfoINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceMarkerInfoINTEL &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceMarkerInfoINTEL const& ) const = default; +#else + bool operator==( PerformanceMarkerInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + 
{ + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( marker == rhs.marker ); + } + + bool operator!=( PerformanceMarkerInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceMarkerInfoINTEL; + const void* pNext = {}; + uint64_t marker = {}; + + }; + static_assert( sizeof( PerformanceMarkerInfoINTEL ) == sizeof( VkPerformanceMarkerInfoINTEL ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PerformanceMarkerInfoINTEL; + }; + + struct PerformanceOverrideInfoINTEL + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceOverrideInfoINTEL; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PerformanceOverrideInfoINTEL(VULKAN_HPP_NAMESPACE::PerformanceOverrideTypeINTEL type_ = VULKAN_HPP_NAMESPACE::PerformanceOverrideTypeINTEL::eNullHardware, VULKAN_HPP_NAMESPACE::Bool32 enable_ = {}, uint64_t parameter_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), enable( enable_ ), parameter( parameter_ ) + {} + + VULKAN_HPP_CONSTEXPR PerformanceOverrideInfoINTEL( PerformanceOverrideInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceOverrideInfoINTEL( VkPerformanceOverrideInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceOverrideInfoINTEL & operator=( VkPerformanceOverrideInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceOverrideInfoINTEL & operator=( PerformanceOverrideInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceOverrideInfoINTEL ) ); + return *this; + } + + PerformanceOverrideInfoINTEL & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PerformanceOverrideInfoINTEL & setType( VULKAN_HPP_NAMESPACE::PerformanceOverrideTypeINTEL type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + PerformanceOverrideInfoINTEL & setEnable( VULKAN_HPP_NAMESPACE::Bool32 enable_ ) VULKAN_HPP_NOEXCEPT + { + enable = enable_; + return *this; + } + + PerformanceOverrideInfoINTEL & setParameter( uint64_t parameter_ ) VULKAN_HPP_NOEXCEPT + { + parameter = parameter_; + return *this; + } + + + operator VkPerformanceOverrideInfoINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceOverrideInfoINTEL &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceOverrideInfoINTEL const& ) const = default; +#else + bool operator==( PerformanceOverrideInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( type == rhs.type ) + && ( enable == rhs.enable ) + && ( parameter == rhs.parameter ); + } + + bool operator!=( PerformanceOverrideInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceOverrideInfoINTEL; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PerformanceOverrideTypeINTEL type = 
VULKAN_HPP_NAMESPACE::PerformanceOverrideTypeINTEL::eNullHardware; + VULKAN_HPP_NAMESPACE::Bool32 enable = {}; + uint64_t parameter = {}; + + }; + static_assert( sizeof( PerformanceOverrideInfoINTEL ) == sizeof( VkPerformanceOverrideInfoINTEL ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PerformanceOverrideInfoINTEL; + }; + + struct PerformanceStreamMarkerInfoINTEL + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceStreamMarkerInfoINTEL; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PerformanceStreamMarkerInfoINTEL(uint32_t marker_ = {}) VULKAN_HPP_NOEXCEPT + : marker( marker_ ) + {} + + VULKAN_HPP_CONSTEXPR PerformanceStreamMarkerInfoINTEL( PerformanceStreamMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceStreamMarkerInfoINTEL( VkPerformanceStreamMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceStreamMarkerInfoINTEL & operator=( VkPerformanceStreamMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceStreamMarkerInfoINTEL & operator=( PerformanceStreamMarkerInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceStreamMarkerInfoINTEL ) ); + return *this; + } + + PerformanceStreamMarkerInfoINTEL & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PerformanceStreamMarkerInfoINTEL & setMarker( uint32_t marker_ ) VULKAN_HPP_NOEXCEPT + { + marker = marker_; + return *this; + } + + + operator VkPerformanceStreamMarkerInfoINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceStreamMarkerInfoINTEL &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceStreamMarkerInfoINTEL const& ) const = default; +#else + bool operator==( PerformanceStreamMarkerInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( marker == rhs.marker ); + } + + bool operator!=( PerformanceStreamMarkerInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceStreamMarkerInfoINTEL; + const void* pNext = {}; + uint32_t marker = {}; + + }; + static_assert( sizeof( PerformanceStreamMarkerInfoINTEL ) == sizeof( VkPerformanceStreamMarkerInfoINTEL ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PerformanceStreamMarkerInfoINTEL; + }; + + struct Viewport + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Viewport(float x_ = {}, float y_ = {}, float width_ = {}, float height_ = {}, float minDepth_ = {}, float maxDepth_ = {}) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ), width( width_ ), height( height_ ), minDepth( minDepth_ ), maxDepth( maxDepth_ ) + {} + + VULKAN_HPP_CONSTEXPR Viewport( Viewport const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Viewport( VkViewport const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Viewport & operator=( VkViewport const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + Viewport & operator=( Viewport const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( Viewport ) ); + return *this; + } + + Viewport & setX( float x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + Viewport & setY( float y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + Viewport & setWidth( float width_ ) VULKAN_HPP_NOEXCEPT + { + width = width_; + return *this; + } + + Viewport & setHeight( float height_ ) VULKAN_HPP_NOEXCEPT + { + height = height_; + return *this; + } + + Viewport & setMinDepth( float minDepth_ ) VULKAN_HPP_NOEXCEPT + { + minDepth = minDepth_; + return *this; + } + + Viewport & setMaxDepth( float maxDepth_ ) VULKAN_HPP_NOEXCEPT + { + maxDepth = maxDepth_; + return *this; + } + + + operator VkViewport const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkViewport &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Viewport const& ) const = default; +#else + bool operator==( Viewport const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( width == rhs.width ) + && ( height == rhs.height ) + && ( minDepth == rhs.minDepth ) + && ( maxDepth == rhs.maxDepth ); + } + + bool operator!=( Viewport const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + float x = {}; + float y = {}; + float width = {}; + float height = {}; + float minDepth = {}; + float maxDepth = {}; + + }; + static_assert( sizeof( Viewport ) == sizeof( VkViewport ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ShadingRatePaletteNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ShadingRatePaletteNV(uint32_t shadingRatePaletteEntryCount_ = {}, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV* pShadingRatePaletteEntries_ = {}) VULKAN_HPP_NOEXCEPT + : shadingRatePaletteEntryCount( shadingRatePaletteEntryCount_ ), pShadingRatePaletteEntries( pShadingRatePaletteEntries_ ) + {} + + VULKAN_HPP_CONSTEXPR ShadingRatePaletteNV( ShadingRatePaletteNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ShadingRatePaletteNV( VkShadingRatePaletteNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ShadingRatePaletteNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & shadingRatePaletteEntries_ ) + : shadingRatePaletteEntryCount( static_cast( shadingRatePaletteEntries_.size() ) ), pShadingRatePaletteEntries( shadingRatePaletteEntries_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ShadingRatePaletteNV & operator=( VkShadingRatePaletteNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ShadingRatePaletteNV & operator=( ShadingRatePaletteNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ShadingRatePaletteNV ) ); + return *this; + } + + ShadingRatePaletteNV & setShadingRatePaletteEntryCount( uint32_t shadingRatePaletteEntryCount_ ) VULKAN_HPP_NOEXCEPT + { + shadingRatePaletteEntryCount = shadingRatePaletteEntryCount_; + return *this; + } + + ShadingRatePaletteNV & setPShadingRatePaletteEntries( const VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV* pShadingRatePaletteEntries_ ) VULKAN_HPP_NOEXCEPT + { + pShadingRatePaletteEntries = pShadingRatePaletteEntries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ShadingRatePaletteNV & setShadingRatePaletteEntries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & shadingRatePaletteEntries_ ) VULKAN_HPP_NOEXCEPT + { + shadingRatePaletteEntryCount = static_cast( shadingRatePaletteEntries_.size() ); + pShadingRatePaletteEntries = shadingRatePaletteEntries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkShadingRatePaletteNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkShadingRatePaletteNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ShadingRatePaletteNV const& ) const = default; +#else + bool operator==( ShadingRatePaletteNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( shadingRatePaletteEntryCount == rhs.shadingRatePaletteEntryCount ) + && ( pShadingRatePaletteEntries == rhs.pShadingRatePaletteEntries ); + } + + bool operator!=( ShadingRatePaletteNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t shadingRatePaletteEntryCount = {}; + const VULKAN_HPP_NAMESPACE::ShadingRatePaletteEntryNV* pShadingRatePaletteEntries = {}; + + }; + static_assert( sizeof( ShadingRatePaletteNV ) == sizeof( VkShadingRatePaletteNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct ViewportWScalingNV
+  {
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR ViewportWScalingNV(float xcoeff_ = {}, float ycoeff_ = {}) VULKAN_HPP_NOEXCEPT
+    : xcoeff( xcoeff_ ), ycoeff( ycoeff_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR ViewportWScalingNV( ViewportWScalingNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ViewportWScalingNV( VkViewportWScalingNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ViewportWScalingNV & operator=( VkViewportWScalingNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ViewportWScalingNV const *>( &rhs );
+      return *this;
+    }
+
+    ViewportWScalingNV & operator=( ViewportWScalingNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ViewportWScalingNV ) );
+      return *this;
+    }
+
+    ViewportWScalingNV & setXcoeff( float xcoeff_ ) VULKAN_HPP_NOEXCEPT
+    {
+      xcoeff = xcoeff_;
+      return *this;
+    }
+
+    ViewportWScalingNV & setYcoeff( float ycoeff_ ) VULKAN_HPP_NOEXCEPT
+    {
+      ycoeff = ycoeff_;
+      return *this;
+    }
+
+
+    operator VkViewportWScalingNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkViewportWScalingNV*>( this );
+    }
+
+    operator VkViewportWScalingNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkViewportWScalingNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( ViewportWScalingNV const& ) const = default;
+#else
+    bool operator==( ViewportWScalingNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( xcoeff == rhs.xcoeff )
+          && ( ycoeff == rhs.ycoeff );
+    }
+
+    bool operator!=( ViewportWScalingNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    float xcoeff = {};
+    float ycoeff = {};
+
+  };
+  static_assert( sizeof( ViewportWScalingNV ) == sizeof( VkViewportWScalingNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ViewportWScalingNV>::value, "struct wrapper is not a standard layout!"
); + + struct StridedDeviceAddressRegionKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR StridedDeviceAddressRegionKHR(VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize stride_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : deviceAddress( deviceAddress_ ), stride( stride_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR StridedDeviceAddressRegionKHR( StridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + StridedDeviceAddressRegionKHR( VkStridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + StridedDeviceAddressRegionKHR & operator=( VkStridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + StridedDeviceAddressRegionKHR & operator=( StridedDeviceAddressRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( StridedDeviceAddressRegionKHR ) ); + return *this; + } + + StridedDeviceAddressRegionKHR & setDeviceAddress( VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + deviceAddress = deviceAddress_; + return *this; + } + + StridedDeviceAddressRegionKHR & setStride( VULKAN_HPP_NAMESPACE::DeviceSize stride_ ) VULKAN_HPP_NOEXCEPT + { + stride = stride_; + return *this; + } + + StridedDeviceAddressRegionKHR & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator VkStridedDeviceAddressRegionKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkStridedDeviceAddressRegionKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( StridedDeviceAddressRegionKHR const& ) const = default; +#else + bool operator==( StridedDeviceAddressRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( deviceAddress == rhs.deviceAddress ) + && ( stride == rhs.stride ) + && ( size == rhs.size ); + } + + bool operator!=( StridedDeviceAddressRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress = {}; + VULKAN_HPP_NAMESPACE::DeviceSize stride = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + + }; + static_assert( sizeof( StridedDeviceAddressRegionKHR ) == sizeof( VkStridedDeviceAddressRegionKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + class CommandBuffer + { + public: + using CType = VkCommandBuffer; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eCommandBuffer; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandBuffer; + + public: + VULKAN_HPP_CONSTEXPR CommandBuffer() VULKAN_HPP_NOEXCEPT + : m_commandBuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR CommandBuffer( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_commandBuffer(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT CommandBuffer( VkCommandBuffer commandBuffer ) VULKAN_HPP_NOEXCEPT + : m_commandBuffer( commandBuffer ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + CommandBuffer & operator=(VkCommandBuffer commandBuffer) VULKAN_HPP_NOEXCEPT + { + m_commandBuffer = commandBuffer; + return *this; + } +#endif + + CommandBuffer & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_commandBuffer = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( CommandBuffer const& ) const = default; +#else + bool operator==( CommandBuffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_commandBuffer == rhs.m_commandBuffer; + } + + bool operator!=(CommandBuffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_commandBuffer != rhs.m_commandBuffer; + } + + bool operator<(CommandBuffer const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_commandBuffer < rhs.m_commandBuffer; + } +#endif + + + template + VULKAN_HPP_NODISCARD Result begin( const VULKAN_HPP_NAMESPACE::CommandBufferBeginInfo* pBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type begin( const CommandBufferBeginInfo & beginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void beginConditionalRenderingEXT( const VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + 
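
    /*
      Usage sketch for the command-buffer recording interface declared in this class
      (a minimal example under stated assumptions: `cmd` is a vk::CommandBuffer ready
      for recording, and `renderPass`, `framebuffer`, `extent`, `pipeline`,
      `pipelineLayout` and `descriptorSet` are valid handles created elsewhere;
      the snippet is illustrative and is not part of the generated header):

        vk::CommandBufferBeginInfo beginInfo( vk::CommandBufferUsageFlagBits::eOneTimeSubmit );
        cmd.begin( beginInfo );

        vk::ClearValue clearColor( vk::ClearColorValue( std::array<float, 4>{ { 0.0f, 0.0f, 0.0f, 1.0f } } ) );
        vk::RenderPassBeginInfo rpBegin( renderPass, framebuffer, vk::Rect2D( { 0, 0 }, extent ), 1, &clearColor );
        cmd.beginRenderPass( rpBegin, vk::SubpassContents::eInline );

        cmd.bindPipeline( vk::PipelineBindPoint::eGraphics, pipeline );
        cmd.bindDescriptorSets( vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, descriptorSet, nullptr );
        cmd.draw( 3, 1, 0, 0 );

        cmd.endRenderPass();
        cmd.end();
    */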
template + void beginRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginRenderPass( const RenderPassBeginInfo & renderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void beginRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void beginRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy const & counterBuffers, ArrayProxy const & counterBufferOffsets VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, ArrayProxy const & descriptorSets, ArrayProxy const & dynamicOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline 
pipeline, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindTransformFeedbackBuffersEXT( uint32_t firstBinding, ArrayProxy const & buffers, ArrayProxy const & offsets, ArrayProxy const & sizes VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindVertexBuffers( uint32_t firstBinding, ArrayProxy const & buffers, ArrayProxy const & offsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void bindVertexBuffers2EXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, const VULKAN_HPP_NAMESPACE::DeviceSize* pStrides, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void bindVertexBuffers2EXT( uint32_t firstBinding, ArrayProxy const & buffers, ArrayProxy const & offsets, ArrayProxy const & sizes VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, ArrayProxy const & strides VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageBlit* pRegions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void blitImage2KHR( const VULKAN_HPP_NAMESPACE::BlitImageInfo2KHR* pBlitImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV* pInfo, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void buildAccelerationStructureNV( const AccelerationStructureInfoNV & info, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void buildAccelerationStructuresIndirectKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::DeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const * ppMaxPrimitiveCounts, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void buildAccelerationStructuresIndirectKHR( ArrayProxy const & infos, ArrayProxy const & indirectDeviceAddresses, ArrayProxy const & indirectStrides, ArrayProxy const & pMaxPrimitiveCounts, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void buildAccelerationStructuresKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void buildAccelerationStructuresKHR( ArrayProxy const & infos, ArrayProxy const & pBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void clearAttachments( uint32_t attachmentCount, const VULKAN_HPP_NAMESPACE::ClearAttachment* pAttachments, uint32_t rectCount, const VULKAN_HPP_NAMESPACE::ClearRect* pRects, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void clearAttachments( ArrayProxy const & attachments, ArrayProxy const & rects, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearColorValue* pColor, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy const & ranges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy const & ranges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2KHR* pCopyBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; 
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyBufferToImage2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferToImageInfo2KHR* pCopyBufferToImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyImage2KHR( const VULKAN_HPP_NAMESPACE::CopyImageInfo2KHR* pCopyImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyImageToBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyImageToBufferInfo2KHR* pCopyImageToBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) 
const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void debugMarkerEndEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void debugMarkerInsertEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; + + + template + void drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void endConditionalRenderingEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void endDebugUtilsLabelEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void endRenderPass( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void endRenderPass2( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void endRenderPass2( const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void endRenderPass2KHR( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void endRenderPass2KHR( const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void endTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void endTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy const & counterBuffers, ArrayProxy const & counterBufferOffsets VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void executeCommands( uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void executeCommands( ArrayProxy const & commandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void nextSubpass2( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void nextSubpass2KHR( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void nextSubpass2KHR( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, ArrayProxy const & memoryBarriers, ArrayProxy const & bufferMemoryBarriers, ArrayProxy const & imageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void preprocessGeneratedCommandsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void preprocessGeneratedCommandsNV( const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; 
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy const & values, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, ArrayProxy const & descriptorWrites, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageResolve* pRegions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void resolveImage2KHR( const VULKAN_HPP_NAMESPACE::ResolveImageInfo2KHR* pResolveImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setBlendConstants( const float blendConstants[4], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setCheckpointNV( 
const void* pCheckpointMarker, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy const & customSampleOrders, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDeviceMask( uint32_t deviceMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void setDeviceMaskKHR( uint32_t deviceMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy const & discardRectangles, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy const & exclusiveScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setFragmentShadingRateEnumNV( VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setFragmentShadingRateKHR( const VULKAN_HPP_NAMESPACE::Extent2D* pFragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setFragmentShadingRateKHR( const Extent2D & fragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setLineWidth( float lineWidth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + VULKAN_HPP_NODISCARD Result setPerformanceMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceMarkerInfoINTEL* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL* pOverrideInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL* pMarkerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setRayTracingPipelineStackSizeKHR( uint32_t pipelineStackSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setSampleLocationsEXT( const 
VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setScissor( uint32_t firstScissor, uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setScissor( uint32_t firstScissor, ArrayProxy const & scissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setScissorWithCountEXT( uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setScissorWithCountEXT( ArrayProxy const & scissors, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void setViewport( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewport( uint32_t firstViewport, ArrayProxy const & viewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy const & shadingRatePalettes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void 
setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewportWScalingNV( uint32_t firstViewport, ArrayProxy const & viewportWScalings, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setViewportWithCountEXT( uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setViewportWithCountEXT( ArrayProxy const & viewports, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void traceRaysIndirectKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void traceRaysIndirectKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void traceRaysKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void traceRaysKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, 
VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+
+
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize dataSize, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    template <typename T, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, ArrayProxy<const T> const & data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void waitEvents( uint32_t eventCount, const VULKAN_HPP_NAMESPACE::Event* pEvents, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void waitEvents( ArrayProxy<const VULKAN_HPP_NAMESPACE::Event> const & events, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, ArrayProxy<const VULKAN_HPP_NAMESPACE::MemoryBarrier> const & memoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier> const & bufferMemoryBarriers, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier> const & imageMemoryBarriers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void writeAccelerationStructuresPropertiesKHR( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void writeAccelerationStructuresPropertiesNV( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void writeAccelerationStructuresPropertiesNV( ArrayProxy<const VULKAN_HPP_NAMESPACE::AccelerationStructureNV> const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
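The pointer-and-count overloads above mirror the C entry points one to one, while the ArrayProxy overloads accept containers and braced initializer lists directly. A minimal sketch of the convenience form, assuming a recording vk::CommandBuffer named cmd, a vk::Event named event, and an already filled-in vk::ImageMemoryBarrier named barrier exist in the calling code:

    // Wait on one event and apply one image barrier; the unused barrier
    // categories are passed as nullptr, which constructs empty ArrayProxy values.
    cmd.waitEvents( { event },
                    vk::PipelineStageFlagBits::eHost,
                    vk::PipelineStageFlagBits::eTransfer,
                    nullptr,
                    nullptr,
                    { barrier } );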
+
+
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+
+
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    void writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    VULKAN_HPP_NODISCARD Result end( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#else
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType<void>::type end( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    VULKAN_HPP_NODISCARD Result reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT;
+#else
+    template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+    typename ResultValueType<void>::type reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const;
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT operator VkCommandBuffer() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_commandBuffer;
+    }
+
+    explicit operator bool() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_commandBuffer != VK_NULL_HANDLE;
+    }
+
+    bool operator!() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_commandBuffer == VK_NULL_HANDLE;
+    }
+
+  private:
+    VkCommandBuffer m_commandBuffer;
+  };
+  static_assert( sizeof( VULKAN_HPP_NAMESPACE::CommandBuffer ) == sizeof( VkCommandBuffer ), "handle and wrapper have different size!" );
+
+  template <>
+  struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type<ObjectType::eCommandBuffer>
+  {
+    using type = VULKAN_HPP_NAMESPACE::CommandBuffer;
+  };
+
+  template <>
+  struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eCommandBuffer, CommandBuffer>
+  {
+    using Type = VULKAN_HPP_NAMESPACE::CommandBuffer;
+  };
+
+
+  template <>
+  struct CppType<VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eCommandBuffer, CommandBuffer>
+  {
+    using Type = VULKAN_HPP_NAMESPACE::CommandBuffer;
+  };
+
+
+  template <>
+  struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::CommandBuffer>
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true;
+  };
+
+  struct MemoryAllocateInfo
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryAllocateInfo;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR MemoryAllocateInfo(VULKAN_HPP_NAMESPACE::DeviceSize allocationSize_ = {}, uint32_t memoryTypeIndex_ = {}) VULKAN_HPP_NOEXCEPT
+    : allocationSize( allocationSize_ ), memoryTypeIndex( memoryTypeIndex_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR MemoryAllocateInfo( MemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    MemoryAllocateInfo( VkMemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    MemoryAllocateInfo & operator=( VkMemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryAllocateInfo const *>( &rhs );
+      return *this;
+    }
+
+    MemoryAllocateInfo & operator=( MemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( MemoryAllocateInfo ) );
+      return *this;
+    }
+
+    MemoryAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    MemoryAllocateInfo & setAllocationSize( VULKAN_HPP_NAMESPACE::DeviceSize allocationSize_ ) VULKAN_HPP_NOEXCEPT
+    {
+      allocationSize = allocationSize_;
+      return *this;
+    }
+
+    MemoryAllocateInfo & setMemoryTypeIndex( uint32_t memoryTypeIndex_ ) VULKAN_HPP_NOEXCEPT
+    {
+      memoryTypeIndex = memoryTypeIndex_;
+      return *this;
+    }
+
+
+    operator VkMemoryAllocateInfo const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkMemoryAllocateInfo*>( this );
+    }
+
+    operator VkMemoryAllocateInfo &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkMemoryAllocateInfo*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( MemoryAllocateInfo const& ) const = default;
+#else
+    bool operator==( MemoryAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( allocationSize == rhs.allocationSize )
+          && ( memoryTypeIndex == rhs.memoryTypeIndex );
+    }
+
+    bool operator!=( MemoryAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryAllocateInfo;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize allocationSize = {};
+    uint32_t memoryTypeIndex = {};
+
+  };
+  static_assert( sizeof( MemoryAllocateInfo ) == sizeof( VkMemoryAllocateInfo ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<MemoryAllocateInfo>::value, "struct wrapper is not a standard layout!" );
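For orientation, a minimal usage sketch of the wrapper struct declared above, assuming a vk::Device named device, a VkMemoryRequirements value named requirements, and a previously chosen memoryTypeIndex exist in the calling code:

    // Chainable setters fill the struct; sType and pNext are preset by the wrapper.
    vk::MemoryAllocateInfo allocInfo = vk::MemoryAllocateInfo()
                                           .setAllocationSize( requirements.size )
                                           .setMemoryTypeIndex( memoryTypeIndex );

    // The conversion operators let the same object be handed to the C API as a
    // VkMemoryAllocateInfo; the enhanced-mode call below uses it directly.
    vk::DeviceMemory memory = device.allocateMemory( allocInfo );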
+
+  template <>
+  struct CppType<StructureType, StructureType::eMemoryAllocateInfo>
+  {
+    using Type = MemoryAllocateInfo;
+  };
+
+  class DeferredOperationKHR
+  {
+  public:
+    using CType = VkDeferredOperationKHR;
+
+    static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDeferredOperationKHR;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown;
+
+  public:
+    VULKAN_HPP_CONSTEXPR DeferredOperationKHR() VULKAN_HPP_NOEXCEPT
+      : m_deferredOperationKHR(VK_NULL_HANDLE)
+    {}
+
+    VULKAN_HPP_CONSTEXPR DeferredOperationKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT
+      : m_deferredOperationKHR(VK_NULL_HANDLE)
+    {}
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT DeferredOperationKHR( VkDeferredOperationKHR deferredOperationKHR ) VULKAN_HPP_NOEXCEPT
+      : m_deferredOperationKHR( deferredOperationKHR )
+    {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+    DeferredOperationKHR & operator=(VkDeferredOperationKHR deferredOperationKHR) VULKAN_HPP_NOEXCEPT
+    {
+      m_deferredOperationKHR = deferredOperationKHR;
+      return *this;
+    }
+#endif
+
+    DeferredOperationKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT
+    {
+      m_deferredOperationKHR = VK_NULL_HANDLE;
+      return *this;
+    }
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DeferredOperationKHR const& ) const = default;
+#else
+    bool operator==( DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return m_deferredOperationKHR == rhs.m_deferredOperationKHR;
+    }
+
+    bool operator!=(DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return m_deferredOperationKHR != rhs.m_deferredOperationKHR;
+    }
+
+    bool operator<(DeferredOperationKHR const & rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return m_deferredOperationKHR < rhs.m_deferredOperationKHR;
+    }
+#endif
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDeferredOperationKHR() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_deferredOperationKHR;
+    }
+
+    explicit operator bool() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_deferredOperationKHR != VK_NULL_HANDLE;
+    }
+
+    bool operator!() const VULKAN_HPP_NOEXCEPT
+    {
+      return m_deferredOperationKHR == VK_NULL_HANDLE;
+    }
+
+  private:
+    VkDeferredOperationKHR m_deferredOperationKHR;
+  };
+  static_assert( sizeof( VULKAN_HPP_NAMESPACE::DeferredOperationKHR ) == sizeof( VkDeferredOperationKHR ), "handle and wrapper have different size!" );
+
+  template <>
+  struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated.
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DeferredOperationKHR; + }; + + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class PipelineCache + { + public: + using CType = VkPipelineCache; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePipelineCache; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePipelineCache; + + public: + VULKAN_HPP_CONSTEXPR PipelineCache() VULKAN_HPP_NOEXCEPT + : m_pipelineCache(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PipelineCache( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_pipelineCache(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PipelineCache( VkPipelineCache pipelineCache ) VULKAN_HPP_NOEXCEPT + : m_pipelineCache( pipelineCache ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PipelineCache & operator=(VkPipelineCache pipelineCache) VULKAN_HPP_NOEXCEPT + { + m_pipelineCache = pipelineCache; + return *this; + } +#endif + + PipelineCache & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_pipelineCache = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineCache const& ) const = default; +#else + bool operator==( PipelineCache const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache == rhs.m_pipelineCache; + } + + bool operator!=(PipelineCache const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache != rhs.m_pipelineCache; + } + + bool operator<(PipelineCache const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache < rhs.m_pipelineCache; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPipelineCache() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_pipelineCache == VK_NULL_HANDLE; + } + + private: + VkPipelineCache m_pipelineCache; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::PipelineCache ) == sizeof( VkPipelineCache ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::PipelineCache; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PipelineCache; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PipelineCache; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct EventCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eEventCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR EventCreateInfo(VULKAN_HPP_NAMESPACE::EventCreateFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR EventCreateInfo( EventCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + EventCreateInfo( VkEventCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + EventCreateInfo & operator=( VkEventCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + EventCreateInfo & operator=( EventCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( EventCreateInfo ) ); + return *this; + } + + EventCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + EventCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::EventCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkEventCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkEventCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( EventCreateInfo const& ) const = default; +#else + bool operator==( EventCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( EventCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eEventCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::EventCreateFlags flags = {}; + + }; + static_assert( sizeof( EventCreateInfo ) == sizeof( VkEventCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = EventCreateInfo; + }; + + struct FenceCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFenceCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FenceCreateInfo(VULKAN_HPP_NAMESPACE::FenceCreateFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR FenceCreateInfo( FenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FenceCreateInfo( VkFenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FenceCreateInfo & operator=( VkFenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FenceCreateInfo & operator=( FenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FenceCreateInfo ) ); + return *this; + } + + FenceCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FenceCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::FenceCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkFenceCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFenceCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FenceCreateInfo const& ) const = default; +#else + bool operator==( FenceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( FenceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFenceCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::FenceCreateFlags flags = {}; + + }; + static_assert( sizeof( FenceCreateInfo ) == sizeof( VkFenceCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = FenceCreateInfo; + }; + + struct FramebufferCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFramebufferCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FramebufferCreateInfo(VULKAN_HPP_NAMESPACE::FramebufferCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::RenderPass renderPass_ = {}, uint32_t attachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageView* pAttachments_ = {}, uint32_t width_ = {}, uint32_t height_ = {}, uint32_t layers_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), renderPass( renderPass_ ), attachmentCount( attachmentCount_ ), pAttachments( pAttachments_ ), width( width_ ), height( height_ ), layers( layers_ ) + {} + + VULKAN_HPP_CONSTEXPR FramebufferCreateInfo( FramebufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FramebufferCreateInfo( VkFramebufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + FramebufferCreateInfo( VULKAN_HPP_NAMESPACE::FramebufferCreateFlags flags_, VULKAN_HPP_NAMESPACE::RenderPass renderPass_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_, uint32_t width_ = {}, uint32_t height_ = {}, uint32_t layers_ = {} ) + : flags( flags_ ), renderPass( renderPass_ ), attachmentCount( static_cast( attachments_.size() ) ), pAttachments( attachments_.data() ), width( width_ ), height( height_ ), layers( layers_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FramebufferCreateInfo & operator=( VkFramebufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FramebufferCreateInfo & operator=( FramebufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FramebufferCreateInfo ) ); + return *this; + } + + FramebufferCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FramebufferCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::FramebufferCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + FramebufferCreateInfo & setRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass_ ) VULKAN_HPP_NOEXCEPT + { + renderPass = renderPass_; + return *this; + } + + FramebufferCreateInfo & setAttachmentCount( uint32_t attachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = attachmentCount_; + return *this; + } + + FramebufferCreateInfo & setPAttachments( const VULKAN_HPP_NAMESPACE::ImageView* pAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pAttachments = pAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + FramebufferCreateInfo & setAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = static_cast( attachments_.size() ); + pAttachments = attachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + FramebufferCreateInfo & setWidth( uint32_t width_ ) VULKAN_HPP_NOEXCEPT + { + width = width_; + return *this; + } + + FramebufferCreateInfo & setHeight( uint32_t height_ ) VULKAN_HPP_NOEXCEPT + { + height = height_; + return *this; + } + + FramebufferCreateInfo & setLayers( uint32_t layers_ ) VULKAN_HPP_NOEXCEPT + { + layers = layers_; + return *this; + } + + + operator VkFramebufferCreateInfo const&() 
const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFramebufferCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FramebufferCreateInfo const& ) const = default; +#else + bool operator==( FramebufferCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( renderPass == rhs.renderPass ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( width == rhs.width ) + && ( height == rhs.height ) + && ( layers == rhs.layers ); + } + + bool operator!=( FramebufferCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFramebufferCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::FramebufferCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::RenderPass renderPass = {}; + uint32_t attachmentCount = {}; + const VULKAN_HPP_NAMESPACE::ImageView* pAttachments = {}; + uint32_t width = {}; + uint32_t height = {}; + uint32_t layers = {}; + + }; + static_assert( sizeof( FramebufferCreateInfo ) == sizeof( VkFramebufferCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = FramebufferCreateInfo; + }; + + struct VertexInputBindingDescription + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR VertexInputBindingDescription(uint32_t binding_ = {}, uint32_t stride_ = {}, VULKAN_HPP_NAMESPACE::VertexInputRate inputRate_ = VULKAN_HPP_NAMESPACE::VertexInputRate::eVertex) VULKAN_HPP_NOEXCEPT + : binding( binding_ ), stride( stride_ ), inputRate( inputRate_ ) + {} + + VULKAN_HPP_CONSTEXPR VertexInputBindingDescription( VertexInputBindingDescription const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + VertexInputBindingDescription( VkVertexInputBindingDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + VertexInputBindingDescription & operator=( VkVertexInputBindingDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + VertexInputBindingDescription & operator=( VertexInputBindingDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( VertexInputBindingDescription ) ); + return *this; + } + + VertexInputBindingDescription & setBinding( uint32_t binding_ ) VULKAN_HPP_NOEXCEPT + { + binding = binding_; + return *this; + } + + VertexInputBindingDescription & setStride( uint32_t stride_ ) VULKAN_HPP_NOEXCEPT + { + stride = stride_; + return *this; + } + + VertexInputBindingDescription & setInputRate( VULKAN_HPP_NAMESPACE::VertexInputRate inputRate_ ) VULKAN_HPP_NOEXCEPT + { + inputRate = inputRate_; + return *this; + } + + + operator VkVertexInputBindingDescription const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkVertexInputBindingDescription &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( VertexInputBindingDescription const& ) const = default; +#else + bool operator==( VertexInputBindingDescription const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( 
binding == rhs.binding )
+          && ( stride == rhs.stride )
+          && ( inputRate == rhs.inputRate );
+    }
+
+    bool operator!=( VertexInputBindingDescription const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t binding = {};
+    uint32_t stride = {};
+    VULKAN_HPP_NAMESPACE::VertexInputRate inputRate = VULKAN_HPP_NAMESPACE::VertexInputRate::eVertex;
+
+  };
+  static_assert( sizeof( VertexInputBindingDescription ) == sizeof( VkVertexInputBindingDescription ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<VertexInputBindingDescription>::value, "struct wrapper is not a standard layout!" );
+
+  struct VertexInputAttributeDescription
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR VertexInputAttributeDescription(uint32_t location_ = {}, uint32_t binding_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, uint32_t offset_ = {}) VULKAN_HPP_NOEXCEPT
+    : location( location_ ), binding( binding_ ), format( format_ ), offset( offset_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR VertexInputAttributeDescription( VertexInputAttributeDescription const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    VertexInputAttributeDescription( VkVertexInputAttributeDescription const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    VertexInputAttributeDescription & operator=( VkVertexInputAttributeDescription const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::VertexInputAttributeDescription const *>( &rhs );
+      return *this;
+    }
+
+    VertexInputAttributeDescription & operator=( VertexInputAttributeDescription const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( VertexInputAttributeDescription ) );
+      return *this;
+    }
+
+    VertexInputAttributeDescription & setLocation( uint32_t location_ ) VULKAN_HPP_NOEXCEPT
+    {
+      location = location_;
+      return *this;
+    }
+
+    VertexInputAttributeDescription & setBinding( uint32_t binding_ ) VULKAN_HPP_NOEXCEPT
+    {
+      binding = binding_;
+      return *this;
+    }
+
+    VertexInputAttributeDescription & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT
+    {
+      format = format_;
+      return *this;
+    }
+
+    VertexInputAttributeDescription & setOffset( uint32_t offset_ ) VULKAN_HPP_NOEXCEPT
+    {
+      offset = offset_;
+      return *this;
+    }
+
+
+    operator VkVertexInputAttributeDescription const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkVertexInputAttributeDescription*>( this );
+    }
+
+    operator VkVertexInputAttributeDescription &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkVertexInputAttributeDescription*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( VertexInputAttributeDescription const& ) const = default;
+#else
+    bool operator==( VertexInputAttributeDescription const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( location == rhs.location )
+          && ( binding == rhs.binding )
+          && ( format == rhs.format )
+          && ( offset == rhs.offset );
+    }
+
+    bool operator!=( VertexInputAttributeDescription const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t location = {};
+    uint32_t binding = {};
+    VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined;
+    uint32_t offset = {};
+
+  };
+  static_assert( sizeof( VertexInputAttributeDescription ) == sizeof( VkVertexInputAttributeDescription ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<VertexInputAttributeDescription>::value, "struct wrapper is not a standard layout!"
); + + struct PipelineVertexInputStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineVertexInputStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineVertexInputStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateFlags flags_ = {}, uint32_t vertexBindingDescriptionCount_ = {}, const VULKAN_HPP_NAMESPACE::VertexInputBindingDescription* pVertexBindingDescriptions_ = {}, uint32_t vertexAttributeDescriptionCount_ = {}, const VULKAN_HPP_NAMESPACE::VertexInputAttributeDescription* pVertexAttributeDescriptions_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), vertexBindingDescriptionCount( vertexBindingDescriptionCount_ ), pVertexBindingDescriptions( pVertexBindingDescriptions_ ), vertexAttributeDescriptionCount( vertexAttributeDescriptionCount_ ), pVertexAttributeDescriptions( pVertexAttributeDescriptions_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineVertexInputStateCreateInfo( PipelineVertexInputStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineVertexInputStateCreateInfo( VkPipelineVertexInputStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineVertexInputStateCreateInfo( VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDescriptions_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexAttributeDescriptions_ = {} ) + : flags( flags_ ), vertexBindingDescriptionCount( static_cast( vertexBindingDescriptions_.size() ) ), pVertexBindingDescriptions( vertexBindingDescriptions_.data() ), vertexAttributeDescriptionCount( static_cast( vertexAttributeDescriptions_.size() ) ), pVertexAttributeDescriptions( vertexAttributeDescriptions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineVertexInputStateCreateInfo & operator=( VkPipelineVertexInputStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineVertexInputStateCreateInfo & operator=( PipelineVertexInputStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineVertexInputStateCreateInfo ) ); + return *this; + } + + PipelineVertexInputStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineVertexInputStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineVertexInputStateCreateInfo & setVertexBindingDescriptionCount( uint32_t vertexBindingDescriptionCount_ ) VULKAN_HPP_NOEXCEPT + { + vertexBindingDescriptionCount = vertexBindingDescriptionCount_; + return *this; + } + + PipelineVertexInputStateCreateInfo & setPVertexBindingDescriptions( const VULKAN_HPP_NAMESPACE::VertexInputBindingDescription* pVertexBindingDescriptions_ ) VULKAN_HPP_NOEXCEPT + { + pVertexBindingDescriptions = pVertexBindingDescriptions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineVertexInputStateCreateInfo & setVertexBindingDescriptions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDescriptions_ ) VULKAN_HPP_NOEXCEPT + { + vertexBindingDescriptionCount = static_cast( vertexBindingDescriptions_.size() 
); + pVertexBindingDescriptions = vertexBindingDescriptions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PipelineVertexInputStateCreateInfo & setVertexAttributeDescriptionCount( uint32_t vertexAttributeDescriptionCount_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeDescriptionCount = vertexAttributeDescriptionCount_; + return *this; + } + + PipelineVertexInputStateCreateInfo & setPVertexAttributeDescriptions( const VULKAN_HPP_NAMESPACE::VertexInputAttributeDescription* pVertexAttributeDescriptions_ ) VULKAN_HPP_NOEXCEPT + { + pVertexAttributeDescriptions = pVertexAttributeDescriptions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineVertexInputStateCreateInfo & setVertexAttributeDescriptions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexAttributeDescriptions_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeDescriptionCount = static_cast( vertexAttributeDescriptions_.size() ); + pVertexAttributeDescriptions = vertexAttributeDescriptions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineVertexInputStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineVertexInputStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineVertexInputStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineVertexInputStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( vertexBindingDescriptionCount == rhs.vertexBindingDescriptionCount ) + && ( pVertexBindingDescriptions == rhs.pVertexBindingDescriptions ) + && ( vertexAttributeDescriptionCount == rhs.vertexAttributeDescriptionCount ) + && ( pVertexAttributeDescriptions == rhs.pVertexAttributeDescriptions ); + } + + bool operator!=( PipelineVertexInputStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineVertexInputStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateFlags flags = {}; + uint32_t vertexBindingDescriptionCount = {}; + const VULKAN_HPP_NAMESPACE::VertexInputBindingDescription* pVertexBindingDescriptions = {}; + uint32_t vertexAttributeDescriptionCount = {}; + const VULKAN_HPP_NAMESPACE::VertexInputAttributeDescription* pVertexAttributeDescriptions = {}; + + }; + static_assert( sizeof( PipelineVertexInputStateCreateInfo ) == sizeof( VkPipelineVertexInputStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineVertexInputStateCreateInfo; + }; + + struct PipelineInputAssemblyStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineInputAssemblyStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineInputAssemblyStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::PrimitiveTopology topology_ = VULKAN_HPP_NAMESPACE::PrimitiveTopology::ePointList, VULKAN_HPP_NAMESPACE::Bool32 primitiveRestartEnable_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), topology( topology_ ), primitiveRestartEnable( primitiveRestartEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineInputAssemblyStateCreateInfo( PipelineInputAssemblyStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineInputAssemblyStateCreateInfo( VkPipelineInputAssemblyStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineInputAssemblyStateCreateInfo & operator=( VkPipelineInputAssemblyStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineInputAssemblyStateCreateInfo & operator=( PipelineInputAssemblyStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineInputAssemblyStateCreateInfo ) ); + return *this; + } + + PipelineInputAssemblyStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineInputAssemblyStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineInputAssemblyStateCreateInfo & setTopology( VULKAN_HPP_NAMESPACE::PrimitiveTopology topology_ ) VULKAN_HPP_NOEXCEPT + { + topology = topology_; + return *this; + } + + PipelineInputAssemblyStateCreateInfo & setPrimitiveRestartEnable( VULKAN_HPP_NAMESPACE::Bool32 primitiveRestartEnable_ ) VULKAN_HPP_NOEXCEPT + { + primitiveRestartEnable = primitiveRestartEnable_; + return *this; + } + + + operator VkPipelineInputAssemblyStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineInputAssemblyStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineInputAssemblyStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineInputAssemblyStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( topology == rhs.topology ) + && ( primitiveRestartEnable == rhs.primitiveRestartEnable ); + } + + bool operator!=( PipelineInputAssemblyStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineInputAssemblyStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::PrimitiveTopology topology = VULKAN_HPP_NAMESPACE::PrimitiveTopology::ePointList; + VULKAN_HPP_NAMESPACE::Bool32 primitiveRestartEnable = {}; + + }; + static_assert( sizeof( PipelineInputAssemblyStateCreateInfo ) == 
sizeof( VkPipelineInputAssemblyStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineInputAssemblyStateCreateInfo; + }; + + struct PipelineTessellationStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineTessellationStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineTessellationStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateFlags flags_ = {}, uint32_t patchControlPoints_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), patchControlPoints( patchControlPoints_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineTessellationStateCreateInfo( PipelineTessellationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineTessellationStateCreateInfo( VkPipelineTessellationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineTessellationStateCreateInfo & operator=( VkPipelineTessellationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineTessellationStateCreateInfo & operator=( PipelineTessellationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineTessellationStateCreateInfo ) ); + return *this; + } + + PipelineTessellationStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineTessellationStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineTessellationStateCreateInfo & setPatchControlPoints( uint32_t patchControlPoints_ ) VULKAN_HPP_NOEXCEPT + { + patchControlPoints = patchControlPoints_; + return *this; + } + + + operator VkPipelineTessellationStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineTessellationStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineTessellationStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineTessellationStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( patchControlPoints == rhs.patchControlPoints ); + } + + bool operator!=( PipelineTessellationStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineTessellationStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateFlags flags = {}; + uint32_t patchControlPoints = {}; + + }; + static_assert( sizeof( PipelineTessellationStateCreateInfo ) == sizeof( VkPipelineTessellationStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineTessellationStateCreateInfo; + }; + + struct PipelineViewportStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineViewportStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineViewportStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateFlags flags_ = {}, uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::Viewport* pViewports_ = {}, uint32_t scissorCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), viewportCount( viewportCount_ ), pViewports( pViewports_ ), scissorCount( scissorCount_ ), pScissors( pScissors_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineViewportStateCreateInfo( PipelineViewportStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineViewportStateCreateInfo( VkPipelineViewportStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportStateCreateInfo( VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewports_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & scissors_ = {} ) + : flags( flags_ ), viewportCount( static_cast( viewports_.size() ) ), pViewports( viewports_.data() ), scissorCount( static_cast( scissors_.size() ) ), pScissors( scissors_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineViewportStateCreateInfo & operator=( VkPipelineViewportStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineViewportStateCreateInfo & operator=( PipelineViewportStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineViewportStateCreateInfo ) ); + return *this; + } + + PipelineViewportStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineViewportStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineViewportStateCreateInfo & setViewportCount( uint32_t viewportCount_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportStateCreateInfo & setPViewports( const VULKAN_HPP_NAMESPACE::Viewport* pViewports_ ) VULKAN_HPP_NOEXCEPT + { + pViewports = pViewports_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportStateCreateInfo & setViewports( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewports_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = static_cast( viewports_.size() ); + pViewports = viewports_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PipelineViewportStateCreateInfo & setScissorCount( uint32_t scissorCount_ ) VULKAN_HPP_NOEXCEPT + { + scissorCount = scissorCount_; + return *this; + } + + PipelineViewportStateCreateInfo & setPScissors( const VULKAN_HPP_NAMESPACE::Rect2D* pScissors_ ) VULKAN_HPP_NOEXCEPT + { + pScissors = pScissors_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportStateCreateInfo & setScissors( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & scissors_ ) 
VULKAN_HPP_NOEXCEPT + { + scissorCount = static_cast( scissors_.size() ); + pScissors = scissors_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineViewportStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineViewportStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineViewportStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineViewportStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( viewportCount == rhs.viewportCount ) + && ( pViewports == rhs.pViewports ) + && ( scissorCount == rhs.scissorCount ) + && ( pScissors == rhs.pScissors ); + } + + bool operator!=( PipelineViewportStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineViewportStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateFlags flags = {}; + uint32_t viewportCount = {}; + const VULKAN_HPP_NAMESPACE::Viewport* pViewports = {}; + uint32_t scissorCount = {}; + const VULKAN_HPP_NAMESPACE::Rect2D* pScissors = {}; + + }; + static_assert( sizeof( PipelineViewportStateCreateInfo ) == sizeof( VkPipelineViewportStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineViewportStateCreateInfo; + }; + + struct PipelineRasterizationStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRasterizationStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthClampEnable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rasterizerDiscardEnable_ = {}, VULKAN_HPP_NAMESPACE::PolygonMode polygonMode_ = VULKAN_HPP_NAMESPACE::PolygonMode::eFill, VULKAN_HPP_NAMESPACE::CullModeFlags cullMode_ = {}, VULKAN_HPP_NAMESPACE::FrontFace frontFace_ = VULKAN_HPP_NAMESPACE::FrontFace::eCounterClockwise, VULKAN_HPP_NAMESPACE::Bool32 depthBiasEnable_ = {}, float depthBiasConstantFactor_ = {}, float depthBiasClamp_ = {}, float depthBiasSlopeFactor_ = {}, float lineWidth_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), depthClampEnable( depthClampEnable_ ), rasterizerDiscardEnable( rasterizerDiscardEnable_ ), polygonMode( polygonMode_ ), cullMode( cullMode_ ), frontFace( frontFace_ ), depthBiasEnable( depthBiasEnable_ ), depthBiasConstantFactor( depthBiasConstantFactor_ ), depthBiasClamp( depthBiasClamp_ ), depthBiasSlopeFactor( depthBiasSlopeFactor_ ), lineWidth( lineWidth_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRasterizationStateCreateInfo( PipelineRasterizationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRasterizationStateCreateInfo( VkPipelineRasterizationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRasterizationStateCreateInfo & operator=( VkPipelineRasterizationStateCreateInfo const 
& rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRasterizationStateCreateInfo & operator=( PipelineRasterizationStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRasterizationStateCreateInfo ) ); + return *this; + } + + PipelineRasterizationStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setDepthClampEnable( VULKAN_HPP_NAMESPACE::Bool32 depthClampEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthClampEnable = depthClampEnable_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setRasterizerDiscardEnable( VULKAN_HPP_NAMESPACE::Bool32 rasterizerDiscardEnable_ ) VULKAN_HPP_NOEXCEPT + { + rasterizerDiscardEnable = rasterizerDiscardEnable_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setPolygonMode( VULKAN_HPP_NAMESPACE::PolygonMode polygonMode_ ) VULKAN_HPP_NOEXCEPT + { + polygonMode = polygonMode_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setCullMode( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode_ ) VULKAN_HPP_NOEXCEPT + { + cullMode = cullMode_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setFrontFace( VULKAN_HPP_NAMESPACE::FrontFace frontFace_ ) VULKAN_HPP_NOEXCEPT + { + frontFace = frontFace_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setDepthBiasEnable( VULKAN_HPP_NAMESPACE::Bool32 depthBiasEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthBiasEnable = depthBiasEnable_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setDepthBiasConstantFactor( float depthBiasConstantFactor_ ) VULKAN_HPP_NOEXCEPT + { + depthBiasConstantFactor = depthBiasConstantFactor_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setDepthBiasClamp( float depthBiasClamp_ ) VULKAN_HPP_NOEXCEPT + { + depthBiasClamp = depthBiasClamp_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setDepthBiasSlopeFactor( float depthBiasSlopeFactor_ ) VULKAN_HPP_NOEXCEPT + { + depthBiasSlopeFactor = depthBiasSlopeFactor_; + return *this; + } + + PipelineRasterizationStateCreateInfo & setLineWidth( float lineWidth_ ) VULKAN_HPP_NOEXCEPT + { + lineWidth = lineWidth_; + return *this; + } + + + operator VkPipelineRasterizationStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRasterizationStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRasterizationStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineRasterizationStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( depthClampEnable == rhs.depthClampEnable ) + && ( rasterizerDiscardEnable == rhs.rasterizerDiscardEnable ) + && ( polygonMode == rhs.polygonMode ) + && ( cullMode == rhs.cullMode ) + && ( frontFace == rhs.frontFace ) + && ( depthBiasEnable == rhs.depthBiasEnable ) + && ( depthBiasConstantFactor == rhs.depthBiasConstantFactor ) + && ( depthBiasClamp == rhs.depthBiasClamp ) + && ( depthBiasSlopeFactor == rhs.depthBiasSlopeFactor ) + && ( lineWidth == rhs.lineWidth ); + } + 
+ bool operator!=( PipelineRasterizationStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthClampEnable = {}; + VULKAN_HPP_NAMESPACE::Bool32 rasterizerDiscardEnable = {}; + VULKAN_HPP_NAMESPACE::PolygonMode polygonMode = VULKAN_HPP_NAMESPACE::PolygonMode::eFill; + VULKAN_HPP_NAMESPACE::CullModeFlags cullMode = {}; + VULKAN_HPP_NAMESPACE::FrontFace frontFace = VULKAN_HPP_NAMESPACE::FrontFace::eCounterClockwise; + VULKAN_HPP_NAMESPACE::Bool32 depthBiasEnable = {}; + float depthBiasConstantFactor = {}; + float depthBiasClamp = {}; + float depthBiasSlopeFactor = {}; + float lineWidth = {}; + + }; + static_assert( sizeof( PipelineRasterizationStateCreateInfo ) == sizeof( VkPipelineRasterizationStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineRasterizationStateCreateInfo; + }; + + struct PipelineMultisampleStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineMultisampleStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineMultisampleStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::Bool32 sampleShadingEnable_ = {}, float minSampleShading_ = {}, const VULKAN_HPP_NAMESPACE::SampleMask* pSampleMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 alphaToCoverageEnable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 alphaToOneEnable_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), rasterizationSamples( rasterizationSamples_ ), sampleShadingEnable( sampleShadingEnable_ ), minSampleShading( minSampleShading_ ), pSampleMask( pSampleMask_ ), alphaToCoverageEnable( alphaToCoverageEnable_ ), alphaToOneEnable( alphaToOneEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineMultisampleStateCreateInfo( PipelineMultisampleStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineMultisampleStateCreateInfo( VkPipelineMultisampleStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineMultisampleStateCreateInfo & operator=( VkPipelineMultisampleStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineMultisampleStateCreateInfo & operator=( PipelineMultisampleStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineMultisampleStateCreateInfo ) ); + return *this; + } + + PipelineMultisampleStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setRasterizationSamples( VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples_ ) VULKAN_HPP_NOEXCEPT + { + 
rasterizationSamples = rasterizationSamples_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setSampleShadingEnable( VULKAN_HPP_NAMESPACE::Bool32 sampleShadingEnable_ ) VULKAN_HPP_NOEXCEPT + { + sampleShadingEnable = sampleShadingEnable_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setMinSampleShading( float minSampleShading_ ) VULKAN_HPP_NOEXCEPT + { + minSampleShading = minSampleShading_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setPSampleMask( const VULKAN_HPP_NAMESPACE::SampleMask* pSampleMask_ ) VULKAN_HPP_NOEXCEPT + { + pSampleMask = pSampleMask_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setAlphaToCoverageEnable( VULKAN_HPP_NAMESPACE::Bool32 alphaToCoverageEnable_ ) VULKAN_HPP_NOEXCEPT + { + alphaToCoverageEnable = alphaToCoverageEnable_; + return *this; + } + + PipelineMultisampleStateCreateInfo & setAlphaToOneEnable( VULKAN_HPP_NAMESPACE::Bool32 alphaToOneEnable_ ) VULKAN_HPP_NOEXCEPT + { + alphaToOneEnable = alphaToOneEnable_; + return *this; + } + + + operator VkPipelineMultisampleStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineMultisampleStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineMultisampleStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineMultisampleStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( rasterizationSamples == rhs.rasterizationSamples ) + && ( sampleShadingEnable == rhs.sampleShadingEnable ) + && ( minSampleShading == rhs.minSampleShading ) + && ( pSampleMask == rhs.pSampleMask ) + && ( alphaToCoverageEnable == rhs.alphaToCoverageEnable ) + && ( alphaToOneEnable == rhs.alphaToOneEnable ); + } + + bool operator!=( PipelineMultisampleStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineMultisampleStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::Bool32 sampleShadingEnable = {}; + float minSampleShading = {}; + const VULKAN_HPP_NAMESPACE::SampleMask* pSampleMask = {}; + VULKAN_HPP_NAMESPACE::Bool32 alphaToCoverageEnable = {}; + VULKAN_HPP_NAMESPACE::Bool32 alphaToOneEnable = {}; + + }; + static_assert( sizeof( PipelineMultisampleStateCreateInfo ) == sizeof( VkPipelineMultisampleStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineMultisampleStateCreateInfo; + }; + + struct StencilOpState + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR StencilOpState(VULKAN_HPP_NAMESPACE::StencilOp failOp_ = VULKAN_HPP_NAMESPACE::StencilOp::eKeep, VULKAN_HPP_NAMESPACE::StencilOp passOp_ = VULKAN_HPP_NAMESPACE::StencilOp::eKeep, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp_ = VULKAN_HPP_NAMESPACE::StencilOp::eKeep, VULKAN_HPP_NAMESPACE::CompareOp compareOp_ = VULKAN_HPP_NAMESPACE::CompareOp::eNever, uint32_t compareMask_ = {}, uint32_t writeMask_ = {}, uint32_t reference_ = {}) VULKAN_HPP_NOEXCEPT + : failOp( failOp_ ), passOp( passOp_ ), depthFailOp( depthFailOp_ ), compareOp( compareOp_ ), compareMask( compareMask_ ), writeMask( writeMask_ ), reference( reference_ ) + {} + + VULKAN_HPP_CONSTEXPR StencilOpState( StencilOpState const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + StencilOpState( VkStencilOpState const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + StencilOpState & operator=( VkStencilOpState const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + StencilOpState & operator=( StencilOpState const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( StencilOpState ) ); + return *this; + } + + StencilOpState & setFailOp( VULKAN_HPP_NAMESPACE::StencilOp failOp_ ) VULKAN_HPP_NOEXCEPT + { + failOp = failOp_; + return *this; + } + + StencilOpState & setPassOp( VULKAN_HPP_NAMESPACE::StencilOp passOp_ ) VULKAN_HPP_NOEXCEPT + { + passOp = passOp_; + return *this; + } + + StencilOpState & setDepthFailOp( VULKAN_HPP_NAMESPACE::StencilOp depthFailOp_ ) VULKAN_HPP_NOEXCEPT + { + depthFailOp = depthFailOp_; + return *this; + } + + StencilOpState & setCompareOp( VULKAN_HPP_NAMESPACE::CompareOp compareOp_ ) VULKAN_HPP_NOEXCEPT + { + compareOp = compareOp_; + return *this; + } + + StencilOpState & setCompareMask( uint32_t compareMask_ ) VULKAN_HPP_NOEXCEPT + { + compareMask = compareMask_; + return *this; + } + + StencilOpState & setWriteMask( uint32_t writeMask_ ) VULKAN_HPP_NOEXCEPT + { + writeMask = writeMask_; + return *this; + } + + StencilOpState & setReference( uint32_t reference_ ) VULKAN_HPP_NOEXCEPT + { + reference = reference_; + return *this; + } + + + operator VkStencilOpState const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkStencilOpState &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( StencilOpState const& ) const = default; +#else + bool operator==( StencilOpState const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( failOp == rhs.failOp ) + && ( passOp == rhs.passOp ) + && ( depthFailOp == rhs.depthFailOp ) + && ( compareOp == rhs.compareOp ) + && ( compareMask == rhs.compareMask ) + && ( writeMask == rhs.writeMask ) + && ( reference == rhs.reference ); + } + + bool operator!=( StencilOpState const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::StencilOp failOp = VULKAN_HPP_NAMESPACE::StencilOp::eKeep; + VULKAN_HPP_NAMESPACE::StencilOp passOp = VULKAN_HPP_NAMESPACE::StencilOp::eKeep; + VULKAN_HPP_NAMESPACE::StencilOp depthFailOp = VULKAN_HPP_NAMESPACE::StencilOp::eKeep; + VULKAN_HPP_NAMESPACE::CompareOp compareOp = VULKAN_HPP_NAMESPACE::CompareOp::eNever; + uint32_t compareMask = {}; + uint32_t 
writeMask = {}; + uint32_t reference = {}; + + }; + static_assert( sizeof( StencilOpState ) == sizeof( VkStencilOpState ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct PipelineDepthStencilStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineDepthStencilStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineDepthStencilStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable_ = {}, VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp_ = VULKAN_HPP_NAMESPACE::CompareOp::eNever, VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable_ = {}, VULKAN_HPP_NAMESPACE::StencilOpState front_ = {}, VULKAN_HPP_NAMESPACE::StencilOpState back_ = {}, float minDepthBounds_ = {}, float maxDepthBounds_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), depthTestEnable( depthTestEnable_ ), depthWriteEnable( depthWriteEnable_ ), depthCompareOp( depthCompareOp_ ), depthBoundsTestEnable( depthBoundsTestEnable_ ), stencilTestEnable( stencilTestEnable_ ), front( front_ ), back( back_ ), minDepthBounds( minDepthBounds_ ), maxDepthBounds( maxDepthBounds_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineDepthStencilStateCreateInfo( PipelineDepthStencilStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineDepthStencilStateCreateInfo( VkPipelineDepthStencilStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineDepthStencilStateCreateInfo & operator=( VkPipelineDepthStencilStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineDepthStencilStateCreateInfo & operator=( PipelineDepthStencilStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineDepthStencilStateCreateInfo ) ); + return *this; + } + + PipelineDepthStencilStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setDepthTestEnable( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthTestEnable = depthTestEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setDepthWriteEnable( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthWriteEnable = depthWriteEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setDepthCompareOp( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp_ ) VULKAN_HPP_NOEXCEPT + { + depthCompareOp = depthCompareOp_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setDepthBoundsTestEnable( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthBoundsTestEnable = depthBoundsTestEnable_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setStencilTestEnable( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable_ ) VULKAN_HPP_NOEXCEPT + { + stencilTestEnable = stencilTestEnable_; + return *this; + } + + 
PipelineDepthStencilStateCreateInfo & setFront( VULKAN_HPP_NAMESPACE::StencilOpState const & front_ ) VULKAN_HPP_NOEXCEPT + { + front = front_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setBack( VULKAN_HPP_NAMESPACE::StencilOpState const & back_ ) VULKAN_HPP_NOEXCEPT + { + back = back_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setMinDepthBounds( float minDepthBounds_ ) VULKAN_HPP_NOEXCEPT + { + minDepthBounds = minDepthBounds_; + return *this; + } + + PipelineDepthStencilStateCreateInfo & setMaxDepthBounds( float maxDepthBounds_ ) VULKAN_HPP_NOEXCEPT + { + maxDepthBounds = maxDepthBounds_; + return *this; + } + + + operator VkPipelineDepthStencilStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineDepthStencilStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineDepthStencilStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineDepthStencilStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( depthTestEnable == rhs.depthTestEnable ) + && ( depthWriteEnable == rhs.depthWriteEnable ) + && ( depthCompareOp == rhs.depthCompareOp ) + && ( depthBoundsTestEnable == rhs.depthBoundsTestEnable ) + && ( stencilTestEnable == rhs.stencilTestEnable ) + && ( front == rhs.front ) + && ( back == rhs.back ) + && ( minDepthBounds == rhs.minDepthBounds ) + && ( maxDepthBounds == rhs.maxDepthBounds ); + } + + bool operator!=( PipelineDepthStencilStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineDepthStencilStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable = {}; + VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp = VULKAN_HPP_NAMESPACE::CompareOp::eNever; + VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable = {}; + VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable = {}; + VULKAN_HPP_NAMESPACE::StencilOpState front = {}; + VULKAN_HPP_NAMESPACE::StencilOpState back = {}; + float minDepthBounds = {}; + float maxDepthBounds = {}; + + }; + static_assert( sizeof( PipelineDepthStencilStateCreateInfo ) == sizeof( VkPipelineDepthStencilStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineDepthStencilStateCreateInfo; + }; + + struct PipelineColorBlendAttachmentState + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineColorBlendAttachmentState(VULKAN_HPP_NAMESPACE::Bool32 blendEnable_ = {}, VULKAN_HPP_NAMESPACE::BlendFactor srcColorBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendFactor dstColorBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendOp colorBlendOp_ = VULKAN_HPP_NAMESPACE::BlendOp::eAdd, VULKAN_HPP_NAMESPACE::BlendFactor srcAlphaBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendFactor dstAlphaBlendFactor_ = VULKAN_HPP_NAMESPACE::BlendFactor::eZero, VULKAN_HPP_NAMESPACE::BlendOp alphaBlendOp_ = VULKAN_HPP_NAMESPACE::BlendOp::eAdd, VULKAN_HPP_NAMESPACE::ColorComponentFlags colorWriteMask_ = {}) VULKAN_HPP_NOEXCEPT + : blendEnable( blendEnable_ ), srcColorBlendFactor( srcColorBlendFactor_ ), dstColorBlendFactor( dstColorBlendFactor_ ), colorBlendOp( colorBlendOp_ ), srcAlphaBlendFactor( srcAlphaBlendFactor_ ), dstAlphaBlendFactor( dstAlphaBlendFactor_ ), alphaBlendOp( alphaBlendOp_ ), colorWriteMask( colorWriteMask_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineColorBlendAttachmentState( PipelineColorBlendAttachmentState const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineColorBlendAttachmentState( VkPipelineColorBlendAttachmentState const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineColorBlendAttachmentState & operator=( VkPipelineColorBlendAttachmentState const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineColorBlendAttachmentState & operator=( PipelineColorBlendAttachmentState const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineColorBlendAttachmentState ) ); + return *this; + } + + PipelineColorBlendAttachmentState & setBlendEnable( VULKAN_HPP_NAMESPACE::Bool32 blendEnable_ ) VULKAN_HPP_NOEXCEPT + { + blendEnable = blendEnable_; + return *this; + } + + PipelineColorBlendAttachmentState & setSrcColorBlendFactor( VULKAN_HPP_NAMESPACE::BlendFactor srcColorBlendFactor_ ) VULKAN_HPP_NOEXCEPT + { + srcColorBlendFactor = srcColorBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState & setDstColorBlendFactor( VULKAN_HPP_NAMESPACE::BlendFactor dstColorBlendFactor_ ) VULKAN_HPP_NOEXCEPT + { + dstColorBlendFactor = dstColorBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState & setColorBlendOp( VULKAN_HPP_NAMESPACE::BlendOp colorBlendOp_ ) VULKAN_HPP_NOEXCEPT + { + colorBlendOp = colorBlendOp_; + return *this; + } + + PipelineColorBlendAttachmentState & setSrcAlphaBlendFactor( VULKAN_HPP_NAMESPACE::BlendFactor srcAlphaBlendFactor_ ) VULKAN_HPP_NOEXCEPT + { + srcAlphaBlendFactor = srcAlphaBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState & setDstAlphaBlendFactor( VULKAN_HPP_NAMESPACE::BlendFactor dstAlphaBlendFactor_ ) VULKAN_HPP_NOEXCEPT + { + dstAlphaBlendFactor = dstAlphaBlendFactor_; + return *this; + } + + PipelineColorBlendAttachmentState & setAlphaBlendOp( VULKAN_HPP_NAMESPACE::BlendOp alphaBlendOp_ ) VULKAN_HPP_NOEXCEPT + { + alphaBlendOp = alphaBlendOp_; + return *this; + } + + PipelineColorBlendAttachmentState & setColorWriteMask( VULKAN_HPP_NAMESPACE::ColorComponentFlags colorWriteMask_ ) VULKAN_HPP_NOEXCEPT + { + colorWriteMask = 
colorWriteMask_; + return *this; + } + + + operator VkPipelineColorBlendAttachmentState const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineColorBlendAttachmentState &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineColorBlendAttachmentState const& ) const = default; +#else + bool operator==( PipelineColorBlendAttachmentState const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( blendEnable == rhs.blendEnable ) + && ( srcColorBlendFactor == rhs.srcColorBlendFactor ) + && ( dstColorBlendFactor == rhs.dstColorBlendFactor ) + && ( colorBlendOp == rhs.colorBlendOp ) + && ( srcAlphaBlendFactor == rhs.srcAlphaBlendFactor ) + && ( dstAlphaBlendFactor == rhs.dstAlphaBlendFactor ) + && ( alphaBlendOp == rhs.alphaBlendOp ) + && ( colorWriteMask == rhs.colorWriteMask ); + } + + bool operator!=( PipelineColorBlendAttachmentState const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Bool32 blendEnable = {}; + VULKAN_HPP_NAMESPACE::BlendFactor srcColorBlendFactor = VULKAN_HPP_NAMESPACE::BlendFactor::eZero; + VULKAN_HPP_NAMESPACE::BlendFactor dstColorBlendFactor = VULKAN_HPP_NAMESPACE::BlendFactor::eZero; + VULKAN_HPP_NAMESPACE::BlendOp colorBlendOp = VULKAN_HPP_NAMESPACE::BlendOp::eAdd; + VULKAN_HPP_NAMESPACE::BlendFactor srcAlphaBlendFactor = VULKAN_HPP_NAMESPACE::BlendFactor::eZero; + VULKAN_HPP_NAMESPACE::BlendFactor dstAlphaBlendFactor = VULKAN_HPP_NAMESPACE::BlendFactor::eZero; + VULKAN_HPP_NAMESPACE::BlendOp alphaBlendOp = VULKAN_HPP_NAMESPACE::BlendOp::eAdd; + VULKAN_HPP_NAMESPACE::ColorComponentFlags colorWriteMask = {}; + + }; + static_assert( sizeof( PipelineColorBlendAttachmentState ) == sizeof( VkPipelineColorBlendAttachmentState ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct PipelineColorBlendStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineColorBlendStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineColorBlendStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 logicOpEnable_ = {}, VULKAN_HPP_NAMESPACE::LogicOp logicOp_ = VULKAN_HPP_NAMESPACE::LogicOp::eClear, uint32_t attachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineColorBlendAttachmentState* pAttachments_ = {}, std::array const& blendConstants_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), logicOpEnable( logicOpEnable_ ), logicOp( logicOp_ ), attachmentCount( attachmentCount_ ), pAttachments( pAttachments_ ), blendConstants( blendConstants_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineColorBlendStateCreateInfo( PipelineColorBlendStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineColorBlendStateCreateInfo( VkPipelineColorBlendStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineColorBlendStateCreateInfo( VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateFlags flags_, VULKAN_HPP_NAMESPACE::Bool32 logicOpEnable_, VULKAN_HPP_NAMESPACE::LogicOp logicOp_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_, std::array const& blendConstants_ = {} ) + : flags( flags_ ), logicOpEnable( logicOpEnable_ ), logicOp( logicOp_ ), attachmentCount( static_cast( attachments_.size() ) ), pAttachments( attachments_.data() ), blendConstants( blendConstants_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineColorBlendStateCreateInfo & operator=( VkPipelineColorBlendStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineColorBlendStateCreateInfo & operator=( PipelineColorBlendStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineColorBlendStateCreateInfo ) ); + return *this; + } + + PipelineColorBlendStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineColorBlendStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineColorBlendStateCreateInfo & setLogicOpEnable( VULKAN_HPP_NAMESPACE::Bool32 logicOpEnable_ ) VULKAN_HPP_NOEXCEPT + { + logicOpEnable = logicOpEnable_; + return *this; + } + + PipelineColorBlendStateCreateInfo & setLogicOp( VULKAN_HPP_NAMESPACE::LogicOp logicOp_ ) VULKAN_HPP_NOEXCEPT + { + logicOp = logicOp_; + return *this; + } + + PipelineColorBlendStateCreateInfo & setAttachmentCount( uint32_t attachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = attachmentCount_; + return *this; + } + + PipelineColorBlendStateCreateInfo & setPAttachments( const VULKAN_HPP_NAMESPACE::PipelineColorBlendAttachmentState* pAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pAttachments = pAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineColorBlendStateCreateInfo & setAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = static_cast( attachments_.size() ); + pAttachments = 
attachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PipelineColorBlendStateCreateInfo & setBlendConstants( std::array blendConstants_ ) VULKAN_HPP_NOEXCEPT + { + blendConstants = blendConstants_; + return *this; + } + + + operator VkPipelineColorBlendStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineColorBlendStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineColorBlendStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineColorBlendStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( logicOpEnable == rhs.logicOpEnable ) + && ( logicOp == rhs.logicOp ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( blendConstants == rhs.blendConstants ); + } + + bool operator!=( PipelineColorBlendStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineColorBlendStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::Bool32 logicOpEnable = {}; + VULKAN_HPP_NAMESPACE::LogicOp logicOp = VULKAN_HPP_NAMESPACE::LogicOp::eClear; + uint32_t attachmentCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineColorBlendAttachmentState* pAttachments = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D blendConstants = {}; + + }; + static_assert( sizeof( PipelineColorBlendStateCreateInfo ) == sizeof( VkPipelineColorBlendStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineColorBlendStateCreateInfo; + }; + + struct PipelineDynamicStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineDynamicStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineDynamicStateCreateInfo(VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateFlags flags_ = {}, uint32_t dynamicStateCount_ = {}, const VULKAN_HPP_NAMESPACE::DynamicState* pDynamicStates_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), dynamicStateCount( dynamicStateCount_ ), pDynamicStates( pDynamicStates_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineDynamicStateCreateInfo( PipelineDynamicStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineDynamicStateCreateInfo( VkPipelineDynamicStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineDynamicStateCreateInfo( VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dynamicStates_ ) + : flags( flags_ ), dynamicStateCount( static_cast( dynamicStates_.size() ) ), pDynamicStates( dynamicStates_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineDynamicStateCreateInfo & operator=( VkPipelineDynamicStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineDynamicStateCreateInfo & operator=( PipelineDynamicStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineDynamicStateCreateInfo ) ); + return *this; + } + + PipelineDynamicStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineDynamicStateCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineDynamicStateCreateInfo & setDynamicStateCount( uint32_t dynamicStateCount_ ) VULKAN_HPP_NOEXCEPT + { + dynamicStateCount = dynamicStateCount_; + return *this; + } + + PipelineDynamicStateCreateInfo & setPDynamicStates( const VULKAN_HPP_NAMESPACE::DynamicState* pDynamicStates_ ) VULKAN_HPP_NOEXCEPT + { + pDynamicStates = pDynamicStates_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineDynamicStateCreateInfo & setDynamicStates( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dynamicStates_ ) VULKAN_HPP_NOEXCEPT + { + dynamicStateCount = static_cast( dynamicStates_.size() ); + pDynamicStates = dynamicStates_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineDynamicStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineDynamicStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineDynamicStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineDynamicStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( dynamicStateCount == rhs.dynamicStateCount ) + && ( pDynamicStates == rhs.pDynamicStates ); + } + + bool operator!=( 
PipelineDynamicStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineDynamicStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateFlags flags = {}; + uint32_t dynamicStateCount = {}; + const VULKAN_HPP_NAMESPACE::DynamicState* pDynamicStates = {}; + + }; + static_assert( sizeof( PipelineDynamicStateCreateInfo ) == sizeof( VkPipelineDynamicStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineDynamicStateCreateInfo; + }; + + struct GraphicsPipelineCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGraphicsPipelineCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 GraphicsPipelineCreateInfo(VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ = {}, uint32_t stageCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ = {}, const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateInfo* pViewportState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateInfo* pRasterizationState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateInfo* pMultisampleState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateInfo* pDepthStencilState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateInfo* pColorBlendState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::RenderPass renderPass_ = {}, uint32_t subpass_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), stageCount( stageCount_ ), pStages( pStages_ ), pVertexInputState( pVertexInputState_ ), pInputAssemblyState( pInputAssemblyState_ ), pTessellationState( pTessellationState_ ), pViewportState( pViewportState_ ), pRasterizationState( pRasterizationState_ ), pMultisampleState( pMultisampleState_ ), pDepthStencilState( pDepthStencilState_ ), pColorBlendState( pColorBlendState_ ), pDynamicState( pDynamicState_ ), layout( layout_ ), renderPass( renderPass_ ), subpass( subpass_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 GraphicsPipelineCreateInfo( GraphicsPipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GraphicsPipelineCreateInfo( VkGraphicsPipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsPipelineCreateInfo( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_, const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState_ 
= {}, const VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateInfo* pViewportState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateInfo* pRasterizationState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateInfo* pMultisampleState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateInfo* pDepthStencilState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateInfo* pColorBlendState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::RenderPass renderPass_ = {}, uint32_t subpass_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {} ) + : flags( flags_ ), stageCount( static_cast( stages_.size() ) ), pStages( stages_.data() ), pVertexInputState( pVertexInputState_ ), pInputAssemblyState( pInputAssemblyState_ ), pTessellationState( pTessellationState_ ), pViewportState( pViewportState_ ), pRasterizationState( pRasterizationState_ ), pMultisampleState( pMultisampleState_ ), pDepthStencilState( pDepthStencilState_ ), pColorBlendState( pColorBlendState_ ), pDynamicState( pDynamicState_ ), layout( layout_ ), renderPass( renderPass_ ), subpass( subpass_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GraphicsPipelineCreateInfo & operator=( VkGraphicsPipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GraphicsPipelineCreateInfo & operator=( GraphicsPipelineCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GraphicsPipelineCreateInfo ) ); + return *this; + } + + GraphicsPipelineCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GraphicsPipelineCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + GraphicsPipelineCreateInfo & setStageCount( uint32_t stageCount_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = stageCount_; + return *this; + } + + GraphicsPipelineCreateInfo & setPStages( const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ ) VULKAN_HPP_NOEXCEPT + { + pStages = pStages_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsPipelineCreateInfo & setStages( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = static_cast( stages_.size() ); + pStages = stages_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + GraphicsPipelineCreateInfo & setPVertexInputState( const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState_ ) VULKAN_HPP_NOEXCEPT + { + pVertexInputState = pVertexInputState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPInputAssemblyState( const VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateInfo* pInputAssemblyState_ ) VULKAN_HPP_NOEXCEPT + { + pInputAssemblyState = pInputAssemblyState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPTessellationState( const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState_ ) VULKAN_HPP_NOEXCEPT + { + pTessellationState = pTessellationState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPViewportState( const 
VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateInfo* pViewportState_ ) VULKAN_HPP_NOEXCEPT + { + pViewportState = pViewportState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPRasterizationState( const VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateInfo* pRasterizationState_ ) VULKAN_HPP_NOEXCEPT + { + pRasterizationState = pRasterizationState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPMultisampleState( const VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateInfo* pMultisampleState_ ) VULKAN_HPP_NOEXCEPT + { + pMultisampleState = pMultisampleState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPDepthStencilState( const VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateInfo* pDepthStencilState_ ) VULKAN_HPP_NOEXCEPT + { + pDepthStencilState = pDepthStencilState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPColorBlendState( const VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateInfo* pColorBlendState_ ) VULKAN_HPP_NOEXCEPT + { + pColorBlendState = pColorBlendState_; + return *this; + } + + GraphicsPipelineCreateInfo & setPDynamicState( const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ ) VULKAN_HPP_NOEXCEPT + { + pDynamicState = pDynamicState_; + return *this; + } + + GraphicsPipelineCreateInfo & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + { + layout = layout_; + return *this; + } + + GraphicsPipelineCreateInfo & setRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass_ ) VULKAN_HPP_NOEXCEPT + { + renderPass = renderPass_; + return *this; + } + + GraphicsPipelineCreateInfo & setSubpass( uint32_t subpass_ ) VULKAN_HPP_NOEXCEPT + { + subpass = subpass_; + return *this; + } + + GraphicsPipelineCreateInfo & setBasePipelineHandle( VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + GraphicsPipelineCreateInfo & setBasePipelineIndex( int32_t basePipelineIndex_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + + operator VkGraphicsPipelineCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGraphicsPipelineCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GraphicsPipelineCreateInfo const& ) const = default; +#else + bool operator==( GraphicsPipelineCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stageCount == rhs.stageCount ) + && ( pStages == rhs.pStages ) + && ( pVertexInputState == rhs.pVertexInputState ) + && ( pInputAssemblyState == rhs.pInputAssemblyState ) + && ( pTessellationState == rhs.pTessellationState ) + && ( pViewportState == rhs.pViewportState ) + && ( pRasterizationState == rhs.pRasterizationState ) + && ( pMultisampleState == rhs.pMultisampleState ) + && ( pDepthStencilState == rhs.pDepthStencilState ) + && ( pColorBlendState == rhs.pColorBlendState ) + && ( pDynamicState == rhs.pDynamicState ) + && ( layout == rhs.layout ) + && ( renderPass == rhs.renderPass ) + && ( subpass == rhs.subpass ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( GraphicsPipelineCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const 
VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGraphicsPipelineCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags = {}; + uint32_t stageCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages = {}; + const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState = {}; + const VULKAN_HPP_NAMESPACE::PipelineInputAssemblyStateCreateInfo* pInputAssemblyState = {}; + const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState = {}; + const VULKAN_HPP_NAMESPACE::PipelineViewportStateCreateInfo* pViewportState = {}; + const VULKAN_HPP_NAMESPACE::PipelineRasterizationStateCreateInfo* pRasterizationState = {}; + const VULKAN_HPP_NAMESPACE::PipelineMultisampleStateCreateInfo* pMultisampleState = {}; + const VULKAN_HPP_NAMESPACE::PipelineDepthStencilStateCreateInfo* pDepthStencilState = {}; + const VULKAN_HPP_NAMESPACE::PipelineColorBlendStateCreateInfo* pColorBlendState = {}; + const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState = {}; + VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; + VULKAN_HPP_NAMESPACE::RenderPass renderPass = {}; + uint32_t subpass = {}; + VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle = {}; + int32_t basePipelineIndex = {}; + + }; + static_assert( sizeof( GraphicsPipelineCreateInfo ) == sizeof( VkGraphicsPipelineCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = GraphicsPipelineCreateInfo; + }; + + struct ImageCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageCreateInfo(VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ImageType imageType_ = VULKAN_HPP_NAMESPACE::ImageType::e1D, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::Extent3D extent_ = {}, uint32_t mipLevels_ = {}, uint32_t arrayLayers_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::ImageTiling tiling_ = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::SharingMode sharingMode_ = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = {}, const uint32_t* pQueueFamilyIndices_ = {}, VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), imageType( imageType_ ), format( format_ ), extent( extent_ ), mipLevels( mipLevels_ ), arrayLayers( arrayLayers_ ), samples( samples_ ), tiling( tiling_ ), usage( usage_ ), sharingMode( sharingMode_ ), queueFamilyIndexCount( queueFamilyIndexCount_ ), pQueueFamilyIndices( pQueueFamilyIndices_ ), initialLayout( initialLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageCreateInfo( ImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageCreateInfo( VkImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageCreateInfo( VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_, VULKAN_HPP_NAMESPACE::ImageType imageType_, VULKAN_HPP_NAMESPACE::Format format_, VULKAN_HPP_NAMESPACE::Extent3D extent_, uint32_t 
mipLevels_, uint32_t arrayLayers_, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_, VULKAN_HPP_NAMESPACE::ImageTiling tiling_, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_, VULKAN_HPP_NAMESPACE::SharingMode sharingMode_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_, VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined ) + : flags( flags_ ), imageType( imageType_ ), format( format_ ), extent( extent_ ), mipLevels( mipLevels_ ), arrayLayers( arrayLayers_ ), samples( samples_ ), tiling( tiling_ ), usage( usage_ ), sharingMode( sharingMode_ ), queueFamilyIndexCount( static_cast( queueFamilyIndices_.size() ) ), pQueueFamilyIndices( queueFamilyIndices_.data() ), initialLayout( initialLayout_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageCreateInfo & operator=( VkImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageCreateInfo & operator=( ImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageCreateInfo ) ); + return *this; + } + + ImageCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImageCreateInfo & setImageType( VULKAN_HPP_NAMESPACE::ImageType imageType_ ) VULKAN_HPP_NOEXCEPT + { + imageType = imageType_; + return *this; + } + + ImageCreateInfo & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + ImageCreateInfo & setExtent( VULKAN_HPP_NAMESPACE::Extent3D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + ImageCreateInfo & setMipLevels( uint32_t mipLevels_ ) VULKAN_HPP_NOEXCEPT + { + mipLevels = mipLevels_; + return *this; + } + + ImageCreateInfo & setArrayLayers( uint32_t arrayLayers_ ) VULKAN_HPP_NOEXCEPT + { + arrayLayers = arrayLayers_; + return *this; + } + + ImageCreateInfo & setSamples( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ ) VULKAN_HPP_NOEXCEPT + { + samples = samples_; + return *this; + } + + ImageCreateInfo & setTiling( VULKAN_HPP_NAMESPACE::ImageTiling tiling_ ) VULKAN_HPP_NOEXCEPT + { + tiling = tiling_; + return *this; + } + + ImageCreateInfo & setUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + ImageCreateInfo & setSharingMode( VULKAN_HPP_NAMESPACE::SharingMode sharingMode_ ) VULKAN_HPP_NOEXCEPT + { + sharingMode = sharingMode_; + return *this; + } + + ImageCreateInfo & setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + ImageCreateInfo & setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageCreateInfo & setQueueFamilyIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = static_cast( queueFamilyIndices_.size() ); + pQueueFamilyIndices = queueFamilyIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + ImageCreateInfo & setInitialLayout( 
VULKAN_HPP_NAMESPACE::ImageLayout initialLayout_ ) VULKAN_HPP_NOEXCEPT + { + initialLayout = initialLayout_; + return *this; + } + + + operator VkImageCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkImageCreateInfo*>( this ); + } + + operator VkImageCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkImageCreateInfo*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageCreateInfo const& ) const = default; +#else + bool operator==( ImageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( imageType == rhs.imageType ) + && ( format == rhs.format ) + && ( extent == rhs.extent ) + && ( mipLevels == rhs.mipLevels ) + && ( arrayLayers == rhs.arrayLayers ) + && ( samples == rhs.samples ) + && ( tiling == rhs.tiling ) + && ( usage == rhs.usage ) + && ( sharingMode == rhs.sharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices ) + && ( initialLayout == rhs.initialLayout ); + } + + bool operator!=( ImageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::ImageType imageType = VULKAN_HPP_NAMESPACE::ImageType::e1D; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::Extent3D extent = {}; + uint32_t mipLevels = {}; + uint32_t arrayLayers = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::ImageTiling tiling = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal; + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage = {}; + VULKAN_HPP_NAMESPACE::SharingMode sharingMode = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive; + uint32_t queueFamilyIndexCount = {}; + const uint32_t* pQueueFamilyIndices = {}; + VULKAN_HPP_NAMESPACE::ImageLayout initialLayout = VULKAN_HPP_NAMESPACE::ImageLayout::eUndefined; + + }; + static_assert( sizeof( ImageCreateInfo ) == sizeof( VkImageCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<ImageCreateInfo>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageCreateInfo; + }; + + struct ImageViewCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageViewCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageViewCreateInfo(VULKAN_HPP_NAMESPACE::ImageViewCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::ImageViewType viewType_ = VULKAN_HPP_NAMESPACE::ImageViewType::e1D, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::ComponentMapping components_ = {}, VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), image( image_ ), viewType( viewType_ ), format( format_ ), components( components_ ), subresourceRange( subresourceRange_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageViewCreateInfo( ImageViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageViewCreateInfo( VkImageViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageViewCreateInfo & operator=( VkImageViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageViewCreateInfo & operator=( ImageViewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageViewCreateInfo ) ); + return *this; + } + + ImageViewCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageViewCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::ImageViewCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImageViewCreateInfo & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + ImageViewCreateInfo & setViewType( VULKAN_HPP_NAMESPACE::ImageViewType viewType_ ) VULKAN_HPP_NOEXCEPT + { + viewType = viewType_; + return *this; + } + + ImageViewCreateInfo & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + ImageViewCreateInfo & setComponents( VULKAN_HPP_NAMESPACE::ComponentMapping const & components_ ) VULKAN_HPP_NOEXCEPT + { + components = components_; + return *this; + } + + ImageViewCreateInfo & setSubresourceRange( VULKAN_HPP_NAMESPACE::ImageSubresourceRange const & subresourceRange_ ) VULKAN_HPP_NOEXCEPT + { + subresourceRange = subresourceRange_; + return *this; + } + + + operator VkImageViewCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageViewCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageViewCreateInfo const& ) const = default; +#else + bool operator==( ImageViewCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( image == rhs.image ) + && ( viewType == rhs.viewType ) + && ( format == rhs.format ) + && ( components == rhs.components ) + && ( subresourceRange == rhs.subresourceRange ); + } + + bool operator!=( ImageViewCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageViewCreateInfo; + const void* 
pNext = {}; + VULKAN_HPP_NAMESPACE::ImageViewCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + VULKAN_HPP_NAMESPACE::ImageViewType viewType = VULKAN_HPP_NAMESPACE::ImageViewType::e1D; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::ComponentMapping components = {}; + VULKAN_HPP_NAMESPACE::ImageSubresourceRange subresourceRange = {}; + + }; + static_assert( sizeof( ImageViewCreateInfo ) == sizeof( VkImageViewCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImageViewCreateInfo; + }; + + struct IndirectCommandsLayoutTokenNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eIndirectCommandsLayoutTokenNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutTokenNV(VULKAN_HPP_NAMESPACE::IndirectCommandsTokenTypeNV tokenType_ = VULKAN_HPP_NAMESPACE::IndirectCommandsTokenTypeNV::eShaderGroup, uint32_t stream_ = {}, uint32_t offset_ = {}, uint32_t vertexBindingUnit_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vertexDynamicStride_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout pushconstantPipelineLayout_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags pushconstantShaderStageFlags_ = {}, uint32_t pushconstantOffset_ = {}, uint32_t pushconstantSize_ = {}, VULKAN_HPP_NAMESPACE::IndirectStateFlagsNV indirectStateFlags_ = {}, uint32_t indexTypeCount_ = {}, const VULKAN_HPP_NAMESPACE::IndexType* pIndexTypes_ = {}, const uint32_t* pIndexTypeValues_ = {}) VULKAN_HPP_NOEXCEPT + : tokenType( tokenType_ ), stream( stream_ ), offset( offset_ ), vertexBindingUnit( vertexBindingUnit_ ), vertexDynamicStride( vertexDynamicStride_ ), pushconstantPipelineLayout( pushconstantPipelineLayout_ ), pushconstantShaderStageFlags( pushconstantShaderStageFlags_ ), pushconstantOffset( pushconstantOffset_ ), pushconstantSize( pushconstantSize_ ), indirectStateFlags( indirectStateFlags_ ), indexTypeCount( indexTypeCount_ ), pIndexTypes( pIndexTypes_ ), pIndexTypeValues( pIndexTypeValues_ ) + {} + + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutTokenNV( IndirectCommandsLayoutTokenNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + IndirectCommandsLayoutTokenNV( VkIndirectCommandsLayoutTokenNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + IndirectCommandsLayoutTokenNV( VULKAN_HPP_NAMESPACE::IndirectCommandsTokenTypeNV tokenType_, uint32_t stream_, uint32_t offset_, uint32_t vertexBindingUnit_, VULKAN_HPP_NAMESPACE::Bool32 vertexDynamicStride_, VULKAN_HPP_NAMESPACE::PipelineLayout pushconstantPipelineLayout_, VULKAN_HPP_NAMESPACE::ShaderStageFlags pushconstantShaderStageFlags_, uint32_t pushconstantOffset_, uint32_t pushconstantSize_, VULKAN_HPP_NAMESPACE::IndirectStateFlagsNV indirectStateFlags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & indexTypes_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & indexTypeValues_ = {} ) + : tokenType( tokenType_ ), stream( stream_ ), offset( offset_ ), vertexBindingUnit( vertexBindingUnit_ ), vertexDynamicStride( vertexDynamicStride_ ), pushconstantPipelineLayout( pushconstantPipelineLayout_ ), pushconstantShaderStageFlags( pushconstantShaderStageFlags_ ), pushconstantOffset( pushconstantOffset_ ), pushconstantSize( pushconstantSize_ ), indirectStateFlags( indirectStateFlags_ ), 
indexTypeCount( static_cast( indexTypes_.size() ) ), pIndexTypes( indexTypes_.data() ), pIndexTypeValues( indexTypeValues_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( indexTypes_.size() == indexTypeValues_.size() ); +#else + if ( indexTypes_.size() != indexTypeValues_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::IndirectCommandsLayoutTokenNV::IndirectCommandsLayoutTokenNV: indexTypes_.size() != indexTypeValues_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + IndirectCommandsLayoutTokenNV & operator=( VkIndirectCommandsLayoutTokenNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + IndirectCommandsLayoutTokenNV & operator=( IndirectCommandsLayoutTokenNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( IndirectCommandsLayoutTokenNV ) ); + return *this; + } + + IndirectCommandsLayoutTokenNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setTokenType( VULKAN_HPP_NAMESPACE::IndirectCommandsTokenTypeNV tokenType_ ) VULKAN_HPP_NOEXCEPT + { + tokenType = tokenType_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setStream( uint32_t stream_ ) VULKAN_HPP_NOEXCEPT + { + stream = stream_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setOffset( uint32_t offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setVertexBindingUnit( uint32_t vertexBindingUnit_ ) VULKAN_HPP_NOEXCEPT + { + vertexBindingUnit = vertexBindingUnit_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setVertexDynamicStride( VULKAN_HPP_NAMESPACE::Bool32 vertexDynamicStride_ ) VULKAN_HPP_NOEXCEPT + { + vertexDynamicStride = vertexDynamicStride_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setPushconstantPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pushconstantPipelineLayout_ ) VULKAN_HPP_NOEXCEPT + { + pushconstantPipelineLayout = pushconstantPipelineLayout_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setPushconstantShaderStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags pushconstantShaderStageFlags_ ) VULKAN_HPP_NOEXCEPT + { + pushconstantShaderStageFlags = pushconstantShaderStageFlags_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setPushconstantOffset( uint32_t pushconstantOffset_ ) VULKAN_HPP_NOEXCEPT + { + pushconstantOffset = pushconstantOffset_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setPushconstantSize( uint32_t pushconstantSize_ ) VULKAN_HPP_NOEXCEPT + { + pushconstantSize = pushconstantSize_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setIndirectStateFlags( VULKAN_HPP_NAMESPACE::IndirectStateFlagsNV indirectStateFlags_ ) VULKAN_HPP_NOEXCEPT + { + indirectStateFlags = indirectStateFlags_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setIndexTypeCount( uint32_t indexTypeCount_ ) VULKAN_HPP_NOEXCEPT + { + indexTypeCount = indexTypeCount_; + return *this; + } + + IndirectCommandsLayoutTokenNV & setPIndexTypes( const VULKAN_HPP_NAMESPACE::IndexType* pIndexTypes_ ) VULKAN_HPP_NOEXCEPT + { + pIndexTypes = pIndexTypes_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + IndirectCommandsLayoutTokenNV & setIndexTypes( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & indexTypes_ ) VULKAN_HPP_NOEXCEPT + { + 
indexTypeCount = static_cast( indexTypes_.size() ); + pIndexTypes = indexTypes_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + IndirectCommandsLayoutTokenNV & setPIndexTypeValues( const uint32_t* pIndexTypeValues_ ) VULKAN_HPP_NOEXCEPT + { + pIndexTypeValues = pIndexTypeValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + IndirectCommandsLayoutTokenNV & setIndexTypeValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & indexTypeValues_ ) VULKAN_HPP_NOEXCEPT + { + indexTypeCount = static_cast( indexTypeValues_.size() ); + pIndexTypeValues = indexTypeValues_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkIndirectCommandsLayoutTokenNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkIndirectCommandsLayoutTokenNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( IndirectCommandsLayoutTokenNV const& ) const = default; +#else + bool operator==( IndirectCommandsLayoutTokenNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( tokenType == rhs.tokenType ) + && ( stream == rhs.stream ) + && ( offset == rhs.offset ) + && ( vertexBindingUnit == rhs.vertexBindingUnit ) + && ( vertexDynamicStride == rhs.vertexDynamicStride ) + && ( pushconstantPipelineLayout == rhs.pushconstantPipelineLayout ) + && ( pushconstantShaderStageFlags == rhs.pushconstantShaderStageFlags ) + && ( pushconstantOffset == rhs.pushconstantOffset ) + && ( pushconstantSize == rhs.pushconstantSize ) + && ( indirectStateFlags == rhs.indirectStateFlags ) + && ( indexTypeCount == rhs.indexTypeCount ) + && ( pIndexTypes == rhs.pIndexTypes ) + && ( pIndexTypeValues == rhs.pIndexTypeValues ); + } + + bool operator!=( IndirectCommandsLayoutTokenNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eIndirectCommandsLayoutTokenNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::IndirectCommandsTokenTypeNV tokenType = VULKAN_HPP_NAMESPACE::IndirectCommandsTokenTypeNV::eShaderGroup; + uint32_t stream = {}; + uint32_t offset = {}; + uint32_t vertexBindingUnit = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexDynamicStride = {}; + VULKAN_HPP_NAMESPACE::PipelineLayout pushconstantPipelineLayout = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlags pushconstantShaderStageFlags = {}; + uint32_t pushconstantOffset = {}; + uint32_t pushconstantSize = {}; + VULKAN_HPP_NAMESPACE::IndirectStateFlagsNV indirectStateFlags = {}; + uint32_t indexTypeCount = {}; + const VULKAN_HPP_NAMESPACE::IndexType* pIndexTypes = {}; + const uint32_t* pIndexTypeValues = {}; + + }; + static_assert( sizeof( IndirectCommandsLayoutTokenNV ) == sizeof( VkIndirectCommandsLayoutTokenNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = IndirectCommandsLayoutTokenNV; + }; + + struct IndirectCommandsLayoutCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eIndirectCommandsLayoutCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutCreateInfoNV(VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutUsageFlagsNV flags_ = {}, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, uint32_t tokenCount_ = {}, const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutTokenNV* pTokens_ = {}, uint32_t streamCount_ = {}, const uint32_t* pStreamStrides_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pipelineBindPoint( pipelineBindPoint_ ), tokenCount( tokenCount_ ), pTokens( pTokens_ ), streamCount( streamCount_ ), pStreamStrides( pStreamStrides_ ) + {} + + VULKAN_HPP_CONSTEXPR IndirectCommandsLayoutCreateInfoNV( IndirectCommandsLayoutCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + IndirectCommandsLayoutCreateInfoNV( VkIndirectCommandsLayoutCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + IndirectCommandsLayoutCreateInfoNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutUsageFlagsNV flags_, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & tokens_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & streamStrides_ = {} ) + : flags( flags_ ), pipelineBindPoint( pipelineBindPoint_ ), tokenCount( static_cast( tokens_.size() ) ), pTokens( tokens_.data() ), streamCount( static_cast( streamStrides_.size() ) ), pStreamStrides( streamStrides_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + IndirectCommandsLayoutCreateInfoNV & operator=( VkIndirectCommandsLayoutCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & operator=( IndirectCommandsLayoutCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( IndirectCommandsLayoutCreateInfoNV ) ); + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutUsageFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & setPipelineBindPoint( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & setTokenCount( uint32_t tokenCount_ ) VULKAN_HPP_NOEXCEPT + { + tokenCount = tokenCount_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & setPTokens( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutTokenNV* pTokens_ ) VULKAN_HPP_NOEXCEPT + { + pTokens = pTokens_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + IndirectCommandsLayoutCreateInfoNV & setTokens( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & tokens_ ) VULKAN_HPP_NOEXCEPT + { + tokenCount = static_cast( tokens_.size() ); + pTokens = tokens_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + 
IndirectCommandsLayoutCreateInfoNV & setStreamCount( uint32_t streamCount_ ) VULKAN_HPP_NOEXCEPT + { + streamCount = streamCount_; + return *this; + } + + IndirectCommandsLayoutCreateInfoNV & setPStreamStrides( const uint32_t* pStreamStrides_ ) VULKAN_HPP_NOEXCEPT + { + pStreamStrides = pStreamStrides_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + IndirectCommandsLayoutCreateInfoNV & setStreamStrides( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & streamStrides_ ) VULKAN_HPP_NOEXCEPT + { + streamCount = static_cast( streamStrides_.size() ); + pStreamStrides = streamStrides_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkIndirectCommandsLayoutCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkIndirectCommandsLayoutCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( IndirectCommandsLayoutCreateInfoNV const& ) const = default; +#else + bool operator==( IndirectCommandsLayoutCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( tokenCount == rhs.tokenCount ) + && ( pTokens == rhs.pTokens ) + && ( streamCount == rhs.streamCount ) + && ( pStreamStrides == rhs.pStreamStrides ); + } + + bool operator!=( IndirectCommandsLayoutCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eIndirectCommandsLayoutCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutUsageFlagsNV flags = {}; + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics; + uint32_t tokenCount = {}; + const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutTokenNV* pTokens = {}; + uint32_t streamCount = {}; + const uint32_t* pStreamStrides = {}; + + }; + static_assert( sizeof( IndirectCommandsLayoutCreateInfoNV ) == sizeof( VkIndirectCommandsLayoutCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = IndirectCommandsLayoutCreateInfoNV; + }; + + struct PipelineCacheCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCacheCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineCacheCreateInfo(VULKAN_HPP_NAMESPACE::PipelineCacheCreateFlags flags_ = {}, size_t initialDataSize_ = {}, const void* pInitialData_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), initialDataSize( initialDataSize_ ), pInitialData( pInitialData_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineCacheCreateInfo( PipelineCacheCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineCacheCreateInfo( VkPipelineCacheCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + PipelineCacheCreateInfo( VULKAN_HPP_NAMESPACE::PipelineCacheCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & initialData_ ) + : flags( flags_ ), initialDataSize( initialData_.size() * sizeof(T) ), pInitialData( initialData_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineCacheCreateInfo & operator=( VkPipelineCacheCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineCacheCreateInfo & operator=( PipelineCacheCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineCacheCreateInfo ) ); + return *this; + } + + PipelineCacheCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineCacheCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineCacheCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineCacheCreateInfo & setInitialDataSize( size_t initialDataSize_ ) VULKAN_HPP_NOEXCEPT + { + initialDataSize = initialDataSize_; + return *this; + } + + PipelineCacheCreateInfo & setPInitialData( const void* pInitialData_ ) VULKAN_HPP_NOEXCEPT + { + pInitialData = pInitialData_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + PipelineCacheCreateInfo & setInitialData( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & initialData_ ) VULKAN_HPP_NOEXCEPT + { + initialDataSize = initialData_.size() * sizeof(T); + pInitialData = initialData_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineCacheCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineCacheCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineCacheCreateInfo const& ) const = default; +#else + bool operator==( PipelineCacheCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( initialDataSize == rhs.initialDataSize ) + && ( pInitialData == rhs.pInitialData ); + } + + bool operator!=( PipelineCacheCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCacheCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCacheCreateFlags flags = {}; 
+ size_t initialDataSize = {}; + const void* pInitialData = {}; + + }; + static_assert( sizeof( PipelineCacheCreateInfo ) == sizeof( VkPipelineCacheCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PipelineCacheCreateInfo>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePipelineCacheCreateInfo> + { + using Type = PipelineCacheCreateInfo; + }; + + struct PushConstantRange + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PushConstantRange(VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ = {}, uint32_t offset_ = {}, uint32_t size_ = {}) VULKAN_HPP_NOEXCEPT + : stageFlags( stageFlags_ ), offset( offset_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR PushConstantRange( PushConstantRange const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PushConstantRange( VkPushConstantRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PushConstantRange & operator=( VkPushConstantRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PushConstantRange const *>( &rhs ); + return *this; + } + + PushConstantRange & operator=( PushConstantRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void*>( this ), &rhs, sizeof( PushConstantRange ) ); + return *this; + } + + PushConstantRange & setStageFlags( VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags_ ) VULKAN_HPP_NOEXCEPT + { + stageFlags = stageFlags_; + return *this; + } + + PushConstantRange & setOffset( uint32_t offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + PushConstantRange & setSize( uint32_t size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator VkPushConstantRange const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPushConstantRange*>( this ); + } + + operator VkPushConstantRange &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPushConstantRange*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PushConstantRange const& ) const = default; +#else + bool operator==( PushConstantRange const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( stageFlags == rhs.stageFlags ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( PushConstantRange const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags = {}; + uint32_t offset = {}; + uint32_t size = {}; + + }; + static_assert( sizeof( PushConstantRange ) == sizeof( VkPushConstantRange ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PushConstantRange>::value, "struct wrapper is not a standard layout!" 
); + + struct PipelineLayoutCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineLayoutCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineLayoutCreateInfo(VULKAN_HPP_NAMESPACE::PipelineLayoutCreateFlags flags_ = {}, uint32_t setLayoutCount_ = {}, const VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayouts_ = {}, uint32_t pushConstantRangeCount_ = {}, const VULKAN_HPP_NAMESPACE::PushConstantRange* pPushConstantRanges_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), setLayoutCount( setLayoutCount_ ), pSetLayouts( pSetLayouts_ ), pushConstantRangeCount( pushConstantRangeCount_ ), pPushConstantRanges( pPushConstantRanges_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineLayoutCreateInfo( PipelineLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineLayoutCreateInfo( VkPipelineLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineLayoutCreateInfo( VULKAN_HPP_NAMESPACE::PipelineLayoutCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & setLayouts_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pushConstantRanges_ = {} ) + : flags( flags_ ), setLayoutCount( static_cast( setLayouts_.size() ) ), pSetLayouts( setLayouts_.data() ), pushConstantRangeCount( static_cast( pushConstantRanges_.size() ) ), pPushConstantRanges( pushConstantRanges_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineLayoutCreateInfo & operator=( VkPipelineLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineLayoutCreateInfo & operator=( PipelineLayoutCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineLayoutCreateInfo ) ); + return *this; + } + + PipelineLayoutCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineLayoutCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::PipelineLayoutCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineLayoutCreateInfo & setSetLayoutCount( uint32_t setLayoutCount_ ) VULKAN_HPP_NOEXCEPT + { + setLayoutCount = setLayoutCount_; + return *this; + } + + PipelineLayoutCreateInfo & setPSetLayouts( const VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayouts_ ) VULKAN_HPP_NOEXCEPT + { + pSetLayouts = pSetLayouts_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineLayoutCreateInfo & setSetLayouts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & setLayouts_ ) VULKAN_HPP_NOEXCEPT + { + setLayoutCount = static_cast( setLayouts_.size() ); + pSetLayouts = setLayouts_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PipelineLayoutCreateInfo & setPushConstantRangeCount( uint32_t pushConstantRangeCount_ ) VULKAN_HPP_NOEXCEPT + { + pushConstantRangeCount = pushConstantRangeCount_; + return *this; + } + + PipelineLayoutCreateInfo & setPPushConstantRanges( const VULKAN_HPP_NAMESPACE::PushConstantRange* pPushConstantRanges_ ) VULKAN_HPP_NOEXCEPT + { + pPushConstantRanges = pPushConstantRanges_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineLayoutCreateInfo & setPushConstantRanges( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & 
pushConstantRanges_ ) VULKAN_HPP_NOEXCEPT + { + pushConstantRangeCount = static_cast( pushConstantRanges_.size() ); + pPushConstantRanges = pushConstantRanges_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineLayoutCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineLayoutCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineLayoutCreateInfo const& ) const = default; +#else + bool operator==( PipelineLayoutCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( setLayoutCount == rhs.setLayoutCount ) + && ( pSetLayouts == rhs.pSetLayouts ) + && ( pushConstantRangeCount == rhs.pushConstantRangeCount ) + && ( pPushConstantRanges == rhs.pPushConstantRanges ); + } + + bool operator!=( PipelineLayoutCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineLayoutCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineLayoutCreateFlags flags = {}; + uint32_t setLayoutCount = {}; + const VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayouts = {}; + uint32_t pushConstantRangeCount = {}; + const VULKAN_HPP_NAMESPACE::PushConstantRange* pPushConstantRanges = {}; + + }; + static_assert( sizeof( PipelineLayoutCreateInfo ) == sizeof( VkPipelineLayoutCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineLayoutCreateInfo; + }; + + struct PrivateDataSlotCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePrivateDataSlotCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PrivateDataSlotCreateInfoEXT(VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlagsEXT flags_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR PrivateDataSlotCreateInfoEXT( PrivateDataSlotCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PrivateDataSlotCreateInfoEXT( VkPrivateDataSlotCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PrivateDataSlotCreateInfoEXT & operator=( VkPrivateDataSlotCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PrivateDataSlotCreateInfoEXT & operator=( PrivateDataSlotCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PrivateDataSlotCreateInfoEXT ) ); + return *this; + } + + PrivateDataSlotCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PrivateDataSlotCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkPrivateDataSlotCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPrivateDataSlotCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto 
operator<=>( PrivateDataSlotCreateInfoEXT const& ) const = default; +#else + bool operator==( PrivateDataSlotCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( PrivateDataSlotCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePrivateDataSlotCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateFlagsEXT flags = {}; + + }; + static_assert( sizeof( PrivateDataSlotCreateInfoEXT ) == sizeof( VkPrivateDataSlotCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PrivateDataSlotCreateInfoEXT; + }; + + class PrivateDataSlotEXT + { + public: + using CType = VkPrivateDataSlotEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePrivateDataSlotEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + VULKAN_HPP_CONSTEXPR PrivateDataSlotEXT() VULKAN_HPP_NOEXCEPT + : m_privateDataSlotEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PrivateDataSlotEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_privateDataSlotEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PrivateDataSlotEXT( VkPrivateDataSlotEXT privateDataSlotEXT ) VULKAN_HPP_NOEXCEPT + : m_privateDataSlotEXT( privateDataSlotEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PrivateDataSlotEXT & operator=(VkPrivateDataSlotEXT privateDataSlotEXT) VULKAN_HPP_NOEXCEPT + { + m_privateDataSlotEXT = privateDataSlotEXT; + return *this; + } +#endif + + PrivateDataSlotEXT & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_privateDataSlotEXT = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PrivateDataSlotEXT const& ) const = default; +#else + bool operator==( PrivateDataSlotEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT == rhs.m_privateDataSlotEXT; + } + + bool operator!=(PrivateDataSlotEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT != rhs.m_privateDataSlotEXT; + } + + bool operator<(PrivateDataSlotEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT < rhs.m_privateDataSlotEXT; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPrivateDataSlotEXT() const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_privateDataSlotEXT == VK_NULL_HANDLE; + } + + private: + VkPrivateDataSlotEXT m_privateDataSlotEXT; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT ) == sizeof( VkPrivateDataSlotEXT ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT; + }; + + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct QueryPoolCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueryPoolCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR QueryPoolCreateInfo(VULKAN_HPP_NAMESPACE::QueryPoolCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::QueryType queryType_ = VULKAN_HPP_NAMESPACE::QueryType::eOcclusion, uint32_t queryCount_ = {}, VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), queryType( queryType_ ), queryCount( queryCount_ ), pipelineStatistics( pipelineStatistics_ ) + {} + + VULKAN_HPP_CONSTEXPR QueryPoolCreateInfo( QueryPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + QueryPoolCreateInfo( VkQueryPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + QueryPoolCreateInfo & operator=( VkQueryPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + QueryPoolCreateInfo & operator=( QueryPoolCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( QueryPoolCreateInfo ) ); + return *this; + } + + QueryPoolCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + QueryPoolCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::QueryPoolCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + QueryPoolCreateInfo & setQueryType( VULKAN_HPP_NAMESPACE::QueryType queryType_ ) VULKAN_HPP_NOEXCEPT + { + queryType = queryType_; + return *this; + } + + QueryPoolCreateInfo & setQueryCount( uint32_t queryCount_ ) VULKAN_HPP_NOEXCEPT + { + queryCount = queryCount_; + return *this; + } + + QueryPoolCreateInfo & setPipelineStatistics( VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics_ ) VULKAN_HPP_NOEXCEPT + { + pipelineStatistics = pipelineStatistics_; + return *this; + } + + + operator VkQueryPoolCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkQueryPoolCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( QueryPoolCreateInfo const& ) const = default; +#else + bool operator==( QueryPoolCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( queryType == rhs.queryType ) + && ( queryCount == rhs.queryCount ) + && ( pipelineStatistics == rhs.pipelineStatistics ); + } + + bool operator!=( QueryPoolCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueryPoolCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::QueryPoolCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::QueryType queryType = VULKAN_HPP_NAMESPACE::QueryType::eOcclusion; + uint32_t queryCount = {}; + VULKAN_HPP_NAMESPACE::QueryPipelineStatisticFlags pipelineStatistics = {}; + + }; + static_assert( sizeof( 
QueryPoolCreateInfo ) == sizeof( VkQueryPoolCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = QueryPoolCreateInfo; + }; + + struct RayTracingShaderGroupCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingShaderGroupCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RayTracingShaderGroupCreateInfoKHR(VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR type_ = VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR::eGeneral, uint32_t generalShader_ = {}, uint32_t closestHitShader_ = {}, uint32_t anyHitShader_ = {}, uint32_t intersectionShader_ = {}, const void* pShaderGroupCaptureReplayHandle_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), generalShader( generalShader_ ), closestHitShader( closestHitShader_ ), anyHitShader( anyHitShader_ ), intersectionShader( intersectionShader_ ), pShaderGroupCaptureReplayHandle( pShaderGroupCaptureReplayHandle_ ) + {} + + VULKAN_HPP_CONSTEXPR RayTracingShaderGroupCreateInfoKHR( RayTracingShaderGroupCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RayTracingShaderGroupCreateInfoKHR( VkRayTracingShaderGroupCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RayTracingShaderGroupCreateInfoKHR & operator=( VkRayTracingShaderGroupCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & operator=( RayTracingShaderGroupCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RayTracingShaderGroupCreateInfoKHR ) ); + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setType( VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setGeneralShader( uint32_t generalShader_ ) VULKAN_HPP_NOEXCEPT + { + generalShader = generalShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setClosestHitShader( uint32_t closestHitShader_ ) VULKAN_HPP_NOEXCEPT + { + closestHitShader = closestHitShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setAnyHitShader( uint32_t anyHitShader_ ) VULKAN_HPP_NOEXCEPT + { + anyHitShader = anyHitShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setIntersectionShader( uint32_t intersectionShader_ ) VULKAN_HPP_NOEXCEPT + { + intersectionShader = intersectionShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoKHR & setPShaderGroupCaptureReplayHandle( const void* pShaderGroupCaptureReplayHandle_ ) VULKAN_HPP_NOEXCEPT + { + pShaderGroupCaptureReplayHandle = pShaderGroupCaptureReplayHandle_; + return *this; + } + + + operator VkRayTracingShaderGroupCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRayTracingShaderGroupCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RayTracingShaderGroupCreateInfoKHR const& ) const = default; +#else + bool operator==( 
RayTracingShaderGroupCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( type == rhs.type ) + && ( generalShader == rhs.generalShader ) + && ( closestHitShader == rhs.closestHitShader ) + && ( anyHitShader == rhs.anyHitShader ) + && ( intersectionShader == rhs.intersectionShader ) + && ( pShaderGroupCaptureReplayHandle == rhs.pShaderGroupCaptureReplayHandle ); + } + + bool operator!=( RayTracingShaderGroupCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRayTracingShaderGroupCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR type = VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR::eGeneral; + uint32_t generalShader = {}; + uint32_t closestHitShader = {}; + uint32_t anyHitShader = {}; + uint32_t intersectionShader = {}; + const void* pShaderGroupCaptureReplayHandle = {}; + + }; + static_assert( sizeof( RayTracingShaderGroupCreateInfoKHR ) == sizeof( VkRayTracingShaderGroupCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RayTracingShaderGroupCreateInfoKHR; + }; + + struct PipelineLibraryCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineLibraryCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineLibraryCreateInfoKHR(uint32_t libraryCount_ = {}, const VULKAN_HPP_NAMESPACE::Pipeline* pLibraries_ = {}) VULKAN_HPP_NOEXCEPT + : libraryCount( libraryCount_ ), pLibraries( pLibraries_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineLibraryCreateInfoKHR( PipelineLibraryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineLibraryCreateInfoKHR( VkPipelineLibraryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineLibraryCreateInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & libraries_ ) + : libraryCount( static_cast( libraries_.size() ) ), pLibraries( libraries_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineLibraryCreateInfoKHR & operator=( VkPipelineLibraryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineLibraryCreateInfoKHR & operator=( PipelineLibraryCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineLibraryCreateInfoKHR ) ); + return *this; + } + + PipelineLibraryCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineLibraryCreateInfoKHR & setLibraryCount( uint32_t libraryCount_ ) VULKAN_HPP_NOEXCEPT + { + libraryCount = libraryCount_; + return *this; + } + + PipelineLibraryCreateInfoKHR & setPLibraries( const VULKAN_HPP_NAMESPACE::Pipeline* pLibraries_ ) VULKAN_HPP_NOEXCEPT + { + pLibraries = pLibraries_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineLibraryCreateInfoKHR & setLibraries( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & libraries_ ) VULKAN_HPP_NOEXCEPT + { + libraryCount = static_cast( libraries_.size() ); + pLibraries 
= libraries_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineLibraryCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineLibraryCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineLibraryCreateInfoKHR const& ) const = default; +#else + bool operator==( PipelineLibraryCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( libraryCount == rhs.libraryCount ) + && ( pLibraries == rhs.pLibraries ); + } + + bool operator!=( PipelineLibraryCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineLibraryCreateInfoKHR; + const void* pNext = {}; + uint32_t libraryCount = {}; + const VULKAN_HPP_NAMESPACE::Pipeline* pLibraries = {}; + + }; + static_assert( sizeof( PipelineLibraryCreateInfoKHR ) == sizeof( VkPipelineLibraryCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineLibraryCreateInfoKHR; + }; + + struct RayTracingPipelineInterfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingPipelineInterfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RayTracingPipelineInterfaceCreateInfoKHR(uint32_t maxPipelineRayPayloadSize_ = {}, uint32_t maxPipelineRayHitAttributeSize_ = {}) VULKAN_HPP_NOEXCEPT + : maxPipelineRayPayloadSize( maxPipelineRayPayloadSize_ ), maxPipelineRayHitAttributeSize( maxPipelineRayHitAttributeSize_ ) + {} + + VULKAN_HPP_CONSTEXPR RayTracingPipelineInterfaceCreateInfoKHR( RayTracingPipelineInterfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RayTracingPipelineInterfaceCreateInfoKHR( VkRayTracingPipelineInterfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RayTracingPipelineInterfaceCreateInfoKHR & operator=( VkRayTracingPipelineInterfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RayTracingPipelineInterfaceCreateInfoKHR & operator=( RayTracingPipelineInterfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RayTracingPipelineInterfaceCreateInfoKHR ) ); + return *this; + } + + RayTracingPipelineInterfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RayTracingPipelineInterfaceCreateInfoKHR & setMaxPipelineRayPayloadSize( uint32_t maxPipelineRayPayloadSize_ ) VULKAN_HPP_NOEXCEPT + { + maxPipelineRayPayloadSize = maxPipelineRayPayloadSize_; + return *this; + } + + RayTracingPipelineInterfaceCreateInfoKHR & setMaxPipelineRayHitAttributeSize( uint32_t maxPipelineRayHitAttributeSize_ ) VULKAN_HPP_NOEXCEPT + { + maxPipelineRayHitAttributeSize = maxPipelineRayHitAttributeSize_; + return *this; + } + + + operator VkRayTracingPipelineInterfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
VkRayTracingPipelineInterfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RayTracingPipelineInterfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( RayTracingPipelineInterfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxPipelineRayPayloadSize == rhs.maxPipelineRayPayloadSize ) + && ( maxPipelineRayHitAttributeSize == rhs.maxPipelineRayHitAttributeSize ); + } + + bool operator!=( RayTracingPipelineInterfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRayTracingPipelineInterfaceCreateInfoKHR; + const void* pNext = {}; + uint32_t maxPipelineRayPayloadSize = {}; + uint32_t maxPipelineRayHitAttributeSize = {}; + + }; + static_assert( sizeof( RayTracingPipelineInterfaceCreateInfoKHR ) == sizeof( VkRayTracingPipelineInterfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RayTracingPipelineInterfaceCreateInfoKHR; + }; + + struct RayTracingPipelineCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingPipelineCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoKHR(VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ = {}, uint32_t stageCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ = {}, uint32_t groupCount_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR* pGroups_ = {}, uint32_t maxPipelineRayRecursionDepth_ = {}, const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ = {}, const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), stageCount( stageCount_ ), pStages( pStages_ ), groupCount( groupCount_ ), pGroups( pGroups_ ), maxPipelineRayRecursionDepth( maxPipelineRayRecursionDepth_ ), pLibraryInfo( pLibraryInfo_ ), pLibraryInterface( pLibraryInterface_ ), pDynamicState( pDynamicState_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoKHR( RayTracingPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RayTracingPipelineCreateInfoKHR( VkRayTracingPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RayTracingPipelineCreateInfoKHR( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & groups_ = {}, uint32_t maxPipelineRayRecursionDepth_ = {}, const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ = {}, const 
VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {} ) + : flags( flags_ ), stageCount( static_cast( stages_.size() ) ), pStages( stages_.data() ), groupCount( static_cast( groups_.size() ) ), pGroups( groups_.data() ), maxPipelineRayRecursionDepth( maxPipelineRayRecursionDepth_ ), pLibraryInfo( pLibraryInfo_ ), pLibraryInterface( pLibraryInterface_ ), pDynamicState( pDynamicState_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RayTracingPipelineCreateInfoKHR & operator=( VkRayTracingPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RayTracingPipelineCreateInfoKHR & operator=( RayTracingPipelineCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RayTracingPipelineCreateInfoKHR ) ); + return *this; + } + + RayTracingPipelineCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setStageCount( uint32_t stageCount_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = stageCount_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setPStages( const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ ) VULKAN_HPP_NOEXCEPT + { + pStages = pStages_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RayTracingPipelineCreateInfoKHR & setStages( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = static_cast( stages_.size() ); + pStages = stages_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RayTracingPipelineCreateInfoKHR & setGroupCount( uint32_t groupCount_ ) VULKAN_HPP_NOEXCEPT + { + groupCount = groupCount_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setPGroups( const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR* pGroups_ ) VULKAN_HPP_NOEXCEPT + { + pGroups = pGroups_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RayTracingPipelineCreateInfoKHR & setGroups( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & groups_ ) VULKAN_HPP_NOEXCEPT + { + groupCount = static_cast( groups_.size() ); + pGroups = groups_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RayTracingPipelineCreateInfoKHR & setMaxPipelineRayRecursionDepth( uint32_t maxPipelineRayRecursionDepth_ ) VULKAN_HPP_NOEXCEPT + { + maxPipelineRayRecursionDepth = maxPipelineRayRecursionDepth_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setPLibraryInfo( const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo_ ) VULKAN_HPP_NOEXCEPT + { + pLibraryInfo = pLibraryInfo_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setPLibraryInterface( const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface_ ) VULKAN_HPP_NOEXCEPT + { + pLibraryInterface = pLibraryInterface_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setPDynamicState( const 
VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState_ ) VULKAN_HPP_NOEXCEPT + { + pDynamicState = pDynamicState_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + { + layout = layout_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setBasePipelineHandle( VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + RayTracingPipelineCreateInfoKHR & setBasePipelineIndex( int32_t basePipelineIndex_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + + operator VkRayTracingPipelineCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRayTracingPipelineCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RayTracingPipelineCreateInfoKHR const& ) const = default; +#else + bool operator==( RayTracingPipelineCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stageCount == rhs.stageCount ) + && ( pStages == rhs.pStages ) + && ( groupCount == rhs.groupCount ) + && ( pGroups == rhs.pGroups ) + && ( maxPipelineRayRecursionDepth == rhs.maxPipelineRayRecursionDepth ) + && ( pLibraryInfo == rhs.pLibraryInfo ) + && ( pLibraryInterface == rhs.pLibraryInterface ) + && ( pDynamicState == rhs.pDynamicState ) + && ( layout == rhs.layout ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( RayTracingPipelineCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRayTracingPipelineCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags = {}; + uint32_t stageCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages = {}; + uint32_t groupCount = {}; + const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoKHR* pGroups = {}; + uint32_t maxPipelineRayRecursionDepth = {}; + const VULKAN_HPP_NAMESPACE::PipelineLibraryCreateInfoKHR* pLibraryInfo = {}; + const VULKAN_HPP_NAMESPACE::RayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface = {}; + const VULKAN_HPP_NAMESPACE::PipelineDynamicStateCreateInfo* pDynamicState = {}; + VULKAN_HPP_NAMESPACE::PipelineLayout layout = {}; + VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle = {}; + int32_t basePipelineIndex = {}; + + }; + static_assert( sizeof( RayTracingPipelineCreateInfoKHR ) == sizeof( VkRayTracingPipelineCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = RayTracingPipelineCreateInfoKHR; + }; + + struct RayTracingShaderGroupCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingShaderGroupCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RayTracingShaderGroupCreateInfoNV(VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR type_ = VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR::eGeneral, uint32_t generalShader_ = {}, uint32_t closestHitShader_ = {}, uint32_t anyHitShader_ = {}, uint32_t intersectionShader_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), generalShader( generalShader_ ), closestHitShader( closestHitShader_ ), anyHitShader( anyHitShader_ ), intersectionShader( intersectionShader_ ) + {} + + VULKAN_HPP_CONSTEXPR RayTracingShaderGroupCreateInfoNV( RayTracingShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RayTracingShaderGroupCreateInfoNV( VkRayTracingShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RayTracingShaderGroupCreateInfoNV & operator=( VkRayTracingShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RayTracingShaderGroupCreateInfoNV & operator=( RayTracingShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RayTracingShaderGroupCreateInfoNV ) ); + return *this; + } + + RayTracingShaderGroupCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RayTracingShaderGroupCreateInfoNV & setType( VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + RayTracingShaderGroupCreateInfoNV & setGeneralShader( uint32_t generalShader_ ) VULKAN_HPP_NOEXCEPT + { + generalShader = generalShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoNV & setClosestHitShader( uint32_t closestHitShader_ ) VULKAN_HPP_NOEXCEPT + { + closestHitShader = closestHitShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoNV & setAnyHitShader( uint32_t anyHitShader_ ) VULKAN_HPP_NOEXCEPT + { + anyHitShader = anyHitShader_; + return *this; + } + + RayTracingShaderGroupCreateInfoNV & setIntersectionShader( uint32_t intersectionShader_ ) VULKAN_HPP_NOEXCEPT + { + intersectionShader = intersectionShader_; + return *this; + } + + + operator VkRayTracingShaderGroupCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRayTracingShaderGroupCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RayTracingShaderGroupCreateInfoNV const& ) const = default; +#else + bool operator==( RayTracingShaderGroupCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( type == rhs.type ) + && ( generalShader == rhs.generalShader ) + && ( closestHitShader == rhs.closestHitShader ) + && ( anyHitShader == rhs.anyHitShader ) + && ( intersectionShader == rhs.intersectionShader ); + } + + bool operator!=( RayTracingShaderGroupCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = 
StructureType::eRayTracingShaderGroupCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR type = VULKAN_HPP_NAMESPACE::RayTracingShaderGroupTypeKHR::eGeneral; + uint32_t generalShader = {}; + uint32_t closestHitShader = {}; + uint32_t anyHitShader = {}; + uint32_t intersectionShader = {}; + + }; + static_assert( sizeof( RayTracingShaderGroupCreateInfoNV ) == sizeof( VkRayTracingShaderGroupCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RayTracingShaderGroupCreateInfoNV; + }; + + struct RayTracingPipelineCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRayTracingPipelineCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoNV(VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ = {}, uint32_t stageCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ = {}, uint32_t groupCount_ = {}, const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoNV* pGroups_ = {}, uint32_t maxRecursionDepth_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), stageCount( stageCount_ ), pStages( pStages_ ), groupCount( groupCount_ ), pGroups( pGroups_ ), maxRecursionDepth( maxRecursionDepth_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR RayTracingPipelineCreateInfoNV( RayTracingPipelineCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RayTracingPipelineCreateInfoNV( VkRayTracingPipelineCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RayTracingPipelineCreateInfoNV( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & groups_ = {}, uint32_t maxRecursionDepth_ = {}, VULKAN_HPP_NAMESPACE::PipelineLayout layout_ = {}, VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ = {}, int32_t basePipelineIndex_ = {} ) + : flags( flags_ ), stageCount( static_cast( stages_.size() ) ), pStages( stages_.data() ), groupCount( static_cast( groups_.size() ) ), pGroups( groups_.data() ), maxRecursionDepth( maxRecursionDepth_ ), layout( layout_ ), basePipelineHandle( basePipelineHandle_ ), basePipelineIndex( basePipelineIndex_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RayTracingPipelineCreateInfoNV & operator=( VkRayTracingPipelineCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RayTracingPipelineCreateInfoNV & operator=( RayTracingPipelineCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RayTracingPipelineCreateInfoNV ) ); + return *this; + } + + RayTracingPipelineCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RayTracingPipelineCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + 
RayTracingPipelineCreateInfoNV & setStageCount( uint32_t stageCount_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = stageCount_; + return *this; + } + + RayTracingPipelineCreateInfoNV & setPStages( const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ ) VULKAN_HPP_NOEXCEPT + { + pStages = pStages_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RayTracingPipelineCreateInfoNV & setStages( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = static_cast( stages_.size() ); + pStages = stages_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RayTracingPipelineCreateInfoNV & setGroupCount( uint32_t groupCount_ ) VULKAN_HPP_NOEXCEPT + { + groupCount = groupCount_; + return *this; + } + + RayTracingPipelineCreateInfoNV & setPGroups( const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoNV* pGroups_ ) VULKAN_HPP_NOEXCEPT + { + pGroups = pGroups_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RayTracingPipelineCreateInfoNV & setGroups( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & groups_ ) VULKAN_HPP_NOEXCEPT + { + groupCount = static_cast( groups_.size() ); + pGroups = groups_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RayTracingPipelineCreateInfoNV & setMaxRecursionDepth( uint32_t maxRecursionDepth_ ) VULKAN_HPP_NOEXCEPT + { + maxRecursionDepth = maxRecursionDepth_; + return *this; + } + + RayTracingPipelineCreateInfoNV & setLayout( VULKAN_HPP_NAMESPACE::PipelineLayout layout_ ) VULKAN_HPP_NOEXCEPT + { + layout = layout_; + return *this; + } + + RayTracingPipelineCreateInfoNV & setBasePipelineHandle( VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineHandle = basePipelineHandle_; + return *this; + } + + RayTracingPipelineCreateInfoNV & setBasePipelineIndex( int32_t basePipelineIndex_ ) VULKAN_HPP_NOEXCEPT + { + basePipelineIndex = basePipelineIndex_; + return *this; + } + + + operator VkRayTracingPipelineCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRayTracingPipelineCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RayTracingPipelineCreateInfoNV const& ) const = default; +#else + bool operator==( RayTracingPipelineCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( stageCount == rhs.stageCount ) + && ( pStages == rhs.pStages ) + && ( groupCount == rhs.groupCount ) + && ( pGroups == rhs.pGroups ) + && ( maxRecursionDepth == rhs.maxRecursionDepth ) + && ( layout == rhs.layout ) + && ( basePipelineHandle == rhs.basePipelineHandle ) + && ( basePipelineIndex == rhs.basePipelineIndex ); + } + + bool operator!=( RayTracingPipelineCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRayTracingPipelineCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCreateFlags flags = {}; + uint32_t stageCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages = {}; + uint32_t groupCount = {}; + const VULKAN_HPP_NAMESPACE::RayTracingShaderGroupCreateInfoNV* pGroups = {}; + uint32_t maxRecursionDepth = {}; + VULKAN_HPP_NAMESPACE::PipelineLayout layout = 
{}; + VULKAN_HPP_NAMESPACE::Pipeline basePipelineHandle = {}; + int32_t basePipelineIndex = {}; + + }; + static_assert( sizeof( RayTracingPipelineCreateInfoNV ) == sizeof( VkRayTracingPipelineCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RayTracingPipelineCreateInfoNV; + }; + + struct SubpassDescription + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubpassDescription(VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags_ = {}, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, uint32_t inputAttachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference* pInputAttachments_ = {}, uint32_t colorAttachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference* pColorAttachments_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference* pResolveAttachments_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference* pDepthStencilAttachment_ = {}, uint32_t preserveAttachmentCount_ = {}, const uint32_t* pPreserveAttachments_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pipelineBindPoint( pipelineBindPoint_ ), inputAttachmentCount( inputAttachmentCount_ ), pInputAttachments( pInputAttachments_ ), colorAttachmentCount( colorAttachmentCount_ ), pColorAttachments( pColorAttachments_ ), pResolveAttachments( pResolveAttachments_ ), pDepthStencilAttachment( pDepthStencilAttachment_ ), preserveAttachmentCount( preserveAttachmentCount_ ), pPreserveAttachments( pPreserveAttachments_ ) + {} + + VULKAN_HPP_CONSTEXPR SubpassDescription( SubpassDescription const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubpassDescription( VkSubpassDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription( VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags_, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & inputAttachments_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachments_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & resolveAttachments_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference* pDepthStencilAttachment_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & preserveAttachments_ = {} ) + : flags( flags_ ), pipelineBindPoint( pipelineBindPoint_ ), inputAttachmentCount( static_cast( inputAttachments_.size() ) ), pInputAttachments( inputAttachments_.data() ), colorAttachmentCount( static_cast( colorAttachments_.size() ) ), pColorAttachments( colorAttachments_.data() ), pResolveAttachments( resolveAttachments_.data() ), pDepthStencilAttachment( pDepthStencilAttachment_ ), preserveAttachmentCount( static_cast( preserveAttachments_.size() ) ), pPreserveAttachments( preserveAttachments_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( resolveAttachments_.empty() || ( colorAttachments_.size() == resolveAttachments_.size() ) ); +#else + if ( !resolveAttachments_.empty() && ( colorAttachments_.size() != resolveAttachments_.size() ) ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::SubpassDescription::SubpassDescription: !resolveAttachments_.empty() && ( colorAttachments_.size() != resolveAttachments_.size() )" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( 
VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubpassDescription & operator=( VkSubpassDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SubpassDescription & operator=( SubpassDescription const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SubpassDescription ) ); + return *this; + } + + SubpassDescription & setFlags( VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + SubpassDescription & setPipelineBindPoint( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + SubpassDescription & setInputAttachmentCount( uint32_t inputAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + inputAttachmentCount = inputAttachmentCount_; + return *this; + } + + SubpassDescription & setPInputAttachments( const VULKAN_HPP_NAMESPACE::AttachmentReference* pInputAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pInputAttachments = pInputAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription & setInputAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & inputAttachments_ ) VULKAN_HPP_NOEXCEPT + { + inputAttachmentCount = static_cast( inputAttachments_.size() ); + pInputAttachments = inputAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubpassDescription & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + colorAttachmentCount = colorAttachmentCount_; + return *this; + } + + SubpassDescription & setPColorAttachments( const VULKAN_HPP_NAMESPACE::AttachmentReference* pColorAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pColorAttachments = pColorAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription & setColorAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachments_ ) VULKAN_HPP_NOEXCEPT + { + colorAttachmentCount = static_cast( colorAttachments_.size() ); + pColorAttachments = colorAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubpassDescription & setPResolveAttachments( const VULKAN_HPP_NAMESPACE::AttachmentReference* pResolveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pResolveAttachments = pResolveAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription & setResolveAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & resolveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + colorAttachmentCount = static_cast( resolveAttachments_.size() ); + pResolveAttachments = resolveAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubpassDescription & setPDepthStencilAttachment( const VULKAN_HPP_NAMESPACE::AttachmentReference* pDepthStencilAttachment_ ) VULKAN_HPP_NOEXCEPT + { + pDepthStencilAttachment = pDepthStencilAttachment_; + return *this; + } + + SubpassDescription & setPreserveAttachmentCount( uint32_t preserveAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + preserveAttachmentCount = preserveAttachmentCount_; + return *this; + } + + SubpassDescription & setPPreserveAttachments( const uint32_t* pPreserveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pPreserveAttachments = pPreserveAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription & setPreserveAttachments( 
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & preserveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + preserveAttachmentCount = static_cast( preserveAttachments_.size() ); + pPreserveAttachments = preserveAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSubpassDescription const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSubpassDescription &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubpassDescription const& ) const = default; +#else + bool operator==( SubpassDescription const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( flags == rhs.flags ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( inputAttachmentCount == rhs.inputAttachmentCount ) + && ( pInputAttachments == rhs.pInputAttachments ) + && ( colorAttachmentCount == rhs.colorAttachmentCount ) + && ( pColorAttachments == rhs.pColorAttachments ) + && ( pResolveAttachments == rhs.pResolveAttachments ) + && ( pDepthStencilAttachment == rhs.pDepthStencilAttachment ) + && ( preserveAttachmentCount == rhs.preserveAttachmentCount ) + && ( pPreserveAttachments == rhs.pPreserveAttachments ); + } + + bool operator!=( SubpassDescription const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags = {}; + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics; + uint32_t inputAttachmentCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference* pInputAttachments = {}; + uint32_t colorAttachmentCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference* pColorAttachments = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference* pResolveAttachments = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference* pDepthStencilAttachment = {}; + uint32_t preserveAttachmentCount = {}; + const uint32_t* pPreserveAttachments = {}; + + }; + static_assert( sizeof( SubpassDescription ) == sizeof( VkSubpassDescription ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
 );
+
+  struct SubpassDependency
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR SubpassDependency(uint32_t srcSubpass_ = {}, uint32_t dstSubpass_ = {}, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask_ = {}, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ = {}, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags_ = {}) VULKAN_HPP_NOEXCEPT
+    : srcSubpass( srcSubpass_ ), dstSubpass( dstSubpass_ ), srcStageMask( srcStageMask_ ), dstStageMask( dstStageMask_ ), srcAccessMask( srcAccessMask_ ), dstAccessMask( dstAccessMask_ ), dependencyFlags( dependencyFlags_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR SubpassDependency( SubpassDependency const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SubpassDependency( VkSubpassDependency const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SubpassDependency & operator=( VkSubpassDependency const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SubpassDependency const *>( &rhs );
+      return *this;
+    }
+
+    SubpassDependency & operator=( SubpassDependency const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( SubpassDependency ) );
+      return *this;
+    }
+
+    SubpassDependency & setSrcSubpass( uint32_t srcSubpass_ ) VULKAN_HPP_NOEXCEPT
+    {
+      srcSubpass = srcSubpass_;
+      return *this;
+    }
+
+    SubpassDependency & setDstSubpass( uint32_t dstSubpass_ ) VULKAN_HPP_NOEXCEPT
+    {
+      dstSubpass = dstSubpass_;
+      return *this;
+    }
+
+    SubpassDependency & setSrcStageMask( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask_ ) VULKAN_HPP_NOEXCEPT
+    {
+      srcStageMask = srcStageMask_;
+      return *this;
+    }
+
+    SubpassDependency & setDstStageMask( VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask_ ) VULKAN_HPP_NOEXCEPT
+    {
+      dstStageMask = dstStageMask_;
+      return *this;
+    }
+
+    SubpassDependency & setSrcAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ ) VULKAN_HPP_NOEXCEPT
+    {
+      srcAccessMask = srcAccessMask_;
+      return *this;
+    }
+
+    SubpassDependency & setDstAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ ) VULKAN_HPP_NOEXCEPT
+    {
+      dstAccessMask = dstAccessMask_;
+      return *this;
+    }
+
+    SubpassDependency & setDependencyFlags( VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags_ ) VULKAN_HPP_NOEXCEPT
+    {
+      dependencyFlags = dependencyFlags_;
+      return *this;
+    }
+
+
+    operator VkSubpassDependency const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSubpassDependency*>( this );
+    }
+
+    operator VkSubpassDependency &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSubpassDependency*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( SubpassDependency const& ) const = default;
+#else
+    bool operator==( SubpassDependency const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( srcSubpass == rhs.srcSubpass )
+          && ( dstSubpass == rhs.dstSubpass )
+          && ( srcStageMask == rhs.srcStageMask )
+          && ( dstStageMask == rhs.dstStageMask )
+          && ( srcAccessMask == rhs.srcAccessMask )
+          && ( dstAccessMask == rhs.dstAccessMask )
+          && ( dependencyFlags == rhs.dependencyFlags );
+    }
+
+    bool operator!=( SubpassDependency const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t srcSubpass = {};
+    uint32_t dstSubpass = {};
+    VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask = {};
+    VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask = {};
+    VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask =
{}; + VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask = {}; + VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags = {}; + + }; + static_assert( sizeof( SubpassDependency ) == sizeof( VkSubpassDependency ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct RenderPassCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassCreateInfo(VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags_ = {}, uint32_t attachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentDescription* pAttachments_ = {}, uint32_t subpassCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassDescription* pSubpasses_ = {}, uint32_t dependencyCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassDependency* pDependencies_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), attachmentCount( attachmentCount_ ), pAttachments( pAttachments_ ), subpassCount( subpassCount_ ), pSubpasses( pSubpasses_ ), dependencyCount( dependencyCount_ ), pDependencies( pDependencies_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassCreateInfo( RenderPassCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassCreateInfo( VkRenderPassCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo( VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & subpasses_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dependencies_ = {} ) + : flags( flags_ ), attachmentCount( static_cast( attachments_.size() ) ), pAttachments( attachments_.data() ), subpassCount( static_cast( subpasses_.size() ) ), pSubpasses( subpasses_.data() ), dependencyCount( static_cast( dependencies_.size() ) ), pDependencies( dependencies_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassCreateInfo & operator=( VkRenderPassCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassCreateInfo & operator=( RenderPassCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassCreateInfo ) ); + return *this; + } + + RenderPassCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + RenderPassCreateInfo & setAttachmentCount( uint32_t attachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = attachmentCount_; + return *this; + } + + RenderPassCreateInfo & setPAttachments( const VULKAN_HPP_NAMESPACE::AttachmentDescription* pAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pAttachments = pAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo & setAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = static_cast( attachments_.size() ); + pAttachments = attachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassCreateInfo & setSubpassCount( 
uint32_t subpassCount_ ) VULKAN_HPP_NOEXCEPT + { + subpassCount = subpassCount_; + return *this; + } + + RenderPassCreateInfo & setPSubpasses( const VULKAN_HPP_NAMESPACE::SubpassDescription* pSubpasses_ ) VULKAN_HPP_NOEXCEPT + { + pSubpasses = pSubpasses_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo & setSubpasses( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & subpasses_ ) VULKAN_HPP_NOEXCEPT + { + subpassCount = static_cast( subpasses_.size() ); + pSubpasses = subpasses_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassCreateInfo & setDependencyCount( uint32_t dependencyCount_ ) VULKAN_HPP_NOEXCEPT + { + dependencyCount = dependencyCount_; + return *this; + } + + RenderPassCreateInfo & setPDependencies( const VULKAN_HPP_NAMESPACE::SubpassDependency* pDependencies_ ) VULKAN_HPP_NOEXCEPT + { + pDependencies = pDependencies_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo & setDependencies( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dependencies_ ) VULKAN_HPP_NOEXCEPT + { + dependencyCount = static_cast( dependencies_.size() ); + pDependencies = dependencies_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassCreateInfo const& ) const = default; +#else + bool operator==( RenderPassCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( subpassCount == rhs.subpassCount ) + && ( pSubpasses == rhs.pSubpasses ) + && ( dependencyCount == rhs.dependencyCount ) + && ( pDependencies == rhs.pDependencies ); + } + + bool operator!=( RenderPassCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags = {}; + uint32_t attachmentCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentDescription* pAttachments = {}; + uint32_t subpassCount = {}; + const VULKAN_HPP_NAMESPACE::SubpassDescription* pSubpasses = {}; + uint32_t dependencyCount = {}; + const VULKAN_HPP_NAMESPACE::SubpassDependency* pDependencies = {}; + + }; + static_assert( sizeof( RenderPassCreateInfo ) == sizeof( VkRenderPassCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = RenderPassCreateInfo; + }; + + struct SubpassDescription2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubpassDescription2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubpassDescription2(VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags_ = {}, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, uint32_t viewMask_ = {}, uint32_t inputAttachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference2* pInputAttachments_ = {}, uint32_t colorAttachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference2* pColorAttachments_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference2* pResolveAttachments_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilAttachment_ = {}, uint32_t preserveAttachmentCount_ = {}, const uint32_t* pPreserveAttachments_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pipelineBindPoint( pipelineBindPoint_ ), viewMask( viewMask_ ), inputAttachmentCount( inputAttachmentCount_ ), pInputAttachments( pInputAttachments_ ), colorAttachmentCount( colorAttachmentCount_ ), pColorAttachments( pColorAttachments_ ), pResolveAttachments( pResolveAttachments_ ), pDepthStencilAttachment( pDepthStencilAttachment_ ), preserveAttachmentCount( preserveAttachmentCount_ ), pPreserveAttachments( pPreserveAttachments_ ) + {} + + VULKAN_HPP_CONSTEXPR SubpassDescription2( SubpassDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubpassDescription2( VkSubpassDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription2( VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags_, VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_, uint32_t viewMask_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & inputAttachments_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachments_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & resolveAttachments_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilAttachment_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & preserveAttachments_ = {} ) + : flags( flags_ ), pipelineBindPoint( pipelineBindPoint_ ), viewMask( viewMask_ ), inputAttachmentCount( static_cast( inputAttachments_.size() ) ), pInputAttachments( inputAttachments_.data() ), colorAttachmentCount( static_cast( colorAttachments_.size() ) ), pColorAttachments( colorAttachments_.data() ), pResolveAttachments( resolveAttachments_.data() ), pDepthStencilAttachment( pDepthStencilAttachment_ ), preserveAttachmentCount( static_cast( preserveAttachments_.size() ) ), pPreserveAttachments( preserveAttachments_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( resolveAttachments_.empty() || ( colorAttachments_.size() == resolveAttachments_.size() ) ); +#else + if ( !resolveAttachments_.empty() && ( colorAttachments_.size() != resolveAttachments_.size() ) ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::SubpassDescription2::SubpassDescription2: !resolveAttachments_.empty() && ( colorAttachments_.size() != resolveAttachments_.size() )" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubpassDescription2 & operator=( VkSubpassDescription2 const & rhs ) 
VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SubpassDescription2 & operator=( SubpassDescription2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SubpassDescription2 ) ); + return *this; + } + + SubpassDescription2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SubpassDescription2 & setFlags( VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + SubpassDescription2 & setPipelineBindPoint( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + SubpassDescription2 & setViewMask( uint32_t viewMask_ ) VULKAN_HPP_NOEXCEPT + { + viewMask = viewMask_; + return *this; + } + + SubpassDescription2 & setInputAttachmentCount( uint32_t inputAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + inputAttachmentCount = inputAttachmentCount_; + return *this; + } + + SubpassDescription2 & setPInputAttachments( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pInputAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pInputAttachments = pInputAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription2 & setInputAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & inputAttachments_ ) VULKAN_HPP_NOEXCEPT + { + inputAttachmentCount = static_cast( inputAttachments_.size() ); + pInputAttachments = inputAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubpassDescription2 & setColorAttachmentCount( uint32_t colorAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + colorAttachmentCount = colorAttachmentCount_; + return *this; + } + + SubpassDescription2 & setPColorAttachments( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pColorAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pColorAttachments = pColorAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription2 & setColorAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & colorAttachments_ ) VULKAN_HPP_NOEXCEPT + { + colorAttachmentCount = static_cast( colorAttachments_.size() ); + pColorAttachments = colorAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubpassDescription2 & setPResolveAttachments( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pResolveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pResolveAttachments = pResolveAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription2 & setResolveAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & resolveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + colorAttachmentCount = static_cast( resolveAttachments_.size() ); + pResolveAttachments = resolveAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubpassDescription2 & setPDepthStencilAttachment( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilAttachment_ ) VULKAN_HPP_NOEXCEPT + { + pDepthStencilAttachment = pDepthStencilAttachment_; + return *this; + } + + SubpassDescription2 & setPreserveAttachmentCount( uint32_t preserveAttachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + preserveAttachmentCount = preserveAttachmentCount_; + return *this; + } + + SubpassDescription2 & setPPreserveAttachments( const uint32_t* pPreserveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pPreserveAttachments = 
pPreserveAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubpassDescription2 & setPreserveAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & preserveAttachments_ ) VULKAN_HPP_NOEXCEPT + { + preserveAttachmentCount = static_cast( preserveAttachments_.size() ); + pPreserveAttachments = preserveAttachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSubpassDescription2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSubpassDescription2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubpassDescription2 const& ) const = default; +#else + bool operator==( SubpassDescription2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( viewMask == rhs.viewMask ) + && ( inputAttachmentCount == rhs.inputAttachmentCount ) + && ( pInputAttachments == rhs.pInputAttachments ) + && ( colorAttachmentCount == rhs.colorAttachmentCount ) + && ( pColorAttachments == rhs.pColorAttachments ) + && ( pResolveAttachments == rhs.pResolveAttachments ) + && ( pDepthStencilAttachment == rhs.pDepthStencilAttachment ) + && ( preserveAttachmentCount == rhs.preserveAttachmentCount ) + && ( pPreserveAttachments == rhs.pPreserveAttachments ); + } + + bool operator!=( SubpassDescription2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubpassDescription2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SubpassDescriptionFlags flags = {}; + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics; + uint32_t viewMask = {}; + uint32_t inputAttachmentCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pInputAttachments = {}; + uint32_t colorAttachmentCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pColorAttachments = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pResolveAttachments = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilAttachment = {}; + uint32_t preserveAttachmentCount = {}; + const uint32_t* pPreserveAttachments = {}; + + }; + static_assert( sizeof( SubpassDescription2 ) == sizeof( VkSubpassDescription2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eSubpassDescription2> + { + using Type = SubpassDescription2; + }; + using SubpassDescription2KHR = SubpassDescription2; + + struct SubpassDependency2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubpassDependency2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubpassDependency2(uint32_t srcSubpass_ = {}, uint32_t dstSubpass_ = {}, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask_ = {}, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ = {}, VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ = {}, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags_ = {}, int32_t viewOffset_ = {}) VULKAN_HPP_NOEXCEPT + : srcSubpass( srcSubpass_ ), dstSubpass( dstSubpass_ ), srcStageMask( srcStageMask_ ), dstStageMask( dstStageMask_ ), srcAccessMask( srcAccessMask_ ), dstAccessMask( dstAccessMask_ ), dependencyFlags( dependencyFlags_ ), viewOffset( viewOffset_ ) + {} + + VULKAN_HPP_CONSTEXPR SubpassDependency2( SubpassDependency2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubpassDependency2( VkSubpassDependency2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubpassDependency2 & operator=( VkSubpassDependency2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SubpassDependency2 const *>( &rhs ); + return *this; + } + + SubpassDependency2 & operator=( SubpassDependency2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( SubpassDependency2 ) ); + return *this; + } + + SubpassDependency2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SubpassDependency2 & setSrcSubpass( uint32_t srcSubpass_ ) VULKAN_HPP_NOEXCEPT + { + srcSubpass = srcSubpass_; + return *this; + } + + SubpassDependency2 & setDstSubpass( uint32_t dstSubpass_ ) VULKAN_HPP_NOEXCEPT + { + dstSubpass = dstSubpass_; + return *this; + } + + SubpassDependency2 & setSrcStageMask( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask_ ) VULKAN_HPP_NOEXCEPT + { + srcStageMask = srcStageMask_; + return *this; + } + + SubpassDependency2 & setDstStageMask( VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask_ ) VULKAN_HPP_NOEXCEPT + { + dstStageMask = dstStageMask_; + return *this; + } + + SubpassDependency2 & setSrcAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + srcAccessMask = srcAccessMask_; + return *this; + } + + SubpassDependency2 & setDstAccessMask( VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask_ ) VULKAN_HPP_NOEXCEPT + { + dstAccessMask = dstAccessMask_; + return *this; + } + + SubpassDependency2 & setDependencyFlags( VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags_ ) VULKAN_HPP_NOEXCEPT + { + dependencyFlags = dependencyFlags_; + return *this; + } + + SubpassDependency2 & setViewOffset( int32_t viewOffset_ ) VULKAN_HPP_NOEXCEPT + { + viewOffset = viewOffset_; + return *this; + } + + + operator VkSubpassDependency2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkSubpassDependency2*>( this ); + } + + operator VkSubpassDependency2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkSubpassDependency2*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubpassDependency2 const& ) const = default; +#else + bool operator==( SubpassDependency2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( 
srcSubpass == rhs.srcSubpass ) + && ( dstSubpass == rhs.dstSubpass ) + && ( srcStageMask == rhs.srcStageMask ) + && ( dstStageMask == rhs.dstStageMask ) + && ( srcAccessMask == rhs.srcAccessMask ) + && ( dstAccessMask == rhs.dstAccessMask ) + && ( dependencyFlags == rhs.dependencyFlags ) + && ( viewOffset == rhs.viewOffset ); + } + + bool operator!=( SubpassDependency2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubpassDependency2; + const void* pNext = {}; + uint32_t srcSubpass = {}; + uint32_t dstSubpass = {}; + VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask = {}; + VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask = {}; + VULKAN_HPP_NAMESPACE::AccessFlags srcAccessMask = {}; + VULKAN_HPP_NAMESPACE::AccessFlags dstAccessMask = {}; + VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags = {}; + int32_t viewOffset = {}; + + }; + static_assert( sizeof( SubpassDependency2 ) == sizeof( VkSubpassDependency2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SubpassDependency2; + }; + using SubpassDependency2KHR = SubpassDependency2; + + struct RenderPassCreateInfo2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassCreateInfo2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassCreateInfo2(VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags_ = {}, uint32_t attachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentDescription2* pAttachments_ = {}, uint32_t subpassCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassDescription2* pSubpasses_ = {}, uint32_t dependencyCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassDependency2* pDependencies_ = {}, uint32_t correlatedViewMaskCount_ = {}, const uint32_t* pCorrelatedViewMasks_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), attachmentCount( attachmentCount_ ), pAttachments( pAttachments_ ), subpassCount( subpassCount_ ), pSubpasses( pSubpasses_ ), dependencyCount( dependencyCount_ ), pDependencies( pDependencies_ ), correlatedViewMaskCount( correlatedViewMaskCount_ ), pCorrelatedViewMasks( pCorrelatedViewMasks_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassCreateInfo2( RenderPassCreateInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassCreateInfo2( VkRenderPassCreateInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo2( VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & subpasses_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dependencies_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & correlatedViewMasks_ = {} ) + : flags( flags_ ), attachmentCount( static_cast( attachments_.size() ) ), pAttachments( attachments_.data() ), subpassCount( static_cast( subpasses_.size() ) ), pSubpasses( subpasses_.data() ), dependencyCount( static_cast( dependencies_.size() ) ), pDependencies( dependencies_.data() ), correlatedViewMaskCount( static_cast( correlatedViewMasks_.size() ) ), pCorrelatedViewMasks( correlatedViewMasks_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( 
VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassCreateInfo2 & operator=( VkRenderPassCreateInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassCreateInfo2 & operator=( RenderPassCreateInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassCreateInfo2 ) ); + return *this; + } + + RenderPassCreateInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassCreateInfo2 & setFlags( VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + RenderPassCreateInfo2 & setAttachmentCount( uint32_t attachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = attachmentCount_; + return *this; + } + + RenderPassCreateInfo2 & setPAttachments( const VULKAN_HPP_NAMESPACE::AttachmentDescription2* pAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pAttachments = pAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo2 & setAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = static_cast( attachments_.size() ); + pAttachments = attachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassCreateInfo2 & setSubpassCount( uint32_t subpassCount_ ) VULKAN_HPP_NOEXCEPT + { + subpassCount = subpassCount_; + return *this; + } + + RenderPassCreateInfo2 & setPSubpasses( const VULKAN_HPP_NAMESPACE::SubpassDescription2* pSubpasses_ ) VULKAN_HPP_NOEXCEPT + { + pSubpasses = pSubpasses_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo2 & setSubpasses( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & subpasses_ ) VULKAN_HPP_NOEXCEPT + { + subpassCount = static_cast( subpasses_.size() ); + pSubpasses = subpasses_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassCreateInfo2 & setDependencyCount( uint32_t dependencyCount_ ) VULKAN_HPP_NOEXCEPT + { + dependencyCount = dependencyCount_; + return *this; + } + + RenderPassCreateInfo2 & setPDependencies( const VULKAN_HPP_NAMESPACE::SubpassDependency2* pDependencies_ ) VULKAN_HPP_NOEXCEPT + { + pDependencies = pDependencies_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo2 & setDependencies( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & dependencies_ ) VULKAN_HPP_NOEXCEPT + { + dependencyCount = static_cast( dependencies_.size() ); + pDependencies = dependencies_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassCreateInfo2 & setCorrelatedViewMaskCount( uint32_t correlatedViewMaskCount_ ) VULKAN_HPP_NOEXCEPT + { + correlatedViewMaskCount = correlatedViewMaskCount_; + return *this; + } + + RenderPassCreateInfo2 & setPCorrelatedViewMasks( const uint32_t* pCorrelatedViewMasks_ ) VULKAN_HPP_NOEXCEPT + { + pCorrelatedViewMasks = pCorrelatedViewMasks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassCreateInfo2 & setCorrelatedViewMasks( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & correlatedViewMasks_ ) VULKAN_HPP_NOEXCEPT + { + correlatedViewMaskCount = static_cast( correlatedViewMasks_.size() ); + pCorrelatedViewMasks = correlatedViewMasks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassCreateInfo2 const&() 
const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassCreateInfo2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassCreateInfo2 const& ) const = default; +#else + bool operator==( RenderPassCreateInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ) + && ( subpassCount == rhs.subpassCount ) + && ( pSubpasses == rhs.pSubpasses ) + && ( dependencyCount == rhs.dependencyCount ) + && ( pDependencies == rhs.pDependencies ) + && ( correlatedViewMaskCount == rhs.correlatedViewMaskCount ) + && ( pCorrelatedViewMasks == rhs.pCorrelatedViewMasks ); + } + + bool operator!=( RenderPassCreateInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassCreateInfo2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RenderPassCreateFlags flags = {}; + uint32_t attachmentCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentDescription2* pAttachments = {}; + uint32_t subpassCount = {}; + const VULKAN_HPP_NAMESPACE::SubpassDescription2* pSubpasses = {}; + uint32_t dependencyCount = {}; + const VULKAN_HPP_NAMESPACE::SubpassDependency2* pDependencies = {}; + uint32_t correlatedViewMaskCount = {}; + const uint32_t* pCorrelatedViewMasks = {}; + + }; + static_assert( sizeof( RenderPassCreateInfo2 ) == sizeof( VkRenderPassCreateInfo2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = RenderPassCreateInfo2; + }; + using RenderPassCreateInfo2KHR = RenderPassCreateInfo2; + + struct SamplerCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSamplerCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SamplerCreateInfo(VULKAN_HPP_NAMESPACE::SamplerCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::Filter magFilter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest, VULKAN_HPP_NAMESPACE::Filter minFilter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest, VULKAN_HPP_NAMESPACE::SamplerMipmapMode mipmapMode_ = VULKAN_HPP_NAMESPACE::SamplerMipmapMode::eNearest, VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeU_ = VULKAN_HPP_NAMESPACE::SamplerAddressMode::eRepeat, VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeV_ = VULKAN_HPP_NAMESPACE::SamplerAddressMode::eRepeat, VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeW_ = VULKAN_HPP_NAMESPACE::SamplerAddressMode::eRepeat, float mipLodBias_ = {}, VULKAN_HPP_NAMESPACE::Bool32 anisotropyEnable_ = {}, float maxAnisotropy_ = {}, VULKAN_HPP_NAMESPACE::Bool32 compareEnable_ = {}, VULKAN_HPP_NAMESPACE::CompareOp compareOp_ = VULKAN_HPP_NAMESPACE::CompareOp::eNever, float minLod_ = {}, float maxLod_ = {}, VULKAN_HPP_NAMESPACE::BorderColor borderColor_ = VULKAN_HPP_NAMESPACE::BorderColor::eFloatTransparentBlack, VULKAN_HPP_NAMESPACE::Bool32 unnormalizedCoordinates_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), magFilter( magFilter_ ), minFilter( minFilter_ ), mipmapMode( mipmapMode_ ), addressModeU( addressModeU_ ), addressModeV( addressModeV_ ), addressModeW( addressModeW_ ), mipLodBias( mipLodBias_ ), anisotropyEnable( anisotropyEnable_ ), maxAnisotropy( maxAnisotropy_ ), compareEnable( compareEnable_ ), compareOp( compareOp_ ), minLod( minLod_ ), maxLod( maxLod_ ), borderColor( borderColor_ ), unnormalizedCoordinates( unnormalizedCoordinates_ ) + {} + + VULKAN_HPP_CONSTEXPR SamplerCreateInfo( SamplerCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SamplerCreateInfo( VkSamplerCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SamplerCreateInfo & operator=( VkSamplerCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SamplerCreateInfo & operator=( SamplerCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SamplerCreateInfo ) ); + return *this; + } + + SamplerCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SamplerCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::SamplerCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + SamplerCreateInfo & setMagFilter( VULKAN_HPP_NAMESPACE::Filter magFilter_ ) VULKAN_HPP_NOEXCEPT + { + magFilter = magFilter_; + return *this; + } + + SamplerCreateInfo & setMinFilter( VULKAN_HPP_NAMESPACE::Filter minFilter_ ) VULKAN_HPP_NOEXCEPT + { + minFilter = minFilter_; + return *this; + } + + SamplerCreateInfo & setMipmapMode( VULKAN_HPP_NAMESPACE::SamplerMipmapMode mipmapMode_ ) VULKAN_HPP_NOEXCEPT + { + mipmapMode = mipmapMode_; + return *this; + } + + SamplerCreateInfo & setAddressModeU( VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeU_ ) VULKAN_HPP_NOEXCEPT + { + addressModeU = addressModeU_; + return *this; + } + + SamplerCreateInfo & setAddressModeV( 
VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeV_ ) VULKAN_HPP_NOEXCEPT + { + addressModeV = addressModeV_; + return *this; + } + + SamplerCreateInfo & setAddressModeW( VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeW_ ) VULKAN_HPP_NOEXCEPT + { + addressModeW = addressModeW_; + return *this; + } + + SamplerCreateInfo & setMipLodBias( float mipLodBias_ ) VULKAN_HPP_NOEXCEPT + { + mipLodBias = mipLodBias_; + return *this; + } + + SamplerCreateInfo & setAnisotropyEnable( VULKAN_HPP_NAMESPACE::Bool32 anisotropyEnable_ ) VULKAN_HPP_NOEXCEPT + { + anisotropyEnable = anisotropyEnable_; + return *this; + } + + SamplerCreateInfo & setMaxAnisotropy( float maxAnisotropy_ ) VULKAN_HPP_NOEXCEPT + { + maxAnisotropy = maxAnisotropy_; + return *this; + } + + SamplerCreateInfo & setCompareEnable( VULKAN_HPP_NAMESPACE::Bool32 compareEnable_ ) VULKAN_HPP_NOEXCEPT + { + compareEnable = compareEnable_; + return *this; + } + + SamplerCreateInfo & setCompareOp( VULKAN_HPP_NAMESPACE::CompareOp compareOp_ ) VULKAN_HPP_NOEXCEPT + { + compareOp = compareOp_; + return *this; + } + + SamplerCreateInfo & setMinLod( float minLod_ ) VULKAN_HPP_NOEXCEPT + { + minLod = minLod_; + return *this; + } + + SamplerCreateInfo & setMaxLod( float maxLod_ ) VULKAN_HPP_NOEXCEPT + { + maxLod = maxLod_; + return *this; + } + + SamplerCreateInfo & setBorderColor( VULKAN_HPP_NAMESPACE::BorderColor borderColor_ ) VULKAN_HPP_NOEXCEPT + { + borderColor = borderColor_; + return *this; + } + + SamplerCreateInfo & setUnnormalizedCoordinates( VULKAN_HPP_NAMESPACE::Bool32 unnormalizedCoordinates_ ) VULKAN_HPP_NOEXCEPT + { + unnormalizedCoordinates = unnormalizedCoordinates_; + return *this; + } + + + operator VkSamplerCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSamplerCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SamplerCreateInfo const& ) const = default; +#else + bool operator==( SamplerCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( magFilter == rhs.magFilter ) + && ( minFilter == rhs.minFilter ) + && ( mipmapMode == rhs.mipmapMode ) + && ( addressModeU == rhs.addressModeU ) + && ( addressModeV == rhs.addressModeV ) + && ( addressModeW == rhs.addressModeW ) + && ( mipLodBias == rhs.mipLodBias ) + && ( anisotropyEnable == rhs.anisotropyEnable ) + && ( maxAnisotropy == rhs.maxAnisotropy ) + && ( compareEnable == rhs.compareEnable ) + && ( compareOp == rhs.compareOp ) + && ( minLod == rhs.minLod ) + && ( maxLod == rhs.maxLod ) + && ( borderColor == rhs.borderColor ) + && ( unnormalizedCoordinates == rhs.unnormalizedCoordinates ); + } + + bool operator!=( SamplerCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSamplerCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SamplerCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::Filter magFilter = VULKAN_HPP_NAMESPACE::Filter::eNearest; + VULKAN_HPP_NAMESPACE::Filter minFilter = VULKAN_HPP_NAMESPACE::Filter::eNearest; + VULKAN_HPP_NAMESPACE::SamplerMipmapMode mipmapMode = VULKAN_HPP_NAMESPACE::SamplerMipmapMode::eNearest; + VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeU = VULKAN_HPP_NAMESPACE::SamplerAddressMode::eRepeat; + VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeV = 
VULKAN_HPP_NAMESPACE::SamplerAddressMode::eRepeat; + VULKAN_HPP_NAMESPACE::SamplerAddressMode addressModeW = VULKAN_HPP_NAMESPACE::SamplerAddressMode::eRepeat; + float mipLodBias = {}; + VULKAN_HPP_NAMESPACE::Bool32 anisotropyEnable = {}; + float maxAnisotropy = {}; + VULKAN_HPP_NAMESPACE::Bool32 compareEnable = {}; + VULKAN_HPP_NAMESPACE::CompareOp compareOp = VULKAN_HPP_NAMESPACE::CompareOp::eNever; + float minLod = {}; + float maxLod = {}; + VULKAN_HPP_NAMESPACE::BorderColor borderColor = VULKAN_HPP_NAMESPACE::BorderColor::eFloatTransparentBlack; + VULKAN_HPP_NAMESPACE::Bool32 unnormalizedCoordinates = {}; + + }; + static_assert( sizeof( SamplerCreateInfo ) == sizeof( VkSamplerCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SamplerCreateInfo; + }; + + struct SamplerYcbcrConversionCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSamplerYcbcrConversionCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionCreateInfo(VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion ycbcrModel_ = VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion::eRgbIdentity, VULKAN_HPP_NAMESPACE::SamplerYcbcrRange ycbcrRange_ = VULKAN_HPP_NAMESPACE::SamplerYcbcrRange::eItuFull, VULKAN_HPP_NAMESPACE::ComponentMapping components_ = {}, VULKAN_HPP_NAMESPACE::ChromaLocation xChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::ChromaLocation yChromaOffset_ = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven, VULKAN_HPP_NAMESPACE::Filter chromaFilter_ = VULKAN_HPP_NAMESPACE::Filter::eNearest, VULKAN_HPP_NAMESPACE::Bool32 forceExplicitReconstruction_ = {}) VULKAN_HPP_NOEXCEPT + : format( format_ ), ycbcrModel( ycbcrModel_ ), ycbcrRange( ycbcrRange_ ), components( components_ ), xChromaOffset( xChromaOffset_ ), yChromaOffset( yChromaOffset_ ), chromaFilter( chromaFilter_ ), forceExplicitReconstruction( forceExplicitReconstruction_ ) + {} + + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionCreateInfo( SamplerYcbcrConversionCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SamplerYcbcrConversionCreateInfo( VkSamplerYcbcrConversionCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SamplerYcbcrConversionCreateInfo & operator=( VkSamplerYcbcrConversionCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SamplerYcbcrConversionCreateInfo & operator=( SamplerYcbcrConversionCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SamplerYcbcrConversionCreateInfo ) ); + return *this; + } + + SamplerYcbcrConversionCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setYcbcrModel( VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion ycbcrModel_ ) VULKAN_HPP_NOEXCEPT + { + ycbcrModel = ycbcrModel_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setYcbcrRange( VULKAN_HPP_NAMESPACE::SamplerYcbcrRange 
ycbcrRange_ ) VULKAN_HPP_NOEXCEPT + { + ycbcrRange = ycbcrRange_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setComponents( VULKAN_HPP_NAMESPACE::ComponentMapping const & components_ ) VULKAN_HPP_NOEXCEPT + { + components = components_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setXChromaOffset( VULKAN_HPP_NAMESPACE::ChromaLocation xChromaOffset_ ) VULKAN_HPP_NOEXCEPT + { + xChromaOffset = xChromaOffset_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setYChromaOffset( VULKAN_HPP_NAMESPACE::ChromaLocation yChromaOffset_ ) VULKAN_HPP_NOEXCEPT + { + yChromaOffset = yChromaOffset_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setChromaFilter( VULKAN_HPP_NAMESPACE::Filter chromaFilter_ ) VULKAN_HPP_NOEXCEPT + { + chromaFilter = chromaFilter_; + return *this; + } + + SamplerYcbcrConversionCreateInfo & setForceExplicitReconstruction( VULKAN_HPP_NAMESPACE::Bool32 forceExplicitReconstruction_ ) VULKAN_HPP_NOEXCEPT + { + forceExplicitReconstruction = forceExplicitReconstruction_; + return *this; + } + + + operator VkSamplerYcbcrConversionCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSamplerYcbcrConversionCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SamplerYcbcrConversionCreateInfo const& ) const = default; +#else + bool operator==( SamplerYcbcrConversionCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( ycbcrModel == rhs.ycbcrModel ) + && ( ycbcrRange == rhs.ycbcrRange ) + && ( components == rhs.components ) + && ( xChromaOffset == rhs.xChromaOffset ) + && ( yChromaOffset == rhs.yChromaOffset ) + && ( chromaFilter == rhs.chromaFilter ) + && ( forceExplicitReconstruction == rhs.forceExplicitReconstruction ); + } + + bool operator!=( SamplerYcbcrConversionCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSamplerYcbcrConversionCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion ycbcrModel = VULKAN_HPP_NAMESPACE::SamplerYcbcrModelConversion::eRgbIdentity; + VULKAN_HPP_NAMESPACE::SamplerYcbcrRange ycbcrRange = VULKAN_HPP_NAMESPACE::SamplerYcbcrRange::eItuFull; + VULKAN_HPP_NAMESPACE::ComponentMapping components = {}; + VULKAN_HPP_NAMESPACE::ChromaLocation xChromaOffset = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven; + VULKAN_HPP_NAMESPACE::ChromaLocation yChromaOffset = VULKAN_HPP_NAMESPACE::ChromaLocation::eCositedEven; + VULKAN_HPP_NAMESPACE::Filter chromaFilter = VULKAN_HPP_NAMESPACE::Filter::eNearest; + VULKAN_HPP_NAMESPACE::Bool32 forceExplicitReconstruction = {}; + + }; + static_assert( sizeof( SamplerYcbcrConversionCreateInfo ) == sizeof( VkSamplerYcbcrConversionCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SamplerYcbcrConversionCreateInfo; + }; + using SamplerYcbcrConversionCreateInfoKHR = SamplerYcbcrConversionCreateInfo; + + class SamplerYcbcrConversion + { + public: + using CType = VkSamplerYcbcrConversion; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eSamplerYcbcrConversion; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSamplerYcbcrConversion; + + public: + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversion() VULKAN_HPP_NOEXCEPT + : m_samplerYcbcrConversion(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversion( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_samplerYcbcrConversion(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT SamplerYcbcrConversion( VkSamplerYcbcrConversion samplerYcbcrConversion ) VULKAN_HPP_NOEXCEPT + : m_samplerYcbcrConversion( samplerYcbcrConversion ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + SamplerYcbcrConversion & operator=(VkSamplerYcbcrConversion samplerYcbcrConversion) VULKAN_HPP_NOEXCEPT + { + m_samplerYcbcrConversion = samplerYcbcrConversion; + return *this; + } +#endif + + SamplerYcbcrConversion & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_samplerYcbcrConversion = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SamplerYcbcrConversion const& ) const = default; +#else + bool operator==( SamplerYcbcrConversion const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion == rhs.m_samplerYcbcrConversion; + } + + bool operator!=(SamplerYcbcrConversion const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion != rhs.m_samplerYcbcrConversion; + } + + bool operator<(SamplerYcbcrConversion const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion < rhs.m_samplerYcbcrConversion; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSamplerYcbcrConversion() const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_samplerYcbcrConversion == VK_NULL_HANDLE; + } + + private: + VkSamplerYcbcrConversion m_samplerYcbcrConversion; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ) == sizeof( VkSamplerYcbcrConversion ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type<ObjectType::eSamplerYcbcrConversion> + { + using type = VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion; + }; + + template <> + struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eSamplerYcbcrConversion> + { + using Type = VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion; + }; + + + template <> + struct CppType<VkSamplerYcbcrConversion, void> + { + using Type = VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion; + }; + + + template <> + struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion> + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + using SamplerYcbcrConversionKHR = SamplerYcbcrConversion; + + struct SemaphoreCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSemaphoreCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SemaphoreCreateInfo(VULKAN_HPP_NAMESPACE::SemaphoreCreateFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR SemaphoreCreateInfo( SemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SemaphoreCreateInfo( VkSemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SemaphoreCreateInfo & operator=( VkSemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo const *>( &rhs ); + return *this; + } + + SemaphoreCreateInfo & operator=( SemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( SemaphoreCreateInfo ) ); + return *this; + } + + SemaphoreCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SemaphoreCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::SemaphoreCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkSemaphoreCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkSemaphoreCreateInfo*>( this ); + } + + operator VkSemaphoreCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkSemaphoreCreateInfo*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SemaphoreCreateInfo const& ) const = default; +#else + bool operator==( SemaphoreCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( SemaphoreCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSemaphoreCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SemaphoreCreateFlags flags = {}; + + }; + static_assert( sizeof( SemaphoreCreateInfo ) == sizeof( VkSemaphoreCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<SemaphoreCreateInfo>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType<StructureType, StructureType::eSemaphoreCreateInfo> + { + using Type = SemaphoreCreateInfo; + }; + + struct ShaderModuleCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eShaderModuleCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ShaderModuleCreateInfo(VULKAN_HPP_NAMESPACE::ShaderModuleCreateFlags flags_ = {}, size_t codeSize_ = {}, const uint32_t* pCode_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), codeSize( codeSize_ ), pCode( pCode_ ) + {} + + VULKAN_HPP_CONSTEXPR ShaderModuleCreateInfo( ShaderModuleCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ShaderModuleCreateInfo( VkShaderModuleCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ShaderModuleCreateInfo( VULKAN_HPP_NAMESPACE::ShaderModuleCreateFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint32_t> const & code_ ) + : flags( flags_ ), codeSize( code_.size() * 4 ), pCode( code_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ShaderModuleCreateInfo & operator=( VkShaderModuleCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo const *>( &rhs ); + return *this; + } + + ShaderModuleCreateInfo & operator=( ShaderModuleCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( ShaderModuleCreateInfo ) ); + return *this; + } + + ShaderModuleCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ShaderModuleCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::ShaderModuleCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ShaderModuleCreateInfo & setCodeSize( size_t codeSize_ ) VULKAN_HPP_NOEXCEPT + { + codeSize = codeSize_; + return *this; + } + + ShaderModuleCreateInfo & setPCode( const uint32_t* pCode_ ) VULKAN_HPP_NOEXCEPT + { + pCode = pCode_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ShaderModuleCreateInfo & setCode( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<const uint32_t> const & code_ ) VULKAN_HPP_NOEXCEPT + { + codeSize = code_.size() * 4; + pCode = code_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkShaderModuleCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkShaderModuleCreateInfo*>( this ); + } + + operator VkShaderModuleCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkShaderModuleCreateInfo*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ShaderModuleCreateInfo const& ) const = default; +#else + bool operator==( ShaderModuleCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( codeSize == rhs.codeSize ) + && ( pCode == rhs.pCode ); + } + + bool operator!=( ShaderModuleCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eShaderModuleCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ShaderModuleCreateFlags flags = {}; + size_t codeSize = {}; + const uint32_t* pCode = {}; + + }; + static_assert( sizeof( ShaderModuleCreateInfo ) == sizeof( VkShaderModuleCreateInfo ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ShaderModuleCreateInfo; + }; + + class SurfaceKHR + { + public: + using CType = VkSurfaceKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eSurfaceKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eSurfaceKHR; + + public: + VULKAN_HPP_CONSTEXPR SurfaceKHR() VULKAN_HPP_NOEXCEPT + : m_surfaceKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR SurfaceKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_surfaceKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT SurfaceKHR( VkSurfaceKHR surfaceKHR ) VULKAN_HPP_NOEXCEPT + : m_surfaceKHR( surfaceKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + SurfaceKHR & operator=(VkSurfaceKHR surfaceKHR) VULKAN_HPP_NOEXCEPT + { + m_surfaceKHR = surfaceKHR; + return *this; + } +#endif + + SurfaceKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_surfaceKHR = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceKHR const& ) const = default; +#else + bool operator==( SurfaceKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR == rhs.m_surfaceKHR; + } + + bool operator!=(SurfaceKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR != rhs.m_surfaceKHR; + } + + bool operator<(SurfaceKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR < rhs.m_surfaceKHR; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkSurfaceKHR() const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_surfaceKHR == VK_NULL_HANDLE; + } + + private: + VkSurfaceKHR m_surfaceKHR; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::SurfaceKHR ) == sizeof( VkSurfaceKHR ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::SurfaceKHR; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::SurfaceKHR; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::SurfaceKHR; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct SwapchainCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSwapchainCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SwapchainCreateInfoKHR(VULKAN_HPP_NAMESPACE::SwapchainCreateFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::SurfaceKHR surface_ = {}, uint32_t minImageCount_ = {}, VULKAN_HPP_NAMESPACE::Format imageFormat_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::ColorSpaceKHR imageColorSpace_ = VULKAN_HPP_NAMESPACE::ColorSpaceKHR::eSrgbNonlinear, VULKAN_HPP_NAMESPACE::Extent2D imageExtent_ = {}, uint32_t imageArrayLayers_ = {}, VULKAN_HPP_NAMESPACE::ImageUsageFlags imageUsage_ = {}, VULKAN_HPP_NAMESPACE::SharingMode imageSharingMode_ = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = {}, const uint32_t* pQueueFamilyIndices_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR preTransform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR compositeAlpha_ = VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR::eOpaque, VULKAN_HPP_NAMESPACE::PresentModeKHR presentMode_ = VULKAN_HPP_NAMESPACE::PresentModeKHR::eImmediate, VULKAN_HPP_NAMESPACE::Bool32 clipped_ = {}, VULKAN_HPP_NAMESPACE::SwapchainKHR oldSwapchain_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), surface( surface_ ), minImageCount( minImageCount_ ), imageFormat( imageFormat_ ), imageColorSpace( imageColorSpace_ ), imageExtent( imageExtent_ ), imageArrayLayers( imageArrayLayers_ ), imageUsage( imageUsage_ ), imageSharingMode( imageSharingMode_ ), queueFamilyIndexCount( queueFamilyIndexCount_ ), pQueueFamilyIndices( pQueueFamilyIndices_ ), preTransform( preTransform_ ), compositeAlpha( compositeAlpha_ ), presentMode( presentMode_ ), clipped( clipped_ ), oldSwapchain( oldSwapchain_ ) + {} + + VULKAN_HPP_CONSTEXPR SwapchainCreateInfoKHR( SwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SwapchainCreateInfoKHR( VkSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SwapchainCreateInfoKHR( VULKAN_HPP_NAMESPACE::SwapchainCreateFlagsKHR flags_, VULKAN_HPP_NAMESPACE::SurfaceKHR surface_, uint32_t minImageCount_, VULKAN_HPP_NAMESPACE::Format imageFormat_, VULKAN_HPP_NAMESPACE::ColorSpaceKHR imageColorSpace_, VULKAN_HPP_NAMESPACE::Extent2D imageExtent_, uint32_t imageArrayLayers_, VULKAN_HPP_NAMESPACE::ImageUsageFlags imageUsage_, VULKAN_HPP_NAMESPACE::SharingMode imageSharingMode_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR preTransform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR compositeAlpha_ = VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR::eOpaque, VULKAN_HPP_NAMESPACE::PresentModeKHR presentMode_ = VULKAN_HPP_NAMESPACE::PresentModeKHR::eImmediate, VULKAN_HPP_NAMESPACE::Bool32 clipped_ = {}, VULKAN_HPP_NAMESPACE::SwapchainKHR oldSwapchain_ = {} ) 
+ : flags( flags_ ), surface( surface_ ), minImageCount( minImageCount_ ), imageFormat( imageFormat_ ), imageColorSpace( imageColorSpace_ ), imageExtent( imageExtent_ ), imageArrayLayers( imageArrayLayers_ ), imageUsage( imageUsage_ ), imageSharingMode( imageSharingMode_ ), queueFamilyIndexCount( static_cast( queueFamilyIndices_.size() ) ), pQueueFamilyIndices( queueFamilyIndices_.data() ), preTransform( preTransform_ ), compositeAlpha( compositeAlpha_ ), presentMode( presentMode_ ), clipped( clipped_ ), oldSwapchain( oldSwapchain_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SwapchainCreateInfoKHR & operator=( VkSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SwapchainCreateInfoKHR & operator=( SwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SwapchainCreateInfoKHR ) ); + return *this; + } + + SwapchainCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SwapchainCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::SwapchainCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + SwapchainCreateInfoKHR & setSurface( VULKAN_HPP_NAMESPACE::SurfaceKHR surface_ ) VULKAN_HPP_NOEXCEPT + { + surface = surface_; + return *this; + } + + SwapchainCreateInfoKHR & setMinImageCount( uint32_t minImageCount_ ) VULKAN_HPP_NOEXCEPT + { + minImageCount = minImageCount_; + return *this; + } + + SwapchainCreateInfoKHR & setImageFormat( VULKAN_HPP_NAMESPACE::Format imageFormat_ ) VULKAN_HPP_NOEXCEPT + { + imageFormat = imageFormat_; + return *this; + } + + SwapchainCreateInfoKHR & setImageColorSpace( VULKAN_HPP_NAMESPACE::ColorSpaceKHR imageColorSpace_ ) VULKAN_HPP_NOEXCEPT + { + imageColorSpace = imageColorSpace_; + return *this; + } + + SwapchainCreateInfoKHR & setImageExtent( VULKAN_HPP_NAMESPACE::Extent2D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT + { + imageExtent = imageExtent_; + return *this; + } + + SwapchainCreateInfoKHR & setImageArrayLayers( uint32_t imageArrayLayers_ ) VULKAN_HPP_NOEXCEPT + { + imageArrayLayers = imageArrayLayers_; + return *this; + } + + SwapchainCreateInfoKHR & setImageUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags imageUsage_ ) VULKAN_HPP_NOEXCEPT + { + imageUsage = imageUsage_; + return *this; + } + + SwapchainCreateInfoKHR & setImageSharingMode( VULKAN_HPP_NAMESPACE::SharingMode imageSharingMode_ ) VULKAN_HPP_NOEXCEPT + { + imageSharingMode = imageSharingMode_; + return *this; + } + + SwapchainCreateInfoKHR & setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + SwapchainCreateInfoKHR & setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SwapchainCreateInfoKHR & setQueueFamilyIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = static_cast( queueFamilyIndices_.size() ); + pQueueFamilyIndices = queueFamilyIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SwapchainCreateInfoKHR & setPreTransform( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR preTransform_ ) VULKAN_HPP_NOEXCEPT + { + preTransform = 
preTransform_; + return *this; + } + + SwapchainCreateInfoKHR & setCompositeAlpha( VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR compositeAlpha_ ) VULKAN_HPP_NOEXCEPT + { + compositeAlpha = compositeAlpha_; + return *this; + } + + SwapchainCreateInfoKHR & setPresentMode( VULKAN_HPP_NAMESPACE::PresentModeKHR presentMode_ ) VULKAN_HPP_NOEXCEPT + { + presentMode = presentMode_; + return *this; + } + + SwapchainCreateInfoKHR & setClipped( VULKAN_HPP_NAMESPACE::Bool32 clipped_ ) VULKAN_HPP_NOEXCEPT + { + clipped = clipped_; + return *this; + } + + SwapchainCreateInfoKHR & setOldSwapchain( VULKAN_HPP_NAMESPACE::SwapchainKHR oldSwapchain_ ) VULKAN_HPP_NOEXCEPT + { + oldSwapchain = oldSwapchain_; + return *this; + } + + + operator VkSwapchainCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSwapchainCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SwapchainCreateInfoKHR const& ) const = default; +#else + bool operator==( SwapchainCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( surface == rhs.surface ) + && ( minImageCount == rhs.minImageCount ) + && ( imageFormat == rhs.imageFormat ) + && ( imageColorSpace == rhs.imageColorSpace ) + && ( imageExtent == rhs.imageExtent ) + && ( imageArrayLayers == rhs.imageArrayLayers ) + && ( imageUsage == rhs.imageUsage ) + && ( imageSharingMode == rhs.imageSharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices ) + && ( preTransform == rhs.preTransform ) + && ( compositeAlpha == rhs.compositeAlpha ) + && ( presentMode == rhs.presentMode ) + && ( clipped == rhs.clipped ) + && ( oldSwapchain == rhs.oldSwapchain ); + } + + bool operator!=( SwapchainCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSwapchainCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SwapchainCreateFlagsKHR flags = {}; + VULKAN_HPP_NAMESPACE::SurfaceKHR surface = {}; + uint32_t minImageCount = {}; + VULKAN_HPP_NAMESPACE::Format imageFormat = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::ColorSpaceKHR imageColorSpace = VULKAN_HPP_NAMESPACE::ColorSpaceKHR::eSrgbNonlinear; + VULKAN_HPP_NAMESPACE::Extent2D imageExtent = {}; + uint32_t imageArrayLayers = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags imageUsage = {}; + VULKAN_HPP_NAMESPACE::SharingMode imageSharingMode = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive; + uint32_t queueFamilyIndexCount = {}; + const uint32_t* pQueueFamilyIndices = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR preTransform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR compositeAlpha = VULKAN_HPP_NAMESPACE::CompositeAlphaFlagBitsKHR::eOpaque; + VULKAN_HPP_NAMESPACE::PresentModeKHR presentMode = VULKAN_HPP_NAMESPACE::PresentModeKHR::eImmediate; + VULKAN_HPP_NAMESPACE::Bool32 clipped = {}; + VULKAN_HPP_NAMESPACE::SwapchainKHR oldSwapchain = {}; + + }; + static_assert( sizeof( SwapchainCreateInfoKHR ) == sizeof( VkSwapchainCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SwapchainCreateInfoKHR; + }; + + struct ValidationCacheCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eValidationCacheCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ValidationCacheCreateInfoEXT(VULKAN_HPP_NAMESPACE::ValidationCacheCreateFlagsEXT flags_ = {}, size_t initialDataSize_ = {}, const void* pInitialData_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), initialDataSize( initialDataSize_ ), pInitialData( pInitialData_ ) + {} + + VULKAN_HPP_CONSTEXPR ValidationCacheCreateInfoEXT( ValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ValidationCacheCreateInfoEXT( VkValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + ValidationCacheCreateInfoEXT( VULKAN_HPP_NAMESPACE::ValidationCacheCreateFlagsEXT flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & initialData_ ) + : flags( flags_ ), initialDataSize( initialData_.size() * sizeof(T) ), pInitialData( initialData_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ValidationCacheCreateInfoEXT & operator=( VkValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ValidationCacheCreateInfoEXT & operator=( ValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ValidationCacheCreateInfoEXT ) ); + return *this; + } + + ValidationCacheCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ValidationCacheCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::ValidationCacheCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ValidationCacheCreateInfoEXT & setInitialDataSize( size_t initialDataSize_ ) VULKAN_HPP_NOEXCEPT + { + initialDataSize = initialDataSize_; + return *this; + } + + ValidationCacheCreateInfoEXT & setPInitialData( const void* pInitialData_ ) VULKAN_HPP_NOEXCEPT + { + pInitialData = pInitialData_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + ValidationCacheCreateInfoEXT & setInitialData( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & initialData_ ) VULKAN_HPP_NOEXCEPT + { + initialDataSize = initialData_.size() * sizeof(T); + pInitialData = initialData_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkValidationCacheCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkValidationCacheCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ValidationCacheCreateInfoEXT const& ) const = default; +#else + bool operator==( ValidationCacheCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( initialDataSize == rhs.initialDataSize ) + && ( pInitialData == rhs.pInitialData ); + } + + bool operator!=( ValidationCacheCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = 
StructureType::eValidationCacheCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ValidationCacheCreateFlagsEXT flags = {}; + size_t initialDataSize = {}; + const void* pInitialData = {}; + + }; + static_assert( sizeof( ValidationCacheCreateInfoEXT ) == sizeof( VkValidationCacheCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ValidationCacheCreateInfoEXT; + }; + + class ValidationCacheEXT + { + public: + using CType = VkValidationCacheEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eValidationCacheEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eValidationCacheEXT; + + public: + VULKAN_HPP_CONSTEXPR ValidationCacheEXT() VULKAN_HPP_NOEXCEPT + : m_validationCacheEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR ValidationCacheEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_validationCacheEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT ValidationCacheEXT( VkValidationCacheEXT validationCacheEXT ) VULKAN_HPP_NOEXCEPT + : m_validationCacheEXT( validationCacheEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + ValidationCacheEXT & operator=(VkValidationCacheEXT validationCacheEXT) VULKAN_HPP_NOEXCEPT + { + m_validationCacheEXT = validationCacheEXT; + return *this; + } +#endif + + ValidationCacheEXT & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_validationCacheEXT = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ValidationCacheEXT const& ) const = default; +#else + bool operator==( ValidationCacheEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT == rhs.m_validationCacheEXT; + } + + bool operator!=(ValidationCacheEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT != rhs.m_validationCacheEXT; + } + + bool operator<(ValidationCacheEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT < rhs.m_validationCacheEXT; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkValidationCacheEXT() const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_validationCacheEXT == VK_NULL_HANDLE; + } + + private: + VkValidationCacheEXT m_validationCacheEXT; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::ValidationCacheEXT ) == sizeof( VkValidationCacheEXT ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::ValidationCacheEXT; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::ValidationCacheEXT; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::ValidationCacheEXT; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct DisplayPowerInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayPowerInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPowerInfoEXT(VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT powerState_ = VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT::eOff) VULKAN_HPP_NOEXCEPT + : powerState( powerState_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPowerInfoEXT( DisplayPowerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPowerInfoEXT( VkDisplayPowerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPowerInfoEXT & operator=( VkDisplayPowerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPowerInfoEXT & operator=( DisplayPowerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPowerInfoEXT ) ); + return *this; + } + + DisplayPowerInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DisplayPowerInfoEXT & setPowerState( VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT powerState_ ) VULKAN_HPP_NOEXCEPT + { + powerState = powerState_; + return *this; + } + + + operator VkDisplayPowerInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPowerInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPowerInfoEXT const& ) const = default; +#else + bool operator==( DisplayPowerInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( powerState == rhs.powerState ); + } + + bool operator!=( DisplayPowerInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayPowerInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT powerState = VULKAN_HPP_NAMESPACE::DisplayPowerStateEXT::eOff; + + }; + static_assert( sizeof( DisplayPowerInfoEXT ) == sizeof( VkDisplayPowerInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayPowerInfoEXT; + }; + + struct MappedMemoryRange + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMappedMemoryRange; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MappedMemoryRange(VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : memory( memory_ ), offset( offset_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR MappedMemoryRange( MappedMemoryRange const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MappedMemoryRange( VkMappedMemoryRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MappedMemoryRange & operator=( VkMappedMemoryRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MappedMemoryRange & operator=( MappedMemoryRange const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MappedMemoryRange ) ); + return *this; + } + + MappedMemoryRange & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MappedMemoryRange & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + MappedMemoryRange & setOffset( VULKAN_HPP_NAMESPACE::DeviceSize offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + MappedMemoryRange & setSize( VULKAN_HPP_NAMESPACE::DeviceSize size_ ) VULKAN_HPP_NOEXCEPT + { + size = size_; + return *this; + } + + + operator VkMappedMemoryRange const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMappedMemoryRange &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MappedMemoryRange const& ) const = default; +#else + bool operator==( MappedMemoryRange const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ) + && ( offset == rhs.offset ) + && ( size == rhs.size ); + } + + bool operator!=( MappedMemoryRange const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMappedMemoryRange; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + + }; + static_assert( sizeof( MappedMemoryRange ) == sizeof( VkMappedMemoryRange ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eMappedMemoryRange>
+  {
+    using Type = MappedMemoryRange;
+  };
+
+  struct MemoryRequirements
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR MemoryRequirements(VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize alignment_ = {}, uint32_t memoryTypeBits_ = {}) VULKAN_HPP_NOEXCEPT
+    : size( size_ ), alignment( alignment_ ), memoryTypeBits( memoryTypeBits_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR MemoryRequirements( MemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    MemoryRequirements( VkMemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    MemoryRequirements & operator=( VkMemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryRequirements const *>( &rhs );
+      return *this;
+    }
+
+    MemoryRequirements & operator=( MemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( MemoryRequirements ) );
+      return *this;
+    }
+
+
+    operator VkMemoryRequirements const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkMemoryRequirements*>( this );
+    }
+
+    operator VkMemoryRequirements &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkMemoryRequirements*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( MemoryRequirements const& ) const = default;
+#else
+    bool operator==( MemoryRequirements const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( size == rhs.size )
+          && ( alignment == rhs.alignment )
+          && ( memoryTypeBits == rhs.memoryTypeBits );
+    }
+
+    bool operator!=( MemoryRequirements const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::DeviceSize size = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize alignment = {};
+    uint32_t memoryTypeBits = {};
+
+  };
+  static_assert( sizeof( MemoryRequirements ) == sizeof( VkMemoryRequirements ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<MemoryRequirements>::value, "struct wrapper is not a standard layout!" );
+
+  struct MemoryRequirements2
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryRequirements2;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR MemoryRequirements2(VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements_ = {}) VULKAN_HPP_NOEXCEPT
+    : memoryRequirements( memoryRequirements_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR MemoryRequirements2( MemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    MemoryRequirements2( VkMemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    MemoryRequirements2 & operator=( VkMemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryRequirements2 const *>( &rhs );
+      return *this;
+    }
+
+    MemoryRequirements2 & operator=( MemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( MemoryRequirements2 ) );
+      return *this;
+    }
+
+
+    operator VkMemoryRequirements2 const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkMemoryRequirements2*>( this );
+    }
+
+    operator VkMemoryRequirements2 &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkMemoryRequirements2*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( MemoryRequirements2 const& ) const = default;
+#else
+    bool operator==( MemoryRequirements2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( memoryRequirements == rhs.memoryRequirements );
+    }
+
+    bool operator!=( MemoryRequirements2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryRequirements2;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements = {};
+
+  };
+  static_assert( sizeof( MemoryRequirements2 ) == sizeof( VkMemoryRequirements2 ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<MemoryRequirements2>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = MemoryRequirements2; + }; + using MemoryRequirements2KHR = MemoryRequirements2; + + struct DeviceGroupPresentCapabilitiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupPresentCapabilitiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 DeviceGroupPresentCapabilitiesKHR(std::array const& presentMask_ = {}, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes_ = {}) VULKAN_HPP_NOEXCEPT + : presentMask( presentMask_ ), modes( modes_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 DeviceGroupPresentCapabilitiesKHR( DeviceGroupPresentCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupPresentCapabilitiesKHR( VkDeviceGroupPresentCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupPresentCapabilitiesKHR & operator=( VkDeviceGroupPresentCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupPresentCapabilitiesKHR & operator=( DeviceGroupPresentCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupPresentCapabilitiesKHR ) ); + return *this; + } + + + operator VkDeviceGroupPresentCapabilitiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupPresentCapabilitiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupPresentCapabilitiesKHR const& ) const = default; +#else + bool operator==( DeviceGroupPresentCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( presentMask == rhs.presentMask ) + && ( modes == rhs.modes ); + } + + bool operator!=( DeviceGroupPresentCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupPresentCapabilitiesKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D presentMask = {}; + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes = {}; + + }; + static_assert( sizeof( DeviceGroupPresentCapabilitiesKHR ) == sizeof( VkDeviceGroupPresentCapabilitiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceGroupPresentCapabilitiesKHR; + }; + + struct PhysicalDeviceSurfaceInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSurfaceInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSurfaceInfo2KHR(VULKAN_HPP_NAMESPACE::SurfaceKHR surface_ = {}) VULKAN_HPP_NOEXCEPT + : surface( surface_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSurfaceInfo2KHR( PhysicalDeviceSurfaceInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSurfaceInfo2KHR( VkPhysicalDeviceSurfaceInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSurfaceInfo2KHR & operator=( VkPhysicalDeviceSurfaceInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSurfaceInfo2KHR & operator=( PhysicalDeviceSurfaceInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSurfaceInfo2KHR ) ); + return *this; + } + + PhysicalDeviceSurfaceInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSurfaceInfo2KHR & setSurface( VULKAN_HPP_NAMESPACE::SurfaceKHR surface_ ) VULKAN_HPP_NOEXCEPT + { + surface = surface_; + return *this; + } + + + operator VkPhysicalDeviceSurfaceInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSurfaceInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSurfaceInfo2KHR const& ) const = default; +#else + bool operator==( PhysicalDeviceSurfaceInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surface == rhs.surface ); + } + + bool operator!=( PhysicalDeviceSurfaceInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSurfaceInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SurfaceKHR surface = {}; + + }; + static_assert( sizeof( PhysicalDeviceSurfaceInfo2KHR ) == sizeof( VkPhysicalDeviceSurfaceInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSurfaceInfo2KHR; + }; + + struct DeviceMemoryOpaqueCaptureAddressInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceMemoryOpaqueCaptureAddressInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceMemoryOpaqueCaptureAddressInfo(VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}) VULKAN_HPP_NOEXCEPT + : memory( memory_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemoryOpaqueCaptureAddressInfo( DeviceMemoryOpaqueCaptureAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceMemoryOpaqueCaptureAddressInfo( VkDeviceMemoryOpaqueCaptureAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceMemoryOpaqueCaptureAddressInfo & operator=( VkDeviceMemoryOpaqueCaptureAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceMemoryOpaqueCaptureAddressInfo & operator=( DeviceMemoryOpaqueCaptureAddressInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceMemoryOpaqueCaptureAddressInfo ) ); + return *this; + } + + DeviceMemoryOpaqueCaptureAddressInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceMemoryOpaqueCaptureAddressInfo & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + + operator VkDeviceMemoryOpaqueCaptureAddressInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceMemoryOpaqueCaptureAddressInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceMemoryOpaqueCaptureAddressInfo const& ) const = default; +#else + bool operator==( DeviceMemoryOpaqueCaptureAddressInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ); + } + + bool operator!=( DeviceMemoryOpaqueCaptureAddressInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceMemoryOpaqueCaptureAddressInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + + }; + static_assert( sizeof( DeviceMemoryOpaqueCaptureAddressInfo ) == sizeof( VkDeviceMemoryOpaqueCaptureAddressInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceMemoryOpaqueCaptureAddressInfo; + }; + using DeviceMemoryOpaqueCaptureAddressInfoKHR = DeviceMemoryOpaqueCaptureAddressInfo; + + struct PresentInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePresentInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PresentInfoKHR(uint32_t waitSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores_ = {}, uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains_ = {}, const uint32_t* pImageIndices_ = {}, VULKAN_HPP_NAMESPACE::Result* pResults_ = {}) VULKAN_HPP_NOEXCEPT + : waitSemaphoreCount( waitSemaphoreCount_ ), pWaitSemaphores( pWaitSemaphores_ ), swapchainCount( swapchainCount_ ), pSwapchains( pSwapchains_ ), pImageIndices( pImageIndices_ ), pResults( pResults_ ) + {} + + VULKAN_HPP_CONSTEXPR PresentInfoKHR( PresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PresentInfoKHR( VkPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphores_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & swapchains_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageIndices_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & results_ = {} ) + : waitSemaphoreCount( static_cast( waitSemaphores_.size() ) ), pWaitSemaphores( waitSemaphores_.data() ), swapchainCount( static_cast( swapchains_.size() ) ), pSwapchains( swapchains_.data() ), pImageIndices( imageIndices_.data() ), pResults( results_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( swapchains_.size() == imageIndices_.size() ); + VULKAN_HPP_ASSERT( results_.empty() || ( swapchains_.size() == results_.size() ) ); + VULKAN_HPP_ASSERT( results_.empty() || ( imageIndices_.size() == results_.size() ) ); +#else + if ( swapchains_.size() != imageIndices_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::PresentInfoKHR::PresentInfoKHR: swapchains_.size() != imageIndices_.size()" ); + } + if ( !results_.empty() && ( swapchains_.size() != results_.size() ) ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::PresentInfoKHR::PresentInfoKHR: !results_.empty() && ( swapchains_.size() != results_.size() )" ); + } + if ( !results_.empty() && ( imageIndices_.size() != results_.size() ) ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::PresentInfoKHR::PresentInfoKHR: !results_.empty() && ( imageIndices_.size() != results_.size() )" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PresentInfoKHR & operator=( VkPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PresentInfoKHR & operator=( PresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PresentInfoKHR ) ); + return *this; + } + + PresentInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PresentInfoKHR & setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + PresentInfoKHR & setPWaitSemaphores( const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores_ ) 
VULKAN_HPP_NOEXCEPT + { + pWaitSemaphores = pWaitSemaphores_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentInfoKHR & setWaitSemaphores( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = static_cast( waitSemaphores_.size() ); + pWaitSemaphores = waitSemaphores_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PresentInfoKHR & setSwapchainCount( uint32_t swapchainCount_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = swapchainCount_; + return *this; + } + + PresentInfoKHR & setPSwapchains( const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains_ ) VULKAN_HPP_NOEXCEPT + { + pSwapchains = pSwapchains_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentInfoKHR & setSwapchains( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & swapchains_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = static_cast( swapchains_.size() ); + pSwapchains = swapchains_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PresentInfoKHR & setPImageIndices( const uint32_t* pImageIndices_ ) VULKAN_HPP_NOEXCEPT + { + pImageIndices = pImageIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentInfoKHR & setImageIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & imageIndices_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = static_cast( imageIndices_.size() ); + pImageIndices = imageIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + PresentInfoKHR & setPResults( VULKAN_HPP_NAMESPACE::Result* pResults_ ) VULKAN_HPP_NOEXCEPT + { + pResults = pResults_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentInfoKHR & setResults( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & results_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = static_cast( results_.size() ); + pResults = results_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPresentInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPresentInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PresentInfoKHR const& ) const = default; +#else + bool operator==( PresentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphores == rhs.pWaitSemaphores ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pSwapchains == rhs.pSwapchains ) + && ( pImageIndices == rhs.pImageIndices ) + && ( pResults == rhs.pResults ); + } + + bool operator!=( PresentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePresentInfoKHR; + const void* pNext = {}; + uint32_t waitSemaphoreCount = {}; + const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores = {}; + uint32_t swapchainCount = {}; + const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains = {}; + const uint32_t* pImageIndices = {}; + VULKAN_HPP_NAMESPACE::Result* pResults = {}; + + }; + static_assert( sizeof( PresentInfoKHR ) == sizeof( VkPresentInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PresentInfoKHR; + }; + + struct SubmitInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubmitInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubmitInfo(uint32_t waitSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores_ = {}, const VULKAN_HPP_NAMESPACE::PipelineStageFlags* pWaitDstStageMask_ = {}, uint32_t commandBufferCount_ = {}, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers_ = {}, uint32_t signalSemaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore* pSignalSemaphores_ = {}) VULKAN_HPP_NOEXCEPT + : waitSemaphoreCount( waitSemaphoreCount_ ), pWaitSemaphores( pWaitSemaphores_ ), pWaitDstStageMask( pWaitDstStageMask_ ), commandBufferCount( commandBufferCount_ ), pCommandBuffers( pCommandBuffers_ ), signalSemaphoreCount( signalSemaphoreCount_ ), pSignalSemaphores( pSignalSemaphores_ ) + {} + + VULKAN_HPP_CONSTEXPR SubmitInfo( SubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubmitInfo( VkSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubmitInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphores_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitDstStageMask_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & commandBuffers_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphores_ = {} ) + : waitSemaphoreCount( static_cast( waitSemaphores_.size() ) ), pWaitSemaphores( waitSemaphores_.data() ), pWaitDstStageMask( waitDstStageMask_.data() ), commandBufferCount( static_cast( commandBuffers_.size() ) ), pCommandBuffers( commandBuffers_.data() ), signalSemaphoreCount( static_cast( signalSemaphores_.size() ) ), pSignalSemaphores( signalSemaphores_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( waitSemaphores_.size() == waitDstStageMask_.size() ); +#else + if ( waitSemaphores_.size() != waitDstStageMask_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::SubmitInfo::SubmitInfo: waitSemaphores_.size() != waitDstStageMask_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubmitInfo & operator=( VkSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SubmitInfo & operator=( SubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SubmitInfo ) ); + return *this; + } + + SubmitInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SubmitInfo & setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + SubmitInfo & setPWaitSemaphores( const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + pWaitSemaphores = pWaitSemaphores_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubmitInfo & setWaitSemaphores( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = static_cast( waitSemaphores_.size() ); + pWaitSemaphores = waitSemaphores_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubmitInfo & setPWaitDstStageMask( const 
VULKAN_HPP_NAMESPACE::PipelineStageFlags* pWaitDstStageMask_ ) VULKAN_HPP_NOEXCEPT + { + pWaitDstStageMask = pWaitDstStageMask_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubmitInfo & setWaitDstStageMask( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitDstStageMask_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = static_cast( waitDstStageMask_.size() ); + pWaitDstStageMask = waitDstStageMask_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubmitInfo & setCommandBufferCount( uint32_t commandBufferCount_ ) VULKAN_HPP_NOEXCEPT + { + commandBufferCount = commandBufferCount_; + return *this; + } + + SubmitInfo & setPCommandBuffers( const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers_ ) VULKAN_HPP_NOEXCEPT + { + pCommandBuffers = pCommandBuffers_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubmitInfo & setCommandBuffers( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & commandBuffers_ ) VULKAN_HPP_NOEXCEPT + { + commandBufferCount = static_cast( commandBuffers_.size() ); + pCommandBuffers = commandBuffers_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SubmitInfo & setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreCount = signalSemaphoreCount_; + return *this; + } + + SubmitInfo & setPSignalSemaphores( const VULKAN_HPP_NAMESPACE::Semaphore* pSignalSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + pSignalSemaphores = pSignalSemaphores_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SubmitInfo & setSignalSemaphores( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreCount = static_cast( signalSemaphores_.size() ); + pSignalSemaphores = signalSemaphores_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSubmitInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSubmitInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubmitInfo const& ) const = default; +#else + bool operator==( SubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphores == rhs.pWaitSemaphores ) + && ( pWaitDstStageMask == rhs.pWaitDstStageMask ) + && ( commandBufferCount == rhs.commandBufferCount ) + && ( pCommandBuffers == rhs.pCommandBuffers ) + && ( signalSemaphoreCount == rhs.signalSemaphoreCount ) + && ( pSignalSemaphores == rhs.pSignalSemaphores ); + } + + bool operator!=( SubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSubmitInfo; + const void* pNext = {}; + uint32_t waitSemaphoreCount = {}; + const VULKAN_HPP_NAMESPACE::Semaphore* pWaitSemaphores = {}; + const VULKAN_HPP_NAMESPACE::PipelineStageFlags* pWaitDstStageMask = {}; + uint32_t commandBufferCount = {}; + const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers = {}; + uint32_t signalSemaphoreCount = {}; + const VULKAN_HPP_NAMESPACE::Semaphore* pSignalSemaphores = {}; + + }; + static_assert( sizeof( SubmitInfo ) == sizeof( VkSubmitInfo ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SubmitInfo; + }; + + class Queue + { + public: + using CType = VkQueue; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eQueue; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueue; + + public: + VULKAN_HPP_CONSTEXPR Queue() VULKAN_HPP_NOEXCEPT + : m_queue(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Queue( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_queue(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Queue( VkQueue queue ) VULKAN_HPP_NOEXCEPT + : m_queue( queue ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Queue & operator=(VkQueue queue) VULKAN_HPP_NOEXCEPT + { + m_queue = queue; + return *this; + } +#endif + + Queue & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_queue = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Queue const& ) const = default; +#else + bool operator==( Queue const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_queue == rhs.m_queue; + } + + bool operator!=(Queue const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_queue != rhs.m_queue; + } + + bool operator<(Queue const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_queue < rhs.m_queue; + } +#endif + + + template + void getCheckpointDataNV( uint32_t* pCheckpointDataCount, VULKAN_HPP_NAMESPACE::CheckpointDataNV* pCheckpointData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getCheckpointDataNV( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = CheckpointDataNVAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getCheckpointDataNV( CheckpointDataNVAllocator & checkpointDataNVAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result bindSparse( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindSparseInfo* pBindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindSparse( ArrayProxy const & bindInfo, VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void endDebugUtilsLabelEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; + + + template + void insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result presentKHR( const VULKAN_HPP_NAMESPACE::PresentInfoKHR* pPresentInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result presentKHR( const PresentInfoKHR & presentInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result submit( uint32_t submitCount, const VULKAN_HPP_NAMESPACE::SubmitInfo* pSubmits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type submit( ArrayProxy const & submits, VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkQueue() const VULKAN_HPP_NOEXCEPT + { + return m_queue; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_queue != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_queue == VK_NULL_HANDLE; + } + + private: + VkQueue m_queue; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Queue ) == sizeof( VkQueue ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type<ObjectType::eQueue>
+  {
+    using type = VULKAN_HPP_NAMESPACE::Queue;
+  };
+
+  template <>
+  struct CppType<VULKAN_HPP_NAMESPACE::ObjectType, VULKAN_HPP_NAMESPACE::ObjectType::eQueue>
+  {
+    using Type = VULKAN_HPP_NAMESPACE::Queue;
+  };
+
+
+  template <>
+  struct CppType<VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eQueue>
+  {
+    using Type = VULKAN_HPP_NAMESPACE::Queue;
+  };
+
+
+  template <>
+  struct isVulkanHandleType<VULKAN_HPP_NAMESPACE::Queue>
+  {
+    static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true;
+  };
+
+  struct DeviceQueueInfo2
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceQueueInfo2;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR DeviceQueueInfo2(VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags_ = {}, uint32_t queueFamilyIndex_ = {}, uint32_t queueIndex_ = {}) VULKAN_HPP_NOEXCEPT
+    : flags( flags_ ), queueFamilyIndex( queueFamilyIndex_ ), queueIndex( queueIndex_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR DeviceQueueInfo2( DeviceQueueInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    DeviceQueueInfo2( VkDeviceQueueInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    DeviceQueueInfo2 & operator=( VkDeviceQueueInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DeviceQueueInfo2 const *>( &rhs );
+      return *this;
+    }
+
+    DeviceQueueInfo2 & operator=( DeviceQueueInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( DeviceQueueInfo2 ) );
+      return *this;
+    }
+
+    DeviceQueueInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    DeviceQueueInfo2 & setFlags( VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT
+    {
+      flags = flags_;
+      return *this;
+    }
+
+    DeviceQueueInfo2 & setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT
+    {
+      queueFamilyIndex = queueFamilyIndex_;
+      return *this;
+    }
+
+    DeviceQueueInfo2 & setQueueIndex( uint32_t queueIndex_ ) VULKAN_HPP_NOEXCEPT
+    {
+      queueIndex = queueIndex_;
+      return *this;
+    }
+
+
+    operator VkDeviceQueueInfo2 const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkDeviceQueueInfo2*>( this );
+    }
+
+    operator VkDeviceQueueInfo2 &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkDeviceQueueInfo2*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DeviceQueueInfo2 const& ) const = default;
+#else
+    bool operator==( DeviceQueueInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( flags == rhs.flags )
+          && ( queueFamilyIndex == rhs.queueFamilyIndex )
+          && ( queueIndex == rhs.queueIndex );
+    }
+
+    bool operator!=( DeviceQueueInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceQueueInfo2;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::DeviceQueueCreateFlags flags = {};
+    uint32_t queueFamilyIndex = {};
+    uint32_t queueIndex = {};
+
+  };
+  static_assert( sizeof( DeviceQueueInfo2 ) == sizeof( VkDeviceQueueInfo2 ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<DeviceQueueInfo2>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = DeviceQueueInfo2; + }; + + struct FenceGetFdInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFenceGetFdInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FenceGetFdInfoKHR(VULKAN_HPP_NAMESPACE::Fence fence_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : fence( fence_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR FenceGetFdInfoKHR( FenceGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FenceGetFdInfoKHR( VkFenceGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FenceGetFdInfoKHR & operator=( VkFenceGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FenceGetFdInfoKHR & operator=( FenceGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FenceGetFdInfoKHR ) ); + return *this; + } + + FenceGetFdInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FenceGetFdInfoKHR & setFence( VULKAN_HPP_NAMESPACE::Fence fence_ ) VULKAN_HPP_NOEXCEPT + { + fence = fence_; + return *this; + } + + FenceGetFdInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkFenceGetFdInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFenceGetFdInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FenceGetFdInfoKHR const& ) const = default; +#else + bool operator==( FenceGetFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( FenceGetFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFenceGetFdInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Fence fence = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( FenceGetFdInfoKHR ) == sizeof( VkFenceGetFdInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = FenceGetFdInfoKHR; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct FenceGetWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFenceGetWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FenceGetWin32HandleInfoKHR(VULKAN_HPP_NAMESPACE::Fence fence_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : fence( fence_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR FenceGetWin32HandleInfoKHR( FenceGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FenceGetWin32HandleInfoKHR( VkFenceGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FenceGetWin32HandleInfoKHR & operator=( VkFenceGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FenceGetWin32HandleInfoKHR & operator=( FenceGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FenceGetWin32HandleInfoKHR ) ); + return *this; + } + + FenceGetWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FenceGetWin32HandleInfoKHR & setFence( VULKAN_HPP_NAMESPACE::Fence fence_ ) VULKAN_HPP_NOEXCEPT + { + fence = fence_; + return *this; + } + + FenceGetWin32HandleInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkFenceGetWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFenceGetWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FenceGetWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( FenceGetWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( FenceGetWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFenceGetWin32HandleInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Fence fence = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( FenceGetWin32HandleInfoKHR ) == sizeof( VkFenceGetWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = FenceGetWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct GeneratedCommandsMemoryRequirementsInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGeneratedCommandsMemoryRequirementsInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GeneratedCommandsMemoryRequirementsInfoNV(VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics, VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout_ = {}, uint32_t maxSequencesCount_ = {}) VULKAN_HPP_NOEXCEPT + : pipelineBindPoint( pipelineBindPoint_ ), pipeline( pipeline_ ), indirectCommandsLayout( indirectCommandsLayout_ ), maxSequencesCount( maxSequencesCount_ ) + {} + + VULKAN_HPP_CONSTEXPR GeneratedCommandsMemoryRequirementsInfoNV( GeneratedCommandsMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GeneratedCommandsMemoryRequirementsInfoNV( VkGeneratedCommandsMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GeneratedCommandsMemoryRequirementsInfoNV & operator=( VkGeneratedCommandsMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GeneratedCommandsMemoryRequirementsInfoNV & operator=( GeneratedCommandsMemoryRequirementsInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GeneratedCommandsMemoryRequirementsInfoNV ) ); + return *this; + } + + GeneratedCommandsMemoryRequirementsInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GeneratedCommandsMemoryRequirementsInfoNV & setPipelineBindPoint( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint_ ) VULKAN_HPP_NOEXCEPT + { + pipelineBindPoint = pipelineBindPoint_; + return *this; + } + + GeneratedCommandsMemoryRequirementsInfoNV & setPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ ) VULKAN_HPP_NOEXCEPT + { + pipeline = pipeline_; + return *this; + } + + GeneratedCommandsMemoryRequirementsInfoNV & setIndirectCommandsLayout( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout_ ) VULKAN_HPP_NOEXCEPT + { + indirectCommandsLayout = indirectCommandsLayout_; + return *this; + } + + GeneratedCommandsMemoryRequirementsInfoNV & setMaxSequencesCount( uint32_t maxSequencesCount_ ) VULKAN_HPP_NOEXCEPT + { + maxSequencesCount = maxSequencesCount_; + return *this; + } + + + operator VkGeneratedCommandsMemoryRequirementsInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGeneratedCommandsMemoryRequirementsInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GeneratedCommandsMemoryRequirementsInfoNV const& ) const = default; +#else + bool operator==( GeneratedCommandsMemoryRequirementsInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineBindPoint == rhs.pipelineBindPoint ) + && ( pipeline == rhs.pipeline ) + && ( indirectCommandsLayout == rhs.indirectCommandsLayout ) + && ( maxSequencesCount == rhs.maxSequencesCount ); + } + + bool operator!=( GeneratedCommandsMemoryRequirementsInfoNV const& rhs ) 
const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGeneratedCommandsMemoryRequirementsInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint = VULKAN_HPP_NAMESPACE::PipelineBindPoint::eGraphics; + VULKAN_HPP_NAMESPACE::Pipeline pipeline = {}; + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout = {}; + uint32_t maxSequencesCount = {}; + + }; + static_assert( sizeof( GeneratedCommandsMemoryRequirementsInfoNV ) == sizeof( VkGeneratedCommandsMemoryRequirementsInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = GeneratedCommandsMemoryRequirementsInfoNV; + }; + + struct ImageDrmFormatModifierPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageDrmFormatModifierPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierPropertiesEXT(uint64_t drmFormatModifier_ = {}) VULKAN_HPP_NOEXCEPT + : drmFormatModifier( drmFormatModifier_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierPropertiesEXT( ImageDrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageDrmFormatModifierPropertiesEXT( VkImageDrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageDrmFormatModifierPropertiesEXT & operator=( VkImageDrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageDrmFormatModifierPropertiesEXT & operator=( ImageDrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageDrmFormatModifierPropertiesEXT ) ); + return *this; + } + + + operator VkImageDrmFormatModifierPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageDrmFormatModifierPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageDrmFormatModifierPropertiesEXT const& ) const = default; +#else + bool operator==( ImageDrmFormatModifierPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( drmFormatModifier == rhs.drmFormatModifier ); + } + + bool operator!=( ImageDrmFormatModifierPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageDrmFormatModifierPropertiesEXT; + void* pNext = {}; + uint64_t drmFormatModifier = {}; + + }; + static_assert( sizeof( ImageDrmFormatModifierPropertiesEXT ) == sizeof( VkImageDrmFormatModifierPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageDrmFormatModifierPropertiesEXT; + }; + + struct ImageMemoryRequirementsInfo2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageMemoryRequirementsInfo2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageMemoryRequirementsInfo2(VULKAN_HPP_NAMESPACE::Image image_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageMemoryRequirementsInfo2( ImageMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageMemoryRequirementsInfo2( VkImageMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageMemoryRequirementsInfo2 & operator=( VkImageMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageMemoryRequirementsInfo2 & operator=( ImageMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageMemoryRequirementsInfo2 ) ); + return *this; + } + + ImageMemoryRequirementsInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageMemoryRequirementsInfo2 & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + + operator VkImageMemoryRequirementsInfo2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageMemoryRequirementsInfo2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageMemoryRequirementsInfo2 const& ) const = default; +#else + bool operator==( ImageMemoryRequirementsInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ); + } + + bool operator!=( ImageMemoryRequirementsInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageMemoryRequirementsInfo2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + + }; + static_assert( sizeof( ImageMemoryRequirementsInfo2 ) == sizeof( VkImageMemoryRequirementsInfo2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageMemoryRequirementsInfo2; + }; + using ImageMemoryRequirementsInfo2KHR = ImageMemoryRequirementsInfo2; + + struct SparseImageFormatProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageFormatProperties(VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}, VULKAN_HPP_NAMESPACE::Extent3D imageGranularity_ = {}, VULKAN_HPP_NAMESPACE::SparseImageFormatFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : aspectMask( aspectMask_ ), imageGranularity( imageGranularity_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageFormatProperties( SparseImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageFormatProperties( VkSparseImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageFormatProperties & operator=( VkSparseImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageFormatProperties & operator=( SparseImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageFormatProperties ) ); + return *this; + } + + + operator VkSparseImageFormatProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageFormatProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageFormatProperties const& ) const = default; +#else + bool operator==( SparseImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( aspectMask == rhs.aspectMask ) + && ( imageGranularity == rhs.imageGranularity ) + && ( flags == rhs.flags ); + } + + bool operator!=( SparseImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {}; + VULKAN_HPP_NAMESPACE::Extent3D imageGranularity = {}; + VULKAN_HPP_NAMESPACE::SparseImageFormatFlags flags = {}; + + }; + static_assert( sizeof( SparseImageFormatProperties ) == sizeof( VkSparseImageFormatProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct SparseImageMemoryRequirements + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageMemoryRequirements(VULKAN_HPP_NAMESPACE::SparseImageFormatProperties formatProperties_ = {}, uint32_t imageMipTailFirstLod_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailSize_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailOffset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailStride_ = {}) VULKAN_HPP_NOEXCEPT + : formatProperties( formatProperties_ ), imageMipTailFirstLod( imageMipTailFirstLod_ ), imageMipTailSize( imageMipTailSize_ ), imageMipTailOffset( imageMipTailOffset_ ), imageMipTailStride( imageMipTailStride_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageMemoryRequirements( SparseImageMemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageMemoryRequirements( VkSparseImageMemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageMemoryRequirements & operator=( VkSparseImageMemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageMemoryRequirements & operator=( SparseImageMemoryRequirements const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageMemoryRequirements ) ); + return *this; + } + + + operator VkSparseImageMemoryRequirements const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageMemoryRequirements &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageMemoryRequirements const& ) const = default; +#else + bool operator==( SparseImageMemoryRequirements const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( formatProperties == rhs.formatProperties ) + && ( imageMipTailFirstLod == rhs.imageMipTailFirstLod ) + && ( imageMipTailSize == rhs.imageMipTailSize ) + && ( imageMipTailOffset == rhs.imageMipTailOffset ) + && ( imageMipTailStride == rhs.imageMipTailStride ); + } + + bool operator!=( SparseImageMemoryRequirements const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::SparseImageFormatProperties formatProperties = {}; + uint32_t imageMipTailFirstLod = {}; + VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailSize = {}; + VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailOffset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize imageMipTailStride = {}; + + }; + static_assert( sizeof( SparseImageMemoryRequirements ) == sizeof( VkSparseImageMemoryRequirements ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ImageSparseMemoryRequirementsInfo2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageSparseMemoryRequirementsInfo2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageSparseMemoryRequirementsInfo2(VULKAN_HPP_NAMESPACE::Image image_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageSparseMemoryRequirementsInfo2( ImageSparseMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageSparseMemoryRequirementsInfo2( VkImageSparseMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageSparseMemoryRequirementsInfo2 & operator=( VkImageSparseMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageSparseMemoryRequirementsInfo2 & operator=( ImageSparseMemoryRequirementsInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageSparseMemoryRequirementsInfo2 ) ); + return *this; + } + + ImageSparseMemoryRequirementsInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageSparseMemoryRequirementsInfo2 & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + + operator VkImageSparseMemoryRequirementsInfo2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageSparseMemoryRequirementsInfo2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageSparseMemoryRequirementsInfo2 const& ) const = default; +#else + bool operator==( ImageSparseMemoryRequirementsInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ); + } + + bool operator!=( ImageSparseMemoryRequirementsInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageSparseMemoryRequirementsInfo2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + + }; + static_assert( sizeof( ImageSparseMemoryRequirementsInfo2 ) == sizeof( VkImageSparseMemoryRequirementsInfo2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageSparseMemoryRequirementsInfo2; + }; + using ImageSparseMemoryRequirementsInfo2KHR = ImageSparseMemoryRequirementsInfo2; + + struct SparseImageMemoryRequirements2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSparseImageMemoryRequirements2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageMemoryRequirements2(VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements memoryRequirements_ = {}) VULKAN_HPP_NOEXCEPT + : memoryRequirements( memoryRequirements_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageMemoryRequirements2( SparseImageMemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageMemoryRequirements2( VkSparseImageMemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageMemoryRequirements2 & operator=( VkSparseImageMemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageMemoryRequirements2 & operator=( SparseImageMemoryRequirements2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageMemoryRequirements2 ) ); + return *this; + } + + + operator VkSparseImageMemoryRequirements2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageMemoryRequirements2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageMemoryRequirements2 const& ) const = default; +#else + bool operator==( SparseImageMemoryRequirements2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryRequirements == rhs.memoryRequirements ); + } + + bool operator!=( SparseImageMemoryRequirements2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSparseImageMemoryRequirements2; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements memoryRequirements = {}; + + }; + static_assert( sizeof( SparseImageMemoryRequirements2 ) == sizeof( VkSparseImageMemoryRequirements2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eSparseImageMemoryRequirements2>
+  {
+    using Type = SparseImageMemoryRequirements2;
+  };
+  using SparseImageMemoryRequirements2KHR = SparseImageMemoryRequirements2;
+
+  struct SubresourceLayout
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR SubresourceLayout(VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize rowPitch_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize arrayPitch_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize depthPitch_ = {}) VULKAN_HPP_NOEXCEPT
+    : offset( offset_ ), size( size_ ), rowPitch( rowPitch_ ), arrayPitch( arrayPitch_ ), depthPitch( depthPitch_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR SubresourceLayout( SubresourceLayout const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SubresourceLayout( VkSubresourceLayout const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SubresourceLayout & operator=( VkSubresourceLayout const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SubresourceLayout const *>( &rhs );
+      return *this;
+    }
+
+    SubresourceLayout & operator=( SubresourceLayout const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( SubresourceLayout ) );
+      return *this;
+    }
+
+
+    operator VkSubresourceLayout const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSubresourceLayout*>( this );
+    }
+
+    operator VkSubresourceLayout &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSubresourceLayout*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( SubresourceLayout const& ) const = default;
+#else
+    bool operator==( SubresourceLayout const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( offset == rhs.offset )
+          && ( size == rhs.size )
+          && ( rowPitch == rhs.rowPitch )
+          && ( arrayPitch == rhs.arrayPitch )
+          && ( depthPitch == rhs.depthPitch );
+    }
+
+    bool operator!=( SubresourceLayout const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::DeviceSize offset = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize size = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize rowPitch = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize arrayPitch = {};
+    VULKAN_HPP_NAMESPACE::DeviceSize depthPitch = {};
+
+  };
+  static_assert( sizeof( SubresourceLayout ) == sizeof( VkSubresourceLayout ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<SubresourceLayout>::value, "struct wrapper is not a standard layout!"
); + + struct ImageViewAddressPropertiesNVX + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageViewAddressPropertiesNVX; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageViewAddressPropertiesNVX(VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}) VULKAN_HPP_NOEXCEPT + : deviceAddress( deviceAddress_ ), size( size_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageViewAddressPropertiesNVX( ImageViewAddressPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageViewAddressPropertiesNVX( VkImageViewAddressPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageViewAddressPropertiesNVX & operator=( VkImageViewAddressPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageViewAddressPropertiesNVX & operator=( ImageViewAddressPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageViewAddressPropertiesNVX ) ); + return *this; + } + + + operator VkImageViewAddressPropertiesNVX const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageViewAddressPropertiesNVX &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageViewAddressPropertiesNVX const& ) const = default; +#else + bool operator==( ImageViewAddressPropertiesNVX const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceAddress == rhs.deviceAddress ) + && ( size == rhs.size ); + } + + bool operator!=( ImageViewAddressPropertiesNVX const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageViewAddressPropertiesNVX; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceAddress deviceAddress = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + + }; + static_assert( sizeof( ImageViewAddressPropertiesNVX ) == sizeof( VkImageViewAddressPropertiesNVX ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageViewAddressPropertiesNVX; + }; + + struct ImageViewHandleInfoNVX + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageViewHandleInfoNVX; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageViewHandleInfoNVX(VULKAN_HPP_NAMESPACE::ImageView imageView_ = {}, VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler, VULKAN_HPP_NAMESPACE::Sampler sampler_ = {}) VULKAN_HPP_NOEXCEPT + : imageView( imageView_ ), descriptorType( descriptorType_ ), sampler( sampler_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageViewHandleInfoNVX( ImageViewHandleInfoNVX const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageViewHandleInfoNVX( VkImageViewHandleInfoNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageViewHandleInfoNVX & operator=( VkImageViewHandleInfoNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageViewHandleInfoNVX & operator=( ImageViewHandleInfoNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageViewHandleInfoNVX ) ); + return *this; + } + + ImageViewHandleInfoNVX & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageViewHandleInfoNVX & setImageView( VULKAN_HPP_NAMESPACE::ImageView imageView_ ) VULKAN_HPP_NOEXCEPT + { + imageView = imageView_; + return *this; + } + + ImageViewHandleInfoNVX & setDescriptorType( VULKAN_HPP_NAMESPACE::DescriptorType descriptorType_ ) VULKAN_HPP_NOEXCEPT + { + descriptorType = descriptorType_; + return *this; + } + + ImageViewHandleInfoNVX & setSampler( VULKAN_HPP_NAMESPACE::Sampler sampler_ ) VULKAN_HPP_NOEXCEPT + { + sampler = sampler_; + return *this; + } + + + operator VkImageViewHandleInfoNVX const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageViewHandleInfoNVX &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageViewHandleInfoNVX const& ) const = default; +#else + bool operator==( ImageViewHandleInfoNVX const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( imageView == rhs.imageView ) + && ( descriptorType == rhs.descriptorType ) + && ( sampler == rhs.sampler ); + } + + bool operator!=( ImageViewHandleInfoNVX const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageViewHandleInfoNVX; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageView imageView = {}; + VULKAN_HPP_NAMESPACE::DescriptorType descriptorType = VULKAN_HPP_NAMESPACE::DescriptorType::eSampler; + VULKAN_HPP_NAMESPACE::Sampler sampler = {}; + + }; + static_assert( sizeof( ImageViewHandleInfoNVX ) == sizeof( VkImageViewHandleInfoNVX ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageViewHandleInfoNVX; + }; + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct MemoryGetAndroidHardwareBufferInfoANDROID + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryGetAndroidHardwareBufferInfoANDROID; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryGetAndroidHardwareBufferInfoANDROID(VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}) VULKAN_HPP_NOEXCEPT + : memory( memory_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryGetAndroidHardwareBufferInfoANDROID( MemoryGetAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryGetAndroidHardwareBufferInfoANDROID( VkMemoryGetAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryGetAndroidHardwareBufferInfoANDROID & operator=( VkMemoryGetAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryGetAndroidHardwareBufferInfoANDROID & operator=( MemoryGetAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryGetAndroidHardwareBufferInfoANDROID ) ); + return *this; + } + + MemoryGetAndroidHardwareBufferInfoANDROID & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryGetAndroidHardwareBufferInfoANDROID & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + + operator VkMemoryGetAndroidHardwareBufferInfoANDROID const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryGetAndroidHardwareBufferInfoANDROID &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryGetAndroidHardwareBufferInfoANDROID const& ) const = default; +#else + bool operator==( MemoryGetAndroidHardwareBufferInfoANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ); + } + + bool operator!=( MemoryGetAndroidHardwareBufferInfoANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryGetAndroidHardwareBufferInfoANDROID; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + + }; + static_assert( sizeof( MemoryGetAndroidHardwareBufferInfoANDROID ) == sizeof( VkMemoryGetAndroidHardwareBufferInfoANDROID ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MemoryGetAndroidHardwareBufferInfoANDROID; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + struct MemoryGetFdInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryGetFdInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryGetFdInfoKHR(VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : memory( memory_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryGetFdInfoKHR( MemoryGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryGetFdInfoKHR( VkMemoryGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryGetFdInfoKHR & operator=( VkMemoryGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryGetFdInfoKHR & operator=( MemoryGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryGetFdInfoKHR ) ); + return *this; + } + + MemoryGetFdInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryGetFdInfoKHR & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + MemoryGetFdInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkMemoryGetFdInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryGetFdInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryGetFdInfoKHR const& ) const = default; +#else + bool operator==( MemoryGetFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( MemoryGetFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryGetFdInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( MemoryGetFdInfoKHR ) == sizeof( VkMemoryGetFdInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eMemoryGetFdInfoKHR>
+  {
+    using Type = MemoryGetFdInfoKHR;
+  };
+
+  struct MemoryFdPropertiesKHR
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryFdPropertiesKHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR MemoryFdPropertiesKHR(uint32_t memoryTypeBits_ = {}) VULKAN_HPP_NOEXCEPT
+    : memoryTypeBits( memoryTypeBits_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR MemoryFdPropertiesKHR( MemoryFdPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    MemoryFdPropertiesKHR( VkMemoryFdPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    MemoryFdPropertiesKHR & operator=( VkMemoryFdPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR const *>( &rhs );
+      return *this;
+    }
+
+    MemoryFdPropertiesKHR & operator=( MemoryFdPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( MemoryFdPropertiesKHR ) );
+      return *this;
+    }
+
+
+    operator VkMemoryFdPropertiesKHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkMemoryFdPropertiesKHR*>( this );
+    }
+
+    operator VkMemoryFdPropertiesKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkMemoryFdPropertiesKHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( MemoryFdPropertiesKHR const& ) const = default;
+#else
+    bool operator==( MemoryFdPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( memoryTypeBits == rhs.memoryTypeBits );
+    }
+
+    bool operator!=( MemoryFdPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryFdPropertiesKHR;
+    void* pNext = {};
+    uint32_t memoryTypeBits = {};
+
+  };
+  static_assert( sizeof( MemoryFdPropertiesKHR ) == sizeof( VkMemoryFdPropertiesKHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<MemoryFdPropertiesKHR>::value, "struct wrapper is not a standard layout!" );
+
+  template <>
+  struct CppType<StructureType, StructureType::eMemoryFdPropertiesKHR>
+  {
+    using Type = MemoryFdPropertiesKHR;
+  };
+
+  struct MemoryHostPointerPropertiesEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryHostPointerPropertiesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR MemoryHostPointerPropertiesEXT(uint32_t memoryTypeBits_ = {}) VULKAN_HPP_NOEXCEPT
+    : memoryTypeBits( memoryTypeBits_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR MemoryHostPointerPropertiesEXT( MemoryHostPointerPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    MemoryHostPointerPropertiesEXT( VkMemoryHostPointerPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    MemoryHostPointerPropertiesEXT & operator=( VkMemoryHostPointerPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT const *>( &rhs );
+      return *this;
+    }
+
+    MemoryHostPointerPropertiesEXT & operator=( MemoryHostPointerPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( MemoryHostPointerPropertiesEXT ) );
+      return *this;
+    }
+
+
+    operator VkMemoryHostPointerPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkMemoryHostPointerPropertiesEXT*>( this );
+    }
+
+    operator VkMemoryHostPointerPropertiesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkMemoryHostPointerPropertiesEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( MemoryHostPointerPropertiesEXT const& ) const = default;
+#else
+    bool operator==( MemoryHostPointerPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( memoryTypeBits == rhs.memoryTypeBits );
+    }
+
+    bool operator!=( MemoryHostPointerPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryHostPointerPropertiesEXT;
+    void* pNext = {};
+    uint32_t memoryTypeBits = {};
+
+  };
+  static_assert( sizeof( MemoryHostPointerPropertiesEXT ) == sizeof( VkMemoryHostPointerPropertiesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<MemoryHostPointerPropertiesEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = MemoryHostPointerPropertiesEXT; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct MemoryGetWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryGetWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryGetWin32HandleInfoKHR(VULKAN_HPP_NAMESPACE::DeviceMemory memory_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : memory( memory_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryGetWin32HandleInfoKHR( MemoryGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryGetWin32HandleInfoKHR( VkMemoryGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryGetWin32HandleInfoKHR & operator=( VkMemoryGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryGetWin32HandleInfoKHR & operator=( MemoryGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryGetWin32HandleInfoKHR ) ); + return *this; + } + + MemoryGetWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryGetWin32HandleInfoKHR & setMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory_ ) VULKAN_HPP_NOEXCEPT + { + memory = memory_; + return *this; + } + + MemoryGetWin32HandleInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkMemoryGetWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryGetWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryGetWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( MemoryGetWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memory == rhs.memory ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( MemoryGetWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryGetWin32HandleInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory memory = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( MemoryGetWin32HandleInfoKHR ) == sizeof( VkMemoryGetWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MemoryGetWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct MemoryWin32HandlePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryWin32HandlePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryWin32HandlePropertiesKHR(uint32_t memoryTypeBits_ = {}) VULKAN_HPP_NOEXCEPT + : memoryTypeBits( memoryTypeBits_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryWin32HandlePropertiesKHR( MemoryWin32HandlePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryWin32HandlePropertiesKHR( VkMemoryWin32HandlePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryWin32HandlePropertiesKHR & operator=( VkMemoryWin32HandlePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryWin32HandlePropertiesKHR & operator=( MemoryWin32HandlePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryWin32HandlePropertiesKHR ) ); + return *this; + } + + + operator VkMemoryWin32HandlePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryWin32HandlePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryWin32HandlePropertiesKHR const& ) const = default; +#else + bool operator==( MemoryWin32HandlePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryTypeBits == rhs.memoryTypeBits ); + } + + bool operator!=( MemoryWin32HandlePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryWin32HandlePropertiesKHR; + void* pNext = {}; + uint32_t memoryTypeBits = {}; + + }; + static_assert( sizeof( MemoryWin32HandlePropertiesKHR ) == sizeof( VkMemoryWin32HandlePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MemoryWin32HandlePropertiesKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct PastPresentationTimingGOOGLE + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PastPresentationTimingGOOGLE(uint32_t presentID_ = {}, uint64_t desiredPresentTime_ = {}, uint64_t actualPresentTime_ = {}, uint64_t earliestPresentTime_ = {}, uint64_t presentMargin_ = {}) VULKAN_HPP_NOEXCEPT + : presentID( presentID_ ), desiredPresentTime( desiredPresentTime_ ), actualPresentTime( actualPresentTime_ ), earliestPresentTime( earliestPresentTime_ ), presentMargin( presentMargin_ ) + {} + + VULKAN_HPP_CONSTEXPR PastPresentationTimingGOOGLE( PastPresentationTimingGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PastPresentationTimingGOOGLE( VkPastPresentationTimingGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PastPresentationTimingGOOGLE & operator=( VkPastPresentationTimingGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PastPresentationTimingGOOGLE & operator=( PastPresentationTimingGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PastPresentationTimingGOOGLE ) ); + return *this; + } + + + operator VkPastPresentationTimingGOOGLE const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPastPresentationTimingGOOGLE &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PastPresentationTimingGOOGLE const& ) const = default; +#else + bool operator==( PastPresentationTimingGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( presentID == rhs.presentID ) + && ( desiredPresentTime == rhs.desiredPresentTime ) + && ( actualPresentTime == rhs.actualPresentTime ) + && ( earliestPresentTime == rhs.earliestPresentTime ) + && ( presentMargin == rhs.presentMargin ); + } + + bool operator!=( PastPresentationTimingGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t presentID = {}; + uint64_t desiredPresentTime = {}; + uint64_t actualPresentTime = {}; + uint64_t earliestPresentTime = {}; + uint64_t presentMargin = {}; + + }; + static_assert( sizeof( PastPresentationTimingGOOGLE ) == sizeof( VkPastPresentationTimingGOOGLE ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + union PerformanceValueDataINTEL + { + PerformanceValueDataINTEL( VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL const& rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL ) ); + } + + PerformanceValueDataINTEL( uint32_t value32_ = {} ) + : value32( value32_ ) + {} + + PerformanceValueDataINTEL( uint64_t value64_ ) + : value64( value64_ ) + {} + + PerformanceValueDataINTEL( float valueFloat_ ) + : valueFloat( valueFloat_ ) + {} + + PerformanceValueDataINTEL( const char* valueString_ ) + : valueString( valueString_ ) + {} + + PerformanceValueDataINTEL & setValue32( uint32_t value32_ ) VULKAN_HPP_NOEXCEPT + { + value32 = value32_; + return *this; + } + + PerformanceValueDataINTEL & setValue64( uint64_t value64_ ) VULKAN_HPP_NOEXCEPT + { + value64 = value64_; + return *this; + } + + PerformanceValueDataINTEL & setValueFloat( float valueFloat_ ) VULKAN_HPP_NOEXCEPT + { + valueFloat = valueFloat_; + return *this; + } + + PerformanceValueDataINTEL & setValueBool( VULKAN_HPP_NAMESPACE::Bool32 valueBool_ ) VULKAN_HPP_NOEXCEPT + { + valueBool = valueBool_; + return *this; + } + + PerformanceValueDataINTEL & setValueString( const char* valueString_ ) VULKAN_HPP_NOEXCEPT + { + valueString = valueString_; + return *this; + } + + VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL & operator=( VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL ) ); + return *this; + } + + operator VkPerformanceValueDataINTEL const&() const + { + return *reinterpret_cast(this); + } + + operator VkPerformanceValueDataINTEL &() + { + return *reinterpret_cast(this); + } + +#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + uint32_t value32; + uint64_t value64; + float valueFloat; + VULKAN_HPP_NAMESPACE::Bool32 valueBool; + const char* valueString; +#else + uint32_t value32; + uint64_t value64; + float valueFloat; + VkBool32 valueBool; + const char* valueString; +#endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ + }; + + struct PerformanceValueINTEL + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + PerformanceValueINTEL(VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL type_ = VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL::eUint32, VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL data_ = {}) VULKAN_HPP_NOEXCEPT + : type( type_ ), data( data_ ) + {} + + PerformanceValueINTEL( PerformanceValueINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceValueINTEL( VkPerformanceValueINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceValueINTEL & operator=( VkPerformanceValueINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceValueINTEL & operator=( PerformanceValueINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceValueINTEL ) ); + return *this; + } + + PerformanceValueINTEL & setType( VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + PerformanceValueINTEL & setData( VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL const & data_ ) VULKAN_HPP_NOEXCEPT + { + data = data_; + return *this; + } + + + operator VkPerformanceValueINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceValueINTEL &() VULKAN_HPP_NOEXCEPT + 
{ + return *reinterpret_cast( this ); + } + + + + + public: + VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL type = VULKAN_HPP_NAMESPACE::PerformanceValueTypeINTEL::eUint32; + VULKAN_HPP_NAMESPACE::PerformanceValueDataINTEL data = {}; + + }; + static_assert( sizeof( PerformanceValueINTEL ) == sizeof( VkPerformanceValueINTEL ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct PipelineExecutableInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineExecutableInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineExecutableInfoKHR(VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}, uint32_t executableIndex_ = {}) VULKAN_HPP_NOEXCEPT + : pipeline( pipeline_ ), executableIndex( executableIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineExecutableInfoKHR( PipelineExecutableInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineExecutableInfoKHR( VkPipelineExecutableInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineExecutableInfoKHR & operator=( VkPipelineExecutableInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineExecutableInfoKHR & operator=( PipelineExecutableInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineExecutableInfoKHR ) ); + return *this; + } + + PipelineExecutableInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineExecutableInfoKHR & setPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ ) VULKAN_HPP_NOEXCEPT + { + pipeline = pipeline_; + return *this; + } + + PipelineExecutableInfoKHR & setExecutableIndex( uint32_t executableIndex_ ) VULKAN_HPP_NOEXCEPT + { + executableIndex = executableIndex_; + return *this; + } + + + operator VkPipelineExecutableInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineExecutableInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineExecutableInfoKHR const& ) const = default; +#else + bool operator==( PipelineExecutableInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipeline == rhs.pipeline ) + && ( executableIndex == rhs.executableIndex ); + } + + bool operator!=( PipelineExecutableInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineExecutableInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Pipeline pipeline = {}; + uint32_t executableIndex = {}; + + }; + static_assert( sizeof( PipelineExecutableInfoKHR ) == sizeof( VkPipelineExecutableInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineExecutableInfoKHR; + }; + + struct PipelineExecutableInternalRepresentationKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineExecutableInternalRepresentationKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineExecutableInternalRepresentationKHR(std::array const& name_ = {}, std::array const& description_ = {}, VULKAN_HPP_NAMESPACE::Bool32 isText_ = {}, size_t dataSize_ = {}, void* pData_ = {}) VULKAN_HPP_NOEXCEPT + : name( name_ ), description( description_ ), isText( isText_ ), dataSize( dataSize_ ), pData( pData_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineExecutableInternalRepresentationKHR( PipelineExecutableInternalRepresentationKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineExecutableInternalRepresentationKHR( VkPipelineExecutableInternalRepresentationKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + PipelineExecutableInternalRepresentationKHR( std::array const& name_, std::array const& description_, VULKAN_HPP_NAMESPACE::Bool32 isText_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & data_ ) + : name( name_ ), description( description_ ), isText( isText_ ), dataSize( data_.size() * sizeof(T) ), pData( data_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineExecutableInternalRepresentationKHR & operator=( VkPipelineExecutableInternalRepresentationKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineExecutableInternalRepresentationKHR & operator=( PipelineExecutableInternalRepresentationKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineExecutableInternalRepresentationKHR ) ); + return *this; + } + + + operator VkPipelineExecutableInternalRepresentationKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineExecutableInternalRepresentationKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineExecutableInternalRepresentationKHR const& ) const = default; +#else + bool operator==( PipelineExecutableInternalRepresentationKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( name == rhs.name ) + && ( description == rhs.description ) + && ( isText == rhs.isText ) + && ( dataSize == rhs.dataSize ) + && ( pData == rhs.pData ); + } + + bool operator!=( PipelineExecutableInternalRepresentationKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineExecutableInternalRepresentationKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D name = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D description = {}; + VULKAN_HPP_NAMESPACE::Bool32 isText = {}; + size_t dataSize = {}; + void* pData = {}; + + }; + static_assert( sizeof( PipelineExecutableInternalRepresentationKHR ) == sizeof( VkPipelineExecutableInternalRepresentationKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineExecutableInternalRepresentationKHR; + }; + + struct PipelineInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineInfoKHR(VULKAN_HPP_NAMESPACE::Pipeline pipeline_ = {}) VULKAN_HPP_NOEXCEPT + : pipeline( pipeline_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineInfoKHR( PipelineInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineInfoKHR( VkPipelineInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineInfoKHR & operator=( VkPipelineInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineInfoKHR & operator=( PipelineInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineInfoKHR ) ); + return *this; + } + + PipelineInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineInfoKHR & setPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline_ ) VULKAN_HPP_NOEXCEPT + { + pipeline = pipeline_; + return *this; + } + + + operator VkPipelineInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineInfoKHR const& ) const = default; +#else + bool operator==( PipelineInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipeline == rhs.pipeline ); + } + + bool operator!=( PipelineInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Pipeline pipeline = {}; + + }; + static_assert( sizeof( PipelineInfoKHR ) == sizeof( VkPipelineInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineInfoKHR; + }; + + struct PipelineExecutablePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineExecutablePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineExecutablePropertiesKHR(VULKAN_HPP_NAMESPACE::ShaderStageFlags stages_ = {}, std::array const& name_ = {}, std::array const& description_ = {}, uint32_t subgroupSize_ = {}) VULKAN_HPP_NOEXCEPT + : stages( stages_ ), name( name_ ), description( description_ ), subgroupSize( subgroupSize_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineExecutablePropertiesKHR( PipelineExecutablePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineExecutablePropertiesKHR( VkPipelineExecutablePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineExecutablePropertiesKHR & operator=( VkPipelineExecutablePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineExecutablePropertiesKHR & operator=( PipelineExecutablePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineExecutablePropertiesKHR ) ); + return *this; + } + + + operator VkPipelineExecutablePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineExecutablePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineExecutablePropertiesKHR const& ) const = default; +#else + bool operator==( PipelineExecutablePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( stages == rhs.stages ) + && ( name == rhs.name ) + && ( description == rhs.description ) + && ( subgroupSize == rhs.subgroupSize ); + } + + bool operator!=( PipelineExecutablePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineExecutablePropertiesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlags stages = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D name = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D description = {}; + uint32_t subgroupSize = {}; + + }; + static_assert( sizeof( PipelineExecutablePropertiesKHR ) == sizeof( VkPipelineExecutablePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineExecutablePropertiesKHR; + }; + + union PipelineExecutableStatisticValueKHR + { + PipelineExecutableStatisticValueKHR( VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR const& rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR ) ); + } + + PipelineExecutableStatisticValueKHR( VULKAN_HPP_NAMESPACE::Bool32 b32_ = {} ) + : b32( b32_ ) + {} + + PipelineExecutableStatisticValueKHR( int64_t i64_ ) + : i64( i64_ ) + {} + + PipelineExecutableStatisticValueKHR( uint64_t u64_ ) + : u64( u64_ ) + {} + + PipelineExecutableStatisticValueKHR( double f64_ ) + : f64( f64_ ) + {} + + PipelineExecutableStatisticValueKHR & setB32( VULKAN_HPP_NAMESPACE::Bool32 b32_ ) VULKAN_HPP_NOEXCEPT + { + b32 = b32_; + return *this; + } + + PipelineExecutableStatisticValueKHR & setI64( int64_t i64_ ) VULKAN_HPP_NOEXCEPT + { + i64 = i64_; + return *this; + } + + PipelineExecutableStatisticValueKHR & setU64( uint64_t u64_ ) VULKAN_HPP_NOEXCEPT + { + u64 = u64_; + return *this; + } + + PipelineExecutableStatisticValueKHR & setF64( double f64_ ) VULKAN_HPP_NOEXCEPT + { + f64 = f64_; + return *this; + } + + VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR & operator=( VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR ) ); + return *this; + } + + operator VkPipelineExecutableStatisticValueKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPipelineExecutableStatisticValueKHR &() + { + return *reinterpret_cast(this); + } + +#ifdef VULKAN_HPP_HAS_UNRESTRICTED_UNIONS + VULKAN_HPP_NAMESPACE::Bool32 b32; + int64_t i64; + uint64_t u64; + double f64; +#else + VkBool32 b32; + int64_t i64; + uint64_t u64; + double f64; +#endif /*VULKAN_HPP_HAS_UNRESTRICTED_UNIONS*/ + }; + + struct PipelineExecutableStatisticKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineExecutableStatisticKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + PipelineExecutableStatisticKHR(std::array const& name_ = {}, std::array const& description_ = {}, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR format_ = VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR::eBool32, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR value_ = {}) VULKAN_HPP_NOEXCEPT + : name( name_ ), description( description_ ), format( format_ ), value( value_ ) + {} + + PipelineExecutableStatisticKHR( PipelineExecutableStatisticKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineExecutableStatisticKHR( VkPipelineExecutableStatisticKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineExecutableStatisticKHR & operator=( VkPipelineExecutableStatisticKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineExecutableStatisticKHR & operator=( PipelineExecutableStatisticKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineExecutableStatisticKHR ) ); + return *this; + } + + + operator VkPipelineExecutableStatisticKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineExecutableStatisticKHR &() VULKAN_HPP_NOEXCEPT + { + 
return *reinterpret_cast( this ); + } + + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineExecutableStatisticKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D name = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D description = {}; + VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR format = VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticFormatKHR::eBool32; + VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticValueKHR value = {}; + + }; + static_assert( sizeof( PipelineExecutableStatisticKHR ) == sizeof( VkPipelineExecutableStatisticKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineExecutableStatisticKHR; + }; + + struct RefreshCycleDurationGOOGLE + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RefreshCycleDurationGOOGLE(uint64_t refreshDuration_ = {}) VULKAN_HPP_NOEXCEPT + : refreshDuration( refreshDuration_ ) + {} + + VULKAN_HPP_CONSTEXPR RefreshCycleDurationGOOGLE( RefreshCycleDurationGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RefreshCycleDurationGOOGLE( VkRefreshCycleDurationGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RefreshCycleDurationGOOGLE & operator=( VkRefreshCycleDurationGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RefreshCycleDurationGOOGLE & operator=( RefreshCycleDurationGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RefreshCycleDurationGOOGLE ) ); + return *this; + } + + + operator VkRefreshCycleDurationGOOGLE const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRefreshCycleDurationGOOGLE &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RefreshCycleDurationGOOGLE const& ) const = default; +#else + bool operator==( RefreshCycleDurationGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( refreshDuration == rhs.refreshDuration ); + } + + bool operator!=( RefreshCycleDurationGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint64_t refreshDuration = {}; + + }; + static_assert( sizeof( RefreshCycleDurationGOOGLE ) == sizeof( VkRefreshCycleDurationGOOGLE ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct SemaphoreGetFdInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSemaphoreGetFdInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SemaphoreGetFdInfoKHR(VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : semaphore( semaphore_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR SemaphoreGetFdInfoKHR( SemaphoreGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SemaphoreGetFdInfoKHR( VkSemaphoreGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SemaphoreGetFdInfoKHR & operator=( VkSemaphoreGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SemaphoreGetFdInfoKHR & operator=( SemaphoreGetFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SemaphoreGetFdInfoKHR ) ); + return *this; + } + + SemaphoreGetFdInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SemaphoreGetFdInfoKHR & setSemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ ) VULKAN_HPP_NOEXCEPT + { + semaphore = semaphore_; + return *this; + } + + SemaphoreGetFdInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkSemaphoreGetFdInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSemaphoreGetFdInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SemaphoreGetFdInfoKHR const& ) const = default; +#else + bool operator==( SemaphoreGetFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( SemaphoreGetFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSemaphoreGetFdInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Semaphore semaphore = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( SemaphoreGetFdInfoKHR ) == sizeof( VkSemaphoreGetFdInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SemaphoreGetFdInfoKHR; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct SemaphoreGetWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSemaphoreGetWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SemaphoreGetWin32HandleInfoKHR(VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : semaphore( semaphore_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR SemaphoreGetWin32HandleInfoKHR( SemaphoreGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SemaphoreGetWin32HandleInfoKHR( VkSemaphoreGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SemaphoreGetWin32HandleInfoKHR & operator=( VkSemaphoreGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SemaphoreGetWin32HandleInfoKHR & operator=( SemaphoreGetWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SemaphoreGetWin32HandleInfoKHR ) ); + return *this; + } + + SemaphoreGetWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SemaphoreGetWin32HandleInfoKHR & setSemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ ) VULKAN_HPP_NOEXCEPT + { + semaphore = semaphore_; + return *this; + } + + SemaphoreGetWin32HandleInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkSemaphoreGetWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSemaphoreGetWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SemaphoreGetWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( SemaphoreGetWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( SemaphoreGetWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSemaphoreGetWin32HandleInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Semaphore semaphore = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( SemaphoreGetWin32HandleInfoKHR ) == sizeof( VkSemaphoreGetWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SemaphoreGetWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ImportFenceFdInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportFenceFdInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportFenceFdInfoKHR(VULKAN_HPP_NAMESPACE::Fence fence_ = {}, VULKAN_HPP_NAMESPACE::FenceImportFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd, int fd_ = {}) VULKAN_HPP_NOEXCEPT + : fence( fence_ ), flags( flags_ ), handleType( handleType_ ), fd( fd_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportFenceFdInfoKHR( ImportFenceFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportFenceFdInfoKHR( VkImportFenceFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportFenceFdInfoKHR & operator=( VkImportFenceFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportFenceFdInfoKHR & operator=( ImportFenceFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportFenceFdInfoKHR ) ); + return *this; + } + + ImportFenceFdInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportFenceFdInfoKHR & setFence( VULKAN_HPP_NAMESPACE::Fence fence_ ) VULKAN_HPP_NOEXCEPT + { + fence = fence_; + return *this; + } + + ImportFenceFdInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::FenceImportFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImportFenceFdInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportFenceFdInfoKHR & setFd( int fd_ ) VULKAN_HPP_NOEXCEPT + { + fd = fd_; + return *this; + } + + + operator VkImportFenceFdInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportFenceFdInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportFenceFdInfoKHR const& ) const = default; +#else + bool operator==( ImportFenceFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( fd == rhs.fd ); + } + + bool operator!=( ImportFenceFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportFenceFdInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Fence fence = {}; + VULKAN_HPP_NAMESPACE::FenceImportFlags flags = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd; + int fd = {}; + + }; + static_assert( sizeof( ImportFenceFdInfoKHR ) == sizeof( VkImportFenceFdInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImportFenceFdInfoKHR; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportFenceWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportFenceWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportFenceWin32HandleInfoKHR(VULKAN_HPP_NAMESPACE::Fence fence_ = {}, VULKAN_HPP_NAMESPACE::FenceImportFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd, HANDLE handle_ = {}, LPCWSTR name_ = {}) VULKAN_HPP_NOEXCEPT + : fence( fence_ ), flags( flags_ ), handleType( handleType_ ), handle( handle_ ), name( name_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportFenceWin32HandleInfoKHR( ImportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportFenceWin32HandleInfoKHR( VkImportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportFenceWin32HandleInfoKHR & operator=( VkImportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportFenceWin32HandleInfoKHR & operator=( ImportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportFenceWin32HandleInfoKHR ) ); + return *this; + } + + ImportFenceWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportFenceWin32HandleInfoKHR & setFence( VULKAN_HPP_NAMESPACE::Fence fence_ ) VULKAN_HPP_NOEXCEPT + { + fence = fence_; + return *this; + } + + ImportFenceWin32HandleInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::FenceImportFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImportFenceWin32HandleInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportFenceWin32HandleInfoKHR & setHandle( HANDLE handle_ ) VULKAN_HPP_NOEXCEPT + { + handle = handle_; + return *this; + } + + ImportFenceWin32HandleInfoKHR & setName( LPCWSTR name_ ) VULKAN_HPP_NOEXCEPT + { + name = name_; + return *this; + } + + + operator VkImportFenceWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportFenceWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportFenceWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( ImportFenceWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fence == rhs.fence ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ) + && ( name == rhs.name ); + } + + bool operator!=( ImportFenceWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportFenceWin32HandleInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Fence fence = {}; + VULKAN_HPP_NAMESPACE::FenceImportFlags flags = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType = 
VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd; + HANDLE handle = {}; + LPCWSTR name = {}; + + }; + static_assert( sizeof( ImportFenceWin32HandleInfoKHR ) == sizeof( VkImportFenceWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImportFenceWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ImportSemaphoreFdInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportSemaphoreFdInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportSemaphoreFdInfoKHR(VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::SemaphoreImportFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, int fd_ = {}) VULKAN_HPP_NOEXCEPT + : semaphore( semaphore_ ), flags( flags_ ), handleType( handleType_ ), fd( fd_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportSemaphoreFdInfoKHR( ImportSemaphoreFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportSemaphoreFdInfoKHR( VkImportSemaphoreFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportSemaphoreFdInfoKHR & operator=( VkImportSemaphoreFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportSemaphoreFdInfoKHR & operator=( ImportSemaphoreFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportSemaphoreFdInfoKHR ) ); + return *this; + } + + ImportSemaphoreFdInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportSemaphoreFdInfoKHR & setSemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ ) VULKAN_HPP_NOEXCEPT + { + semaphore = semaphore_; + return *this; + } + + ImportSemaphoreFdInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::SemaphoreImportFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImportSemaphoreFdInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportSemaphoreFdInfoKHR & setFd( int fd_ ) VULKAN_HPP_NOEXCEPT + { + fd = fd_; + return *this; + } + + + operator VkImportSemaphoreFdInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportSemaphoreFdInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportSemaphoreFdInfoKHR const& ) const = default; +#else + bool operator==( ImportSemaphoreFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( flags == rhs.flags ) + && ( handleType == rhs.handleType ) + && ( fd == rhs.fd ); + } + + bool operator!=( ImportSemaphoreFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportSemaphoreFdInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Semaphore semaphore = {}; + VULKAN_HPP_NAMESPACE::SemaphoreImportFlags flags = {}; + 
VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd; + int fd = {}; + + }; + static_assert( sizeof( ImportSemaphoreFdInfoKHR ) == sizeof( VkImportSemaphoreFdInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImportSemaphoreFdInfoKHR; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportSemaphoreWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportSemaphoreWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportSemaphoreWin32HandleInfoKHR(VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, VULKAN_HPP_NAMESPACE::SemaphoreImportFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd, HANDLE handle_ = {}, LPCWSTR name_ = {}) VULKAN_HPP_NOEXCEPT + : semaphore( semaphore_ ), flags( flags_ ), handleType( handleType_ ), handle( handle_ ), name( name_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportSemaphoreWin32HandleInfoKHR( ImportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportSemaphoreWin32HandleInfoKHR( VkImportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportSemaphoreWin32HandleInfoKHR & operator=( VkImportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & operator=( ImportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportSemaphoreWin32HandleInfoKHR ) ); + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & setSemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ ) VULKAN_HPP_NOEXCEPT + { + semaphore = semaphore_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::SemaphoreImportFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & setHandle( HANDLE handle_ ) VULKAN_HPP_NOEXCEPT + { + handle = handle_; + return *this; + } + + ImportSemaphoreWin32HandleInfoKHR & setName( LPCWSTR name_ ) VULKAN_HPP_NOEXCEPT + { + name = name_; + return *this; + } + + + operator VkImportSemaphoreWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportSemaphoreWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportSemaphoreWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( ImportSemaphoreWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( flags == rhs.flags ) + && ( handleType == 
rhs.handleType ) + && ( handle == rhs.handle ) + && ( name == rhs.name ); + } + + bool operator!=( ImportSemaphoreWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportSemaphoreWin32HandleInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Semaphore semaphore = {}; + VULKAN_HPP_NAMESPACE::SemaphoreImportFlags flags = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd; + HANDLE handle = {}; + LPCWSTR name = {}; + + }; + static_assert( sizeof( ImportSemaphoreWin32HandleInfoKHR ) == sizeof( VkImportSemaphoreWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImportSemaphoreWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct InitializePerformanceApiInfoINTEL + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eInitializePerformanceApiInfoINTEL; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR InitializePerformanceApiInfoINTEL(void* pUserData_ = {}) VULKAN_HPP_NOEXCEPT + : pUserData( pUserData_ ) + {} + + VULKAN_HPP_CONSTEXPR InitializePerformanceApiInfoINTEL( InitializePerformanceApiInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + InitializePerformanceApiInfoINTEL( VkInitializePerformanceApiInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + InitializePerformanceApiInfoINTEL & operator=( VkInitializePerformanceApiInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + InitializePerformanceApiInfoINTEL & operator=( InitializePerformanceApiInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( InitializePerformanceApiInfoINTEL ) ); + return *this; + } + + InitializePerformanceApiInfoINTEL & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + InitializePerformanceApiInfoINTEL & setPUserData( void* pUserData_ ) VULKAN_HPP_NOEXCEPT + { + pUserData = pUserData_; + return *this; + } + + + operator VkInitializePerformanceApiInfoINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkInitializePerformanceApiInfoINTEL &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( InitializePerformanceApiInfoINTEL const& ) const = default; +#else + bool operator==( InitializePerformanceApiInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pUserData == rhs.pUserData ); + } + + bool operator!=( InitializePerformanceApiInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eInitializePerformanceApiInfoINTEL; + const void* pNext = {}; + void* pUserData = {}; + + }; + static_assert( sizeof( InitializePerformanceApiInfoINTEL ) == sizeof( VkInitializePerformanceApiInfoINTEL ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = InitializePerformanceApiInfoINTEL; + }; + + struct DisplayEventInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayEventInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayEventInfoEXT(VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT displayEvent_ = VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT::eFirstPixelOut) VULKAN_HPP_NOEXCEPT + : displayEvent( displayEvent_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayEventInfoEXT( DisplayEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayEventInfoEXT( VkDisplayEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayEventInfoEXT & operator=( VkDisplayEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayEventInfoEXT & operator=( DisplayEventInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayEventInfoEXT ) ); + return *this; + } + + DisplayEventInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DisplayEventInfoEXT & setDisplayEvent( VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT displayEvent_ ) VULKAN_HPP_NOEXCEPT + { + displayEvent = displayEvent_; + return *this; + } + + + operator VkDisplayEventInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayEventInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayEventInfoEXT const& ) const = default; +#else + bool operator==( DisplayEventInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayEvent == rhs.displayEvent ); + } + + bool operator!=( DisplayEventInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayEventInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT displayEvent = VULKAN_HPP_NAMESPACE::DisplayEventTypeEXT::eFirstPixelOut; + + }; + static_assert( sizeof( DisplayEventInfoEXT ) == sizeof( VkDisplayEventInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayEventInfoEXT; + }; + + struct XYColorEXT + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR XYColorEXT(float x_ = {}, float y_ = {}) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ) + {} + + VULKAN_HPP_CONSTEXPR XYColorEXT( XYColorEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + XYColorEXT( VkXYColorEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + XYColorEXT & operator=( VkXYColorEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + XYColorEXT & operator=( XYColorEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( XYColorEXT ) ); + return *this; + } + + XYColorEXT & setX( float x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + XYColorEXT & setY( float y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + + operator VkXYColorEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkXYColorEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( XYColorEXT const& ) const = default; +#else + bool operator==( XYColorEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ); + } + + bool operator!=( XYColorEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + float x = {}; + float y = {}; + + }; + static_assert( sizeof( XYColorEXT ) == sizeof( VkXYColorEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct HdrMetadataEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eHdrMetadataEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR HdrMetadataEXT(VULKAN_HPP_NAMESPACE::XYColorEXT displayPrimaryRed_ = {}, VULKAN_HPP_NAMESPACE::XYColorEXT displayPrimaryGreen_ = {}, VULKAN_HPP_NAMESPACE::XYColorEXT displayPrimaryBlue_ = {}, VULKAN_HPP_NAMESPACE::XYColorEXT whitePoint_ = {}, float maxLuminance_ = {}, float minLuminance_ = {}, float maxContentLightLevel_ = {}, float maxFrameAverageLightLevel_ = {}) VULKAN_HPP_NOEXCEPT + : displayPrimaryRed( displayPrimaryRed_ ), displayPrimaryGreen( displayPrimaryGreen_ ), displayPrimaryBlue( displayPrimaryBlue_ ), whitePoint( whitePoint_ ), maxLuminance( maxLuminance_ ), minLuminance( minLuminance_ ), maxContentLightLevel( maxContentLightLevel_ ), maxFrameAverageLightLevel( maxFrameAverageLightLevel_ ) + {} + + VULKAN_HPP_CONSTEXPR HdrMetadataEXT( HdrMetadataEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + HdrMetadataEXT( VkHdrMetadataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + HdrMetadataEXT & operator=( VkHdrMetadataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + HdrMetadataEXT & operator=( HdrMetadataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( HdrMetadataEXT ) ); + return *this; + } + + HdrMetadataEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + HdrMetadataEXT & setDisplayPrimaryRed( VULKAN_HPP_NAMESPACE::XYColorEXT const & displayPrimaryRed_ ) VULKAN_HPP_NOEXCEPT + { + displayPrimaryRed = displayPrimaryRed_; + return *this; + } + + HdrMetadataEXT & setDisplayPrimaryGreen( VULKAN_HPP_NAMESPACE::XYColorEXT const & displayPrimaryGreen_ ) VULKAN_HPP_NOEXCEPT + { + displayPrimaryGreen = displayPrimaryGreen_; + return *this; + } + + HdrMetadataEXT & setDisplayPrimaryBlue( VULKAN_HPP_NAMESPACE::XYColorEXT const & displayPrimaryBlue_ ) VULKAN_HPP_NOEXCEPT + { + displayPrimaryBlue = displayPrimaryBlue_; + return *this; + } + + HdrMetadataEXT & setWhitePoint( VULKAN_HPP_NAMESPACE::XYColorEXT const & whitePoint_ ) VULKAN_HPP_NOEXCEPT + { + whitePoint = whitePoint_; + return *this; + } + + HdrMetadataEXT & setMaxLuminance( float maxLuminance_ ) VULKAN_HPP_NOEXCEPT + { + maxLuminance = maxLuminance_; + return *this; + } + + HdrMetadataEXT & setMinLuminance( float minLuminance_ ) VULKAN_HPP_NOEXCEPT + { + minLuminance = minLuminance_; + return *this; + } + + HdrMetadataEXT & setMaxContentLightLevel( float maxContentLightLevel_ ) VULKAN_HPP_NOEXCEPT + { + maxContentLightLevel = maxContentLightLevel_; + return *this; + } + + HdrMetadataEXT & setMaxFrameAverageLightLevel( float maxFrameAverageLightLevel_ ) VULKAN_HPP_NOEXCEPT + { + maxFrameAverageLightLevel = maxFrameAverageLightLevel_; + return *this; + } + + + operator VkHdrMetadataEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkHdrMetadataEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( HdrMetadataEXT const& ) const = default; +#else + bool operator==( HdrMetadataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayPrimaryRed == rhs.displayPrimaryRed ) + && ( 
displayPrimaryGreen == rhs.displayPrimaryGreen ) + && ( displayPrimaryBlue == rhs.displayPrimaryBlue ) + && ( whitePoint == rhs.whitePoint ) + && ( maxLuminance == rhs.maxLuminance ) + && ( minLuminance == rhs.minLuminance ) + && ( maxContentLightLevel == rhs.maxContentLightLevel ) + && ( maxFrameAverageLightLevel == rhs.maxFrameAverageLightLevel ); + } + + bool operator!=( HdrMetadataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eHdrMetadataEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::XYColorEXT displayPrimaryRed = {}; + VULKAN_HPP_NAMESPACE::XYColorEXT displayPrimaryGreen = {}; + VULKAN_HPP_NAMESPACE::XYColorEXT displayPrimaryBlue = {}; + VULKAN_HPP_NAMESPACE::XYColorEXT whitePoint = {}; + float maxLuminance = {}; + float minLuminance = {}; + float maxContentLightLevel = {}; + float maxFrameAverageLightLevel = {}; + + }; + static_assert( sizeof( HdrMetadataEXT ) == sizeof( VkHdrMetadataEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = HdrMetadataEXT; + }; + + struct SemaphoreSignalInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSemaphoreSignalInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SemaphoreSignalInfo(VULKAN_HPP_NAMESPACE::Semaphore semaphore_ = {}, uint64_t value_ = {}) VULKAN_HPP_NOEXCEPT + : semaphore( semaphore_ ), value( value_ ) + {} + + VULKAN_HPP_CONSTEXPR SemaphoreSignalInfo( SemaphoreSignalInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SemaphoreSignalInfo( VkSemaphoreSignalInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SemaphoreSignalInfo & operator=( VkSemaphoreSignalInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SemaphoreSignalInfo & operator=( SemaphoreSignalInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SemaphoreSignalInfo ) ); + return *this; + } + + SemaphoreSignalInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SemaphoreSignalInfo & setSemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore_ ) VULKAN_HPP_NOEXCEPT + { + semaphore = semaphore_; + return *this; + } + + SemaphoreSignalInfo & setValue( uint64_t value_ ) VULKAN_HPP_NOEXCEPT + { + value = value_; + return *this; + } + + + operator VkSemaphoreSignalInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSemaphoreSignalInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SemaphoreSignalInfo const& ) const = default; +#else + bool operator==( SemaphoreSignalInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphore == rhs.semaphore ) + && ( value == rhs.value ); + } + + bool operator!=( SemaphoreSignalInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSemaphoreSignalInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Semaphore semaphore = {}; + uint64_t 
value = {}; + + }; + static_assert( sizeof( SemaphoreSignalInfo ) == sizeof( VkSemaphoreSignalInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SemaphoreSignalInfo; + }; + using SemaphoreSignalInfoKHR = SemaphoreSignalInfo; + + struct SemaphoreWaitInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSemaphoreWaitInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SemaphoreWaitInfo(VULKAN_HPP_NAMESPACE::SemaphoreWaitFlags flags_ = {}, uint32_t semaphoreCount_ = {}, const VULKAN_HPP_NAMESPACE::Semaphore* pSemaphores_ = {}, const uint64_t* pValues_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), semaphoreCount( semaphoreCount_ ), pSemaphores( pSemaphores_ ), pValues( pValues_ ) + {} + + VULKAN_HPP_CONSTEXPR SemaphoreWaitInfo( SemaphoreWaitInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SemaphoreWaitInfo( VkSemaphoreWaitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SemaphoreWaitInfo( VULKAN_HPP_NAMESPACE::SemaphoreWaitFlags flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & semaphores_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ = {} ) + : flags( flags_ ), semaphoreCount( static_cast( semaphores_.size() ) ), pSemaphores( semaphores_.data() ), pValues( values_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( semaphores_.size() == values_.size() ); +#else + if ( semaphores_.size() != values_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::SemaphoreWaitInfo::SemaphoreWaitInfo: semaphores_.size() != values_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SemaphoreWaitInfo & operator=( VkSemaphoreWaitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SemaphoreWaitInfo & operator=( SemaphoreWaitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SemaphoreWaitInfo ) ); + return *this; + } + + SemaphoreWaitInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SemaphoreWaitInfo & setFlags( VULKAN_HPP_NAMESPACE::SemaphoreWaitFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + SemaphoreWaitInfo & setSemaphoreCount( uint32_t semaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + semaphoreCount = semaphoreCount_; + return *this; + } + + SemaphoreWaitInfo & setPSemaphores( const VULKAN_HPP_NAMESPACE::Semaphore* pSemaphores_ ) VULKAN_HPP_NOEXCEPT + { + pSemaphores = pSemaphores_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SemaphoreWaitInfo & setSemaphores( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & semaphores_ ) VULKAN_HPP_NOEXCEPT + { + semaphoreCount = static_cast( semaphores_.size() ); + pSemaphores = semaphores_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + SemaphoreWaitInfo & setPValues( const uint64_t* pValues_ ) VULKAN_HPP_NOEXCEPT + { + pValues = pValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + SemaphoreWaitInfo & setValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & values_ ) VULKAN_HPP_NOEXCEPT + { + 
semaphoreCount = static_cast( values_.size() ); + pValues = values_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkSemaphoreWaitInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSemaphoreWaitInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SemaphoreWaitInfo const& ) const = default; +#else + bool operator==( SemaphoreWaitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( semaphoreCount == rhs.semaphoreCount ) + && ( pSemaphores == rhs.pSemaphores ) + && ( pValues == rhs.pValues ); + } + + bool operator!=( SemaphoreWaitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSemaphoreWaitInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SemaphoreWaitFlags flags = {}; + uint32_t semaphoreCount = {}; + const VULKAN_HPP_NAMESPACE::Semaphore* pSemaphores = {}; + const uint64_t* pValues = {}; + + }; + static_assert( sizeof( SemaphoreWaitInfo ) == sizeof( VkSemaphoreWaitInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SemaphoreWaitInfo; + }; + using SemaphoreWaitInfoKHR = SemaphoreWaitInfo; + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + class Device; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueAccelerationStructureKHR = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueAccelerationStructureNV = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueBuffer = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueBufferView = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = PoolFree; }; + using UniqueCommandBuffer = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueCommandPool = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDeferredOperationKHR = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDescriptorPool = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = PoolFree; }; + using UniqueDescriptorSet = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDescriptorSetLayout = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDescriptorUpdateTemplate = UniqueHandle; + using UniqueDescriptorUpdateTemplateKHR = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectFree; }; + using UniqueDeviceMemory = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueEvent = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueFence = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueFramebuffer = UniqueHandle; + template class 
UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueImage = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueImageView = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueIndirectCommandsLayoutNV = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniquePipeline = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniquePipelineCache = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniquePipelineLayout = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniquePrivateDataSlotEXT = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueQueryPool = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueRenderPass = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueSampler = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueSamplerYcbcrConversion = UniqueHandle; + using UniqueSamplerYcbcrConversionKHR = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueSemaphore = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueShaderModule = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueSwapchainKHR = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueValidationCacheEXT = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + class Device + { + public: + using CType = VkDevice; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDevice; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDevice; + + public: + VULKAN_HPP_CONSTEXPR Device() VULKAN_HPP_NOEXCEPT + : m_device(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Device( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_device(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Device( VkDevice device ) VULKAN_HPP_NOEXCEPT + : m_device( device ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Device & operator=(VkDevice device) VULKAN_HPP_NOEXCEPT + { + m_device = device; + return *this; + } +#endif + + Device & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_device = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Device const& ) const = default; +#else + bool operator==( Device const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_device == rhs.m_device; + } + + bool operator!=(Device const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_device != rhs.m_device; + } + + bool operator<(Device const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_device < rhs.m_device; + } +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + + template + VULKAN_HPP_NODISCARD Result acquireNextImage2KHR( const VULKAN_HPP_NAMESPACE::AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD ResultValue acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, uint32_t* pImageIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD ResultValue acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result acquirePerformanceConfigurationINTEL( const VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL* pConfiguration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type acquirePerformanceConfigurationINTEL( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type acquirePerformanceConfigurationINTELUnique( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result acquireProfilingLockKHR( const VULKAN_HPP_NAMESPACE::AcquireProfilingLockInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result allocateCommandBuffers( const VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + 
template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = CommandBufferAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, CommandBufferAllocator>>::type allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = CommandBufferAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, CommandBufferAllocator>>::type allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result allocateDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DescriptorSetAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, DescriptorSetAllocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = DescriptorSetAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, DescriptorSetAllocator>>::type allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result allocateMemory( const VULKAN_HPP_NAMESPACE::MemoryAllocateInfo* pAllocateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeviceMemory* pMemory, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result bindAccelerationStructureMemoryNV( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindAccelerationStructureMemoryNV( ArrayProxy const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result bindBufferMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindBufferMemory2( ArrayProxy const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result bindBufferMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindBufferMemory2KHR( ArrayProxy const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindImageMemory( 
VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result bindImageMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindImageMemory2( ArrayProxy const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result bindImageMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type bindImageMemory2KHR( ArrayProxy const & bindInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, ArrayProxy const & infos, ArrayProxy const & pBuildRangeInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructure, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createAccelerationStructureKHR( const AccelerationStructureCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createAccelerationStructureKHRUnique( const AccelerationStructureCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructure, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type createAccelerationStructureNV( const AccelerationStructureCreateInfoNV & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createAccelerationStructureNVUnique( const AccelerationStructureCreateInfoNV & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createBuffer( const VULKAN_HPP_NAMESPACE::BufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Buffer* pBuffer, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createBuffer( const BufferCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createBufferUnique( const BufferCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createBufferView( const VULKAN_HPP_NAMESPACE::BufferViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::BufferView* pView, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createBufferView( const BufferViewCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createCommandPool( const VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::CommandPool* pCommandPool, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createCommandPool( const CommandPoolCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue> createComputePipelines( 
VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue> createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue createComputePipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = PipelineAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue> createComputePipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createDeferredOperationKHR( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeferredOperationKHR* pDeferredOperation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type createDeferredOperationKHR( Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createDeferredOperationKHRUnique( Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createDescriptorPool( const VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorPool* pDescriptorPool, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional allocator 
VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createDescriptorSetLayout( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createDescriptorUpdateTemplate( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result createDescriptorUpdateTemplateKHR( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createEvent( const VULKAN_HPP_NAMESPACE::EventCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Event* pEvent, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createEvent( const EventCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createEventUnique( const EventCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createFence( const VULKAN_HPP_NAMESPACE::FenceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createFence( const FenceCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createFenceUnique( const FenceCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createFramebuffer( const VULKAN_HPP_NAMESPACE::FramebufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Framebuffer* pFramebuffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createFramebuffer( const FramebufferCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) 
const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue> createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue> createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue createGraphicsPipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = PipelineAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue> createGraphicsPipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createImage( const VULKAN_HPP_NAMESPACE::ImageCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Image* pImage, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createImage( const ImageCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createImageUnique( const 
ImageCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createImageView( const VULKAN_HPP_NAMESPACE::ImageViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ImageView* pView, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createImageView( const ImageViewCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createIndirectCommandsLayoutNV( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV* pIndirectCommandsLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createIndirectCommandsLayoutNV( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createIndirectCommandsLayoutNVUnique( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createPipelineCache( const VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineCache* pPipelineCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif 
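// -------------------------------------------------------------------------
// [Editorial sketch, not part of the vendored vulkan.hpp] createImageView,
// declared above, is commonly used to wrap each swapchain image in a view.
// A hedged sketch assuming enhanced mode (exceptions enabled), the default
// dispatcher, and `device`, `image`, `format` obtained elsewhere:
inline vk::UniqueImageView exampleCreateColorView( vk::Device device,
                                                   vk::Image image,
                                                   vk::Format format )
{
  vk::ImageViewCreateInfo createInfo;
  createInfo.image    = image;
  createInfo.viewType = vk::ImageViewType::e2D;
  createInfo.format   = format;
  // One mip level, one array layer, color aspect only.
  createInfo.subresourceRange =
      vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 );
  // The *Unique variant returns a smart handle that destroys the view automatically.
  return device.createImageViewUnique( createInfo );
}
// -------------------------------------------------------------------------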
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createPipelineLayout( const VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineLayout* pPipelineLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createPrivateDataSlotEXT( const VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT* pPrivateDataSlot, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type createPrivateDataSlotEXT( const PrivateDataSlotCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createPrivateDataSlotEXTUnique( const PrivateDataSlotCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createQueryPool( const VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::QueryPool* pQueryPool, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createQueryPool( const QueryPoolCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR* pCreateInfos, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue> createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue> createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue createRayTracingPipelineKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = PipelineAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue> createRayTracingPipelineKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue> createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional 
allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue> createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue createRayTracingPipelineNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = PipelineAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD ResultValue, PipelineAllocator>> createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue> createRayTracingPipelineNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createRenderPass( const RenderPassCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type 
createRenderPass2( const RenderPassCreateInfo2 & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createRenderPass2Unique( const RenderPassCreateInfo2 & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result createRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createRenderPass2KHR( const RenderPassCreateInfo2 & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createRenderPass2KHRUnique( const RenderPassCreateInfo2 & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createSampler( const VULKAN_HPP_NAMESPACE::SamplerCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Sampler* pSampler, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createSampler( const SamplerCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createSamplerUnique( const SamplerCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createSamplerYcbcrConversion( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS 
VULKAN_HPP_INLINE typename ResultValueType>::type createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result createSamplerYcbcrConversionKHR( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Semaphore* pSemaphore, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createSemaphore( const SemaphoreCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createShaderModule( const VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ShaderModule* pShaderModule, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createSharedSwapchainsKHR( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type createSharedSwapchainsKHR( ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SwapchainKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type createSharedSwapchainsKHR( ArrayProxy const & createInfos, Optional allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template >> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, SwapchainKHRAllocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy const & createInfos, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template >, typename B = SwapchainKHRAllocator, typename std::enable_if>::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, SwapchainKHRAllocator>>::type createSharedSwapchainsKHRUnique( ArrayProxy const & createInfos, Optional allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createSwapchainKHR( const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createSwapchainKHRUnique( const 
SwapchainCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createValidationCacheEXT( const VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pValidationCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result debugMarkerSetObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD Result deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + void destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
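// -------------------------------------------------------------------------
// [Editorial sketch, not part of the vendored vulkan.hpp] createShaderModule,
// declared above, is where SPIR-V (for example produced by the glslang
// sources added in this same patch) is handed to Vulkan. A hedged sketch
// assuming enhanced mode, <vector> included, and a vector of SPIR-V words:
inline vk::UniqueShaderModule exampleCreateShaderModule(
    vk::Device device, const std::vector<uint32_t> & spirv )
{
  vk::ShaderModuleCreateInfo createInfo;
  // codeSize is in bytes; pCode points at 32-bit SPIR-V words.
  createInfo.codeSize = spirv.size() * sizeof( uint32_t );
  createInfo.pCode    = spirv.data();
  return device.createShaderModuleUnique( createInfo );
}
// -------------------------------------------------------------------------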
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + 
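[Editorial note, not part of the patch.] The declarations above follow the vulkan.hpp convention of pairing a C-style overload (raw pointers, vk::Result return, noexcept) with an enhanced-mode overload (references in, value out, errors reported via exceptions unless VULKAN_HPP_NO_EXCEPTIONS is defined) and, where applicable, a *Unique variant whose handle releases itself through the matching destroy() overload. A minimal usage sketch follows; the names device and createInfo are illustrative assumptions, not taken from the patch:

#include <vulkan/vulkan.hpp>

void swapchainSketch( vk::Device device, vk::SwapchainCreateInfoKHR const & createInfo )
{
  // C-style overload: pointer parameters, the caller checks the vk::Result.
  vk::SwapchainKHR raw;
  if ( device.createSwapchainKHR( &createInfo, nullptr, &raw ) != vk::Result::eSuccess )
    return;

  // Enhanced-mode overload: takes a reference, returns the handle directly,
  // and throws vk::SystemError on failure (when exceptions are enabled).
  vk::SwapchainKHR enhanced = device.createSwapchainKHR( createInfo );

  // Unique variant: vk::UniqueSwapchainKHR invokes the corresponding destroy
  // overload automatically when it goes out of scope.
  vk::UniqueSwapchainKHR unique = device.createSwapchainKHRUnique( createInfo );

  // Handles from the non-unique overloads are destroyed explicitly, either via
  // the named destroySwapchainKHR() or the generic destroy() overload.
  device.destroySwapchainKHR( raw );
  device.destroy( enhanced );
}

The pairing keeps the header source-compatible with C-style call sites while letting enhanced-mode and smart-handle users rely on RAII and exceptions instead of manual result checking.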
+ template + void destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, 
Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyEvent( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyEvent( VULKAN_HPP_NAMESPACE::Event event VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Event event, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyFence( VULKAN_HPP_NAMESPACE::Fence fence VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Fence fence, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyImage( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyImage( VULKAN_HPP_NAMESPACE::Image image VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Image image, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + 
template + void destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + 
template + void destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT 
validationCache VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type waitIdle( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result flushMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type flushMappedMemoryRanges( ArrayProxy const & memoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy const & commandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy const & commandBuffers, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + Result freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, 
uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy const & descriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + Result free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy const & descriptorSets, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR* pSizeInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const AccelerationStructureBuildGeometryInfoKHR & buildInfo, ArrayProxy const & maxPrimitiveCounts, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + DeviceAddress getAccelerationStructureAddressKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureDeviceAddressInfoKHR* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DeviceAddress getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, ArrayProxy const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getAccelerationStructureMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD Result getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + template + DeviceAddress getBufferAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DeviceAddress getBufferAddress( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + 
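[Editorial note, not part of the patch.] The same dual-overload pattern runs through the flush*/free*/get* declarations above: the first form mirrors the C API (explicit count plus pointer, noexcept), while the enhanced form accepts an ArrayProxy or a reference and returns its result by value. A short sketch under assumed, illustrative names (device, memory, pool, buffers are not defined anywhere in this patch):

#include <vulkan/vulkan.hpp>
#include <vector>

void memorySketch( vk::Device device, vk::DeviceMemory memory,
                   vk::CommandPool pool, std::vector<vk::CommandBuffer> const & buffers )
{
  vk::MappedMemoryRange range( memory, 0, VK_WHOLE_SIZE );

  // C-style overload: explicit count and pointer, vk::Result returned.
  vk::Result r = device.flushMappedMemoryRanges( 1, &range );
  (void) r;

  // Enhanced-mode overload: an ArrayProxy accepts a single element, an
  // initializer list, or a container; failures surface as exceptions.
  device.flushMappedMemoryRanges( range );

  // ArrayProxy likewise replaces the (count, pointer) pair in the free*
  // family; device.free( pool, buffers ) is the equivalent shorthand
  // declared above.
  device.freeCommandBuffers( pool, buffers );
}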
template + DeviceAddress getBufferAddressEXT( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DeviceAddress getBufferAddressEXT( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + DeviceAddress getBufferAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + DeviceAddress getBufferAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getBufferMemoryRequirements2( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getBufferMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + uint64_t getBufferOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + uint64_t getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + uint64_t getBufferOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + uint64_t getBufferOpaqueCaptureAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getCalibratedTimestampsEXT( uint32_t timestampCount, const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getCalibratedTimestampsEXT( ArrayProxy const ×tampInfos, ArrayProxy const ×tamps, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, uint64_t>>::type getCalibratedTimestampsEXT( ArrayProxy const & timestampInfos, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint64_tAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType, uint64_t>>::type getCalibratedTimestampsEXT( ArrayProxy const & timestampInfos, Uint64_tAllocator & uint64_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + uint32_t getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD Result getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + void getDescriptorSetLayoutSupport( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getDescriptorSetLayoutSupportKHR( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, 
VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getAccelerationStructureCompatibilityKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureVersionInfoKHR* pVersionInfo, VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR* pCompatibility, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionInfoKHR & versionInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getGroupPresentCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getGroupPresentCapabilitiesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getGroupSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize* pCommittedMemoryInBytes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::DeviceSize getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + uint64_t getMemoryOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + uint64_t getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + uint64_t getMemoryOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + uint64_t getMemoryOpaqueCaptureAddressKHR( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Queue getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getQueue2( const VULKAN_HPP_NAMESPACE::DeviceQueueInfo2* pQueueInfo, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Queue getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD Result getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD Result getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + void getGeneratedCommandsMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; 
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getImageMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MemoryRequirements2 getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageMemoryRequirementsAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, SparseImageMemoryRequirementsAllocator & sparseImageMemoryRequirementsAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getImageSparseMemoryRequirements2( 
const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageMemoryRequirements2Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getImageSparseMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageMemoryRequirements2Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource* pSubresource, VULKAN_HPP_NAMESPACE::SubresourceLayout* pLayout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::SubresourceLayout getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const ImageSubresource & subresource, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + uint32_t getImageViewHandleNVX( const VULKAN_HPP_NAMESPACE::ImageViewHandleInfoNVX* pInfo, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + uint32_t getImageViewHandleNVX( const ImageViewHandleInfoNVX & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD Result getMemoryAndroidHardwareBufferANDROID( const VULKAN_HPP_NAMESPACE::MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result getMemoryFdKHR( const VULKAN_HPP_NAMESPACE::MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getMemoryWin32HandleKHR( const VULKAN_HPP_NAMESPACE::MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef 
VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VULKAN_HPP_NAMESPACE::PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PastPresentationTimingGOOGLEAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, PastPresentationTimingGOOGLEAllocator & pastPresentationTimingGOOGLEAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, VULKAN_HPP_NAMESPACE::PerformanceValueINTEL* pValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint8_tAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getPipelineExecutableInternalRepresentationsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VULKAN_HPP_NAMESPACE::PipelineExecutableInternalRepresentationKHR* pInternalRepresentations, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineExecutableInternalRepresentationKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableInternalRepresentationKHRAllocator & pipelineExecutableInternalRepresentationKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getPipelineExecutablePropertiesKHR( const VULKAN_HPP_NAMESPACE::PipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VULKAN_HPP_NAMESPACE::PipelineExecutablePropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineExecutablePropertiesKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, PipelineExecutablePropertiesKHRAllocator & pipelineExecutablePropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getPipelineExecutableStatisticsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticKHR* pStatistics, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD 
typename ResultValueType>::type getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PipelineExecutableStatisticKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableStatisticKHRAllocator & pipelineExecutableStatisticKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD uint64_t getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy const &data, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD ResultValue> getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD ResultValue getQueryPoolResult( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = 
VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRayTracingCaptureReplayShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRayTracingShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy const &data, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRayTracingShaderGroupHandleNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + DeviceSize getRayTracingShaderGroupStackSizeKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t group, VULKAN_HPP_NAMESPACE::ShaderGroupShaderKHR groupShader, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + VULKAN_HPP_NODISCARD Result 
getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, VULKAN_HPP_NAMESPACE::Extent2D* pGranularity, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Extent2D getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD 
infoType, size_t* pInfoSize, void* pInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint8_tAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VULKAN_HPP_NAMESPACE::Image* pSwapchainImages, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = ImageAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, ImageAllocator & imageAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD Result getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + 
VULKAN_HPP_NODISCARD typename ResultValueType>::type getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Uint8_tAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result importFenceFdKHR( const VULKAN_HPP_NAMESPACE::ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result importFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result importSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result importSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result initializePerformanceApiINTEL( const VULKAN_HPP_NAMESPACE::InitializePerformanceApiInfoINTEL* pInitializeInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type invalidateMappedMemoryRanges( ArrayProxy const & memoryRanges, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, void** ppData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::PipelineCache* pSrcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, ArrayProxy const & srcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pSrcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, ArrayProxy const & srcCaches, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result registerEventEXT( const VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT* pDeviceEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type registerEventEXTUnique( const DeviceEventInfoEXT & deviceEventInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT* pDisplayEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type registerDisplayEventEXTUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + void releaseProfilingLockKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + typename ResultValueType::type resetCommandPool( 
VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + typename ResultValueType::type resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + typename ResultValueType::type resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result resetFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type resetFences( ArrayProxy const & fences, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + VULKAN_HPP_NODISCARD Result setDebugUtilsObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
ResultValueType::type setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + void setHdrMetadataEXT( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, const VULKAN_HPP_NAMESPACE::HdrMetadataEXT* pMetadata, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void setHdrMetadataEXT( ArrayProxy const & swapchains, ArrayProxy const & metadata, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + typename ResultValueType::type setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD Result signalSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type signalSemaphore( const SemaphoreSignalInfo & signalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result signalSemaphoreKHR( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type signalSemaphoreKHR( const SemaphoreSignalInfo & signalInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void uninitializePerformanceApiINTEL( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, 
VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + template + void updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + + + template + void updateDescriptorSets( uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VULKAN_HPP_NAMESPACE::CopyDescriptorSet* pDescriptorCopies, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void updateDescriptorSets( ArrayProxy const & descriptorWrites, ArrayProxy const & descriptorCopies, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result waitForFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result waitForFences( ArrayProxy const & fences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result waitSemaphores( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result waitSemaphoresKHR( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD Result waitSemaphoresKHR( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, void* pData, size_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type writeAccelerationStructuresPropertiesKHR( ArrayProxy const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, ArrayProxy const &data, size_t stride, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type 
writeAccelerationStructuresPropertiesKHR( ArrayProxy const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, size_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type writeAccelerationStructuresPropertyKHR( ArrayProxy const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t stride, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDevice() const VULKAN_HPP_NOEXCEPT + { + return m_device; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_device != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_device == VK_NULL_HANDLE; + } + + private: + VkDevice m_device; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Device ) == sizeof( VkDevice ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Device; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Device; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Device; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct DisplayModeParametersKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayModeParametersKHR(VULKAN_HPP_NAMESPACE::Extent2D visibleRegion_ = {}, uint32_t refreshRate_ = {}) VULKAN_HPP_NOEXCEPT + : visibleRegion( visibleRegion_ ), refreshRate( refreshRate_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayModeParametersKHR( DisplayModeParametersKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayModeParametersKHR( VkDisplayModeParametersKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayModeParametersKHR & operator=( VkDisplayModeParametersKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayModeParametersKHR & operator=( DisplayModeParametersKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayModeParametersKHR ) ); + return *this; + } + + DisplayModeParametersKHR & setVisibleRegion( VULKAN_HPP_NAMESPACE::Extent2D const & visibleRegion_ ) VULKAN_HPP_NOEXCEPT + { + visibleRegion = visibleRegion_; + return *this; + } + + DisplayModeParametersKHR & setRefreshRate( uint32_t refreshRate_ ) VULKAN_HPP_NOEXCEPT + { + refreshRate = refreshRate_; + return *this; + } + + + operator VkDisplayModeParametersKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayModeParametersKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayModeParametersKHR const& ) const = default; +#else + bool operator==( DisplayModeParametersKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( visibleRegion == rhs.visibleRegion ) + && ( refreshRate == rhs.refreshRate ); + } + + bool operator!=( DisplayModeParametersKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Extent2D visibleRegion = {}; + uint32_t refreshRate = {}; + + }; 
+ static_assert( sizeof( DisplayModeParametersKHR ) == sizeof( VkDisplayModeParametersKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct DisplayModeCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayModeCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayModeCreateInfoKHR(VULKAN_HPP_NAMESPACE::DisplayModeCreateFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR parameters_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), parameters( parameters_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayModeCreateInfoKHR( DisplayModeCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayModeCreateInfoKHR( VkDisplayModeCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayModeCreateInfoKHR & operator=( VkDisplayModeCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayModeCreateInfoKHR & operator=( DisplayModeCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayModeCreateInfoKHR ) ); + return *this; + } + + DisplayModeCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DisplayModeCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::DisplayModeCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DisplayModeCreateInfoKHR & setParameters( VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR const & parameters_ ) VULKAN_HPP_NOEXCEPT + { + parameters = parameters_; + return *this; + } + + + operator VkDisplayModeCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayModeCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayModeCreateInfoKHR const& ) const = default; +#else + bool operator==( DisplayModeCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( parameters == rhs.parameters ); + } + + bool operator!=( DisplayModeCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayModeCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplayModeCreateFlagsKHR flags = {}; + VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR parameters = {}; + + }; + static_assert( sizeof( DisplayModeCreateInfoKHR ) == sizeof( VkDisplayModeCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayModeCreateInfoKHR; + }; + + class DisplayModeKHR + { + public: + using CType = VkDisplayModeKHR; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDisplayModeKHR; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDisplayModeKHR; + + public: + VULKAN_HPP_CONSTEXPR DisplayModeKHR() VULKAN_HPP_NOEXCEPT + : m_displayModeKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DisplayModeKHR( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_displayModeKHR(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DisplayModeKHR( VkDisplayModeKHR displayModeKHR ) VULKAN_HPP_NOEXCEPT + : m_displayModeKHR( displayModeKHR ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DisplayModeKHR & operator=(VkDisplayModeKHR displayModeKHR) VULKAN_HPP_NOEXCEPT + { + m_displayModeKHR = displayModeKHR; + return *this; + } +#endif + + DisplayModeKHR & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_displayModeKHR = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayModeKHR const& ) const = default; +#else + bool operator==( DisplayModeKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR == rhs.m_displayModeKHR; + } + + bool operator!=(DisplayModeKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR != rhs.m_displayModeKHR; + } + + bool operator<(DisplayModeKHR const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR < rhs.m_displayModeKHR; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDisplayModeKHR() const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_displayModeKHR == VK_NULL_HANDLE; + } + + private: + VkDisplayModeKHR m_displayModeKHR; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DisplayModeKHR ) == sizeof( VkDisplayModeKHR ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DisplayModeKHR; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DisplayModeKHR; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DisplayModeKHR; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct ExtensionProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 ExtensionProperties(std::array const& extensionName_ = {}, uint32_t specVersion_ = {}) VULKAN_HPP_NOEXCEPT + : extensionName( extensionName_ ), specVersion( specVersion_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 ExtensionProperties( ExtensionProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExtensionProperties( VkExtensionProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExtensionProperties & operator=( VkExtensionProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExtensionProperties & operator=( ExtensionProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExtensionProperties ) ); + return *this; + } + + + operator VkExtensionProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExtensionProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExtensionProperties const& ) const = default; +#else + bool operator==( ExtensionProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( extensionName == rhs.extensionName ) + && ( specVersion == rhs.specVersion ); + } + + bool operator!=( ExtensionProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ArrayWrapper1D extensionName = {}; + uint32_t specVersion = {}; + + }; + static_assert( sizeof( ExtensionProperties ) == sizeof( VkExtensionProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct LayerProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 LayerProperties(std::array const& layerName_ = {}, uint32_t specVersion_ = {}, uint32_t implementationVersion_ = {}, std::array const& description_ = {}) VULKAN_HPP_NOEXCEPT + : layerName( layerName_ ), specVersion( specVersion_ ), implementationVersion( implementationVersion_ ), description( description_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 LayerProperties( LayerProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + LayerProperties( VkLayerProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + LayerProperties & operator=( VkLayerProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + LayerProperties & operator=( LayerProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( LayerProperties ) ); + return *this; + } + + + operator VkLayerProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkLayerProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( LayerProperties const& ) const = default; +#else + bool operator==( LayerProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( layerName == rhs.layerName ) + && ( specVersion == rhs.specVersion ) + && ( implementationVersion == rhs.implementationVersion ) + && ( description == rhs.description ); + } + + bool operator!=( LayerProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ArrayWrapper1D layerName = {}; + uint32_t specVersion = {}; + uint32_t implementationVersion = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D description = {}; + + }; + static_assert( sizeof( LayerProperties ) == sizeof( VkLayerProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct PerformanceCounterKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceCounterKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PerformanceCounterKHR(VULKAN_HPP_NAMESPACE::PerformanceCounterUnitKHR unit_ = VULKAN_HPP_NAMESPACE::PerformanceCounterUnitKHR::eGeneric, VULKAN_HPP_NAMESPACE::PerformanceCounterScopeKHR scope_ = VULKAN_HPP_NAMESPACE::PerformanceCounterScopeKHR::eCommandBuffer, VULKAN_HPP_NAMESPACE::PerformanceCounterStorageKHR storage_ = VULKAN_HPP_NAMESPACE::PerformanceCounterStorageKHR::eInt32, std::array const& uuid_ = {}) VULKAN_HPP_NOEXCEPT + : unit( unit_ ), scope( scope_ ), storage( storage_ ), uuid( uuid_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PerformanceCounterKHR( PerformanceCounterKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceCounterKHR( VkPerformanceCounterKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceCounterKHR & operator=( VkPerformanceCounterKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceCounterKHR & operator=( PerformanceCounterKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceCounterKHR ) ); + return *this; + } + + + operator VkPerformanceCounterKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceCounterKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceCounterKHR const& ) const = default; +#else + bool operator==( PerformanceCounterKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( unit == rhs.unit ) + && ( scope == rhs.scope ) + && ( storage == rhs.storage ) + && ( uuid == rhs.uuid ); + } + + bool operator!=( PerformanceCounterKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceCounterKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PerformanceCounterUnitKHR unit = VULKAN_HPP_NAMESPACE::PerformanceCounterUnitKHR::eGeneric; + VULKAN_HPP_NAMESPACE::PerformanceCounterScopeKHR scope = VULKAN_HPP_NAMESPACE::PerformanceCounterScopeKHR::eCommandBuffer; + VULKAN_HPP_NAMESPACE::PerformanceCounterStorageKHR storage = VULKAN_HPP_NAMESPACE::PerformanceCounterStorageKHR::eInt32; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D uuid = {}; + + }; + static_assert( sizeof( PerformanceCounterKHR ) == sizeof( VkPerformanceCounterKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PerformanceCounterKHR; + }; + + struct PerformanceCounterDescriptionKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceCounterDescriptionKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PerformanceCounterDescriptionKHR(VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionFlagsKHR flags_ = {}, std::array const& name_ = {}, std::array const& category_ = {}, std::array const& description_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), name( name_ ), category( category_ ), description( description_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PerformanceCounterDescriptionKHR( PerformanceCounterDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceCounterDescriptionKHR( VkPerformanceCounterDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceCounterDescriptionKHR & operator=( VkPerformanceCounterDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceCounterDescriptionKHR & operator=( PerformanceCounterDescriptionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceCounterDescriptionKHR ) ); + return *this; + } + + + operator VkPerformanceCounterDescriptionKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceCounterDescriptionKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceCounterDescriptionKHR const& ) const = default; +#else + bool operator==( PerformanceCounterDescriptionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( name == rhs.name ) + && ( category == rhs.category ) + && ( description == rhs.description ); + } + + bool operator!=( PerformanceCounterDescriptionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceCounterDescriptionKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionFlagsKHR flags = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D name = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D category = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D description = {}; + + }; + static_assert( sizeof( PerformanceCounterDescriptionKHR ) == sizeof( VkPerformanceCounterDescriptionKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PerformanceCounterDescriptionKHR; + }; + + struct DisplayModePropertiesKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayModePropertiesKHR(VULKAN_HPP_NAMESPACE::DisplayModeKHR displayMode_ = {}, VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR parameters_ = {}) VULKAN_HPP_NOEXCEPT + : displayMode( displayMode_ ), parameters( parameters_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayModePropertiesKHR( DisplayModePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayModePropertiesKHR( VkDisplayModePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayModePropertiesKHR & operator=( VkDisplayModePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayModePropertiesKHR & operator=( DisplayModePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayModePropertiesKHR ) ); + return *this; + } + + + operator VkDisplayModePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayModePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayModePropertiesKHR const& ) const = default; +#else + bool operator==( DisplayModePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( displayMode == rhs.displayMode ) + && ( parameters == rhs.parameters ); + } + + bool operator!=( DisplayModePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DisplayModeKHR displayMode = {}; + VULKAN_HPP_NAMESPACE::DisplayModeParametersKHR parameters = {}; + + }; + static_assert( sizeof( DisplayModePropertiesKHR ) == sizeof( VkDisplayModePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct DisplayModeProperties2KHR
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayModeProperties2KHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR DisplayModeProperties2KHR(VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR displayModeProperties_ = {}) VULKAN_HPP_NOEXCEPT
+    : displayModeProperties( displayModeProperties_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR DisplayModeProperties2KHR( DisplayModeProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    DisplayModeProperties2KHR( VkDisplayModeProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    DisplayModeProperties2KHR & operator=( VkDisplayModeProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR const *>( &rhs );
+      return *this;
+    }
+
+    DisplayModeProperties2KHR & operator=( DisplayModeProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( DisplayModeProperties2KHR ) );
+      return *this;
+    }
+
+
+    operator VkDisplayModeProperties2KHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkDisplayModeProperties2KHR*>( this );
+    }
+
+    operator VkDisplayModeProperties2KHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkDisplayModeProperties2KHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DisplayModeProperties2KHR const& ) const = default;
+#else
+    bool operator==( DisplayModeProperties2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( displayModeProperties == rhs.displayModeProperties );
+    }
+
+    bool operator!=( DisplayModeProperties2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayModeProperties2KHR;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR displayModeProperties = {};
+
+  };
+  static_assert( sizeof( DisplayModeProperties2KHR ) == sizeof( VkDisplayModeProperties2KHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<DisplayModeProperties2KHR>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = DisplayModeProperties2KHR; + }; + + struct DisplayPlaneInfo2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayPlaneInfo2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPlaneInfo2KHR(VULKAN_HPP_NAMESPACE::DisplayModeKHR mode_ = {}, uint32_t planeIndex_ = {}) VULKAN_HPP_NOEXCEPT + : mode( mode_ ), planeIndex( planeIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPlaneInfo2KHR( DisplayPlaneInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPlaneInfo2KHR( VkDisplayPlaneInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPlaneInfo2KHR & operator=( VkDisplayPlaneInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPlaneInfo2KHR & operator=( DisplayPlaneInfo2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPlaneInfo2KHR ) ); + return *this; + } + + DisplayPlaneInfo2KHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DisplayPlaneInfo2KHR & setMode( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode_ ) VULKAN_HPP_NOEXCEPT + { + mode = mode_; + return *this; + } + + DisplayPlaneInfo2KHR & setPlaneIndex( uint32_t planeIndex_ ) VULKAN_HPP_NOEXCEPT + { + planeIndex = planeIndex_; + return *this; + } + + + operator VkDisplayPlaneInfo2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPlaneInfo2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPlaneInfo2KHR const& ) const = default; +#else + bool operator==( DisplayPlaneInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( mode == rhs.mode ) + && ( planeIndex == rhs.planeIndex ); + } + + bool operator!=( DisplayPlaneInfo2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayPlaneInfo2KHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplayModeKHR mode = {}; + uint32_t planeIndex = {}; + + }; + static_assert( sizeof( DisplayPlaneInfo2KHR ) == sizeof( VkDisplayPlaneInfo2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayPlaneInfo2KHR; + }; + + struct DisplayPlaneCapabilitiesKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPlaneCapabilitiesKHR(VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagsKHR supportedAlpha_ = {}, VULKAN_HPP_NAMESPACE::Offset2D minSrcPosition_ = {}, VULKAN_HPP_NAMESPACE::Offset2D maxSrcPosition_ = {}, VULKAN_HPP_NAMESPACE::Extent2D minSrcExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxSrcExtent_ = {}, VULKAN_HPP_NAMESPACE::Offset2D minDstPosition_ = {}, VULKAN_HPP_NAMESPACE::Offset2D maxDstPosition_ = {}, VULKAN_HPP_NAMESPACE::Extent2D minDstExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxDstExtent_ = {}) VULKAN_HPP_NOEXCEPT + : supportedAlpha( supportedAlpha_ ), minSrcPosition( minSrcPosition_ ), maxSrcPosition( maxSrcPosition_ ), minSrcExtent( minSrcExtent_ ), maxSrcExtent( maxSrcExtent_ ), minDstPosition( minDstPosition_ ), maxDstPosition( maxDstPosition_ ), minDstExtent( minDstExtent_ ), maxDstExtent( maxDstExtent_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPlaneCapabilitiesKHR( DisplayPlaneCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPlaneCapabilitiesKHR( VkDisplayPlaneCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPlaneCapabilitiesKHR & operator=( VkDisplayPlaneCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPlaneCapabilitiesKHR & operator=( DisplayPlaneCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPlaneCapabilitiesKHR ) ); + return *this; + } + + + operator VkDisplayPlaneCapabilitiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPlaneCapabilitiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPlaneCapabilitiesKHR const& ) const = default; +#else + bool operator==( DisplayPlaneCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( supportedAlpha == rhs.supportedAlpha ) + && ( minSrcPosition == rhs.minSrcPosition ) + && ( maxSrcPosition == rhs.maxSrcPosition ) + && ( minSrcExtent == rhs.minSrcExtent ) + && ( maxSrcExtent == rhs.maxSrcExtent ) + && ( minDstPosition == rhs.minDstPosition ) + && ( maxDstPosition == rhs.maxDstPosition ) + && ( minDstExtent == rhs.minDstExtent ) + && ( maxDstExtent == rhs.maxDstExtent ); + } + + bool operator!=( DisplayPlaneCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagsKHR supportedAlpha = {}; + VULKAN_HPP_NAMESPACE::Offset2D minSrcPosition = {}; + VULKAN_HPP_NAMESPACE::Offset2D maxSrcPosition = {}; + VULKAN_HPP_NAMESPACE::Extent2D minSrcExtent = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxSrcExtent = {}; + VULKAN_HPP_NAMESPACE::Offset2D minDstPosition = {}; + VULKAN_HPP_NAMESPACE::Offset2D maxDstPosition = {}; + VULKAN_HPP_NAMESPACE::Extent2D minDstExtent = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxDstExtent = {}; + + }; + static_assert( sizeof( DisplayPlaneCapabilitiesKHR ) == sizeof( VkDisplayPlaneCapabilitiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct DisplayPlaneCapabilities2KHR
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayPlaneCapabilities2KHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR DisplayPlaneCapabilities2KHR(VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities_ = {}) VULKAN_HPP_NOEXCEPT
+    : capabilities( capabilities_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR DisplayPlaneCapabilities2KHR( DisplayPlaneCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    DisplayPlaneCapabilities2KHR( VkDisplayPlaneCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    DisplayPlaneCapabilities2KHR & operator=( VkDisplayPlaneCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR const *>( &rhs );
+      return *this;
+    }
+
+    DisplayPlaneCapabilities2KHR & operator=( DisplayPlaneCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( DisplayPlaneCapabilities2KHR ) );
+      return *this;
+    }
+
+
+    operator VkDisplayPlaneCapabilities2KHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkDisplayPlaneCapabilities2KHR*>( this );
+    }
+
+    operator VkDisplayPlaneCapabilities2KHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkDisplayPlaneCapabilities2KHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( DisplayPlaneCapabilities2KHR const& ) const = default;
+#else
+    bool operator==( DisplayPlaneCapabilities2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( capabilities == rhs.capabilities );
+    }
+
+    bool operator!=( DisplayPlaneCapabilities2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayPlaneCapabilities2KHR;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities = {};
+
+  };
+  static_assert( sizeof( DisplayPlaneCapabilities2KHR ) == sizeof( VkDisplayPlaneCapabilities2KHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<DisplayPlaneCapabilities2KHR>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = DisplayPlaneCapabilities2KHR; + }; + + struct DisplayPlanePropertiesKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPlanePropertiesKHR(VULKAN_HPP_NAMESPACE::DisplayKHR currentDisplay_ = {}, uint32_t currentStackIndex_ = {}) VULKAN_HPP_NOEXCEPT + : currentDisplay( currentDisplay_ ), currentStackIndex( currentStackIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPlanePropertiesKHR( DisplayPlanePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPlanePropertiesKHR( VkDisplayPlanePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPlanePropertiesKHR & operator=( VkDisplayPlanePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPlanePropertiesKHR & operator=( DisplayPlanePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPlanePropertiesKHR ) ); + return *this; + } + + + operator VkDisplayPlanePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPlanePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPlanePropertiesKHR const& ) const = default; +#else + bool operator==( DisplayPlanePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( currentDisplay == rhs.currentDisplay ) + && ( currentStackIndex == rhs.currentStackIndex ); + } + + bool operator!=( DisplayPlanePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DisplayKHR currentDisplay = {}; + uint32_t currentStackIndex = {}; + + }; + static_assert( sizeof( DisplayPlanePropertiesKHR ) == sizeof( VkDisplayPlanePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct DisplayPlaneProperties2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayPlaneProperties2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPlaneProperties2KHR(VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR displayPlaneProperties_ = {}) VULKAN_HPP_NOEXCEPT + : displayPlaneProperties( displayPlaneProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPlaneProperties2KHR( DisplayPlaneProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPlaneProperties2KHR( VkDisplayPlaneProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPlaneProperties2KHR & operator=( VkDisplayPlaneProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPlaneProperties2KHR & operator=( DisplayPlaneProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPlaneProperties2KHR ) ); + return *this; + } + + + operator VkDisplayPlaneProperties2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPlaneProperties2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPlaneProperties2KHR const& ) const = default; +#else + bool operator==( DisplayPlaneProperties2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayPlaneProperties == rhs.displayPlaneProperties ); + } + + bool operator!=( DisplayPlaneProperties2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayPlaneProperties2KHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR displayPlaneProperties = {}; + + }; + static_assert( sizeof( DisplayPlaneProperties2KHR ) == sizeof( VkDisplayPlaneProperties2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayPlaneProperties2KHR; + }; + + struct DisplayPropertiesKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPropertiesKHR(VULKAN_HPP_NAMESPACE::DisplayKHR display_ = {}, const char* displayName_ = {}, VULKAN_HPP_NAMESPACE::Extent2D physicalDimensions_ = {}, VULKAN_HPP_NAMESPACE::Extent2D physicalResolution_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms_ = {}, VULKAN_HPP_NAMESPACE::Bool32 planeReorderPossible_ = {}, VULKAN_HPP_NAMESPACE::Bool32 persistentContent_ = {}) VULKAN_HPP_NOEXCEPT + : display( display_ ), displayName( displayName_ ), physicalDimensions( physicalDimensions_ ), physicalResolution( physicalResolution_ ), supportedTransforms( supportedTransforms_ ), planeReorderPossible( planeReorderPossible_ ), persistentContent( persistentContent_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPropertiesKHR( DisplayPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPropertiesKHR( VkDisplayPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPropertiesKHR & operator=( VkDisplayPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPropertiesKHR & operator=( DisplayPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPropertiesKHR ) ); + return *this; + } + + + operator VkDisplayPropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPropertiesKHR const& ) const = default; +#else + bool operator==( DisplayPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( display == rhs.display ) + && ( displayName == rhs.displayName ) + && ( physicalDimensions == rhs.physicalDimensions ) + && ( physicalResolution == rhs.physicalResolution ) + && ( supportedTransforms == rhs.supportedTransforms ) + && ( planeReorderPossible == rhs.planeReorderPossible ) + && ( persistentContent == rhs.persistentContent ); + } + + bool operator!=( DisplayPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DisplayKHR display = {}; + const char* displayName = {}; + VULKAN_HPP_NAMESPACE::Extent2D physicalDimensions = {}; + VULKAN_HPP_NAMESPACE::Extent2D physicalResolution = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms = {}; + VULKAN_HPP_NAMESPACE::Bool32 planeReorderPossible = {}; + VULKAN_HPP_NAMESPACE::Bool32 persistentContent = {}; + + }; + static_assert( sizeof( DisplayPropertiesKHR ) == sizeof( VkDisplayPropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct DisplayProperties2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayProperties2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayProperties2KHR(VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR displayProperties_ = {}) VULKAN_HPP_NOEXCEPT + : displayProperties( displayProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayProperties2KHR( DisplayProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayProperties2KHR( VkDisplayProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayProperties2KHR & operator=( VkDisplayProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayProperties2KHR & operator=( DisplayProperties2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayProperties2KHR ) ); + return *this; + } + + + operator VkDisplayProperties2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayProperties2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayProperties2KHR const& ) const = default; +#else + bool operator==( DisplayProperties2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( displayProperties == rhs.displayProperties ); + } + + bool operator!=( DisplayProperties2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayProperties2KHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR displayProperties = {}; + + }; + static_assert( sizeof( DisplayProperties2KHR ) == sizeof( VkDisplayProperties2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayProperties2KHR; + }; + + struct PhysicalDeviceExternalBufferInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExternalBufferInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalBufferInfo(VULKAN_HPP_NAMESPACE::BufferCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), usage( usage_ ), handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalBufferInfo( PhysicalDeviceExternalBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceExternalBufferInfo( VkPhysicalDeviceExternalBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceExternalBufferInfo & operator=( VkPhysicalDeviceExternalBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceExternalBufferInfo & operator=( PhysicalDeviceExternalBufferInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceExternalBufferInfo ) ); + return *this; + } + + PhysicalDeviceExternalBufferInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalBufferInfo & setFlags( VULKAN_HPP_NAMESPACE::BufferCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PhysicalDeviceExternalBufferInfo & setUsage( VULKAN_HPP_NAMESPACE::BufferUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + PhysicalDeviceExternalBufferInfo & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkPhysicalDeviceExternalBufferInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceExternalBufferInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceExternalBufferInfo const& ) const = default; +#else + bool operator==( PhysicalDeviceExternalBufferInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( usage == rhs.usage ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalBufferInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExternalBufferInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::BufferCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::BufferUsageFlags usage = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( PhysicalDeviceExternalBufferInfo ) == sizeof( VkPhysicalDeviceExternalBufferInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExternalBufferInfo; + }; + using PhysicalDeviceExternalBufferInfoKHR = PhysicalDeviceExternalBufferInfo; + + struct ExternalMemoryProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalMemoryProperties(VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlags externalMemoryFeatures_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags exportFromImportedHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags compatibleHandleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : externalMemoryFeatures( externalMemoryFeatures_ ), exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ), compatibleHandleTypes( compatibleHandleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalMemoryProperties( ExternalMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalMemoryProperties( VkExternalMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalMemoryProperties & operator=( VkExternalMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalMemoryProperties & operator=( ExternalMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalMemoryProperties ) ); + return *this; + } + + + operator VkExternalMemoryProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalMemoryProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalMemoryProperties const& ) const = default; +#else + bool operator==( ExternalMemoryProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( externalMemoryFeatures == rhs.externalMemoryFeatures ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ); + } + + bool operator!=( ExternalMemoryProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlags externalMemoryFeatures = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags exportFromImportedHandleTypes = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags compatibleHandleTypes = {}; + + }; + static_assert( sizeof( ExternalMemoryProperties ) == sizeof( VkExternalMemoryProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+  using ExternalMemoryPropertiesKHR = ExternalMemoryProperties;
+
+  struct ExternalBufferProperties
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalBufferProperties;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR ExternalBufferProperties(VULKAN_HPP_NAMESPACE::ExternalMemoryProperties externalMemoryProperties_ = {}) VULKAN_HPP_NOEXCEPT
+    : externalMemoryProperties( externalMemoryProperties_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR ExternalBufferProperties( ExternalBufferProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ExternalBufferProperties( VkExternalBufferProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ExternalBufferProperties & operator=( VkExternalBufferProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ExternalBufferProperties const *>( &rhs );
+      return *this;
+    }
+
+    ExternalBufferProperties & operator=( ExternalBufferProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ExternalBufferProperties ) );
+      return *this;
+    }
+
+
+    operator VkExternalBufferProperties const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkExternalBufferProperties*>( this );
+    }
+
+    operator VkExternalBufferProperties &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkExternalBufferProperties*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( ExternalBufferProperties const& ) const = default;
+#else
+    bool operator==( ExternalBufferProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( externalMemoryProperties == rhs.externalMemoryProperties );
+    }
+
+    bool operator!=( ExternalBufferProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalBufferProperties;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::ExternalMemoryProperties externalMemoryProperties = {};
+
+  };
+  static_assert( sizeof( ExternalBufferProperties ) == sizeof( VkExternalBufferProperties ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ExternalBufferProperties>::value, "struct wrapper is not a standard layout!"
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eExternalBufferProperties>
+  {
+    using Type = ExternalBufferProperties;
+  };
+  using ExternalBufferPropertiesKHR = ExternalBufferProperties;
+
+  struct PhysicalDeviceExternalFenceInfo
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExternalFenceInfo;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalFenceInfo(VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT
+    : handleType( handleType_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalFenceInfo( PhysicalDeviceExternalFenceInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceExternalFenceInfo( VkPhysicalDeviceExternalFenceInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceExternalFenceInfo & operator=( VkPhysicalDeviceExternalFenceInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceExternalFenceInfo & operator=( PhysicalDeviceExternalFenceInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceExternalFenceInfo ) );
+      return *this;
+    }
+
+    PhysicalDeviceExternalFenceInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceExternalFenceInfo & setHandleType( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT
+    {
+      handleType = handleType_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceExternalFenceInfo const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceExternalFenceInfo*>( this );
+    }
+
+    operator VkPhysicalDeviceExternalFenceInfo &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceExternalFenceInfo*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceExternalFenceInfo const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceExternalFenceInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( handleType == rhs.handleType );
+    }
+
+    bool operator!=( PhysicalDeviceExternalFenceInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExternalFenceInfo;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlagBits::eOpaqueFd;
+
+  };
+  static_assert( sizeof( PhysicalDeviceExternalFenceInfo ) == sizeof( VkPhysicalDeviceExternalFenceInfo ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceExternalFenceInfo>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExternalFenceInfo; + }; + using PhysicalDeviceExternalFenceInfoKHR = PhysicalDeviceExternalFenceInfo; + + struct ExternalFenceProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalFenceProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalFenceProperties(VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags exportFromImportedHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags compatibleHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlags externalFenceFeatures_ = {}) VULKAN_HPP_NOEXCEPT + : exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ), compatibleHandleTypes( compatibleHandleTypes_ ), externalFenceFeatures( externalFenceFeatures_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalFenceProperties( ExternalFenceProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalFenceProperties( VkExternalFenceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalFenceProperties & operator=( VkExternalFenceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalFenceProperties & operator=( ExternalFenceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalFenceProperties ) ); + return *this; + } + + + operator VkExternalFenceProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalFenceProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalFenceProperties const& ) const = default; +#else + bool operator==( ExternalFenceProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ) + && ( externalFenceFeatures == rhs.externalFenceFeatures ); + } + + bool operator!=( ExternalFenceProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalFenceProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags exportFromImportedHandleTypes = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags compatibleHandleTypes = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceFeatureFlags externalFenceFeatures = {}; + + }; + static_assert( sizeof( ExternalFenceProperties ) == sizeof( VkExternalFenceProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalFenceProperties; + }; + using ExternalFencePropertiesKHR = ExternalFenceProperties; + + struct ImageFormatProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageFormatProperties(VULKAN_HPP_NAMESPACE::Extent3D maxExtent_ = {}, uint32_t maxMipLevels_ = {}, uint32_t maxArrayLayers_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxResourceSize_ = {}) VULKAN_HPP_NOEXCEPT + : maxExtent( maxExtent_ ), maxMipLevels( maxMipLevels_ ), maxArrayLayers( maxArrayLayers_ ), sampleCounts( sampleCounts_ ), maxResourceSize( maxResourceSize_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageFormatProperties( ImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageFormatProperties( VkImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageFormatProperties & operator=( VkImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageFormatProperties & operator=( ImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageFormatProperties ) ); + return *this; + } + + + operator VkImageFormatProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageFormatProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageFormatProperties const& ) const = default; +#else + bool operator==( ImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( maxExtent == rhs.maxExtent ) + && ( maxMipLevels == rhs.maxMipLevels ) + && ( maxArrayLayers == rhs.maxArrayLayers ) + && ( sampleCounts == rhs.sampleCounts ) + && ( maxResourceSize == rhs.maxResourceSize ); + } + + bool operator!=( ImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Extent3D maxExtent = {}; + uint32_t maxMipLevels = {}; + uint32_t maxArrayLayers = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts = {}; + VULKAN_HPP_NAMESPACE::DeviceSize maxResourceSize = {}; + + }; + static_assert( sizeof( ImageFormatProperties ) == sizeof( VkImageFormatProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct ExternalImageFormatPropertiesNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalImageFormatPropertiesNV(VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlagsNV externalMemoryFeatures_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV compatibleHandleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : imageFormatProperties( imageFormatProperties_ ), externalMemoryFeatures( externalMemoryFeatures_ ), exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ), compatibleHandleTypes( compatibleHandleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalImageFormatPropertiesNV( ExternalImageFormatPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalImageFormatPropertiesNV( VkExternalImageFormatPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalImageFormatPropertiesNV & operator=( VkExternalImageFormatPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalImageFormatPropertiesNV & operator=( ExternalImageFormatPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalImageFormatPropertiesNV ) ); + return *this; + } + + + operator VkExternalImageFormatPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalImageFormatPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalImageFormatPropertiesNV const& ) const = default; +#else + bool operator==( ExternalImageFormatPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( imageFormatProperties == rhs.imageFormatProperties ) + && ( externalMemoryFeatures == rhs.externalMemoryFeatures ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ); + } + + bool operator!=( ExternalImageFormatPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryFeatureFlagsNV externalMemoryFeatures = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV compatibleHandleTypes = {}; + + }; + static_assert( sizeof( ExternalImageFormatPropertiesNV ) == sizeof( VkExternalImageFormatPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct PhysicalDeviceExternalSemaphoreInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExternalSemaphoreInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalSemaphoreInfo(VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalSemaphoreInfo( PhysicalDeviceExternalSemaphoreInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceExternalSemaphoreInfo( VkPhysicalDeviceExternalSemaphoreInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceExternalSemaphoreInfo & operator=( VkPhysicalDeviceExternalSemaphoreInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceExternalSemaphoreInfo & operator=( PhysicalDeviceExternalSemaphoreInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceExternalSemaphoreInfo ) ); + return *this; + } + + PhysicalDeviceExternalSemaphoreInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalSemaphoreInfo & setHandleType( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkPhysicalDeviceExternalSemaphoreInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceExternalSemaphoreInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceExternalSemaphoreInfo const& ) const = default; +#else + bool operator==( PhysicalDeviceExternalSemaphoreInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalSemaphoreInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExternalSemaphoreInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( PhysicalDeviceExternalSemaphoreInfo ) == sizeof( VkPhysicalDeviceExternalSemaphoreInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExternalSemaphoreInfo; + }; + using PhysicalDeviceExternalSemaphoreInfoKHR = PhysicalDeviceExternalSemaphoreInfo; + + struct ExternalSemaphoreProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalSemaphoreProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalSemaphoreProperties(VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags compatibleHandleTypes_ = {}, VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlags externalSemaphoreFeatures_ = {}) VULKAN_HPP_NOEXCEPT + : exportFromImportedHandleTypes( exportFromImportedHandleTypes_ ), compatibleHandleTypes( compatibleHandleTypes_ ), externalSemaphoreFeatures( externalSemaphoreFeatures_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalSemaphoreProperties( ExternalSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalSemaphoreProperties( VkExternalSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalSemaphoreProperties & operator=( VkExternalSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalSemaphoreProperties & operator=( ExternalSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalSemaphoreProperties ) ); + return *this; + } + + + operator VkExternalSemaphoreProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalSemaphoreProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalSemaphoreProperties const& ) const = default; +#else + bool operator==( ExternalSemaphoreProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exportFromImportedHandleTypes == rhs.exportFromImportedHandleTypes ) + && ( compatibleHandleTypes == rhs.compatibleHandleTypes ) + && ( externalSemaphoreFeatures == rhs.externalSemaphoreFeatures ); + } + + bool operator!=( ExternalSemaphoreProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalSemaphoreProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags compatibleHandleTypes = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreFeatureFlags externalSemaphoreFeatures = {}; + + }; + static_assert( sizeof( ExternalSemaphoreProperties ) == sizeof( VkExternalSemaphoreProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalSemaphoreProperties; + }; + using ExternalSemaphorePropertiesKHR = ExternalSemaphoreProperties; + + struct PhysicalDeviceFeatures2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFeatures2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFeatures2(VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures features_ = {}) VULKAN_HPP_NOEXCEPT + : features( features_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFeatures2( PhysicalDeviceFeatures2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFeatures2( VkPhysicalDeviceFeatures2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFeatures2 & operator=( VkPhysicalDeviceFeatures2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFeatures2 & operator=( PhysicalDeviceFeatures2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFeatures2 ) ); + return *this; + } + + PhysicalDeviceFeatures2 & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFeatures2 & setFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures const & features_ ) VULKAN_HPP_NOEXCEPT + { + features = features_; + return *this; + } + + + operator VkPhysicalDeviceFeatures2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFeatures2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFeatures2 const& ) const = default; +#else + bool operator==( PhysicalDeviceFeatures2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( features == rhs.features ); + } + + bool operator!=( PhysicalDeviceFeatures2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFeatures2; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures features = {}; + + }; + static_assert( sizeof( PhysicalDeviceFeatures2 ) == sizeof( VkPhysicalDeviceFeatures2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceFeatures2; + }; + using PhysicalDeviceFeatures2KHR = PhysicalDeviceFeatures2; + + struct FormatProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FormatProperties(VULKAN_HPP_NAMESPACE::FormatFeatureFlags linearTilingFeatures_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags optimalTilingFeatures_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags bufferFeatures_ = {}) VULKAN_HPP_NOEXCEPT + : linearTilingFeatures( linearTilingFeatures_ ), optimalTilingFeatures( optimalTilingFeatures_ ), bufferFeatures( bufferFeatures_ ) + {} + + VULKAN_HPP_CONSTEXPR FormatProperties( FormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FormatProperties( VkFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FormatProperties & operator=( VkFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FormatProperties & operator=( FormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FormatProperties ) ); + return *this; + } + + + operator VkFormatProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFormatProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FormatProperties const& ) const = default; +#else + bool operator==( FormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( linearTilingFeatures == rhs.linearTilingFeatures ) + && ( optimalTilingFeatures == rhs.optimalTilingFeatures ) + && ( bufferFeatures == rhs.bufferFeatures ); + } + + bool operator!=( FormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::FormatFeatureFlags linearTilingFeatures = {}; + VULKAN_HPP_NAMESPACE::FormatFeatureFlags optimalTilingFeatures = {}; + VULKAN_HPP_NAMESPACE::FormatFeatureFlags bufferFeatures = {}; + + }; + static_assert( sizeof( FormatProperties ) == sizeof( VkFormatProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct FormatProperties2
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFormatProperties2;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR FormatProperties2(VULKAN_HPP_NAMESPACE::FormatProperties formatProperties_ = {}) VULKAN_HPP_NOEXCEPT
+      : formatProperties( formatProperties_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR FormatProperties2( FormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    FormatProperties2( VkFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    FormatProperties2 & operator=( VkFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::FormatProperties2 const *>( &rhs );
+      return *this;
+    }
+
+    FormatProperties2 & operator=( FormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( FormatProperties2 ) );
+      return *this;
+    }
+
+
+    operator VkFormatProperties2 const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkFormatProperties2*>( this );
+    }
+
+    operator VkFormatProperties2 &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkFormatProperties2*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( FormatProperties2 const& ) const = default;
+#else
+    bool operator==( FormatProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( formatProperties == rhs.formatProperties );
+    }
+
+    bool operator!=( FormatProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFormatProperties2;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::FormatProperties formatProperties = {};
+
+  };
+  static_assert( sizeof( FormatProperties2 ) == sizeof( VkFormatProperties2 ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<FormatProperties2>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = FormatProperties2; + }; + using FormatProperties2KHR = FormatProperties2; + + struct PhysicalDeviceFragmentShadingRateKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateKHR(VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts_ = {}, VULKAN_HPP_NAMESPACE::Extent2D fragmentSize_ = {}) VULKAN_HPP_NOEXCEPT + : sampleCounts( sampleCounts_ ), fragmentSize( fragmentSize_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateKHR( PhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateKHR( VkPhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateKHR & operator=( VkPhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateKHR & operator=( PhysicalDeviceFragmentShadingRateKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleCounts == rhs.sampleCounts ) + && ( fragmentSize == rhs.fragmentSize ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampleCounts = {}; + VULKAN_HPP_NAMESPACE::Extent2D fragmentSize = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateKHR ) == sizeof( VkPhysicalDeviceFragmentShadingRateKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShadingRateKHR; + }; + + struct PhysicalDeviceImageFormatInfo2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImageFormatInfo2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageFormatInfo2(VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::ImageType type_ = VULKAN_HPP_NAMESPACE::ImageType::e1D, VULKAN_HPP_NAMESPACE::ImageTiling tiling_ = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : format( format_ ), type( type_ ), tiling( tiling_ ), usage( usage_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageFormatInfo2( PhysicalDeviceImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceImageFormatInfo2( VkPhysicalDeviceImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceImageFormatInfo2 & operator=( VkPhysicalDeviceImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceImageFormatInfo2 & operator=( PhysicalDeviceImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceImageFormatInfo2 ) ); + return *this; + } + + PhysicalDeviceImageFormatInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceImageFormatInfo2 & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + PhysicalDeviceImageFormatInfo2 & setType( VULKAN_HPP_NAMESPACE::ImageType type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + PhysicalDeviceImageFormatInfo2 & setTiling( VULKAN_HPP_NAMESPACE::ImageTiling tiling_ ) VULKAN_HPP_NOEXCEPT + { + tiling = tiling_; + return *this; + } + + PhysicalDeviceImageFormatInfo2 & setUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + PhysicalDeviceImageFormatInfo2 & setFlags( VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkPhysicalDeviceImageFormatInfo2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceImageFormatInfo2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceImageFormatInfo2 const& ) const = default; +#else + bool operator==( PhysicalDeviceImageFormatInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( type == rhs.type ) + && ( tiling == rhs.tiling ) + && ( usage == rhs.usage ) + && ( flags == rhs.flags ); + } + + bool operator!=( PhysicalDeviceImageFormatInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceImageFormatInfo2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::ImageType 
type = VULKAN_HPP_NAMESPACE::ImageType::e1D; + VULKAN_HPP_NAMESPACE::ImageTiling tiling = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal; + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage = {}; + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags = {}; + + }; + static_assert( sizeof( PhysicalDeviceImageFormatInfo2 ) == sizeof( VkPhysicalDeviceImageFormatInfo2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceImageFormatInfo2; + }; + using PhysicalDeviceImageFormatInfo2KHR = PhysicalDeviceImageFormatInfo2; + + struct ImageFormatProperties2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageFormatProperties2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageFormatProperties2(VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties_ = {}) VULKAN_HPP_NOEXCEPT + : imageFormatProperties( imageFormatProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageFormatProperties2( ImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageFormatProperties2( VkImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageFormatProperties2 & operator=( VkImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageFormatProperties2 & operator=( ImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageFormatProperties2 ) ); + return *this; + } + + + operator VkImageFormatProperties2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageFormatProperties2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageFormatProperties2 const& ) const = default; +#else + bool operator==( ImageFormatProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( imageFormatProperties == rhs.imageFormatProperties ); + } + + bool operator!=( ImageFormatProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageFormatProperties2; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties = {}; + + }; + static_assert( sizeof( ImageFormatProperties2 ) == sizeof( VkImageFormatProperties2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageFormatProperties2; + }; + using ImageFormatProperties2KHR = ImageFormatProperties2; + + struct MemoryType + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryType(VULKAN_HPP_NAMESPACE::MemoryPropertyFlags propertyFlags_ = {}, uint32_t heapIndex_ = {}) VULKAN_HPP_NOEXCEPT + : propertyFlags( propertyFlags_ ), heapIndex( heapIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryType( MemoryType const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryType( VkMemoryType const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryType & operator=( VkMemoryType const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryType & operator=( MemoryType const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryType ) ); + return *this; + } + + + operator VkMemoryType const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryType &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryType const& ) const = default; +#else + bool operator==( MemoryType const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( propertyFlags == rhs.propertyFlags ) + && ( heapIndex == rhs.heapIndex ); + } + + bool operator!=( MemoryType const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags propertyFlags = {}; + uint32_t heapIndex = {}; + + }; + static_assert( sizeof( MemoryType ) == sizeof( VkMemoryType ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct MemoryHeap + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryHeap(VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::MemoryHeapFlags flags_ = {}) VULKAN_HPP_NOEXCEPT + : size( size_ ), flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryHeap( MemoryHeap const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryHeap( VkMemoryHeap const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryHeap & operator=( VkMemoryHeap const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryHeap & operator=( MemoryHeap const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryHeap ) ); + return *this; + } + + + operator VkMemoryHeap const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryHeap &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryHeap const& ) const = default; +#else + bool operator==( MemoryHeap const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( size == rhs.size ) + && ( flags == rhs.flags ); + } + + bool operator!=( MemoryHeap const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VULKAN_HPP_NAMESPACE::MemoryHeapFlags flags = {}; + + }; + static_assert( sizeof( MemoryHeap ) == sizeof( VkMemoryHeap ), "struct and wrapper have different size!" 
);
+  static_assert( std::is_standard_layout<MemoryHeap>::value, "struct wrapper is not a standard layout!" );
+
+  struct PhysicalDeviceMemoryProperties
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryProperties(uint32_t memoryTypeCount_ = {}, std::array<VULKAN_HPP_NAMESPACE::MemoryType, VK_MAX_MEMORY_TYPES> const& memoryTypes_ = {}, uint32_t memoryHeapCount_ = {}, std::array<VULKAN_HPP_NAMESPACE::MemoryHeap, VK_MAX_MEMORY_HEAPS> const& memoryHeaps_ = {}) VULKAN_HPP_NOEXCEPT
+      : memoryTypeCount( memoryTypeCount_ ), memoryTypes( memoryTypes_ ), memoryHeapCount( memoryHeapCount_ ), memoryHeaps( memoryHeaps_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryProperties( PhysicalDeviceMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceMemoryProperties( VkPhysicalDeviceMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceMemoryProperties & operator=( VkPhysicalDeviceMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceMemoryProperties & operator=( PhysicalDeviceMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceMemoryProperties ) );
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceMemoryProperties const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceMemoryProperties*>( this );
+    }
+
+    operator VkPhysicalDeviceMemoryProperties &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceMemoryProperties*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceMemoryProperties const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceMemoryProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( memoryTypeCount == rhs.memoryTypeCount )
+          && ( memoryTypes == rhs.memoryTypes )
+          && ( memoryHeapCount == rhs.memoryHeapCount )
+          && ( memoryHeaps == rhs.memoryHeaps );
+    }
+
+    bool operator!=( PhysicalDeviceMemoryProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t memoryTypeCount = {};
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<VULKAN_HPP_NAMESPACE::MemoryType, VK_MAX_MEMORY_TYPES> memoryTypes = {};
+    uint32_t memoryHeapCount = {};
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<VULKAN_HPP_NAMESPACE::MemoryHeap, VK_MAX_MEMORY_HEAPS> memoryHeaps = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceMemoryProperties ) == sizeof( VkPhysicalDeviceMemoryProperties ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceMemoryProperties>::value, "struct wrapper is not a standard layout!"
); + + struct PhysicalDeviceMemoryProperties2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMemoryProperties2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryProperties2(VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties memoryProperties_ = {}) VULKAN_HPP_NOEXCEPT + : memoryProperties( memoryProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryProperties2( PhysicalDeviceMemoryProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMemoryProperties2( VkPhysicalDeviceMemoryProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMemoryProperties2 & operator=( VkPhysicalDeviceMemoryProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMemoryProperties2 & operator=( PhysicalDeviceMemoryProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMemoryProperties2 ) ); + return *this; + } + + + operator VkPhysicalDeviceMemoryProperties2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMemoryProperties2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMemoryProperties2 const& ) const = default; +#else + bool operator==( PhysicalDeviceMemoryProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memoryProperties == rhs.memoryProperties ); + } + + bool operator!=( PhysicalDeviceMemoryProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMemoryProperties2; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties memoryProperties = {}; + + }; + static_assert( sizeof( PhysicalDeviceMemoryProperties2 ) == sizeof( VkPhysicalDeviceMemoryProperties2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceMemoryProperties2; + }; + using PhysicalDeviceMemoryProperties2KHR = PhysicalDeviceMemoryProperties2; + + struct MultisamplePropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMultisamplePropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MultisamplePropertiesEXT(VULKAN_HPP_NAMESPACE::Extent2D maxSampleLocationGridSize_ = {}) VULKAN_HPP_NOEXCEPT + : maxSampleLocationGridSize( maxSampleLocationGridSize_ ) + {} + + VULKAN_HPP_CONSTEXPR MultisamplePropertiesEXT( MultisamplePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MultisamplePropertiesEXT( VkMultisamplePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MultisamplePropertiesEXT & operator=( VkMultisamplePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MultisamplePropertiesEXT & operator=( MultisamplePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MultisamplePropertiesEXT ) ); + return *this; + } + + + operator VkMultisamplePropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMultisamplePropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MultisamplePropertiesEXT const& ) const = default; +#else + bool operator==( MultisamplePropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxSampleLocationGridSize == rhs.maxSampleLocationGridSize ); + } + + bool operator!=( MultisamplePropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMultisamplePropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxSampleLocationGridSize = {}; + + }; + static_assert( sizeof( MultisamplePropertiesEXT ) == sizeof( VkMultisamplePropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MultisamplePropertiesEXT; + }; + + struct PhysicalDeviceLimits + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLimits(uint32_t maxImageDimension1D_ = {}, uint32_t maxImageDimension2D_ = {}, uint32_t maxImageDimension3D_ = {}, uint32_t maxImageDimensionCube_ = {}, uint32_t maxImageArrayLayers_ = {}, uint32_t maxTexelBufferElements_ = {}, uint32_t maxUniformBufferRange_ = {}, uint32_t maxStorageBufferRange_ = {}, uint32_t maxPushConstantsSize_ = {}, uint32_t maxMemoryAllocationCount_ = {}, uint32_t maxSamplerAllocationCount_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize bufferImageGranularity_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize sparseAddressSpaceSize_ = {}, uint32_t maxBoundDescriptorSets_ = {}, uint32_t maxPerStageDescriptorSamplers_ = {}, uint32_t maxPerStageDescriptorUniformBuffers_ = {}, uint32_t maxPerStageDescriptorStorageBuffers_ = {}, uint32_t maxPerStageDescriptorSampledImages_ = {}, uint32_t maxPerStageDescriptorStorageImages_ = {}, uint32_t maxPerStageDescriptorInputAttachments_ = {}, uint32_t maxPerStageResources_ = {}, uint32_t maxDescriptorSetSamplers_ = {}, uint32_t maxDescriptorSetUniformBuffers_ = {}, uint32_t maxDescriptorSetUniformBuffersDynamic_ = {}, uint32_t maxDescriptorSetStorageBuffers_ = {}, uint32_t maxDescriptorSetStorageBuffersDynamic_ = {}, uint32_t maxDescriptorSetSampledImages_ = {}, uint32_t maxDescriptorSetStorageImages_ = {}, uint32_t maxDescriptorSetInputAttachments_ = {}, uint32_t maxVertexInputAttributes_ = {}, uint32_t maxVertexInputBindings_ = {}, uint32_t maxVertexInputAttributeOffset_ = {}, uint32_t maxVertexInputBindingStride_ = {}, uint32_t maxVertexOutputComponents_ = {}, uint32_t maxTessellationGenerationLevel_ = {}, uint32_t maxTessellationPatchSize_ = {}, uint32_t maxTessellationControlPerVertexInputComponents_ = {}, uint32_t maxTessellationControlPerVertexOutputComponents_ = {}, uint32_t maxTessellationControlPerPatchOutputComponents_ = {}, uint32_t maxTessellationControlTotalOutputComponents_ = {}, uint32_t maxTessellationEvaluationInputComponents_ = {}, uint32_t maxTessellationEvaluationOutputComponents_ = {}, uint32_t maxGeometryShaderInvocations_ = {}, uint32_t maxGeometryInputComponents_ = {}, uint32_t maxGeometryOutputComponents_ = {}, uint32_t maxGeometryOutputVertices_ = {}, uint32_t maxGeometryTotalOutputComponents_ = {}, uint32_t maxFragmentInputComponents_ = {}, uint32_t maxFragmentOutputAttachments_ = {}, uint32_t maxFragmentDualSrcAttachments_ = {}, uint32_t maxFragmentCombinedOutputResources_ = {}, uint32_t maxComputeSharedMemorySize_ = {}, std::array const& maxComputeWorkGroupCount_ = {}, uint32_t maxComputeWorkGroupInvocations_ = {}, std::array const& maxComputeWorkGroupSize_ = {}, uint32_t subPixelPrecisionBits_ = {}, uint32_t subTexelPrecisionBits_ = {}, uint32_t mipmapPrecisionBits_ = {}, uint32_t maxDrawIndexedIndexValue_ = {}, uint32_t maxDrawIndirectCount_ = {}, float maxSamplerLodBias_ = {}, float maxSamplerAnisotropy_ = {}, uint32_t maxViewports_ = {}, std::array const& maxViewportDimensions_ = {}, std::array const& viewportBoundsRange_ = {}, uint32_t viewportSubPixelBits_ = {}, size_t minMemoryMapAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize minTexelBufferOffsetAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize minUniformBufferOffsetAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize minStorageBufferOffsetAlignment_ = {}, int32_t minTexelOffset_ = {}, uint32_t maxTexelOffset_ = {}, int32_t minTexelGatherOffset_ = {}, 
uint32_t maxTexelGatherOffset_ = {}, float minInterpolationOffset_ = {}, float maxInterpolationOffset_ = {}, uint32_t subPixelInterpolationOffsetBits_ = {}, uint32_t maxFramebufferWidth_ = {}, uint32_t maxFramebufferHeight_ = {}, uint32_t maxFramebufferLayers_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferColorSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferDepthSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferStencilSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferNoAttachmentsSampleCounts_ = {}, uint32_t maxColorAttachments_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageColorSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageIntegerSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageDepthSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageStencilSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags storageImageSampleCounts_ = {}, uint32_t maxSampleMaskWords_ = {}, VULKAN_HPP_NAMESPACE::Bool32 timestampComputeAndGraphics_ = {}, float timestampPeriod_ = {}, uint32_t maxClipDistances_ = {}, uint32_t maxCullDistances_ = {}, uint32_t maxCombinedClipAndCullDistances_ = {}, uint32_t discreteQueuePriorities_ = {}, std::array const& pointSizeRange_ = {}, std::array const& lineWidthRange_ = {}, float pointSizeGranularity_ = {}, float lineWidthGranularity_ = {}, VULKAN_HPP_NAMESPACE::Bool32 strictLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 standardSampleLocations_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize optimalBufferCopyOffsetAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize optimalBufferCopyRowPitchAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize nonCoherentAtomSize_ = {}) VULKAN_HPP_NOEXCEPT + : maxImageDimension1D( maxImageDimension1D_ ), maxImageDimension2D( maxImageDimension2D_ ), maxImageDimension3D( maxImageDimension3D_ ), maxImageDimensionCube( maxImageDimensionCube_ ), maxImageArrayLayers( maxImageArrayLayers_ ), maxTexelBufferElements( maxTexelBufferElements_ ), maxUniformBufferRange( maxUniformBufferRange_ ), maxStorageBufferRange( maxStorageBufferRange_ ), maxPushConstantsSize( maxPushConstantsSize_ ), maxMemoryAllocationCount( maxMemoryAllocationCount_ ), maxSamplerAllocationCount( maxSamplerAllocationCount_ ), bufferImageGranularity( bufferImageGranularity_ ), sparseAddressSpaceSize( sparseAddressSpaceSize_ ), maxBoundDescriptorSets( maxBoundDescriptorSets_ ), maxPerStageDescriptorSamplers( maxPerStageDescriptorSamplers_ ), maxPerStageDescriptorUniformBuffers( maxPerStageDescriptorUniformBuffers_ ), maxPerStageDescriptorStorageBuffers( maxPerStageDescriptorStorageBuffers_ ), maxPerStageDescriptorSampledImages( maxPerStageDescriptorSampledImages_ ), maxPerStageDescriptorStorageImages( maxPerStageDescriptorStorageImages_ ), maxPerStageDescriptorInputAttachments( maxPerStageDescriptorInputAttachments_ ), maxPerStageResources( maxPerStageResources_ ), maxDescriptorSetSamplers( maxDescriptorSetSamplers_ ), maxDescriptorSetUniformBuffers( maxDescriptorSetUniformBuffers_ ), maxDescriptorSetUniformBuffersDynamic( maxDescriptorSetUniformBuffersDynamic_ ), maxDescriptorSetStorageBuffers( maxDescriptorSetStorageBuffers_ ), maxDescriptorSetStorageBuffersDynamic( maxDescriptorSetStorageBuffersDynamic_ ), maxDescriptorSetSampledImages( maxDescriptorSetSampledImages_ ), maxDescriptorSetStorageImages( maxDescriptorSetStorageImages_ ), maxDescriptorSetInputAttachments( maxDescriptorSetInputAttachments_ ), maxVertexInputAttributes( maxVertexInputAttributes_ 
), maxVertexInputBindings( maxVertexInputBindings_ ), maxVertexInputAttributeOffset( maxVertexInputAttributeOffset_ ), maxVertexInputBindingStride( maxVertexInputBindingStride_ ), maxVertexOutputComponents( maxVertexOutputComponents_ ), maxTessellationGenerationLevel( maxTessellationGenerationLevel_ ), maxTessellationPatchSize( maxTessellationPatchSize_ ), maxTessellationControlPerVertexInputComponents( maxTessellationControlPerVertexInputComponents_ ), maxTessellationControlPerVertexOutputComponents( maxTessellationControlPerVertexOutputComponents_ ), maxTessellationControlPerPatchOutputComponents( maxTessellationControlPerPatchOutputComponents_ ), maxTessellationControlTotalOutputComponents( maxTessellationControlTotalOutputComponents_ ), maxTessellationEvaluationInputComponents( maxTessellationEvaluationInputComponents_ ), maxTessellationEvaluationOutputComponents( maxTessellationEvaluationOutputComponents_ ), maxGeometryShaderInvocations( maxGeometryShaderInvocations_ ), maxGeometryInputComponents( maxGeometryInputComponents_ ), maxGeometryOutputComponents( maxGeometryOutputComponents_ ), maxGeometryOutputVertices( maxGeometryOutputVertices_ ), maxGeometryTotalOutputComponents( maxGeometryTotalOutputComponents_ ), maxFragmentInputComponents( maxFragmentInputComponents_ ), maxFragmentOutputAttachments( maxFragmentOutputAttachments_ ), maxFragmentDualSrcAttachments( maxFragmentDualSrcAttachments_ ), maxFragmentCombinedOutputResources( maxFragmentCombinedOutputResources_ ), maxComputeSharedMemorySize( maxComputeSharedMemorySize_ ), maxComputeWorkGroupCount( maxComputeWorkGroupCount_ ), maxComputeWorkGroupInvocations( maxComputeWorkGroupInvocations_ ), maxComputeWorkGroupSize( maxComputeWorkGroupSize_ ), subPixelPrecisionBits( subPixelPrecisionBits_ ), subTexelPrecisionBits( subTexelPrecisionBits_ ), mipmapPrecisionBits( mipmapPrecisionBits_ ), maxDrawIndexedIndexValue( maxDrawIndexedIndexValue_ ), maxDrawIndirectCount( maxDrawIndirectCount_ ), maxSamplerLodBias( maxSamplerLodBias_ ), maxSamplerAnisotropy( maxSamplerAnisotropy_ ), maxViewports( maxViewports_ ), maxViewportDimensions( maxViewportDimensions_ ), viewportBoundsRange( viewportBoundsRange_ ), viewportSubPixelBits( viewportSubPixelBits_ ), minMemoryMapAlignment( minMemoryMapAlignment_ ), minTexelBufferOffsetAlignment( minTexelBufferOffsetAlignment_ ), minUniformBufferOffsetAlignment( minUniformBufferOffsetAlignment_ ), minStorageBufferOffsetAlignment( minStorageBufferOffsetAlignment_ ), minTexelOffset( minTexelOffset_ ), maxTexelOffset( maxTexelOffset_ ), minTexelGatherOffset( minTexelGatherOffset_ ), maxTexelGatherOffset( maxTexelGatherOffset_ ), minInterpolationOffset( minInterpolationOffset_ ), maxInterpolationOffset( maxInterpolationOffset_ ), subPixelInterpolationOffsetBits( subPixelInterpolationOffsetBits_ ), maxFramebufferWidth( maxFramebufferWidth_ ), maxFramebufferHeight( maxFramebufferHeight_ ), maxFramebufferLayers( maxFramebufferLayers_ ), framebufferColorSampleCounts( framebufferColorSampleCounts_ ), framebufferDepthSampleCounts( framebufferDepthSampleCounts_ ), framebufferStencilSampleCounts( framebufferStencilSampleCounts_ ), framebufferNoAttachmentsSampleCounts( framebufferNoAttachmentsSampleCounts_ ), maxColorAttachments( maxColorAttachments_ ), sampledImageColorSampleCounts( sampledImageColorSampleCounts_ ), sampledImageIntegerSampleCounts( sampledImageIntegerSampleCounts_ ), sampledImageDepthSampleCounts( sampledImageDepthSampleCounts_ ), sampledImageStencilSampleCounts( sampledImageStencilSampleCounts_ ), 
storageImageSampleCounts( storageImageSampleCounts_ ), maxSampleMaskWords( maxSampleMaskWords_ ), timestampComputeAndGraphics( timestampComputeAndGraphics_ ), timestampPeriod( timestampPeriod_ ), maxClipDistances( maxClipDistances_ ), maxCullDistances( maxCullDistances_ ), maxCombinedClipAndCullDistances( maxCombinedClipAndCullDistances_ ), discreteQueuePriorities( discreteQueuePriorities_ ), pointSizeRange( pointSizeRange_ ), lineWidthRange( lineWidthRange_ ), pointSizeGranularity( pointSizeGranularity_ ), lineWidthGranularity( lineWidthGranularity_ ), strictLines( strictLines_ ), standardSampleLocations( standardSampleLocations_ ), optimalBufferCopyOffsetAlignment( optimalBufferCopyOffsetAlignment_ ), optimalBufferCopyRowPitchAlignment( optimalBufferCopyRowPitchAlignment_ ), nonCoherentAtomSize( nonCoherentAtomSize_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceLimits( PhysicalDeviceLimits const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceLimits( VkPhysicalDeviceLimits const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceLimits & operator=( VkPhysicalDeviceLimits const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceLimits & operator=( PhysicalDeviceLimits const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceLimits ) ); + return *this; + } + + + operator VkPhysicalDeviceLimits const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceLimits &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceLimits const& ) const = default; +#else + bool operator==( PhysicalDeviceLimits const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( maxImageDimension1D == rhs.maxImageDimension1D ) + && ( maxImageDimension2D == rhs.maxImageDimension2D ) + && ( maxImageDimension3D == rhs.maxImageDimension3D ) + && ( maxImageDimensionCube == rhs.maxImageDimensionCube ) + && ( maxImageArrayLayers == rhs.maxImageArrayLayers ) + && ( maxTexelBufferElements == rhs.maxTexelBufferElements ) + && ( maxUniformBufferRange == rhs.maxUniformBufferRange ) + && ( maxStorageBufferRange == rhs.maxStorageBufferRange ) + && ( maxPushConstantsSize == rhs.maxPushConstantsSize ) + && ( maxMemoryAllocationCount == rhs.maxMemoryAllocationCount ) + && ( maxSamplerAllocationCount == rhs.maxSamplerAllocationCount ) + && ( bufferImageGranularity == rhs.bufferImageGranularity ) + && ( sparseAddressSpaceSize == rhs.sparseAddressSpaceSize ) + && ( maxBoundDescriptorSets == rhs.maxBoundDescriptorSets ) + && ( maxPerStageDescriptorSamplers == rhs.maxPerStageDescriptorSamplers ) + && ( maxPerStageDescriptorUniformBuffers == rhs.maxPerStageDescriptorUniformBuffers ) + && ( maxPerStageDescriptorStorageBuffers == rhs.maxPerStageDescriptorStorageBuffers ) + && ( maxPerStageDescriptorSampledImages == rhs.maxPerStageDescriptorSampledImages ) + && ( maxPerStageDescriptorStorageImages == rhs.maxPerStageDescriptorStorageImages ) + && ( maxPerStageDescriptorInputAttachments == rhs.maxPerStageDescriptorInputAttachments ) + && ( maxPerStageResources == rhs.maxPerStageResources ) + && ( maxDescriptorSetSamplers == rhs.maxDescriptorSetSamplers ) + && ( maxDescriptorSetUniformBuffers == rhs.maxDescriptorSetUniformBuffers ) + && ( maxDescriptorSetUniformBuffersDynamic == rhs.maxDescriptorSetUniformBuffersDynamic ) + && ( 
maxDescriptorSetStorageBuffers == rhs.maxDescriptorSetStorageBuffers ) + && ( maxDescriptorSetStorageBuffersDynamic == rhs.maxDescriptorSetStorageBuffersDynamic ) + && ( maxDescriptorSetSampledImages == rhs.maxDescriptorSetSampledImages ) + && ( maxDescriptorSetStorageImages == rhs.maxDescriptorSetStorageImages ) + && ( maxDescriptorSetInputAttachments == rhs.maxDescriptorSetInputAttachments ) + && ( maxVertexInputAttributes == rhs.maxVertexInputAttributes ) + && ( maxVertexInputBindings == rhs.maxVertexInputBindings ) + && ( maxVertexInputAttributeOffset == rhs.maxVertexInputAttributeOffset ) + && ( maxVertexInputBindingStride == rhs.maxVertexInputBindingStride ) + && ( maxVertexOutputComponents == rhs.maxVertexOutputComponents ) + && ( maxTessellationGenerationLevel == rhs.maxTessellationGenerationLevel ) + && ( maxTessellationPatchSize == rhs.maxTessellationPatchSize ) + && ( maxTessellationControlPerVertexInputComponents == rhs.maxTessellationControlPerVertexInputComponents ) + && ( maxTessellationControlPerVertexOutputComponents == rhs.maxTessellationControlPerVertexOutputComponents ) + && ( maxTessellationControlPerPatchOutputComponents == rhs.maxTessellationControlPerPatchOutputComponents ) + && ( maxTessellationControlTotalOutputComponents == rhs.maxTessellationControlTotalOutputComponents ) + && ( maxTessellationEvaluationInputComponents == rhs.maxTessellationEvaluationInputComponents ) + && ( maxTessellationEvaluationOutputComponents == rhs.maxTessellationEvaluationOutputComponents ) + && ( maxGeometryShaderInvocations == rhs.maxGeometryShaderInvocations ) + && ( maxGeometryInputComponents == rhs.maxGeometryInputComponents ) + && ( maxGeometryOutputComponents == rhs.maxGeometryOutputComponents ) + && ( maxGeometryOutputVertices == rhs.maxGeometryOutputVertices ) + && ( maxGeometryTotalOutputComponents == rhs.maxGeometryTotalOutputComponents ) + && ( maxFragmentInputComponents == rhs.maxFragmentInputComponents ) + && ( maxFragmentOutputAttachments == rhs.maxFragmentOutputAttachments ) + && ( maxFragmentDualSrcAttachments == rhs.maxFragmentDualSrcAttachments ) + && ( maxFragmentCombinedOutputResources == rhs.maxFragmentCombinedOutputResources ) + && ( maxComputeSharedMemorySize == rhs.maxComputeSharedMemorySize ) + && ( maxComputeWorkGroupCount == rhs.maxComputeWorkGroupCount ) + && ( maxComputeWorkGroupInvocations == rhs.maxComputeWorkGroupInvocations ) + && ( maxComputeWorkGroupSize == rhs.maxComputeWorkGroupSize ) + && ( subPixelPrecisionBits == rhs.subPixelPrecisionBits ) + && ( subTexelPrecisionBits == rhs.subTexelPrecisionBits ) + && ( mipmapPrecisionBits == rhs.mipmapPrecisionBits ) + && ( maxDrawIndexedIndexValue == rhs.maxDrawIndexedIndexValue ) + && ( maxDrawIndirectCount == rhs.maxDrawIndirectCount ) + && ( maxSamplerLodBias == rhs.maxSamplerLodBias ) + && ( maxSamplerAnisotropy == rhs.maxSamplerAnisotropy ) + && ( maxViewports == rhs.maxViewports ) + && ( maxViewportDimensions == rhs.maxViewportDimensions ) + && ( viewportBoundsRange == rhs.viewportBoundsRange ) + && ( viewportSubPixelBits == rhs.viewportSubPixelBits ) + && ( minMemoryMapAlignment == rhs.minMemoryMapAlignment ) + && ( minTexelBufferOffsetAlignment == rhs.minTexelBufferOffsetAlignment ) + && ( minUniformBufferOffsetAlignment == rhs.minUniformBufferOffsetAlignment ) + && ( minStorageBufferOffsetAlignment == rhs.minStorageBufferOffsetAlignment ) + && ( minTexelOffset == rhs.minTexelOffset ) + && ( maxTexelOffset == rhs.maxTexelOffset ) + && ( minTexelGatherOffset == rhs.minTexelGatherOffset ) + && ( 
maxTexelGatherOffset == rhs.maxTexelGatherOffset ) + && ( minInterpolationOffset == rhs.minInterpolationOffset ) + && ( maxInterpolationOffset == rhs.maxInterpolationOffset ) + && ( subPixelInterpolationOffsetBits == rhs.subPixelInterpolationOffsetBits ) + && ( maxFramebufferWidth == rhs.maxFramebufferWidth ) + && ( maxFramebufferHeight == rhs.maxFramebufferHeight ) + && ( maxFramebufferLayers == rhs.maxFramebufferLayers ) + && ( framebufferColorSampleCounts == rhs.framebufferColorSampleCounts ) + && ( framebufferDepthSampleCounts == rhs.framebufferDepthSampleCounts ) + && ( framebufferStencilSampleCounts == rhs.framebufferStencilSampleCounts ) + && ( framebufferNoAttachmentsSampleCounts == rhs.framebufferNoAttachmentsSampleCounts ) + && ( maxColorAttachments == rhs.maxColorAttachments ) + && ( sampledImageColorSampleCounts == rhs.sampledImageColorSampleCounts ) + && ( sampledImageIntegerSampleCounts == rhs.sampledImageIntegerSampleCounts ) + && ( sampledImageDepthSampleCounts == rhs.sampledImageDepthSampleCounts ) + && ( sampledImageStencilSampleCounts == rhs.sampledImageStencilSampleCounts ) + && ( storageImageSampleCounts == rhs.storageImageSampleCounts ) + && ( maxSampleMaskWords == rhs.maxSampleMaskWords ) + && ( timestampComputeAndGraphics == rhs.timestampComputeAndGraphics ) + && ( timestampPeriod == rhs.timestampPeriod ) + && ( maxClipDistances == rhs.maxClipDistances ) + && ( maxCullDistances == rhs.maxCullDistances ) + && ( maxCombinedClipAndCullDistances == rhs.maxCombinedClipAndCullDistances ) + && ( discreteQueuePriorities == rhs.discreteQueuePriorities ) + && ( pointSizeRange == rhs.pointSizeRange ) + && ( lineWidthRange == rhs.lineWidthRange ) + && ( pointSizeGranularity == rhs.pointSizeGranularity ) + && ( lineWidthGranularity == rhs.lineWidthGranularity ) + && ( strictLines == rhs.strictLines ) + && ( standardSampleLocations == rhs.standardSampleLocations ) + && ( optimalBufferCopyOffsetAlignment == rhs.optimalBufferCopyOffsetAlignment ) + && ( optimalBufferCopyRowPitchAlignment == rhs.optimalBufferCopyRowPitchAlignment ) + && ( nonCoherentAtomSize == rhs.nonCoherentAtomSize ); + } + + bool operator!=( PhysicalDeviceLimits const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t maxImageDimension1D = {}; + uint32_t maxImageDimension2D = {}; + uint32_t maxImageDimension3D = {}; + uint32_t maxImageDimensionCube = {}; + uint32_t maxImageArrayLayers = {}; + uint32_t maxTexelBufferElements = {}; + uint32_t maxUniformBufferRange = {}; + uint32_t maxStorageBufferRange = {}; + uint32_t maxPushConstantsSize = {}; + uint32_t maxMemoryAllocationCount = {}; + uint32_t maxSamplerAllocationCount = {}; + VULKAN_HPP_NAMESPACE::DeviceSize bufferImageGranularity = {}; + VULKAN_HPP_NAMESPACE::DeviceSize sparseAddressSpaceSize = {}; + uint32_t maxBoundDescriptorSets = {}; + uint32_t maxPerStageDescriptorSamplers = {}; + uint32_t maxPerStageDescriptorUniformBuffers = {}; + uint32_t maxPerStageDescriptorStorageBuffers = {}; + uint32_t maxPerStageDescriptorSampledImages = {}; + uint32_t maxPerStageDescriptorStorageImages = {}; + uint32_t maxPerStageDescriptorInputAttachments = {}; + uint32_t maxPerStageResources = {}; + uint32_t maxDescriptorSetSamplers = {}; + uint32_t maxDescriptorSetUniformBuffers = {}; + uint32_t maxDescriptorSetUniformBuffersDynamic = {}; + uint32_t maxDescriptorSetStorageBuffers = {}; + uint32_t maxDescriptorSetStorageBuffersDynamic = {}; + uint32_t maxDescriptorSetSampledImages = {}; + uint32_t 
maxDescriptorSetStorageImages = {}; + uint32_t maxDescriptorSetInputAttachments = {}; + uint32_t maxVertexInputAttributes = {}; + uint32_t maxVertexInputBindings = {}; + uint32_t maxVertexInputAttributeOffset = {}; + uint32_t maxVertexInputBindingStride = {}; + uint32_t maxVertexOutputComponents = {}; + uint32_t maxTessellationGenerationLevel = {}; + uint32_t maxTessellationPatchSize = {}; + uint32_t maxTessellationControlPerVertexInputComponents = {}; + uint32_t maxTessellationControlPerVertexOutputComponents = {}; + uint32_t maxTessellationControlPerPatchOutputComponents = {}; + uint32_t maxTessellationControlTotalOutputComponents = {}; + uint32_t maxTessellationEvaluationInputComponents = {}; + uint32_t maxTessellationEvaluationOutputComponents = {}; + uint32_t maxGeometryShaderInvocations = {}; + uint32_t maxGeometryInputComponents = {}; + uint32_t maxGeometryOutputComponents = {}; + uint32_t maxGeometryOutputVertices = {}; + uint32_t maxGeometryTotalOutputComponents = {}; + uint32_t maxFragmentInputComponents = {}; + uint32_t maxFragmentOutputAttachments = {}; + uint32_t maxFragmentDualSrcAttachments = {}; + uint32_t maxFragmentCombinedOutputResources = {}; + uint32_t maxComputeSharedMemorySize = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D maxComputeWorkGroupCount = {}; + uint32_t maxComputeWorkGroupInvocations = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D maxComputeWorkGroupSize = {}; + uint32_t subPixelPrecisionBits = {}; + uint32_t subTexelPrecisionBits = {}; + uint32_t mipmapPrecisionBits = {}; + uint32_t maxDrawIndexedIndexValue = {}; + uint32_t maxDrawIndirectCount = {}; + float maxSamplerLodBias = {}; + float maxSamplerAnisotropy = {}; + uint32_t maxViewports = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D maxViewportDimensions = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D viewportBoundsRange = {}; + uint32_t viewportSubPixelBits = {}; + size_t minMemoryMapAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize minTexelBufferOffsetAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize minUniformBufferOffsetAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize minStorageBufferOffsetAlignment = {}; + int32_t minTexelOffset = {}; + uint32_t maxTexelOffset = {}; + int32_t minTexelGatherOffset = {}; + uint32_t maxTexelGatherOffset = {}; + float minInterpolationOffset = {}; + float maxInterpolationOffset = {}; + uint32_t subPixelInterpolationOffsetBits = {}; + uint32_t maxFramebufferWidth = {}; + uint32_t maxFramebufferHeight = {}; + uint32_t maxFramebufferLayers = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferColorSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferDepthSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferStencilSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferNoAttachmentsSampleCounts = {}; + uint32_t maxColorAttachments = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageColorSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageIntegerSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageDepthSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampledImageStencilSampleCounts = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags storageImageSampleCounts = {}; + uint32_t maxSampleMaskWords = {}; + VULKAN_HPP_NAMESPACE::Bool32 timestampComputeAndGraphics = {}; + float timestampPeriod = {}; + uint32_t maxClipDistances = {}; + uint32_t maxCullDistances = {}; + uint32_t maxCombinedClipAndCullDistances = {}; + uint32_t discreteQueuePriorities = {}; + 
VULKAN_HPP_NAMESPACE::ArrayWrapper1D pointSizeRange = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D lineWidthRange = {}; + float pointSizeGranularity = {}; + float lineWidthGranularity = {}; + VULKAN_HPP_NAMESPACE::Bool32 strictLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 standardSampleLocations = {}; + VULKAN_HPP_NAMESPACE::DeviceSize optimalBufferCopyOffsetAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize optimalBufferCopyRowPitchAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize nonCoherentAtomSize = {}; + + }; + static_assert( sizeof( PhysicalDeviceLimits ) == sizeof( VkPhysicalDeviceLimits ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct PhysicalDeviceSparseProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseProperties(VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DBlockShape_ = {}, VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DMultisampleBlockShape_ = {}, VULKAN_HPP_NAMESPACE::Bool32 residencyStandard3DBlockShape_ = {}, VULKAN_HPP_NAMESPACE::Bool32 residencyAlignedMipSize_ = {}, VULKAN_HPP_NAMESPACE::Bool32 residencyNonResidentStrict_ = {}) VULKAN_HPP_NOEXCEPT + : residencyStandard2DBlockShape( residencyStandard2DBlockShape_ ), residencyStandard2DMultisampleBlockShape( residencyStandard2DMultisampleBlockShape_ ), residencyStandard3DBlockShape( residencyStandard3DBlockShape_ ), residencyAlignedMipSize( residencyAlignedMipSize_ ), residencyNonResidentStrict( residencyNonResidentStrict_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseProperties( PhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSparseProperties( VkPhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSparseProperties & operator=( VkPhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSparseProperties & operator=( PhysicalDeviceSparseProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSparseProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceSparseProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSparseProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSparseProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceSparseProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( residencyStandard2DBlockShape == rhs.residencyStandard2DBlockShape ) + && ( residencyStandard2DMultisampleBlockShape == rhs.residencyStandard2DMultisampleBlockShape ) + && ( residencyStandard3DBlockShape == rhs.residencyStandard3DBlockShape ) + && ( residencyAlignedMipSize == rhs.residencyAlignedMipSize ) + && ( residencyNonResidentStrict == rhs.residencyNonResidentStrict ); + } + + bool operator!=( PhysicalDeviceSparseProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DBlockShape = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard2DMultisampleBlockShape = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyStandard3DBlockShape = {}; + VULKAN_HPP_NAMESPACE::Bool32 
residencyAlignedMipSize = {}; + VULKAN_HPP_NAMESPACE::Bool32 residencyNonResidentStrict = {}; + + }; + static_assert( sizeof( PhysicalDeviceSparseProperties ) == sizeof( VkPhysicalDeviceSparseProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct PhysicalDeviceProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties(uint32_t apiVersion_ = {}, uint32_t driverVersion_ = {}, uint32_t vendorID_ = {}, uint32_t deviceID_ = {}, VULKAN_HPP_NAMESPACE::PhysicalDeviceType deviceType_ = VULKAN_HPP_NAMESPACE::PhysicalDeviceType::eOther, std::array const& deviceName_ = {}, std::array const& pipelineCacheUUID_ = {}, VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits limits_ = {}, VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties sparseProperties_ = {}) VULKAN_HPP_NOEXCEPT + : apiVersion( apiVersion_ ), driverVersion( driverVersion_ ), vendorID( vendorID_ ), deviceID( deviceID_ ), deviceType( deviceType_ ), deviceName( deviceName_ ), pipelineCacheUUID( pipelineCacheUUID_ ), limits( limits_ ), sparseProperties( sparseProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties( PhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceProperties( VkPhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceProperties & operator=( VkPhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceProperties & operator=( PhysicalDeviceProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( apiVersion == rhs.apiVersion ) + && ( driverVersion == rhs.driverVersion ) + && ( vendorID == rhs.vendorID ) + && ( deviceID == rhs.deviceID ) + && ( deviceType == rhs.deviceType ) + && ( deviceName == rhs.deviceName ) + && ( pipelineCacheUUID == rhs.pipelineCacheUUID ) + && ( limits == rhs.limits ) + && ( sparseProperties == rhs.sparseProperties ); + } + + bool operator!=( PhysicalDeviceProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t apiVersion = {}; + uint32_t driverVersion = {}; + uint32_t vendorID = {}; + uint32_t deviceID = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceType deviceType = VULKAN_HPP_NAMESPACE::PhysicalDeviceType::eOther; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D deviceName = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D pipelineCacheUUID = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceLimits limits = {}; + VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseProperties sparseProperties = {}; + + }; + static_assert( sizeof( PhysicalDeviceProperties ) == sizeof( VkPhysicalDeviceProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct PhysicalDeviceProperties2
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceProperties2;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties_ = {} ) VULKAN_HPP_NOEXCEPT
+      : properties( properties_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceProperties2( PhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceProperties2( VkPhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceProperties2 & operator=( VkPhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceProperties2 & operator=( PhysicalDeviceProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceProperties2 ) );
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceProperties2 const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceProperties2*>( this );
+    }
+
+    operator VkPhysicalDeviceProperties2 &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceProperties2*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceProperties2 const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( properties == rhs.properties );
+    }
+
+    bool operator!=( PhysicalDeviceProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceProperties2;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceProperties2 ) == sizeof( VkPhysicalDeviceProperties2 ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceProperties2>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceProperties2; + }; + using PhysicalDeviceProperties2KHR = PhysicalDeviceProperties2; + + struct QueryPoolPerformanceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueryPoolPerformanceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR QueryPoolPerformanceCreateInfoKHR(uint32_t queueFamilyIndex_ = {}, uint32_t counterIndexCount_ = {}, const uint32_t* pCounterIndices_ = {}) VULKAN_HPP_NOEXCEPT + : queueFamilyIndex( queueFamilyIndex_ ), counterIndexCount( counterIndexCount_ ), pCounterIndices( pCounterIndices_ ) + {} + + VULKAN_HPP_CONSTEXPR QueryPoolPerformanceCreateInfoKHR( QueryPoolPerformanceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + QueryPoolPerformanceCreateInfoKHR( VkQueryPoolPerformanceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + QueryPoolPerformanceCreateInfoKHR( uint32_t queueFamilyIndex_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & counterIndices_ ) + : queueFamilyIndex( queueFamilyIndex_ ), counterIndexCount( static_cast( counterIndices_.size() ) ), pCounterIndices( counterIndices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + QueryPoolPerformanceCreateInfoKHR & operator=( VkQueryPoolPerformanceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + QueryPoolPerformanceCreateInfoKHR & operator=( QueryPoolPerformanceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( QueryPoolPerformanceCreateInfoKHR ) ); + return *this; + } + + QueryPoolPerformanceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + QueryPoolPerformanceCreateInfoKHR & setQueueFamilyIndex( uint32_t queueFamilyIndex_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndex = queueFamilyIndex_; + return *this; + } + + QueryPoolPerformanceCreateInfoKHR & setCounterIndexCount( uint32_t counterIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + counterIndexCount = counterIndexCount_; + return *this; + } + + QueryPoolPerformanceCreateInfoKHR & setPCounterIndices( const uint32_t* pCounterIndices_ ) VULKAN_HPP_NOEXCEPT + { + pCounterIndices = pCounterIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + QueryPoolPerformanceCreateInfoKHR & setCounterIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & counterIndices_ ) VULKAN_HPP_NOEXCEPT + { + counterIndexCount = static_cast( counterIndices_.size() ); + pCounterIndices = counterIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkQueryPoolPerformanceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkQueryPoolPerformanceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( QueryPoolPerformanceCreateInfoKHR const& ) const = default; +#else + bool operator==( QueryPoolPerformanceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( queueFamilyIndex == rhs.queueFamilyIndex ) + && ( counterIndexCount == rhs.counterIndexCount ) + && ( 
pCounterIndices == rhs.pCounterIndices ); + } + + bool operator!=( QueryPoolPerformanceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueryPoolPerformanceCreateInfoKHR; + const void* pNext = {}; + uint32_t queueFamilyIndex = {}; + uint32_t counterIndexCount = {}; + const uint32_t* pCounterIndices = {}; + + }; + static_assert( sizeof( QueryPoolPerformanceCreateInfoKHR ) == sizeof( VkQueryPoolPerformanceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = QueryPoolPerformanceCreateInfoKHR; + }; + + struct QueueFamilyProperties + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR QueueFamilyProperties(VULKAN_HPP_NAMESPACE::QueueFlags queueFlags_ = {}, uint32_t queueCount_ = {}, uint32_t timestampValidBits_ = {}, VULKAN_HPP_NAMESPACE::Extent3D minImageTransferGranularity_ = {}) VULKAN_HPP_NOEXCEPT + : queueFlags( queueFlags_ ), queueCount( queueCount_ ), timestampValidBits( timestampValidBits_ ), minImageTransferGranularity( minImageTransferGranularity_ ) + {} + + VULKAN_HPP_CONSTEXPR QueueFamilyProperties( QueueFamilyProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + QueueFamilyProperties( VkQueueFamilyProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + QueueFamilyProperties & operator=( VkQueueFamilyProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + QueueFamilyProperties & operator=( QueueFamilyProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( QueueFamilyProperties ) ); + return *this; + } + + + operator VkQueueFamilyProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkQueueFamilyProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( QueueFamilyProperties const& ) const = default; +#else + bool operator==( QueueFamilyProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( queueFlags == rhs.queueFlags ) + && ( queueCount == rhs.queueCount ) + && ( timestampValidBits == rhs.timestampValidBits ) + && ( minImageTransferGranularity == rhs.minImageTransferGranularity ); + } + + bool operator!=( QueueFamilyProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::QueueFlags queueFlags = {}; + uint32_t queueCount = {}; + uint32_t timestampValidBits = {}; + VULKAN_HPP_NAMESPACE::Extent3D minImageTransferGranularity = {}; + + }; + static_assert( sizeof( QueueFamilyProperties ) == sizeof( VkQueueFamilyProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct QueueFamilyProperties2
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueueFamilyProperties2;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR QueueFamilyProperties2( VULKAN_HPP_NAMESPACE::QueueFamilyProperties queueFamilyProperties_ = {} ) VULKAN_HPP_NOEXCEPT
+      : queueFamilyProperties( queueFamilyProperties_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR QueueFamilyProperties2( QueueFamilyProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    QueueFamilyProperties2( VkQueueFamilyProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    QueueFamilyProperties2 & operator=( VkQueueFamilyProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::QueueFamilyProperties2 const *>( &rhs );
+      return *this;
+    }
+
+    QueueFamilyProperties2 & operator=( QueueFamilyProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( QueueFamilyProperties2 ) );
+      return *this;
+    }
+
+
+    operator VkQueueFamilyProperties2 const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkQueueFamilyProperties2*>( this );
+    }
+
+    operator VkQueueFamilyProperties2 &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkQueueFamilyProperties2*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( QueueFamilyProperties2 const& ) const = default;
+#else
+    bool operator==( QueueFamilyProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( queueFamilyProperties == rhs.queueFamilyProperties );
+    }
+
+    bool operator!=( QueueFamilyProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueueFamilyProperties2;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::QueueFamilyProperties queueFamilyProperties = {};
+
+  };
+  static_assert( sizeof( QueueFamilyProperties2 ) == sizeof( VkQueueFamilyProperties2 ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<QueueFamilyProperties2>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = QueueFamilyProperties2; + }; + using QueueFamilyProperties2KHR = QueueFamilyProperties2; + + struct PhysicalDeviceSparseImageFormatInfo2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSparseImageFormatInfo2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseImageFormatInfo2(VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::ImageType type_ = VULKAN_HPP_NAMESPACE::ImageType::e1D, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, VULKAN_HPP_NAMESPACE::ImageTiling tiling_ = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal) VULKAN_HPP_NOEXCEPT + : format( format_ ), type( type_ ), samples( samples_ ), usage( usage_ ), tiling( tiling_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSparseImageFormatInfo2( PhysicalDeviceSparseImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSparseImageFormatInfo2( VkPhysicalDeviceSparseImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSparseImageFormatInfo2 & operator=( VkPhysicalDeviceSparseImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & operator=( PhysicalDeviceSparseImageFormatInfo2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSparseImageFormatInfo2 ) ); + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT + { + format = format_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & setType( VULKAN_HPP_NAMESPACE::ImageType type_ ) VULKAN_HPP_NOEXCEPT + { + type = type_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & setSamples( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples_ ) VULKAN_HPP_NOEXCEPT + { + samples = samples_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & setUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + PhysicalDeviceSparseImageFormatInfo2 & setTiling( VULKAN_HPP_NAMESPACE::ImageTiling tiling_ ) VULKAN_HPP_NOEXCEPT + { + tiling = tiling_; + return *this; + } + + + operator VkPhysicalDeviceSparseImageFormatInfo2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSparseImageFormatInfo2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSparseImageFormatInfo2 const& ) const = default; +#else + bool operator==( PhysicalDeviceSparseImageFormatInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( format == rhs.format ) + && ( type == rhs.type ) + && ( samples == rhs.samples ) + && ( usage == rhs.usage ) + && ( tiling == rhs.tiling ); + } + + bool operator!=( PhysicalDeviceSparseImageFormatInfo2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + 
+ public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSparseImageFormatInfo2; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined; + VULKAN_HPP_NAMESPACE::ImageType type = VULKAN_HPP_NAMESPACE::ImageType::e1D; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage = {}; + VULKAN_HPP_NAMESPACE::ImageTiling tiling = VULKAN_HPP_NAMESPACE::ImageTiling::eOptimal; + + }; + static_assert( sizeof( PhysicalDeviceSparseImageFormatInfo2 ) == sizeof( VkPhysicalDeviceSparseImageFormatInfo2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceSparseImageFormatInfo2; + }; + using PhysicalDeviceSparseImageFormatInfo2KHR = PhysicalDeviceSparseImageFormatInfo2; + + struct SparseImageFormatProperties2 + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSparseImageFormatProperties2; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SparseImageFormatProperties2(VULKAN_HPP_NAMESPACE::SparseImageFormatProperties properties_ = {}) VULKAN_HPP_NOEXCEPT + : properties( properties_ ) + {} + + VULKAN_HPP_CONSTEXPR SparseImageFormatProperties2( SparseImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SparseImageFormatProperties2( VkSparseImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SparseImageFormatProperties2 & operator=( VkSparseImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SparseImageFormatProperties2 & operator=( SparseImageFormatProperties2 const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SparseImageFormatProperties2 ) ); + return *this; + } + + + operator VkSparseImageFormatProperties2 const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSparseImageFormatProperties2 &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SparseImageFormatProperties2 const& ) const = default; +#else + bool operator==( SparseImageFormatProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( properties == rhs.properties ); + } + + bool operator!=( SparseImageFormatProperties2 const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSparseImageFormatProperties2; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SparseImageFormatProperties properties = {}; + + }; + static_assert( sizeof( SparseImageFormatProperties2 ) == sizeof( VkSparseImageFormatProperties2 ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SparseImageFormatProperties2; + }; + using SparseImageFormatProperties2KHR = SparseImageFormatProperties2; + + struct FramebufferMixedSamplesCombinationNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFramebufferMixedSamplesCombinationNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FramebufferMixedSamplesCombinationNV(VULKAN_HPP_NAMESPACE::CoverageReductionModeNV coverageReductionMode_ = VULKAN_HPP_NAMESPACE::CoverageReductionModeNV::eMerge, VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::SampleCountFlags depthStencilSamples_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags colorSamples_ = {}) VULKAN_HPP_NOEXCEPT + : coverageReductionMode( coverageReductionMode_ ), rasterizationSamples( rasterizationSamples_ ), depthStencilSamples( depthStencilSamples_ ), colorSamples( colorSamples_ ) + {} + + VULKAN_HPP_CONSTEXPR FramebufferMixedSamplesCombinationNV( FramebufferMixedSamplesCombinationNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FramebufferMixedSamplesCombinationNV( VkFramebufferMixedSamplesCombinationNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FramebufferMixedSamplesCombinationNV & operator=( VkFramebufferMixedSamplesCombinationNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FramebufferMixedSamplesCombinationNV & operator=( FramebufferMixedSamplesCombinationNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FramebufferMixedSamplesCombinationNV ) ); + return *this; + } + + + operator VkFramebufferMixedSamplesCombinationNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFramebufferMixedSamplesCombinationNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FramebufferMixedSamplesCombinationNV const& ) const = default; +#else + bool operator==( FramebufferMixedSamplesCombinationNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( coverageReductionMode == rhs.coverageReductionMode ) + && ( rasterizationSamples == rhs.rasterizationSamples ) + && ( depthStencilSamples == rhs.depthStencilSamples ) + && ( colorSamples == rhs.colorSamples ); + } + + bool operator!=( FramebufferMixedSamplesCombinationNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFramebufferMixedSamplesCombinationNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::CoverageReductionModeNV coverageReductionMode = VULKAN_HPP_NAMESPACE::CoverageReductionModeNV::eMerge; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits rasterizationSamples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::SampleCountFlags depthStencilSamples = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags colorSamples = {}; + + }; + static_assert( sizeof( FramebufferMixedSamplesCombinationNV ) == sizeof( VkFramebufferMixedSamplesCombinationNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = FramebufferMixedSamplesCombinationNV; + }; + + struct SurfaceCapabilities2EXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceCapabilities2EXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceCapabilities2EXT(uint32_t minImageCount_ = {}, uint32_t maxImageCount_ = {}, VULKAN_HPP_NAMESPACE::Extent2D currentExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D minImageExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxImageExtent_ = {}, uint32_t maxImageArrayLayers_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR currentTransform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::CompositeAlphaFlagsKHR supportedCompositeAlpha_ = {}, VULKAN_HPP_NAMESPACE::ImageUsageFlags supportedUsageFlags_ = {}, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT supportedSurfaceCounters_ = {}) VULKAN_HPP_NOEXCEPT + : minImageCount( minImageCount_ ), maxImageCount( maxImageCount_ ), currentExtent( currentExtent_ ), minImageExtent( minImageExtent_ ), maxImageExtent( maxImageExtent_ ), maxImageArrayLayers( maxImageArrayLayers_ ), supportedTransforms( supportedTransforms_ ), currentTransform( currentTransform_ ), supportedCompositeAlpha( supportedCompositeAlpha_ ), supportedUsageFlags( supportedUsageFlags_ ), supportedSurfaceCounters( supportedSurfaceCounters_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceCapabilities2EXT( SurfaceCapabilities2EXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceCapabilities2EXT( VkSurfaceCapabilities2EXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceCapabilities2EXT & operator=( VkSurfaceCapabilities2EXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceCapabilities2EXT & operator=( SurfaceCapabilities2EXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceCapabilities2EXT ) ); + return *this; + } + + + operator VkSurfaceCapabilities2EXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceCapabilities2EXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceCapabilities2EXT const& ) const = default; +#else + bool operator==( SurfaceCapabilities2EXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minImageCount == rhs.minImageCount ) + && ( maxImageCount == rhs.maxImageCount ) + && ( currentExtent == rhs.currentExtent ) + && ( minImageExtent == rhs.minImageExtent ) + && ( maxImageExtent == rhs.maxImageExtent ) + && ( maxImageArrayLayers == rhs.maxImageArrayLayers ) + && ( supportedTransforms == rhs.supportedTransforms ) + && ( currentTransform == rhs.currentTransform ) + && ( supportedCompositeAlpha == rhs.supportedCompositeAlpha ) + && ( supportedUsageFlags == rhs.supportedUsageFlags ) + && ( supportedSurfaceCounters == rhs.supportedSurfaceCounters ); + } + + bool operator!=( SurfaceCapabilities2EXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceCapabilities2EXT; + void* pNext = {}; + uint32_t 
minImageCount = {}; + uint32_t maxImageCount = {}; + VULKAN_HPP_NAMESPACE::Extent2D currentExtent = {}; + VULKAN_HPP_NAMESPACE::Extent2D minImageExtent = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxImageExtent = {}; + uint32_t maxImageArrayLayers = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR currentTransform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + VULKAN_HPP_NAMESPACE::CompositeAlphaFlagsKHR supportedCompositeAlpha = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags supportedUsageFlags = {}; + VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT supportedSurfaceCounters = {}; + + }; + static_assert( sizeof( SurfaceCapabilities2EXT ) == sizeof( VkSurfaceCapabilities2EXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SurfaceCapabilities2EXT; + }; + + struct SurfaceCapabilitiesKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceCapabilitiesKHR(uint32_t minImageCount_ = {}, uint32_t maxImageCount_ = {}, VULKAN_HPP_NAMESPACE::Extent2D currentExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D minImageExtent_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxImageExtent_ = {}, uint32_t maxImageArrayLayers_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR currentTransform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, VULKAN_HPP_NAMESPACE::CompositeAlphaFlagsKHR supportedCompositeAlpha_ = {}, VULKAN_HPP_NAMESPACE::ImageUsageFlags supportedUsageFlags_ = {}) VULKAN_HPP_NOEXCEPT + : minImageCount( minImageCount_ ), maxImageCount( maxImageCount_ ), currentExtent( currentExtent_ ), minImageExtent( minImageExtent_ ), maxImageExtent( maxImageExtent_ ), maxImageArrayLayers( maxImageArrayLayers_ ), supportedTransforms( supportedTransforms_ ), currentTransform( currentTransform_ ), supportedCompositeAlpha( supportedCompositeAlpha_ ), supportedUsageFlags( supportedUsageFlags_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceCapabilitiesKHR( SurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceCapabilitiesKHR( VkSurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceCapabilitiesKHR & operator=( VkSurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceCapabilitiesKHR & operator=( SurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceCapabilitiesKHR ) ); + return *this; + } + + + operator VkSurfaceCapabilitiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceCapabilitiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceCapabilitiesKHR const& ) const = default; +#else + bool operator==( SurfaceCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( minImageCount == rhs.minImageCount ) + && ( maxImageCount == rhs.maxImageCount ) + && ( currentExtent == rhs.currentExtent ) + && ( minImageExtent == rhs.minImageExtent ) + && ( maxImageExtent == rhs.maxImageExtent ) + && ( maxImageArrayLayers == rhs.maxImageArrayLayers ) + && ( supportedTransforms == 
rhs.supportedTransforms ) + && ( currentTransform == rhs.currentTransform ) + && ( supportedCompositeAlpha == rhs.supportedCompositeAlpha ) + && ( supportedUsageFlags == rhs.supportedUsageFlags ); + } + + bool operator!=( SurfaceCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t minImageCount = {}; + uint32_t maxImageCount = {}; + VULKAN_HPP_NAMESPACE::Extent2D currentExtent = {}; + VULKAN_HPP_NAMESPACE::Extent2D minImageExtent = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxImageExtent = {}; + uint32_t maxImageArrayLayers = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagsKHR supportedTransforms = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR currentTransform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + VULKAN_HPP_NAMESPACE::CompositeAlphaFlagsKHR supportedCompositeAlpha = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags supportedUsageFlags = {}; + + }; + static_assert( sizeof( SurfaceCapabilitiesKHR ) == sizeof( VkSurfaceCapabilitiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct SurfaceCapabilities2KHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceCapabilities2KHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceCapabilities2KHR(VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR surfaceCapabilities_ = {}) VULKAN_HPP_NOEXCEPT + : surfaceCapabilities( surfaceCapabilities_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceCapabilities2KHR( SurfaceCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceCapabilities2KHR( VkSurfaceCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceCapabilities2KHR & operator=( VkSurfaceCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceCapabilities2KHR & operator=( SurfaceCapabilities2KHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceCapabilities2KHR ) ); + return *this; + } + + + operator VkSurfaceCapabilities2KHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceCapabilities2KHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceCapabilities2KHR const& ) const = default; +#else + bool operator==( SurfaceCapabilities2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surfaceCapabilities == rhs.surfaceCapabilities ); + } + + bool operator!=( SurfaceCapabilities2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceCapabilities2KHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR surfaceCapabilities = {}; + + }; + static_assert( sizeof( SurfaceCapabilities2KHR ) == sizeof( VkSurfaceCapabilities2KHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eSurfaceCapabilities2KHR>
+  {
+    using Type = SurfaceCapabilities2KHR;
+  };
+
+  struct SurfaceFormatKHR
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR SurfaceFormatKHR( VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined, VULKAN_HPP_NAMESPACE::ColorSpaceKHR colorSpace_ = VULKAN_HPP_NAMESPACE::ColorSpaceKHR::eSrgbNonlinear ) VULKAN_HPP_NOEXCEPT
+      : format( format_ ), colorSpace( colorSpace_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR SurfaceFormatKHR( SurfaceFormatKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SurfaceFormatKHR( VkSurfaceFormatKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SurfaceFormatKHR & operator=( VkSurfaceFormatKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SurfaceFormatKHR const *>( &rhs );
+      return *this;
+    }
+
+    SurfaceFormatKHR & operator=( SurfaceFormatKHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( SurfaceFormatKHR ) );
+      return *this;
+    }
+
+
+    operator VkSurfaceFormatKHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSurfaceFormatKHR*>( this );
+    }
+
+    operator VkSurfaceFormatKHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSurfaceFormatKHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( SurfaceFormatKHR const& ) const = default;
+#else
+    bool operator==( SurfaceFormatKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( format == rhs.format )
+          && ( colorSpace == rhs.colorSpace );
+    }
+
+    bool operator!=( SurfaceFormatKHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined;
+    VULKAN_HPP_NAMESPACE::ColorSpaceKHR colorSpace = VULKAN_HPP_NAMESPACE::ColorSpaceKHR::eSrgbNonlinear;
+
+  };
+  static_assert( sizeof( SurfaceFormatKHR ) == sizeof( VkSurfaceFormatKHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<SurfaceFormatKHR>::value, "struct wrapper is not a standard layout!" );
+
+  struct SurfaceFormat2KHR
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceFormat2KHR;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR SurfaceFormat2KHR( VULKAN_HPP_NAMESPACE::SurfaceFormatKHR surfaceFormat_ = {} ) VULKAN_HPP_NOEXCEPT
+      : surfaceFormat( surfaceFormat_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR SurfaceFormat2KHR( SurfaceFormat2KHR const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SurfaceFormat2KHR( VkSurfaceFormat2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SurfaceFormat2KHR & operator=( VkSurfaceFormat2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR const *>( &rhs );
+      return *this;
+    }
+
+    SurfaceFormat2KHR & operator=( SurfaceFormat2KHR const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( SurfaceFormat2KHR ) );
+      return *this;
+    }
+
+
+    operator VkSurfaceFormat2KHR const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSurfaceFormat2KHR*>( this );
+    }
+
+    operator VkSurfaceFormat2KHR &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSurfaceFormat2KHR*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( SurfaceFormat2KHR const& ) const = default;
+#else
+    bool operator==( SurfaceFormat2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( surfaceFormat == rhs.surfaceFormat );
+    }
+
+    bool operator!=( SurfaceFormat2KHR const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceFormat2KHR;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::SurfaceFormatKHR surfaceFormat = {};
+
+  };
+  static_assert( sizeof( SurfaceFormat2KHR ) == sizeof( VkSurfaceFormat2KHR ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<SurfaceFormat2KHR>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = SurfaceFormat2KHR; + }; + + struct PhysicalDeviceToolPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceToolPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceToolPropertiesEXT(std::array const& name_ = {}, std::array const& version_ = {}, VULKAN_HPP_NAMESPACE::ToolPurposeFlagsEXT purposes_ = {}, std::array const& description_ = {}, std::array const& layer_ = {}) VULKAN_HPP_NOEXCEPT + : name( name_ ), version( version_ ), purposes( purposes_ ), description( description_ ), layer( layer_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceToolPropertiesEXT( PhysicalDeviceToolPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceToolPropertiesEXT( VkPhysicalDeviceToolPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceToolPropertiesEXT & operator=( VkPhysicalDeviceToolPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceToolPropertiesEXT & operator=( PhysicalDeviceToolPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceToolPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceToolPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceToolPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceToolPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceToolPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( name == rhs.name ) + && ( version == rhs.version ) + && ( purposes == rhs.purposes ) + && ( description == rhs.description ) + && ( layer == rhs.layer ); + } + + bool operator!=( PhysicalDeviceToolPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceToolPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D name = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D version = {}; + VULKAN_HPP_NAMESPACE::ToolPurposeFlagsEXT purposes = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D description = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D layer = {}; + + }; + static_assert( sizeof( PhysicalDeviceToolPropertiesEXT ) == sizeof( VkPhysicalDeviceToolPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceToolPropertiesEXT; + }; + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDevice = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + class PhysicalDevice + { + public: + using CType = VkPhysicalDevice; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::ePhysicalDevice; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::ePhysicalDevice; + + public: + VULKAN_HPP_CONSTEXPR PhysicalDevice() VULKAN_HPP_NOEXCEPT + : m_physicalDevice(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevice( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_physicalDevice(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT PhysicalDevice( VkPhysicalDevice physicalDevice ) VULKAN_HPP_NOEXCEPT + : m_physicalDevice( physicalDevice ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + PhysicalDevice & operator=(VkPhysicalDevice physicalDevice) VULKAN_HPP_NOEXCEPT + { + m_physicalDevice = physicalDevice; + return *this; + } +#endif + + PhysicalDevice & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_physicalDevice = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevice const& ) const = default; +#else + bool operator==( PhysicalDevice const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice == rhs.m_physicalDevice; + } + + bool operator!=(PhysicalDevice const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice != rhs.m_physicalDevice; + } + + bool operator<(PhysicalDevice const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice < rhs.m_physicalDevice; + } +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + template + VULKAN_HPP_NODISCARD Result acquireXlibDisplayEXT( Display* dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + + template + VULKAN_HPP_NODISCARD Result createDevice( const VULKAN_HPP_NAMESPACE::DeviceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Device* pDevice, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDevice( const DeviceCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDeviceUnique( const DeviceCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DisplayModeKHR* pMode, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDisplayModeKHRUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateDeviceExtensionProperties( Optional layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = ExtensionPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateDeviceExtensionProperties( Optional layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumerateDeviceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateDeviceLayerProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = LayerPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateDeviceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, uint32_t* pCounterCount, VULKAN_HPP_NAMESPACE::PerformanceCounterKHR* pCounters, VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionKHR* pCounterDescriptions, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy const &counters, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy const &counters, Allocator const& vectorAllocator, Dispatch const &d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename PerformanceCounterDescriptionKHRAllocator = std::allocator, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType, std::vector>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename PerformanceCounterDescriptionKHRAllocator = std::allocator, typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B1 = PerformanceCounterKHRAllocator, typename B2 = PerformanceCounterDescriptionKHRAllocator, typename std::enable_if::value && std::is_same::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType, std::vector>>::type enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, PerformanceCounterKHRAllocator & performanceCounterKHRAllocator, PerformanceCounterDescriptionKHRAllocator & performanceCounterDescriptionKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayModeProperties2KHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModeProperties2KHRAllocator & displayModeProperties2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT 
) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayModePropertiesKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModePropertiesKHRAllocator & displayModePropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayPlaneCapabilities2KHR( const VULKAN_HPP_NAMESPACE::DisplayPlaneInfo2KHR* pDisplayPlaneInfo, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplays, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, DisplayKHRAllocator & displayKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getCalibrateableTimeDomainsEXT( uint32_t* pTimeDomainCount, VULKAN_HPP_NAMESPACE::TimeDomainEXT* pTimeDomains, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getCalibrateableTimeDomainsEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = TimeDomainEXTAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getCalibrateableTimeDomainsEXT( TimeDomainEXTAllocator & timeDomainEXTAllocator, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getCooperativeMatrixPropertiesNV( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::CooperativeMatrixPropertiesNV* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getCooperativeMatrixPropertiesNV( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = CooperativeMatrixPropertiesNVAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getCooperativeMatrixPropertiesNV( CooperativeMatrixPropertiesNVAllocator & cooperativeMatrixPropertiesNVAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + template + Bool32 getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB* dfb, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlaneProperties2KHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPlaneProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayPlaneProperties2KHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPlaneProperties2KHR( DisplayPlaneProperties2KHRAllocator & displayPlaneProperties2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPlanePropertiesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayPlanePropertiesKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPlanePropertiesKHR( DisplayPlanePropertiesKHRAllocator & displayPlanePropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template 
+ VULKAN_HPP_NODISCARD Result getDisplayProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayProperties2KHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayProperties2KHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayProperties2KHR( DisplayProperties2KHRAllocator & displayProperties2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getDisplayPropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPropertiesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = DisplayPropertiesKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getDisplayPropertiesKHR( DisplayPropertiesKHRAllocator & displayPropertiesKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getExternalBufferProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalBufferPropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalBufferProperties getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getExternalFenceProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalFenceProperties 
getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalFencePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalFenceProperties getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getExternalSemaphoreProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getExternalSemaphorePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const 
VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures getFeatures( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getFeatures2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFeatures2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 getFeatures2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getFeatures2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties* pFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties2 getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::FormatProperties2 getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + 
VULKAN_HPP_NODISCARD Result getFragmentShadingRatesKHR( uint32_t* pFragmentShadingRateCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getFragmentShadingRatesKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceFragmentShadingRateKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getFragmentShadingRatesKHR( PhysicalDeviceFragmentShadingRateKHRAllocator & physicalDeviceFragmentShadingRateKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ImageFormatProperties* pImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result getImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getImageFormatProperties2KHR( const 
PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getMemoryProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties getMemoryProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 getMemoryProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getMemoryProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getMemoryProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 getMemoryProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getMemoryProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pRectCount, VULKAN_HPP_NAMESPACE::Rect2D* pRects, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = Rect2DAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Rect2DAllocator & rect2DAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + 
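Aside (not part of the vendored header): a minimal usage sketch of how the enhanced-mode PhysicalDevice queries declared in this hunk are typically called. It assumes a valid vk::PhysicalDevice, the default dispatcher, exceptions enabled, and VULKAN_HPP_DISABLE_ENHANCED_MODE not defined; the chosen format and the vk::PhysicalDeviceVulkan12Features extension struct are only illustrative.

#include <vulkan/vulkan.hpp>

// Illustrative only: value-returning overloads wrap the pointer/count C entry points
// declared above, so no manual two-call enumeration is needed.
void queryPhysicalDeviceInfo( vk::PhysicalDevice physicalDevice )
{
  vk::PhysicalDeviceFeatures         features         = physicalDevice.getFeatures();
  vk::PhysicalDeviceMemoryProperties memoryProperties = physicalDevice.getMemoryProperties();
  vk::FormatProperties               formatProperties =
      physicalDevice.getFormatProperties( vk::Format::eR8G8B8A8Unorm );

  // getFeatures2 can also return a StructureChain, pulling an extension struct in one call.
  auto chain = physicalDevice.getFeatures2<vk::PhysicalDeviceFeatures2,
                                           vk::PhysicalDeviceVulkan12Features>();
  vk::PhysicalDeviceVulkan12Features vulkan12 = chain.get<vk::PhysicalDeviceVulkan12Features>();

  (void)features; (void)memoryProperties; (void)formatProperties; (void)vulkan12;
}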
template + void getProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties getProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 getProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; + template + VULKAN_HPP_NODISCARD StructureChain getProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getQueueFamilyPerformanceQueryPassesKHR( const VULKAN_HPP_NAMESPACE::QueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD uint32_t getQueueFamilyPerformanceQueryPassesKHR( const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties* pQueueFamilyProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = QueueFamilyPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties( QueueFamilyPropertiesAllocator & queueFamilyPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD 
std::vector getQueueFamilyProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = QueueFamilyProperties2Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = StructureChainAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2( StructureChainAllocator & structureChainAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = QueueFamilyProperties2Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2KHR( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2KHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = StructureChainAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getQueueFamilyProperties2KHR( StructureChainAllocator & structureChainAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageFormatPropertiesAllocator, typename std::enable_if::value, 
int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, SparseImageFormatPropertiesAllocator & sparseImageFormatPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void getSparseImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageFormatProperties2Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + void getSparseImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD std::vector getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SparseImageFormatProperties2Allocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD std::vector getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSupportedFramebufferMixedSamplesCombinationsNV( uint32_t* pCombinationCount, VULKAN_HPP_NAMESPACE::FramebufferMixedSamplesCombinationNV* pCombinations, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSupportedFramebufferMixedSamplesCombinationsNV( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = FramebufferMixedSamplesCombinationNVAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type 
getSupportedFramebufferMixedSamplesCombinationsNV( FramebufferMixedSamplesCombinationNVAllocator & framebufferMixedSamplesCombinationNVAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfaceCapabilities2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType>::type getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfaceFormats2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR* pSurfaceFormats, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SurfaceFormat2KHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, SurfaceFormat2KHRAllocator & surfaceFormat2KHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormatKHR* 
pSurfaceFormats, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = SurfaceFormatKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, SurfaceFormatKHRAllocator & surfaceFormatKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result getSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PresentModeKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PresentModeKHRAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::Bool32* pSupported, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result getToolPropertiesEXT( uint32_t* pToolCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceToolPropertiesEXT* pToolProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getToolPropertiesEXT( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceToolPropertiesEXTAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type getToolPropertiesEXT( PhysicalDeviceToolPropertiesEXTAllocator & physicalDeviceToolPropertiesEXTAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + Bool32 getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Bool32 getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + template + VULKAN_HPP_NODISCARD Result getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplay, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + 
template + VULKAN_HPP_INLINE typename ResultValueType>::type getRandROutputDisplayEXTUnique( Display & dpy, RROutput rrOutput, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + Result releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#else + template + typename ResultValueType::type releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkPhysicalDevice() const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_physicalDevice == VK_NULL_HANDLE; + } + + private: + VkPhysicalDevice m_physicalDevice; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::PhysicalDevice ) == sizeof( VkPhysicalDevice ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::PhysicalDevice; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PhysicalDevice; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::PhysicalDevice; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + struct DeviceGroupDeviceCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupDeviceCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupDeviceCreateInfo(uint32_t physicalDeviceCount_ = {}, const VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices_ = {}) VULKAN_HPP_NOEXCEPT + : physicalDeviceCount( physicalDeviceCount_ ), pPhysicalDevices( pPhysicalDevices_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupDeviceCreateInfo( DeviceGroupDeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupDeviceCreateInfo( VkDeviceGroupDeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupDeviceCreateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & physicalDevices_ ) + : physicalDeviceCount( static_cast( physicalDevices_.size() ) ), pPhysicalDevices( physicalDevices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupDeviceCreateInfo & operator=( VkDeviceGroupDeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupDeviceCreateInfo & operator=( DeviceGroupDeviceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupDeviceCreateInfo ) ); + return *this; + } + + DeviceGroupDeviceCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupDeviceCreateInfo & setPhysicalDeviceCount( uint32_t physicalDeviceCount_ ) VULKAN_HPP_NOEXCEPT + { + 
physicalDeviceCount = physicalDeviceCount_; + return *this; + } + + DeviceGroupDeviceCreateInfo & setPPhysicalDevices( const VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices_ ) VULKAN_HPP_NOEXCEPT + { + pPhysicalDevices = pPhysicalDevices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupDeviceCreateInfo & setPhysicalDevices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & physicalDevices_ ) VULKAN_HPP_NOEXCEPT + { + physicalDeviceCount = static_cast( physicalDevices_.size() ); + pPhysicalDevices = physicalDevices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDeviceGroupDeviceCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupDeviceCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupDeviceCreateInfo const& ) const = default; +#else + bool operator==( DeviceGroupDeviceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( physicalDeviceCount == rhs.physicalDeviceCount ) + && ( pPhysicalDevices == rhs.pPhysicalDevices ); + } + + bool operator!=( DeviceGroupDeviceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupDeviceCreateInfo; + const void* pNext = {}; + uint32_t physicalDeviceCount = {}; + const VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices = {}; + + }; + static_assert( sizeof( DeviceGroupDeviceCreateInfo ) == sizeof( VkDeviceGroupDeviceCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceGroupDeviceCreateInfo; + }; + using DeviceGroupDeviceCreateInfoKHR = DeviceGroupDeviceCreateInfo; + + struct DeviceGroupPresentInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupPresentInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupPresentInfoKHR(uint32_t swapchainCount_ = {}, const uint32_t* pDeviceMasks_ = {}, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR mode_ = VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR::eLocal) VULKAN_HPP_NOEXCEPT + : swapchainCount( swapchainCount_ ), pDeviceMasks( pDeviceMasks_ ), mode( mode_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupPresentInfoKHR( DeviceGroupPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupPresentInfoKHR( VkDeviceGroupPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupPresentInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceMasks_, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR mode_ = VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR::eLocal ) + : swapchainCount( static_cast( deviceMasks_.size() ) ), pDeviceMasks( deviceMasks_.data() ), mode( mode_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupPresentInfoKHR & operator=( VkDeviceGroupPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupPresentInfoKHR & operator=( DeviceGroupPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupPresentInfoKHR ) ); + return *this; + } + + DeviceGroupPresentInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupPresentInfoKHR & setSwapchainCount( uint32_t swapchainCount_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = swapchainCount_; + return *this; + } + + DeviceGroupPresentInfoKHR & setPDeviceMasks( const uint32_t* pDeviceMasks_ ) VULKAN_HPP_NOEXCEPT + { + pDeviceMasks = pDeviceMasks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupPresentInfoKHR & setDeviceMasks( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceMasks_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = static_cast( deviceMasks_.size() ); + pDeviceMasks = deviceMasks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DeviceGroupPresentInfoKHR & setMode( VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR mode_ ) VULKAN_HPP_NOEXCEPT + { + mode = mode_; + return *this; + } + + + operator VkDeviceGroupPresentInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupPresentInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupPresentInfoKHR const& ) const = default; +#else + bool operator==( DeviceGroupPresentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pDeviceMasks == rhs.pDeviceMasks ) + && ( mode == rhs.mode ); + } + + bool operator!=( DeviceGroupPresentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + 
return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupPresentInfoKHR; + const void* pNext = {}; + uint32_t swapchainCount = {}; + const uint32_t* pDeviceMasks = {}; + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR mode = VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagBitsKHR::eLocal; + + }; + static_assert( sizeof( DeviceGroupPresentInfoKHR ) == sizeof( VkDeviceGroupPresentInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DeviceGroupPresentInfoKHR; + }; + + struct DeviceGroupRenderPassBeginInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupRenderPassBeginInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupRenderPassBeginInfo(uint32_t deviceMask_ = {}, uint32_t deviceRenderAreaCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D* pDeviceRenderAreas_ = {}) VULKAN_HPP_NOEXCEPT + : deviceMask( deviceMask_ ), deviceRenderAreaCount( deviceRenderAreaCount_ ), pDeviceRenderAreas( pDeviceRenderAreas_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupRenderPassBeginInfo( DeviceGroupRenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupRenderPassBeginInfo( VkDeviceGroupRenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupRenderPassBeginInfo( uint32_t deviceMask_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceRenderAreas_ ) + : deviceMask( deviceMask_ ), deviceRenderAreaCount( static_cast( deviceRenderAreas_.size() ) ), pDeviceRenderAreas( deviceRenderAreas_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupRenderPassBeginInfo & operator=( VkDeviceGroupRenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupRenderPassBeginInfo & operator=( DeviceGroupRenderPassBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupRenderPassBeginInfo ) ); + return *this; + } + + DeviceGroupRenderPassBeginInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupRenderPassBeginInfo & setDeviceMask( uint32_t deviceMask_ ) VULKAN_HPP_NOEXCEPT + { + deviceMask = deviceMask_; + return *this; + } + + DeviceGroupRenderPassBeginInfo & setDeviceRenderAreaCount( uint32_t deviceRenderAreaCount_ ) VULKAN_HPP_NOEXCEPT + { + deviceRenderAreaCount = deviceRenderAreaCount_; + return *this; + } + + DeviceGroupRenderPassBeginInfo & setPDeviceRenderAreas( const VULKAN_HPP_NAMESPACE::Rect2D* pDeviceRenderAreas_ ) VULKAN_HPP_NOEXCEPT + { + pDeviceRenderAreas = pDeviceRenderAreas_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupRenderPassBeginInfo & setDeviceRenderAreas( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & deviceRenderAreas_ ) VULKAN_HPP_NOEXCEPT + { + deviceRenderAreaCount = static_cast( deviceRenderAreas_.size() ); + pDeviceRenderAreas = deviceRenderAreas_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDeviceGroupRenderPassBeginInfo const&() const VULKAN_HPP_NOEXCEPT + { + return 
*reinterpret_cast( this ); + } + + operator VkDeviceGroupRenderPassBeginInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupRenderPassBeginInfo const& ) const = default; +#else + bool operator==( DeviceGroupRenderPassBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceMask == rhs.deviceMask ) + && ( deviceRenderAreaCount == rhs.deviceRenderAreaCount ) + && ( pDeviceRenderAreas == rhs.pDeviceRenderAreas ); + } + + bool operator!=( DeviceGroupRenderPassBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupRenderPassBeginInfo; + const void* pNext = {}; + uint32_t deviceMask = {}; + uint32_t deviceRenderAreaCount = {}; + const VULKAN_HPP_NAMESPACE::Rect2D* pDeviceRenderAreas = {}; + + }; + static_assert( sizeof( DeviceGroupRenderPassBeginInfo ) == sizeof( VkDeviceGroupRenderPassBeginInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DeviceGroupRenderPassBeginInfo; + }; + using DeviceGroupRenderPassBeginInfoKHR = DeviceGroupRenderPassBeginInfo; + + struct DeviceGroupSubmitInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupSubmitInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupSubmitInfo(uint32_t waitSemaphoreCount_ = {}, const uint32_t* pWaitSemaphoreDeviceIndices_ = {}, uint32_t commandBufferCount_ = {}, const uint32_t* pCommandBufferDeviceMasks_ = {}, uint32_t signalSemaphoreCount_ = {}, const uint32_t* pSignalSemaphoreDeviceIndices_ = {}) VULKAN_HPP_NOEXCEPT + : waitSemaphoreCount( waitSemaphoreCount_ ), pWaitSemaphoreDeviceIndices( pWaitSemaphoreDeviceIndices_ ), commandBufferCount( commandBufferCount_ ), pCommandBufferDeviceMasks( pCommandBufferDeviceMasks_ ), signalSemaphoreCount( signalSemaphoreCount_ ), pSignalSemaphoreDeviceIndices( pSignalSemaphoreDeviceIndices_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupSubmitInfo( DeviceGroupSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupSubmitInfo( VkDeviceGroupSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupSubmitInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphoreDeviceIndices_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & commandBufferDeviceMasks_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphoreDeviceIndices_ = {} ) + : waitSemaphoreCount( static_cast( waitSemaphoreDeviceIndices_.size() ) ), pWaitSemaphoreDeviceIndices( waitSemaphoreDeviceIndices_.data() ), commandBufferCount( static_cast( commandBufferDeviceMasks_.size() ) ), pCommandBufferDeviceMasks( commandBufferDeviceMasks_.data() ), signalSemaphoreCount( static_cast( signalSemaphoreDeviceIndices_.size() ) ), pSignalSemaphoreDeviceIndices( signalSemaphoreDeviceIndices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupSubmitInfo & operator=( VkDeviceGroupSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return 
*this; + } + + DeviceGroupSubmitInfo & operator=( DeviceGroupSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupSubmitInfo ) ); + return *this; + } + + DeviceGroupSubmitInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupSubmitInfo & setWaitSemaphoreCount( uint32_t waitSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = waitSemaphoreCount_; + return *this; + } + + DeviceGroupSubmitInfo & setPWaitSemaphoreDeviceIndices( const uint32_t* pWaitSemaphoreDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + pWaitSemaphoreDeviceIndices = pWaitSemaphoreDeviceIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupSubmitInfo & setWaitSemaphoreDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphoreDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreCount = static_cast( waitSemaphoreDeviceIndices_.size() ); + pWaitSemaphoreDeviceIndices = waitSemaphoreDeviceIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DeviceGroupSubmitInfo & setCommandBufferCount( uint32_t commandBufferCount_ ) VULKAN_HPP_NOEXCEPT + { + commandBufferCount = commandBufferCount_; + return *this; + } + + DeviceGroupSubmitInfo & setPCommandBufferDeviceMasks( const uint32_t* pCommandBufferDeviceMasks_ ) VULKAN_HPP_NOEXCEPT + { + pCommandBufferDeviceMasks = pCommandBufferDeviceMasks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupSubmitInfo & setCommandBufferDeviceMasks( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & commandBufferDeviceMasks_ ) VULKAN_HPP_NOEXCEPT + { + commandBufferCount = static_cast( commandBufferDeviceMasks_.size() ); + pCommandBufferDeviceMasks = commandBufferDeviceMasks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + DeviceGroupSubmitInfo & setSignalSemaphoreCount( uint32_t signalSemaphoreCount_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreCount = signalSemaphoreCount_; + return *this; + } + + DeviceGroupSubmitInfo & setPSignalSemaphoreDeviceIndices( const uint32_t* pSignalSemaphoreDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + pSignalSemaphoreDeviceIndices = pSignalSemaphoreDeviceIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DeviceGroupSubmitInfo & setSignalSemaphoreDeviceIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphoreDeviceIndices_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreCount = static_cast( signalSemaphoreDeviceIndices_.size() ); + pSignalSemaphoreDeviceIndices = signalSemaphoreDeviceIndices_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkDeviceGroupSubmitInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupSubmitInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupSubmitInfo const& ) const = default; +#else + bool operator==( DeviceGroupSubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreCount == rhs.waitSemaphoreCount ) + && ( pWaitSemaphoreDeviceIndices == rhs.pWaitSemaphoreDeviceIndices ) + && ( commandBufferCount == rhs.commandBufferCount ) + && ( pCommandBufferDeviceMasks == rhs.pCommandBufferDeviceMasks ) + && ( signalSemaphoreCount == 
rhs.signalSemaphoreCount ) + && ( pSignalSemaphoreDeviceIndices == rhs.pSignalSemaphoreDeviceIndices ); + } + + bool operator!=( DeviceGroupSubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupSubmitInfo; + const void* pNext = {}; + uint32_t waitSemaphoreCount = {}; + const uint32_t* pWaitSemaphoreDeviceIndices = {}; + uint32_t commandBufferCount = {}; + const uint32_t* pCommandBufferDeviceMasks = {}; + uint32_t signalSemaphoreCount = {}; + const uint32_t* pSignalSemaphoreDeviceIndices = {}; + + }; + static_assert( sizeof( DeviceGroupSubmitInfo ) == sizeof( VkDeviceGroupSubmitInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DeviceGroupSubmitInfo; + }; + using DeviceGroupSubmitInfoKHR = DeviceGroupSubmitInfo; + + struct DeviceGroupSwapchainCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceGroupSwapchainCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceGroupSwapchainCreateInfoKHR(VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes_ = {}) VULKAN_HPP_NOEXCEPT + : modes( modes_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceGroupSwapchainCreateInfoKHR( DeviceGroupSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceGroupSwapchainCreateInfoKHR( VkDeviceGroupSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceGroupSwapchainCreateInfoKHR & operator=( VkDeviceGroupSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceGroupSwapchainCreateInfoKHR & operator=( DeviceGroupSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceGroupSwapchainCreateInfoKHR ) ); + return *this; + } + + DeviceGroupSwapchainCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceGroupSwapchainCreateInfoKHR & setModes( VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes_ ) VULKAN_HPP_NOEXCEPT + { + modes = modes_; + return *this; + } + + + operator VkDeviceGroupSwapchainCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceGroupSwapchainCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceGroupSwapchainCreateInfoKHR const& ) const = default; +#else + bool operator==( DeviceGroupSwapchainCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( modes == rhs.modes ); + } + + bool operator!=( DeviceGroupSwapchainCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceGroupSwapchainCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes = {}; + + }; + static_assert( sizeof( DeviceGroupSwapchainCreateInfoKHR ) == sizeof( VkDeviceGroupSwapchainCreateInfoKHR ), "struct and wrapper have 
different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DeviceGroupSwapchainCreateInfoKHR; + }; + + struct DeviceMemoryOverallocationCreateInfoAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceMemoryOverallocationCreateInfoAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceMemoryOverallocationCreateInfoAMD(VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD overallocationBehavior_ = VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD::eDefault) VULKAN_HPP_NOEXCEPT + : overallocationBehavior( overallocationBehavior_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemoryOverallocationCreateInfoAMD( DeviceMemoryOverallocationCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceMemoryOverallocationCreateInfoAMD( VkDeviceMemoryOverallocationCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceMemoryOverallocationCreateInfoAMD & operator=( VkDeviceMemoryOverallocationCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceMemoryOverallocationCreateInfoAMD & operator=( DeviceMemoryOverallocationCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceMemoryOverallocationCreateInfoAMD ) ); + return *this; + } + + DeviceMemoryOverallocationCreateInfoAMD & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceMemoryOverallocationCreateInfoAMD & setOverallocationBehavior( VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD overallocationBehavior_ ) VULKAN_HPP_NOEXCEPT + { + overallocationBehavior = overallocationBehavior_; + return *this; + } + + + operator VkDeviceMemoryOverallocationCreateInfoAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceMemoryOverallocationCreateInfoAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceMemoryOverallocationCreateInfoAMD const& ) const = default; +#else + bool operator==( DeviceMemoryOverallocationCreateInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( overallocationBehavior == rhs.overallocationBehavior ); + } + + bool operator!=( DeviceMemoryOverallocationCreateInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceMemoryOverallocationCreateInfoAMD; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD overallocationBehavior = VULKAN_HPP_NAMESPACE::MemoryOverallocationBehaviorAMD::eDefault; + + }; + static_assert( sizeof( DeviceMemoryOverallocationCreateInfoAMD ) == sizeof( VkDeviceMemoryOverallocationCreateInfoAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceMemoryOverallocationCreateInfoAMD; + }; + + struct DeviceMemoryReportCallbackDataEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceMemoryReportCallbackDataEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceMemoryReportCallbackDataEXT(VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT type_ = VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT::eAllocate, uint64_t memoryObjectId_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize size_ = {}, VULKAN_HPP_NAMESPACE::ObjectType objectType_ = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown, uint64_t objectHandle_ = {}, uint32_t heapIndex_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), type( type_ ), memoryObjectId( memoryObjectId_ ), size( size_ ), objectType( objectType_ ), objectHandle( objectHandle_ ), heapIndex( heapIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemoryReportCallbackDataEXT( DeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceMemoryReportCallbackDataEXT( VkDeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceMemoryReportCallbackDataEXT & operator=( VkDeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceMemoryReportCallbackDataEXT & operator=( DeviceMemoryReportCallbackDataEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceMemoryReportCallbackDataEXT ) ); + return *this; + } + + + operator VkDeviceMemoryReportCallbackDataEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceMemoryReportCallbackDataEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceMemoryReportCallbackDataEXT const& ) const = default; +#else + bool operator==( DeviceMemoryReportCallbackDataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( type == rhs.type ) + && ( memoryObjectId == rhs.memoryObjectId ) + && ( size == rhs.size ) + && ( objectType == rhs.objectType ) + && ( objectHandle == rhs.objectHandle ) + && ( heapIndex == rhs.heapIndex ); + } + + bool operator!=( DeviceMemoryReportCallbackDataEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceMemoryReportCallbackDataEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceMemoryReportFlagsEXT flags = {}; + VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT type = VULKAN_HPP_NAMESPACE::DeviceMemoryReportEventTypeEXT::eAllocate; + uint64_t memoryObjectId = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eUnknown; + uint64_t objectHandle = {}; + uint32_t heapIndex = {}; + + }; + static_assert( sizeof( DeviceMemoryReportCallbackDataEXT ) == sizeof( VkDeviceMemoryReportCallbackDataEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceMemoryReportCallbackDataEXT; + }; + + struct DevicePrivateDataCreateInfoEXT + { + static const bool allowDuplicate = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDevicePrivateDataCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DevicePrivateDataCreateInfoEXT(uint32_t privateDataSlotRequestCount_ = {}) VULKAN_HPP_NOEXCEPT + : privateDataSlotRequestCount( privateDataSlotRequestCount_ ) + {} + + VULKAN_HPP_CONSTEXPR DevicePrivateDataCreateInfoEXT( DevicePrivateDataCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DevicePrivateDataCreateInfoEXT( VkDevicePrivateDataCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DevicePrivateDataCreateInfoEXT & operator=( VkDevicePrivateDataCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DevicePrivateDataCreateInfoEXT & operator=( DevicePrivateDataCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DevicePrivateDataCreateInfoEXT ) ); + return *this; + } + + DevicePrivateDataCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DevicePrivateDataCreateInfoEXT & setPrivateDataSlotRequestCount( uint32_t privateDataSlotRequestCount_ ) VULKAN_HPP_NOEXCEPT + { + privateDataSlotRequestCount = privateDataSlotRequestCount_; + return *this; + } + + + operator VkDevicePrivateDataCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDevicePrivateDataCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DevicePrivateDataCreateInfoEXT const& ) const = default; +#else + bool operator==( DevicePrivateDataCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( privateDataSlotRequestCount == rhs.privateDataSlotRequestCount ); + } + + bool operator!=( DevicePrivateDataCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDevicePrivateDataCreateInfoEXT; + const void* pNext = {}; + uint32_t privateDataSlotRequestCount = {}; + + }; + static_assert( sizeof( DevicePrivateDataCreateInfoEXT ) == sizeof( VkDevicePrivateDataCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DevicePrivateDataCreateInfoEXT; + }; + + struct DeviceQueueGlobalPriorityCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDeviceQueueGlobalPriorityCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DeviceQueueGlobalPriorityCreateInfoEXT(VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT globalPriority_ = VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT::eLow) VULKAN_HPP_NOEXCEPT + : globalPriority( globalPriority_ ) + {} + + VULKAN_HPP_CONSTEXPR DeviceQueueGlobalPriorityCreateInfoEXT( DeviceQueueGlobalPriorityCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DeviceQueueGlobalPriorityCreateInfoEXT( VkDeviceQueueGlobalPriorityCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DeviceQueueGlobalPriorityCreateInfoEXT & operator=( VkDeviceQueueGlobalPriorityCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DeviceQueueGlobalPriorityCreateInfoEXT & operator=( DeviceQueueGlobalPriorityCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DeviceQueueGlobalPriorityCreateInfoEXT ) ); + return *this; + } + + DeviceQueueGlobalPriorityCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DeviceQueueGlobalPriorityCreateInfoEXT & setGlobalPriority( VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT globalPriority_ ) VULKAN_HPP_NOEXCEPT + { + globalPriority = globalPriority_; + return *this; + } + + + operator VkDeviceQueueGlobalPriorityCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDeviceQueueGlobalPriorityCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DeviceQueueGlobalPriorityCreateInfoEXT const& ) const = default; +#else + bool operator==( DeviceQueueGlobalPriorityCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( globalPriority == rhs.globalPriority ); + } + + bool operator!=( DeviceQueueGlobalPriorityCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDeviceQueueGlobalPriorityCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT globalPriority = VULKAN_HPP_NAMESPACE::QueueGlobalPriorityEXT::eLow; + + }; + static_assert( sizeof( DeviceQueueGlobalPriorityCreateInfoEXT ) == sizeof( VkDeviceQueueGlobalPriorityCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DeviceQueueGlobalPriorityCreateInfoEXT; + }; + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + struct DirectFBSurfaceCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDirectfbSurfaceCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DirectFBSurfaceCreateInfoEXT(VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateFlagsEXT flags_ = {}, IDirectFB* dfb_ = {}, IDirectFBSurface* surface_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), dfb( dfb_ ), surface( surface_ ) + {} + + VULKAN_HPP_CONSTEXPR DirectFBSurfaceCreateInfoEXT( DirectFBSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DirectFBSurfaceCreateInfoEXT( VkDirectFBSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DirectFBSurfaceCreateInfoEXT & operator=( VkDirectFBSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DirectFBSurfaceCreateInfoEXT & operator=( DirectFBSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DirectFBSurfaceCreateInfoEXT ) ); + return *this; + } + + DirectFBSurfaceCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DirectFBSurfaceCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DirectFBSurfaceCreateInfoEXT & setDfb( IDirectFB* dfb_ ) VULKAN_HPP_NOEXCEPT + { + dfb = dfb_; + return *this; + } + + DirectFBSurfaceCreateInfoEXT & setSurface( IDirectFBSurface* surface_ ) VULKAN_HPP_NOEXCEPT + { + surface = surface_; + return *this; + } + + + operator VkDirectFBSurfaceCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDirectFBSurfaceCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DirectFBSurfaceCreateInfoEXT const& ) const = default; +#else + bool operator==( DirectFBSurfaceCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( dfb == rhs.dfb ) + && ( surface == rhs.surface ); + } + + bool operator!=( DirectFBSurfaceCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDirectfbSurfaceCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateFlagsEXT flags = {}; + IDirectFB* dfb = {}; + IDirectFBSurface* surface = {}; + + }; + static_assert( sizeof( DirectFBSurfaceCreateInfoEXT ) == sizeof( VkDirectFBSurfaceCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DirectFBSurfaceCreateInfoEXT; + }; +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + struct DispatchIndirectCommand + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DispatchIndirectCommand(uint32_t x_ = {}, uint32_t y_ = {}, uint32_t z_ = {}) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ), z( z_ ) + {} + + VULKAN_HPP_CONSTEXPR DispatchIndirectCommand( DispatchIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DispatchIndirectCommand( VkDispatchIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DispatchIndirectCommand & operator=( VkDispatchIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DispatchIndirectCommand & operator=( DispatchIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DispatchIndirectCommand ) ); + return *this; + } + + DispatchIndirectCommand & setX( uint32_t x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + DispatchIndirectCommand & setY( uint32_t y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + DispatchIndirectCommand & setZ( uint32_t z_ ) VULKAN_HPP_NOEXCEPT + { + z = z_; + return *this; + } + + + operator VkDispatchIndirectCommand const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDispatchIndirectCommand &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DispatchIndirectCommand const& ) const = default; +#else + bool operator==( DispatchIndirectCommand const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( z == rhs.z ); + } + + bool operator!=( DispatchIndirectCommand const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t x = {}; + uint32_t y = {}; + uint32_t z = {}; + + }; + static_assert( sizeof( DispatchIndirectCommand ) == sizeof( VkDispatchIndirectCommand ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct DisplayNativeHdrSurfaceCapabilitiesAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayNativeHdrSurfaceCapabilitiesAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayNativeHdrSurfaceCapabilitiesAMD(VULKAN_HPP_NAMESPACE::Bool32 localDimmingSupport_ = {}) VULKAN_HPP_NOEXCEPT + : localDimmingSupport( localDimmingSupport_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayNativeHdrSurfaceCapabilitiesAMD( DisplayNativeHdrSurfaceCapabilitiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayNativeHdrSurfaceCapabilitiesAMD( VkDisplayNativeHdrSurfaceCapabilitiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayNativeHdrSurfaceCapabilitiesAMD & operator=( VkDisplayNativeHdrSurfaceCapabilitiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayNativeHdrSurfaceCapabilitiesAMD & operator=( DisplayNativeHdrSurfaceCapabilitiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayNativeHdrSurfaceCapabilitiesAMD ) ); + return *this; + } + + + operator VkDisplayNativeHdrSurfaceCapabilitiesAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayNativeHdrSurfaceCapabilitiesAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayNativeHdrSurfaceCapabilitiesAMD const& ) const = default; +#else + bool operator==( DisplayNativeHdrSurfaceCapabilitiesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( localDimmingSupport == rhs.localDimmingSupport ); + } + + bool operator!=( DisplayNativeHdrSurfaceCapabilitiesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayNativeHdrSurfaceCapabilitiesAMD; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 localDimmingSupport = {}; + + }; + static_assert( sizeof( DisplayNativeHdrSurfaceCapabilitiesAMD ) == sizeof( VkDisplayNativeHdrSurfaceCapabilitiesAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayNativeHdrSurfaceCapabilitiesAMD; + }; + + struct DisplayPresentInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplayPresentInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplayPresentInfoKHR(VULKAN_HPP_NAMESPACE::Rect2D srcRect_ = {}, VULKAN_HPP_NAMESPACE::Rect2D dstRect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 persistent_ = {}) VULKAN_HPP_NOEXCEPT + : srcRect( srcRect_ ), dstRect( dstRect_ ), persistent( persistent_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplayPresentInfoKHR( DisplayPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplayPresentInfoKHR( VkDisplayPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplayPresentInfoKHR & operator=( VkDisplayPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplayPresentInfoKHR & operator=( DisplayPresentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplayPresentInfoKHR ) ); + return *this; + } + + DisplayPresentInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DisplayPresentInfoKHR & setSrcRect( VULKAN_HPP_NAMESPACE::Rect2D const & srcRect_ ) VULKAN_HPP_NOEXCEPT + { + srcRect = srcRect_; + return *this; + } + + DisplayPresentInfoKHR & setDstRect( VULKAN_HPP_NAMESPACE::Rect2D const & dstRect_ ) VULKAN_HPP_NOEXCEPT + { + dstRect = dstRect_; + return *this; + } + + DisplayPresentInfoKHR & setPersistent( VULKAN_HPP_NAMESPACE::Bool32 persistent_ ) VULKAN_HPP_NOEXCEPT + { + persistent = persistent_; + return *this; + } + + + operator VkDisplayPresentInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplayPresentInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplayPresentInfoKHR const& ) const = default; +#else + bool operator==( DisplayPresentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcRect == rhs.srcRect ) + && ( dstRect == rhs.dstRect ) + && ( persistent == rhs.persistent ); + } + + bool operator!=( DisplayPresentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplayPresentInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Rect2D srcRect = {}; + VULKAN_HPP_NAMESPACE::Rect2D dstRect = {}; + VULKAN_HPP_NAMESPACE::Bool32 persistent = {}; + + }; + static_assert( sizeof( DisplayPresentInfoKHR ) == sizeof( VkDisplayPresentInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DisplayPresentInfoKHR; + }; + + struct DisplaySurfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDisplaySurfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DisplaySurfaceCreateInfoKHR(VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateFlagsKHR flags_ = {}, VULKAN_HPP_NAMESPACE::DisplayModeKHR displayMode_ = {}, uint32_t planeIndex_ = {}, uint32_t planeStackIndex_ = {}, VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity, float globalAlpha_ = {}, VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR alphaMode_ = VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR::eOpaque, VULKAN_HPP_NAMESPACE::Extent2D imageExtent_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), displayMode( displayMode_ ), planeIndex( planeIndex_ ), planeStackIndex( planeStackIndex_ ), transform( transform_ ), globalAlpha( globalAlpha_ ), alphaMode( alphaMode_ ), imageExtent( imageExtent_ ) + {} + + VULKAN_HPP_CONSTEXPR DisplaySurfaceCreateInfoKHR( DisplaySurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DisplaySurfaceCreateInfoKHR( VkDisplaySurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DisplaySurfaceCreateInfoKHR & operator=( VkDisplaySurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DisplaySurfaceCreateInfoKHR & operator=( DisplaySurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DisplaySurfaceCreateInfoKHR ) ); + return *this; + } + + DisplaySurfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setDisplayMode( VULKAN_HPP_NAMESPACE::DisplayModeKHR displayMode_ ) VULKAN_HPP_NOEXCEPT + { + displayMode = displayMode_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setPlaneIndex( uint32_t planeIndex_ ) VULKAN_HPP_NOEXCEPT + { + planeIndex = planeIndex_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setPlaneStackIndex( uint32_t planeStackIndex_ ) VULKAN_HPP_NOEXCEPT + { + planeStackIndex = planeStackIndex_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setTransform( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ ) VULKAN_HPP_NOEXCEPT + { + transform = transform_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setGlobalAlpha( float globalAlpha_ ) VULKAN_HPP_NOEXCEPT + { + globalAlpha = globalAlpha_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setAlphaMode( VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR alphaMode_ ) VULKAN_HPP_NOEXCEPT + { + alphaMode = alphaMode_; + return *this; + } + + DisplaySurfaceCreateInfoKHR & setImageExtent( VULKAN_HPP_NAMESPACE::Extent2D const & imageExtent_ ) VULKAN_HPP_NOEXCEPT + { + imageExtent = imageExtent_; + return *this; + } + + + operator VkDisplaySurfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDisplaySurfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if 
defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DisplaySurfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( DisplaySurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( displayMode == rhs.displayMode ) + && ( planeIndex == rhs.planeIndex ) + && ( planeStackIndex == rhs.planeStackIndex ) + && ( transform == rhs.transform ) + && ( globalAlpha == rhs.globalAlpha ) + && ( alphaMode == rhs.alphaMode ) + && ( imageExtent == rhs.imageExtent ); + } + + bool operator!=( DisplaySurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDisplaySurfaceCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateFlagsKHR flags = {}; + VULKAN_HPP_NAMESPACE::DisplayModeKHR displayMode = {}; + uint32_t planeIndex = {}; + uint32_t planeStackIndex = {}; + VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity; + float globalAlpha = {}; + VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR alphaMode = VULKAN_HPP_NAMESPACE::DisplayPlaneAlphaFlagBitsKHR::eOpaque; + VULKAN_HPP_NAMESPACE::Extent2D imageExtent = {}; + + }; + static_assert( sizeof( DisplaySurfaceCreateInfoKHR ) == sizeof( VkDisplaySurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = DisplaySurfaceCreateInfoKHR; + }; + + struct DrawIndexedIndirectCommand + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DrawIndexedIndirectCommand(uint32_t indexCount_ = {}, uint32_t instanceCount_ = {}, uint32_t firstIndex_ = {}, int32_t vertexOffset_ = {}, uint32_t firstInstance_ = {}) VULKAN_HPP_NOEXCEPT + : indexCount( indexCount_ ), instanceCount( instanceCount_ ), firstIndex( firstIndex_ ), vertexOffset( vertexOffset_ ), firstInstance( firstInstance_ ) + {} + + VULKAN_HPP_CONSTEXPR DrawIndexedIndirectCommand( DrawIndexedIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DrawIndexedIndirectCommand( VkDrawIndexedIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DrawIndexedIndirectCommand & operator=( VkDrawIndexedIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DrawIndexedIndirectCommand & operator=( DrawIndexedIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DrawIndexedIndirectCommand ) ); + return *this; + } + + DrawIndexedIndirectCommand & setIndexCount( uint32_t indexCount_ ) VULKAN_HPP_NOEXCEPT + { + indexCount = indexCount_; + return *this; + } + + DrawIndexedIndirectCommand & setInstanceCount( uint32_t instanceCount_ ) VULKAN_HPP_NOEXCEPT + { + instanceCount = instanceCount_; + return *this; + } + + DrawIndexedIndirectCommand & setFirstIndex( uint32_t firstIndex_ ) VULKAN_HPP_NOEXCEPT + { + firstIndex = firstIndex_; + return *this; + } + + DrawIndexedIndirectCommand & setVertexOffset( int32_t vertexOffset_ ) VULKAN_HPP_NOEXCEPT + { + vertexOffset = vertexOffset_; + return *this; + } + + DrawIndexedIndirectCommand & setFirstInstance( uint32_t firstInstance_ ) VULKAN_HPP_NOEXCEPT + { + firstInstance = 
firstInstance_; + return *this; + } + + + operator VkDrawIndexedIndirectCommand const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDrawIndexedIndirectCommand &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DrawIndexedIndirectCommand const& ) const = default; +#else + bool operator==( DrawIndexedIndirectCommand const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( indexCount == rhs.indexCount ) + && ( instanceCount == rhs.instanceCount ) + && ( firstIndex == rhs.firstIndex ) + && ( vertexOffset == rhs.vertexOffset ) + && ( firstInstance == rhs.firstInstance ); + } + + bool operator!=( DrawIndexedIndirectCommand const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t indexCount = {}; + uint32_t instanceCount = {}; + uint32_t firstIndex = {}; + int32_t vertexOffset = {}; + uint32_t firstInstance = {}; + + }; + static_assert( sizeof( DrawIndexedIndirectCommand ) == sizeof( VkDrawIndexedIndirectCommand ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct DrawIndirectCommand + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DrawIndirectCommand(uint32_t vertexCount_ = {}, uint32_t instanceCount_ = {}, uint32_t firstVertex_ = {}, uint32_t firstInstance_ = {}) VULKAN_HPP_NOEXCEPT + : vertexCount( vertexCount_ ), instanceCount( instanceCount_ ), firstVertex( firstVertex_ ), firstInstance( firstInstance_ ) + {} + + VULKAN_HPP_CONSTEXPR DrawIndirectCommand( DrawIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DrawIndirectCommand( VkDrawIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DrawIndirectCommand & operator=( VkDrawIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DrawIndirectCommand & operator=( DrawIndirectCommand const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DrawIndirectCommand ) ); + return *this; + } + + DrawIndirectCommand & setVertexCount( uint32_t vertexCount_ ) VULKAN_HPP_NOEXCEPT + { + vertexCount = vertexCount_; + return *this; + } + + DrawIndirectCommand & setInstanceCount( uint32_t instanceCount_ ) VULKAN_HPP_NOEXCEPT + { + instanceCount = instanceCount_; + return *this; + } + + DrawIndirectCommand & setFirstVertex( uint32_t firstVertex_ ) VULKAN_HPP_NOEXCEPT + { + firstVertex = firstVertex_; + return *this; + } + + DrawIndirectCommand & setFirstInstance( uint32_t firstInstance_ ) VULKAN_HPP_NOEXCEPT + { + firstInstance = firstInstance_; + return *this; + } + + + operator VkDrawIndirectCommand const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDrawIndirectCommand &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DrawIndirectCommand const& ) const = default; +#else + bool operator==( DrawIndirectCommand const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( vertexCount == rhs.vertexCount ) + && ( instanceCount == rhs.instanceCount ) + && ( firstVertex == rhs.firstVertex ) + && ( firstInstance == rhs.firstInstance ); + } + + bool operator!=( DrawIndirectCommand const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } 
+#endif + + + + public: + uint32_t vertexCount = {}; + uint32_t instanceCount = {}; + uint32_t firstVertex = {}; + uint32_t firstInstance = {}; + + }; + static_assert( sizeof( DrawIndirectCommand ) == sizeof( VkDrawIndirectCommand ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct DrawMeshTasksIndirectCommandNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DrawMeshTasksIndirectCommandNV(uint32_t taskCount_ = {}, uint32_t firstTask_ = {}) VULKAN_HPP_NOEXCEPT + : taskCount( taskCount_ ), firstTask( firstTask_ ) + {} + + VULKAN_HPP_CONSTEXPR DrawMeshTasksIndirectCommandNV( DrawMeshTasksIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DrawMeshTasksIndirectCommandNV( VkDrawMeshTasksIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DrawMeshTasksIndirectCommandNV & operator=( VkDrawMeshTasksIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DrawMeshTasksIndirectCommandNV & operator=( DrawMeshTasksIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DrawMeshTasksIndirectCommandNV ) ); + return *this; + } + + DrawMeshTasksIndirectCommandNV & setTaskCount( uint32_t taskCount_ ) VULKAN_HPP_NOEXCEPT + { + taskCount = taskCount_; + return *this; + } + + DrawMeshTasksIndirectCommandNV & setFirstTask( uint32_t firstTask_ ) VULKAN_HPP_NOEXCEPT + { + firstTask = firstTask_; + return *this; + } + + + operator VkDrawMeshTasksIndirectCommandNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDrawMeshTasksIndirectCommandNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DrawMeshTasksIndirectCommandNV const& ) const = default; +#else + bool operator==( DrawMeshTasksIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( taskCount == rhs.taskCount ) + && ( firstTask == rhs.firstTask ); + } + + bool operator!=( DrawMeshTasksIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t taskCount = {}; + uint32_t firstTask = {}; + + }; + static_assert( sizeof( DrawMeshTasksIndirectCommandNV ) == sizeof( VkDrawMeshTasksIndirectCommandNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct DrmFormatModifierPropertiesEXT + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesEXT(uint64_t drmFormatModifier_ = {}, uint32_t drmFormatModifierPlaneCount_ = {}, VULKAN_HPP_NAMESPACE::FormatFeatureFlags drmFormatModifierTilingFeatures_ = {}) VULKAN_HPP_NOEXCEPT + : drmFormatModifier( drmFormatModifier_ ), drmFormatModifierPlaneCount( drmFormatModifierPlaneCount_ ), drmFormatModifierTilingFeatures( drmFormatModifierTilingFeatures_ ) + {} + + VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesEXT( DrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DrmFormatModifierPropertiesEXT( VkDrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DrmFormatModifierPropertiesEXT & operator=( VkDrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DrmFormatModifierPropertiesEXT & operator=( DrmFormatModifierPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DrmFormatModifierPropertiesEXT ) ); + return *this; + } + + + operator VkDrmFormatModifierPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDrmFormatModifierPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DrmFormatModifierPropertiesEXT const& ) const = default; +#else + bool operator==( DrmFormatModifierPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( drmFormatModifier == rhs.drmFormatModifier ) + && ( drmFormatModifierPlaneCount == rhs.drmFormatModifierPlaneCount ) + && ( drmFormatModifierTilingFeatures == rhs.drmFormatModifierTilingFeatures ); + } + + bool operator!=( DrmFormatModifierPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint64_t drmFormatModifier = {}; + uint32_t drmFormatModifierPlaneCount = {}; + VULKAN_HPP_NAMESPACE::FormatFeatureFlags drmFormatModifierTilingFeatures = {}; + + }; + static_assert( sizeof( DrmFormatModifierPropertiesEXT ) == sizeof( VkDrmFormatModifierPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct DrmFormatModifierPropertiesListEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eDrmFormatModifierPropertiesListEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesListEXT(uint32_t drmFormatModifierCount_ = {}, VULKAN_HPP_NAMESPACE::DrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties_ = {}) VULKAN_HPP_NOEXCEPT + : drmFormatModifierCount( drmFormatModifierCount_ ), pDrmFormatModifierProperties( pDrmFormatModifierProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR DrmFormatModifierPropertiesListEXT( DrmFormatModifierPropertiesListEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + DrmFormatModifierPropertiesListEXT( VkDrmFormatModifierPropertiesListEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + DrmFormatModifierPropertiesListEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & drmFormatModifierProperties_ ) + : drmFormatModifierCount( static_cast( drmFormatModifierProperties_.size() ) ), pDrmFormatModifierProperties( drmFormatModifierProperties_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + DrmFormatModifierPropertiesListEXT & operator=( VkDrmFormatModifierPropertiesListEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + DrmFormatModifierPropertiesListEXT & operator=( DrmFormatModifierPropertiesListEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( DrmFormatModifierPropertiesListEXT ) ); + return *this; + } + + + operator VkDrmFormatModifierPropertiesListEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkDrmFormatModifierPropertiesListEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DrmFormatModifierPropertiesListEXT const& ) const = default; +#else + bool operator==( DrmFormatModifierPropertiesListEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( drmFormatModifierCount == rhs.drmFormatModifierCount ) + && ( pDrmFormatModifierProperties == rhs.pDrmFormatModifierProperties ); + } + + bool operator!=( DrmFormatModifierPropertiesListEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eDrmFormatModifierPropertiesListEXT; + void* pNext = {}; + uint32_t drmFormatModifierCount = {}; + VULKAN_HPP_NAMESPACE::DrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties = {}; + + }; + static_assert( sizeof( DrmFormatModifierPropertiesListEXT ) == sizeof( VkDrmFormatModifierPropertiesListEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = DrmFormatModifierPropertiesListEXT; + }; + + struct ExportFenceCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportFenceCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportFenceCreateInfo(VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportFenceCreateInfo( ExportFenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportFenceCreateInfo( VkExportFenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportFenceCreateInfo & operator=( VkExportFenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportFenceCreateInfo & operator=( ExportFenceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportFenceCreateInfo ) ); + return *this; + } + + ExportFenceCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportFenceCreateInfo & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExportFenceCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportFenceCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportFenceCreateInfo const& ) const = default; +#else + bool operator==( ExportFenceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportFenceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportFenceCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalFenceHandleTypeFlags handleTypes = {}; + + }; + static_assert( sizeof( ExportFenceCreateInfo ) == sizeof( VkExportFenceCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportFenceCreateInfo; + }; + using ExportFenceCreateInfoKHR = ExportFenceCreateInfo; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportFenceWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportFenceWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportFenceWin32HandleInfoKHR(const SECURITY_ATTRIBUTES* pAttributes_ = {}, DWORD dwAccess_ = {}, LPCWSTR name_ = {}) VULKAN_HPP_NOEXCEPT + : pAttributes( pAttributes_ ), dwAccess( dwAccess_ ), name( name_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportFenceWin32HandleInfoKHR( ExportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportFenceWin32HandleInfoKHR( VkExportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportFenceWin32HandleInfoKHR & operator=( VkExportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportFenceWin32HandleInfoKHR & operator=( ExportFenceWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportFenceWin32HandleInfoKHR ) ); + return *this; + } + + ExportFenceWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportFenceWin32HandleInfoKHR & setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) VULKAN_HPP_NOEXCEPT + { + pAttributes = pAttributes_; + return *this; + } + + ExportFenceWin32HandleInfoKHR & setDwAccess( DWORD dwAccess_ ) VULKAN_HPP_NOEXCEPT + { + dwAccess = dwAccess_; + return *this; + } + + ExportFenceWin32HandleInfoKHR & setName( LPCWSTR name_ ) VULKAN_HPP_NOEXCEPT + { + name = name_; + return *this; + } + + + operator VkExportFenceWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportFenceWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportFenceWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( ExportFenceWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ) + && ( name == rhs.name ); + } + + bool operator!=( ExportFenceWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportFenceWin32HandleInfoKHR; + const void* pNext = {}; + const SECURITY_ATTRIBUTES* pAttributes = {}; + DWORD dwAccess = {}; + LPCWSTR name = {}; + + }; + static_assert( sizeof( ExportFenceWin32HandleInfoKHR ) == sizeof( VkExportFenceWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportFenceWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ExportMemoryAllocateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportMemoryAllocateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportMemoryAllocateInfo(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportMemoryAllocateInfo( ExportMemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportMemoryAllocateInfo( VkExportMemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportMemoryAllocateInfo & operator=( VkExportMemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportMemoryAllocateInfo & operator=( ExportMemoryAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportMemoryAllocateInfo ) ); + return *this; + } + + ExportMemoryAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportMemoryAllocateInfo & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExportMemoryAllocateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportMemoryAllocateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportMemoryAllocateInfo const& ) const = default; +#else + bool operator==( ExportMemoryAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportMemoryAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportMemoryAllocateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes = {}; + + }; + static_assert( sizeof( ExportMemoryAllocateInfo ) == sizeof( VkExportMemoryAllocateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportMemoryAllocateInfo; + }; + using ExportMemoryAllocateInfoKHR = ExportMemoryAllocateInfo; + + struct ExportMemoryAllocateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportMemoryAllocateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportMemoryAllocateInfoNV(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportMemoryAllocateInfoNV( ExportMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportMemoryAllocateInfoNV( VkExportMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportMemoryAllocateInfoNV & operator=( VkExportMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportMemoryAllocateInfoNV & operator=( ExportMemoryAllocateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportMemoryAllocateInfoNV ) ); + return *this; + } + + ExportMemoryAllocateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportMemoryAllocateInfoNV & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExportMemoryAllocateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportMemoryAllocateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportMemoryAllocateInfoNV const& ) const = default; +#else + bool operator==( ExportMemoryAllocateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportMemoryAllocateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportMemoryAllocateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes = {}; + + }; + static_assert( sizeof( ExportMemoryAllocateInfoNV ) == sizeof( VkExportMemoryAllocateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eExportMemoryAllocateInfoNV> + { + using Type = ExportMemoryAllocateInfoNV; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportMemoryWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportMemoryWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportMemoryWin32HandleInfoKHR(const SECURITY_ATTRIBUTES* pAttributes_ = {}, DWORD dwAccess_ = {}, LPCWSTR name_ = {}) VULKAN_HPP_NOEXCEPT + : pAttributes( pAttributes_ ), dwAccess( dwAccess_ ), name( name_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportMemoryWin32HandleInfoKHR( ExportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportMemoryWin32HandleInfoKHR( VkExportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportMemoryWin32HandleInfoKHR & operator=( VkExportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ExportMemoryWin32HandleInfoKHR const *>( &rhs ); + return *this; + } + + ExportMemoryWin32HandleInfoKHR & operator=( ExportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( ExportMemoryWin32HandleInfoKHR ) ); + return *this; + } + + ExportMemoryWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportMemoryWin32HandleInfoKHR & setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) VULKAN_HPP_NOEXCEPT + { + pAttributes = pAttributes_; + return *this; + } + + ExportMemoryWin32HandleInfoKHR & setDwAccess( DWORD dwAccess_ ) VULKAN_HPP_NOEXCEPT + { + dwAccess = dwAccess_; + return *this; + } + + ExportMemoryWin32HandleInfoKHR & setName( LPCWSTR name_ ) VULKAN_HPP_NOEXCEPT + { + name = name_; + return *this; + } + + + operator VkExportMemoryWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>( this ); + } + + operator VkExportMemoryWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkExportMemoryWin32HandleInfoKHR*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportMemoryWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( ExportMemoryWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ) + && ( name == rhs.name ); + } + + bool operator!=( ExportMemoryWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportMemoryWin32HandleInfoKHR; + const void* pNext = {}; + const SECURITY_ATTRIBUTES* pAttributes = {}; + DWORD dwAccess = {}; + LPCWSTR name = {}; + + }; + static_assert( sizeof( ExportMemoryWin32HandleInfoKHR ) == sizeof( VkExportMemoryWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<ExportMemoryWin32HandleInfoKHR>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportMemoryWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportMemoryWin32HandleInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportMemoryWin32HandleInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportMemoryWin32HandleInfoNV(const SECURITY_ATTRIBUTES* pAttributes_ = {}, DWORD dwAccess_ = {}) VULKAN_HPP_NOEXCEPT + : pAttributes( pAttributes_ ), dwAccess( dwAccess_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportMemoryWin32HandleInfoNV( ExportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportMemoryWin32HandleInfoNV( VkExportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportMemoryWin32HandleInfoNV & operator=( VkExportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportMemoryWin32HandleInfoNV & operator=( ExportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportMemoryWin32HandleInfoNV ) ); + return *this; + } + + ExportMemoryWin32HandleInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportMemoryWin32HandleInfoNV & setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) VULKAN_HPP_NOEXCEPT + { + pAttributes = pAttributes_; + return *this; + } + + ExportMemoryWin32HandleInfoNV & setDwAccess( DWORD dwAccess_ ) VULKAN_HPP_NOEXCEPT + { + dwAccess = dwAccess_; + return *this; + } + + + operator VkExportMemoryWin32HandleInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportMemoryWin32HandleInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportMemoryWin32HandleInfoNV const& ) const = default; +#else + bool operator==( ExportMemoryWin32HandleInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ); + } + + bool operator!=( ExportMemoryWin32HandleInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportMemoryWin32HandleInfoNV; + const void* pNext = {}; + const SECURITY_ATTRIBUTES* pAttributes = {}; + DWORD dwAccess = {}; + + }; + static_assert( sizeof( ExportMemoryWin32HandleInfoNV ) == sizeof( VkExportMemoryWin32HandleInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportMemoryWin32HandleInfoNV; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct ExportSemaphoreCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportSemaphoreCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportSemaphoreCreateInfo(VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportSemaphoreCreateInfo( ExportSemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportSemaphoreCreateInfo( VkExportSemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportSemaphoreCreateInfo & operator=( VkExportSemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportSemaphoreCreateInfo & operator=( ExportSemaphoreCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportSemaphoreCreateInfo ) ); + return *this; + } + + ExportSemaphoreCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportSemaphoreCreateInfo & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExportSemaphoreCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportSemaphoreCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportSemaphoreCreateInfo const& ) const = default; +#else + bool operator==( ExportSemaphoreCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExportSemaphoreCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportSemaphoreCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalSemaphoreHandleTypeFlags handleTypes = {}; + + }; + static_assert( sizeof( ExportSemaphoreCreateInfo ) == sizeof( VkExportSemaphoreCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportSemaphoreCreateInfo; + }; + using ExportSemaphoreCreateInfoKHR = ExportSemaphoreCreateInfo; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ExportSemaphoreWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExportSemaphoreWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExportSemaphoreWin32HandleInfoKHR(const SECURITY_ATTRIBUTES* pAttributes_ = {}, DWORD dwAccess_ = {}, LPCWSTR name_ = {}) VULKAN_HPP_NOEXCEPT + : pAttributes( pAttributes_ ), dwAccess( dwAccess_ ), name( name_ ) + {} + + VULKAN_HPP_CONSTEXPR ExportSemaphoreWin32HandleInfoKHR( ExportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExportSemaphoreWin32HandleInfoKHR( VkExportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExportSemaphoreWin32HandleInfoKHR & operator=( VkExportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR & operator=( ExportSemaphoreWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExportSemaphoreWin32HandleInfoKHR ) ); + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR & setPAttributes( const SECURITY_ATTRIBUTES* pAttributes_ ) VULKAN_HPP_NOEXCEPT + { + pAttributes = pAttributes_; + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR & setDwAccess( DWORD dwAccess_ ) VULKAN_HPP_NOEXCEPT + { + dwAccess = dwAccess_; + return *this; + } + + ExportSemaphoreWin32HandleInfoKHR & setName( LPCWSTR name_ ) VULKAN_HPP_NOEXCEPT + { + name = name_; + return *this; + } + + + operator VkExportSemaphoreWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExportSemaphoreWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExportSemaphoreWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( ExportSemaphoreWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pAttributes == rhs.pAttributes ) + && ( dwAccess == rhs.dwAccess ) + && ( name == rhs.name ); + } + + bool operator!=( ExportSemaphoreWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExportSemaphoreWin32HandleInfoKHR; + const void* pNext = {}; + const SECURITY_ATTRIBUTES* pAttributes = {}; + DWORD dwAccess = {}; + LPCWSTR name = {}; + + }; + static_assert( sizeof( ExportSemaphoreWin32HandleInfoKHR ) == sizeof( VkExportSemaphoreWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExportSemaphoreWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct ExternalFormatANDROID + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalFormatANDROID; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalFormatANDROID(uint64_t externalFormat_ = {}) VULKAN_HPP_NOEXCEPT + : externalFormat( externalFormat_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalFormatANDROID( ExternalFormatANDROID const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalFormatANDROID( VkExternalFormatANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalFormatANDROID & operator=( VkExternalFormatANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalFormatANDROID & operator=( ExternalFormatANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalFormatANDROID ) ); + return *this; + } + + ExternalFormatANDROID & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExternalFormatANDROID & setExternalFormat( uint64_t externalFormat_ ) VULKAN_HPP_NOEXCEPT + { + externalFormat = externalFormat_; + return *this; + } + + + operator VkExternalFormatANDROID const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalFormatANDROID &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalFormatANDROID const& ) const = default; +#else + bool operator==( ExternalFormatANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( externalFormat == rhs.externalFormat ); + } + + bool operator!=( ExternalFormatANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalFormatANDROID; + void* pNext = {}; + uint64_t externalFormat = {}; + + }; + static_assert( sizeof( ExternalFormatANDROID ) == sizeof( VkExternalFormatANDROID ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalFormatANDROID; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + struct ExternalImageFormatProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalImageFormatProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalImageFormatProperties(VULKAN_HPP_NAMESPACE::ExternalMemoryProperties externalMemoryProperties_ = {}) VULKAN_HPP_NOEXCEPT + : externalMemoryProperties( externalMemoryProperties_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalImageFormatProperties( ExternalImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalImageFormatProperties( VkExternalImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalImageFormatProperties & operator=( VkExternalImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalImageFormatProperties & operator=( ExternalImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalImageFormatProperties ) ); + return *this; + } + + + operator VkExternalImageFormatProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalImageFormatProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalImageFormatProperties const& ) const = default; +#else + bool operator==( ExternalImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( externalMemoryProperties == rhs.externalMemoryProperties ); + } + + bool operator!=( ExternalImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalImageFormatProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryProperties externalMemoryProperties = {}; + + }; + static_assert( sizeof( ExternalImageFormatProperties ) == sizeof( VkExternalImageFormatProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalImageFormatProperties; + }; + using ExternalImageFormatPropertiesKHR = ExternalImageFormatProperties; + + struct ExternalMemoryBufferCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalMemoryBufferCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalMemoryBufferCreateInfo(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalMemoryBufferCreateInfo( ExternalMemoryBufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalMemoryBufferCreateInfo( VkExternalMemoryBufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalMemoryBufferCreateInfo & operator=( VkExternalMemoryBufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalMemoryBufferCreateInfo & operator=( ExternalMemoryBufferCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalMemoryBufferCreateInfo ) ); + return *this; + } + + ExternalMemoryBufferCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExternalMemoryBufferCreateInfo & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExternalMemoryBufferCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalMemoryBufferCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalMemoryBufferCreateInfo const& ) const = default; +#else + bool operator==( ExternalMemoryBufferCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExternalMemoryBufferCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalMemoryBufferCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes = {}; + + }; + static_assert( sizeof( ExternalMemoryBufferCreateInfo ) == sizeof( VkExternalMemoryBufferCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalMemoryBufferCreateInfo; + }; + using ExternalMemoryBufferCreateInfoKHR = ExternalMemoryBufferCreateInfo; + + struct ExternalMemoryImageCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalMemoryImageCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalMemoryImageCreateInfo(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalMemoryImageCreateInfo( ExternalMemoryImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalMemoryImageCreateInfo( VkExternalMemoryImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalMemoryImageCreateInfo & operator=( VkExternalMemoryImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalMemoryImageCreateInfo & operator=( ExternalMemoryImageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalMemoryImageCreateInfo ) ); + return *this; + } + + ExternalMemoryImageCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExternalMemoryImageCreateInfo & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExternalMemoryImageCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalMemoryImageCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalMemoryImageCreateInfo const& ) const = default; +#else + bool operator==( ExternalMemoryImageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExternalMemoryImageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalMemoryImageCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlags handleTypes = {}; + + }; + static_assert( sizeof( ExternalMemoryImageCreateInfo ) == sizeof( VkExternalMemoryImageCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalMemoryImageCreateInfo; + }; + using ExternalMemoryImageCreateInfoKHR = ExternalMemoryImageCreateInfo; + + struct ExternalMemoryImageCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eExternalMemoryImageCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ExternalMemoryImageCreateInfoNV(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes_ = {}) VULKAN_HPP_NOEXCEPT + : handleTypes( handleTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR ExternalMemoryImageCreateInfoNV( ExternalMemoryImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ExternalMemoryImageCreateInfoNV( VkExternalMemoryImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ExternalMemoryImageCreateInfoNV & operator=( VkExternalMemoryImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ExternalMemoryImageCreateInfoNV & operator=( ExternalMemoryImageCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ExternalMemoryImageCreateInfoNV ) ); + return *this; + } + + ExternalMemoryImageCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ExternalMemoryImageCreateInfoNV & setHandleTypes( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes_ ) VULKAN_HPP_NOEXCEPT + { + handleTypes = handleTypes_; + return *this; + } + + + operator VkExternalMemoryImageCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkExternalMemoryImageCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ExternalMemoryImageCreateInfoNV const& ) const = default; +#else + bool operator==( ExternalMemoryImageCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleTypes == rhs.handleTypes ); + } + + bool operator!=( ExternalMemoryImageCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eExternalMemoryImageCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleTypes = {}; + + }; + static_assert( sizeof( ExternalMemoryImageCreateInfoNV ) == sizeof( VkExternalMemoryImageCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ExternalMemoryImageCreateInfoNV; + }; + + struct FilterCubicImageViewImageFormatPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFilterCubicImageViewImageFormatPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FilterCubicImageViewImageFormatPropertiesEXT(VULKAN_HPP_NAMESPACE::Bool32 filterCubic_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filterCubicMinmax_ = {}) VULKAN_HPP_NOEXCEPT + : filterCubic( filterCubic_ ), filterCubicMinmax( filterCubicMinmax_ ) + {} + + VULKAN_HPP_CONSTEXPR FilterCubicImageViewImageFormatPropertiesEXT( FilterCubicImageViewImageFormatPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FilterCubicImageViewImageFormatPropertiesEXT( VkFilterCubicImageViewImageFormatPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FilterCubicImageViewImageFormatPropertiesEXT & operator=( VkFilterCubicImageViewImageFormatPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FilterCubicImageViewImageFormatPropertiesEXT & operator=( FilterCubicImageViewImageFormatPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FilterCubicImageViewImageFormatPropertiesEXT ) ); + return *this; + } + + + operator VkFilterCubicImageViewImageFormatPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFilterCubicImageViewImageFormatPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FilterCubicImageViewImageFormatPropertiesEXT const& ) const = default; +#else + bool operator==( FilterCubicImageViewImageFormatPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( filterCubic == rhs.filterCubic ) + && ( filterCubicMinmax == rhs.filterCubicMinmax ); + } + + bool operator!=( FilterCubicImageViewImageFormatPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFilterCubicImageViewImageFormatPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 filterCubic = {}; + VULKAN_HPP_NAMESPACE::Bool32 filterCubicMinmax = {}; + + }; + static_assert( sizeof( FilterCubicImageViewImageFormatPropertiesEXT ) == sizeof( VkFilterCubicImageViewImageFormatPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = FilterCubicImageViewImageFormatPropertiesEXT; + }; + + struct FragmentShadingRateAttachmentInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFragmentShadingRateAttachmentInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FragmentShadingRateAttachmentInfoKHR(const VULKAN_HPP_NAMESPACE::AttachmentReference2* pFragmentShadingRateAttachment_ = {}, VULKAN_HPP_NAMESPACE::Extent2D shadingRateAttachmentTexelSize_ = {}) VULKAN_HPP_NOEXCEPT + : pFragmentShadingRateAttachment( pFragmentShadingRateAttachment_ ), shadingRateAttachmentTexelSize( shadingRateAttachmentTexelSize_ ) + {} + + VULKAN_HPP_CONSTEXPR FragmentShadingRateAttachmentInfoKHR( FragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FragmentShadingRateAttachmentInfoKHR( VkFragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FragmentShadingRateAttachmentInfoKHR & operator=( VkFragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & operator=( FragmentShadingRateAttachmentInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FragmentShadingRateAttachmentInfoKHR ) ); + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & setPFragmentShadingRateAttachment( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pFragmentShadingRateAttachment_ ) VULKAN_HPP_NOEXCEPT + { + pFragmentShadingRateAttachment = pFragmentShadingRateAttachment_; + return *this; + } + + FragmentShadingRateAttachmentInfoKHR & setShadingRateAttachmentTexelSize( VULKAN_HPP_NAMESPACE::Extent2D const & shadingRateAttachmentTexelSize_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateAttachmentTexelSize = shadingRateAttachmentTexelSize_; + return *this; + } + + + operator VkFragmentShadingRateAttachmentInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFragmentShadingRateAttachmentInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FragmentShadingRateAttachmentInfoKHR const& ) const = default; +#else + bool operator==( FragmentShadingRateAttachmentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pFragmentShadingRateAttachment == rhs.pFragmentShadingRateAttachment ) + && ( shadingRateAttachmentTexelSize == rhs.shadingRateAttachmentTexelSize ); + } + + bool operator!=( FragmentShadingRateAttachmentInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFragmentShadingRateAttachmentInfoKHR; + const void* pNext = {}; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pFragmentShadingRateAttachment = {}; + VULKAN_HPP_NAMESPACE::Extent2D shadingRateAttachmentTexelSize = {}; + + }; + static_assert( sizeof( FragmentShadingRateAttachmentInfoKHR ) == sizeof( VkFragmentShadingRateAttachmentInfoKHR ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = FragmentShadingRateAttachmentInfoKHR; + }; + + struct FramebufferAttachmentImageInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFramebufferAttachmentImageInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FramebufferAttachmentImageInfo(VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ = {}, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}, uint32_t width_ = {}, uint32_t height_ = {}, uint32_t layerCount_ = {}, uint32_t viewFormatCount_ = {}, const VULKAN_HPP_NAMESPACE::Format* pViewFormats_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), usage( usage_ ), width( width_ ), height( height_ ), layerCount( layerCount_ ), viewFormatCount( viewFormatCount_ ), pViewFormats( pViewFormats_ ) + {} + + VULKAN_HPP_CONSTEXPR FramebufferAttachmentImageInfo( FramebufferAttachmentImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FramebufferAttachmentImageInfo( VkFramebufferAttachmentImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + FramebufferAttachmentImageInfo( VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_, uint32_t width_, uint32_t height_, uint32_t layerCount_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewFormats_ ) + : flags( flags_ ), usage( usage_ ), width( width_ ), height( height_ ), layerCount( layerCount_ ), viewFormatCount( static_cast( viewFormats_.size() ) ), pViewFormats( viewFormats_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FramebufferAttachmentImageInfo & operator=( VkFramebufferAttachmentImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FramebufferAttachmentImageInfo & operator=( FramebufferAttachmentImageInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FramebufferAttachmentImageInfo ) ); + return *this; + } + + FramebufferAttachmentImageInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FramebufferAttachmentImageInfo & setFlags( VULKAN_HPP_NAMESPACE::ImageCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + FramebufferAttachmentImageInfo & setUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + FramebufferAttachmentImageInfo & setWidth( uint32_t width_ ) VULKAN_HPP_NOEXCEPT + { + width = width_; + return *this; + } + + FramebufferAttachmentImageInfo & setHeight( uint32_t height_ ) VULKAN_HPP_NOEXCEPT + { + height = height_; + return *this; + } + + FramebufferAttachmentImageInfo & setLayerCount( uint32_t layerCount_ ) VULKAN_HPP_NOEXCEPT + { + layerCount = layerCount_; + return *this; + } + + FramebufferAttachmentImageInfo & setViewFormatCount( uint32_t viewFormatCount_ ) VULKAN_HPP_NOEXCEPT + { + viewFormatCount = viewFormatCount_; + return *this; + } + + FramebufferAttachmentImageInfo & setPViewFormats( const VULKAN_HPP_NAMESPACE::Format* pViewFormats_ ) VULKAN_HPP_NOEXCEPT + { + pViewFormats = pViewFormats_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + FramebufferAttachmentImageInfo & setViewFormats( 
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewFormats_ ) VULKAN_HPP_NOEXCEPT + { + viewFormatCount = static_cast( viewFormats_.size() ); + pViewFormats = viewFormats_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkFramebufferAttachmentImageInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFramebufferAttachmentImageInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FramebufferAttachmentImageInfo const& ) const = default; +#else + bool operator==( FramebufferAttachmentImageInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( usage == rhs.usage ) + && ( width == rhs.width ) + && ( height == rhs.height ) + && ( layerCount == rhs.layerCount ) + && ( viewFormatCount == rhs.viewFormatCount ) + && ( pViewFormats == rhs.pViewFormats ); + } + + bool operator!=( FramebufferAttachmentImageInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFramebufferAttachmentImageInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage = {}; + uint32_t width = {}; + uint32_t height = {}; + uint32_t layerCount = {}; + uint32_t viewFormatCount = {}; + const VULKAN_HPP_NAMESPACE::Format* pViewFormats = {}; + + }; + static_assert( sizeof( FramebufferAttachmentImageInfo ) == sizeof( VkFramebufferAttachmentImageInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = FramebufferAttachmentImageInfo; + }; + using FramebufferAttachmentImageInfoKHR = FramebufferAttachmentImageInfo; + + struct FramebufferAttachmentsCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eFramebufferAttachmentsCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR FramebufferAttachmentsCreateInfo(uint32_t attachmentImageInfoCount_ = {}, const VULKAN_HPP_NAMESPACE::FramebufferAttachmentImageInfo* pAttachmentImageInfos_ = {}) VULKAN_HPP_NOEXCEPT + : attachmentImageInfoCount( attachmentImageInfoCount_ ), pAttachmentImageInfos( pAttachmentImageInfos_ ) + {} + + VULKAN_HPP_CONSTEXPR FramebufferAttachmentsCreateInfo( FramebufferAttachmentsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + FramebufferAttachmentsCreateInfo( VkFramebufferAttachmentsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + FramebufferAttachmentsCreateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachmentImageInfos_ ) + : attachmentImageInfoCount( static_cast( attachmentImageInfos_.size() ) ), pAttachmentImageInfos( attachmentImageInfos_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + FramebufferAttachmentsCreateInfo & operator=( VkFramebufferAttachmentsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + FramebufferAttachmentsCreateInfo & operator=( FramebufferAttachmentsCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( FramebufferAttachmentsCreateInfo ) ); + return *this; + } + + FramebufferAttachmentsCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + FramebufferAttachmentsCreateInfo & setAttachmentImageInfoCount( uint32_t attachmentImageInfoCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentImageInfoCount = attachmentImageInfoCount_; + return *this; + } + + FramebufferAttachmentsCreateInfo & setPAttachmentImageInfos( const VULKAN_HPP_NAMESPACE::FramebufferAttachmentImageInfo* pAttachmentImageInfos_ ) VULKAN_HPP_NOEXCEPT + { + pAttachmentImageInfos = pAttachmentImageInfos_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + FramebufferAttachmentsCreateInfo & setAttachmentImageInfos( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachmentImageInfos_ ) VULKAN_HPP_NOEXCEPT + { + attachmentImageInfoCount = static_cast( attachmentImageInfos_.size() ); + pAttachmentImageInfos = attachmentImageInfos_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkFramebufferAttachmentsCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkFramebufferAttachmentsCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( FramebufferAttachmentsCreateInfo const& ) const = default; +#else + bool operator==( FramebufferAttachmentsCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( attachmentImageInfoCount == rhs.attachmentImageInfoCount ) + && ( pAttachmentImageInfos == rhs.pAttachmentImageInfos ); + } + + bool operator!=( FramebufferAttachmentsCreateInfo 
const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eFramebufferAttachmentsCreateInfo; + const void* pNext = {}; + uint32_t attachmentImageInfoCount = {}; + const VULKAN_HPP_NAMESPACE::FramebufferAttachmentImageInfo* pAttachmentImageInfos = {}; + + }; + static_assert( sizeof( FramebufferAttachmentsCreateInfo ) == sizeof( VkFramebufferAttachmentsCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = FramebufferAttachmentsCreateInfo; + }; + using FramebufferAttachmentsCreateInfoKHR = FramebufferAttachmentsCreateInfo; + + struct GraphicsShaderGroupCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGraphicsShaderGroupCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GraphicsShaderGroupCreateInfoNV(uint32_t stageCount_ = {}, const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ = {}, const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState_ = {}) VULKAN_HPP_NOEXCEPT + : stageCount( stageCount_ ), pStages( pStages_ ), pVertexInputState( pVertexInputState_ ), pTessellationState( pTessellationState_ ) + {} + + VULKAN_HPP_CONSTEXPR GraphicsShaderGroupCreateInfoNV( GraphicsShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GraphicsShaderGroupCreateInfoNV( VkGraphicsShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsShaderGroupCreateInfoNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_, const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState_ = {}, const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState_ = {} ) + : stageCount( static_cast( stages_.size() ) ), pStages( stages_.data() ), pVertexInputState( pVertexInputState_ ), pTessellationState( pTessellationState_ ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GraphicsShaderGroupCreateInfoNV & operator=( VkGraphicsShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GraphicsShaderGroupCreateInfoNV & operator=( GraphicsShaderGroupCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GraphicsShaderGroupCreateInfoNV ) ); + return *this; + } + + GraphicsShaderGroupCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GraphicsShaderGroupCreateInfoNV & setStageCount( uint32_t stageCount_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = stageCount_; + return *this; + } + + GraphicsShaderGroupCreateInfoNV & setPStages( const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages_ ) VULKAN_HPP_NOEXCEPT + { + pStages = pStages_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsShaderGroupCreateInfoNV & setStages( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & stages_ ) VULKAN_HPP_NOEXCEPT + { + stageCount = static_cast( stages_.size() ); + pStages = stages_.data(); + return *this; + } 
+#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + GraphicsShaderGroupCreateInfoNV & setPVertexInputState( const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState_ ) VULKAN_HPP_NOEXCEPT + { + pVertexInputState = pVertexInputState_; + return *this; + } + + GraphicsShaderGroupCreateInfoNV & setPTessellationState( const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState_ ) VULKAN_HPP_NOEXCEPT + { + pTessellationState = pTessellationState_; + return *this; + } + + + operator VkGraphicsShaderGroupCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGraphicsShaderGroupCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GraphicsShaderGroupCreateInfoNV const& ) const = default; +#else + bool operator==( GraphicsShaderGroupCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( stageCount == rhs.stageCount ) + && ( pStages == rhs.pStages ) + && ( pVertexInputState == rhs.pVertexInputState ) + && ( pTessellationState == rhs.pTessellationState ); + } + + bool operator!=( GraphicsShaderGroupCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGraphicsShaderGroupCreateInfoNV; + const void* pNext = {}; + uint32_t stageCount = {}; + const VULKAN_HPP_NAMESPACE::PipelineShaderStageCreateInfo* pStages = {}; + const VULKAN_HPP_NAMESPACE::PipelineVertexInputStateCreateInfo* pVertexInputState = {}; + const VULKAN_HPP_NAMESPACE::PipelineTessellationStateCreateInfo* pTessellationState = {}; + + }; + static_assert( sizeof( GraphicsShaderGroupCreateInfoNV ) == sizeof( VkGraphicsShaderGroupCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = GraphicsShaderGroupCreateInfoNV; + }; + + struct GraphicsPipelineShaderGroupsCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eGraphicsPipelineShaderGroupsCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR GraphicsPipelineShaderGroupsCreateInfoNV(uint32_t groupCount_ = {}, const VULKAN_HPP_NAMESPACE::GraphicsShaderGroupCreateInfoNV* pGroups_ = {}, uint32_t pipelineCount_ = {}, const VULKAN_HPP_NAMESPACE::Pipeline* pPipelines_ = {}) VULKAN_HPP_NOEXCEPT + : groupCount( groupCount_ ), pGroups( pGroups_ ), pipelineCount( pipelineCount_ ), pPipelines( pPipelines_ ) + {} + + VULKAN_HPP_CONSTEXPR GraphicsPipelineShaderGroupsCreateInfoNV( GraphicsPipelineShaderGroupsCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + GraphicsPipelineShaderGroupsCreateInfoNV( VkGraphicsPipelineShaderGroupsCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsPipelineShaderGroupsCreateInfoNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & groups_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelines_ = {} ) + : groupCount( static_cast( groups_.size() ) ), pGroups( groups_.data() ), pipelineCount( static_cast( pipelines_.size() ) ), pPipelines( pipelines_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + GraphicsPipelineShaderGroupsCreateInfoNV & operator=( VkGraphicsPipelineShaderGroupsCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + GraphicsPipelineShaderGroupsCreateInfoNV & operator=( GraphicsPipelineShaderGroupsCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( GraphicsPipelineShaderGroupsCreateInfoNV ) ); + return *this; + } + + GraphicsPipelineShaderGroupsCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + GraphicsPipelineShaderGroupsCreateInfoNV & setGroupCount( uint32_t groupCount_ ) VULKAN_HPP_NOEXCEPT + { + groupCount = groupCount_; + return *this; + } + + GraphicsPipelineShaderGroupsCreateInfoNV & setPGroups( const VULKAN_HPP_NAMESPACE::GraphicsShaderGroupCreateInfoNV* pGroups_ ) VULKAN_HPP_NOEXCEPT + { + pGroups = pGroups_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsPipelineShaderGroupsCreateInfoNV & setGroups( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & groups_ ) VULKAN_HPP_NOEXCEPT + { + groupCount = static_cast( groups_.size() ); + pGroups = groups_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + GraphicsPipelineShaderGroupsCreateInfoNV & setPipelineCount( uint32_t pipelineCount_ ) VULKAN_HPP_NOEXCEPT + { + pipelineCount = pipelineCount_; + return *this; + } + + GraphicsPipelineShaderGroupsCreateInfoNV & setPPipelines( const VULKAN_HPP_NAMESPACE::Pipeline* pPipelines_ ) VULKAN_HPP_NOEXCEPT + { + pPipelines = pPipelines_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + GraphicsPipelineShaderGroupsCreateInfoNV & setPipelines( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelines_ ) VULKAN_HPP_NOEXCEPT + { + pipelineCount = static_cast( pipelines_.size() ); + pPipelines = pipelines_.data(); + return *this; + } +#endif // 
!defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkGraphicsPipelineShaderGroupsCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkGraphicsPipelineShaderGroupsCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( GraphicsPipelineShaderGroupsCreateInfoNV const& ) const = default; +#else + bool operator==( GraphicsPipelineShaderGroupsCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( groupCount == rhs.groupCount ) + && ( pGroups == rhs.pGroups ) + && ( pipelineCount == rhs.pipelineCount ) + && ( pPipelines == rhs.pPipelines ); + } + + bool operator!=( GraphicsPipelineShaderGroupsCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eGraphicsPipelineShaderGroupsCreateInfoNV; + const void* pNext = {}; + uint32_t groupCount = {}; + const VULKAN_HPP_NAMESPACE::GraphicsShaderGroupCreateInfoNV* pGroups = {}; + uint32_t pipelineCount = {}; + const VULKAN_HPP_NAMESPACE::Pipeline* pPipelines = {}; + + }; + static_assert( sizeof( GraphicsPipelineShaderGroupsCreateInfoNV ) == sizeof( VkGraphicsPipelineShaderGroupsCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = GraphicsPipelineShaderGroupsCreateInfoNV; + }; + + struct HeadlessSurfaceCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eHeadlessSurfaceCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR HeadlessSurfaceCreateInfoEXT(VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateFlagsEXT flags_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ) + {} + + VULKAN_HPP_CONSTEXPR HeadlessSurfaceCreateInfoEXT( HeadlessSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + HeadlessSurfaceCreateInfoEXT( VkHeadlessSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + HeadlessSurfaceCreateInfoEXT & operator=( VkHeadlessSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + HeadlessSurfaceCreateInfoEXT & operator=( HeadlessSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( HeadlessSurfaceCreateInfoEXT ) ); + return *this; + } + + HeadlessSurfaceCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + HeadlessSurfaceCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + + operator VkHeadlessSurfaceCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkHeadlessSurfaceCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( HeadlessSurfaceCreateInfoEXT const& ) const = default; +#else + bool operator==( HeadlessSurfaceCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == 
rhs.pNext ) + && ( flags == rhs.flags ); + } + + bool operator!=( HeadlessSurfaceCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eHeadlessSurfaceCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateFlagsEXT flags = {}; + + }; + static_assert( sizeof( HeadlessSurfaceCreateInfoEXT ) == sizeof( VkHeadlessSurfaceCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = HeadlessSurfaceCreateInfoEXT; + }; + +#ifdef VK_USE_PLATFORM_IOS_MVK + struct IOSSurfaceCreateInfoMVK + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eIosSurfaceCreateInfoMVK; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR IOSSurfaceCreateInfoMVK(VULKAN_HPP_NAMESPACE::IOSSurfaceCreateFlagsMVK flags_ = {}, const void* pView_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pView( pView_ ) + {} + + VULKAN_HPP_CONSTEXPR IOSSurfaceCreateInfoMVK( IOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + IOSSurfaceCreateInfoMVK( VkIOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + IOSSurfaceCreateInfoMVK & operator=( VkIOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + IOSSurfaceCreateInfoMVK & operator=( IOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( IOSSurfaceCreateInfoMVK ) ); + return *this; + } + + IOSSurfaceCreateInfoMVK & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + IOSSurfaceCreateInfoMVK & setFlags( VULKAN_HPP_NAMESPACE::IOSSurfaceCreateFlagsMVK flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + IOSSurfaceCreateInfoMVK & setPView( const void* pView_ ) VULKAN_HPP_NOEXCEPT + { + pView = pView_; + return *this; + } + + + operator VkIOSSurfaceCreateInfoMVK const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkIOSSurfaceCreateInfoMVK &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( IOSSurfaceCreateInfoMVK const& ) const = default; +#else + bool operator==( IOSSurfaceCreateInfoMVK const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pView == rhs.pView ); + } + + bool operator!=( IOSSurfaceCreateInfoMVK const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eIosSurfaceCreateInfoMVK; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::IOSSurfaceCreateFlagsMVK flags = {}; + const void* pView = {}; + + }; + static_assert( sizeof( IOSSurfaceCreateInfoMVK ) == sizeof( VkIOSSurfaceCreateInfoMVK ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = IOSSurfaceCreateInfoMVK; + }; +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + + struct ImageDrmFormatModifierExplicitCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageDrmFormatModifierExplicitCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierExplicitCreateInfoEXT(uint64_t drmFormatModifier_ = {}, uint32_t drmFormatModifierPlaneCount_ = {}, const VULKAN_HPP_NAMESPACE::SubresourceLayout* pPlaneLayouts_ = {}) VULKAN_HPP_NOEXCEPT + : drmFormatModifier( drmFormatModifier_ ), drmFormatModifierPlaneCount( drmFormatModifierPlaneCount_ ), pPlaneLayouts( pPlaneLayouts_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierExplicitCreateInfoEXT( ImageDrmFormatModifierExplicitCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageDrmFormatModifierExplicitCreateInfoEXT( VkImageDrmFormatModifierExplicitCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageDrmFormatModifierExplicitCreateInfoEXT( uint64_t drmFormatModifier_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & planeLayouts_ ) + : drmFormatModifier( drmFormatModifier_ ), drmFormatModifierPlaneCount( static_cast( planeLayouts_.size() ) ), pPlaneLayouts( planeLayouts_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageDrmFormatModifierExplicitCreateInfoEXT & operator=( VkImageDrmFormatModifierExplicitCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageDrmFormatModifierExplicitCreateInfoEXT & operator=( ImageDrmFormatModifierExplicitCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageDrmFormatModifierExplicitCreateInfoEXT ) ); + return *this; + } + + ImageDrmFormatModifierExplicitCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageDrmFormatModifierExplicitCreateInfoEXT & setDrmFormatModifier( uint64_t drmFormatModifier_ ) VULKAN_HPP_NOEXCEPT + { + drmFormatModifier = drmFormatModifier_; + return *this; + } + + ImageDrmFormatModifierExplicitCreateInfoEXT & setDrmFormatModifierPlaneCount( uint32_t drmFormatModifierPlaneCount_ ) VULKAN_HPP_NOEXCEPT + { + drmFormatModifierPlaneCount = drmFormatModifierPlaneCount_; + return *this; + } + + ImageDrmFormatModifierExplicitCreateInfoEXT & setPPlaneLayouts( const VULKAN_HPP_NAMESPACE::SubresourceLayout* pPlaneLayouts_ ) VULKAN_HPP_NOEXCEPT + { + pPlaneLayouts = pPlaneLayouts_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageDrmFormatModifierExplicitCreateInfoEXT & setPlaneLayouts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & planeLayouts_ ) VULKAN_HPP_NOEXCEPT + { + drmFormatModifierPlaneCount = static_cast( planeLayouts_.size() ); + pPlaneLayouts = planeLayouts_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkImageDrmFormatModifierExplicitCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageDrmFormatModifierExplicitCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( 
ImageDrmFormatModifierExplicitCreateInfoEXT const& ) const = default; +#else + bool operator==( ImageDrmFormatModifierExplicitCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( drmFormatModifier == rhs.drmFormatModifier ) + && ( drmFormatModifierPlaneCount == rhs.drmFormatModifierPlaneCount ) + && ( pPlaneLayouts == rhs.pPlaneLayouts ); + } + + bool operator!=( ImageDrmFormatModifierExplicitCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageDrmFormatModifierExplicitCreateInfoEXT; + const void* pNext = {}; + uint64_t drmFormatModifier = {}; + uint32_t drmFormatModifierPlaneCount = {}; + const VULKAN_HPP_NAMESPACE::SubresourceLayout* pPlaneLayouts = {}; + + }; + static_assert( sizeof( ImageDrmFormatModifierExplicitCreateInfoEXT ) == sizeof( VkImageDrmFormatModifierExplicitCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImageDrmFormatModifierExplicitCreateInfoEXT; + }; + + struct ImageDrmFormatModifierListCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageDrmFormatModifierListCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierListCreateInfoEXT(uint32_t drmFormatModifierCount_ = {}, const uint64_t* pDrmFormatModifiers_ = {}) VULKAN_HPP_NOEXCEPT + : drmFormatModifierCount( drmFormatModifierCount_ ), pDrmFormatModifiers( pDrmFormatModifiers_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageDrmFormatModifierListCreateInfoEXT( ImageDrmFormatModifierListCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageDrmFormatModifierListCreateInfoEXT( VkImageDrmFormatModifierListCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageDrmFormatModifierListCreateInfoEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & drmFormatModifiers_ ) + : drmFormatModifierCount( static_cast( drmFormatModifiers_.size() ) ), pDrmFormatModifiers( drmFormatModifiers_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageDrmFormatModifierListCreateInfoEXT & operator=( VkImageDrmFormatModifierListCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageDrmFormatModifierListCreateInfoEXT & operator=( ImageDrmFormatModifierListCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageDrmFormatModifierListCreateInfoEXT ) ); + return *this; + } + + ImageDrmFormatModifierListCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageDrmFormatModifierListCreateInfoEXT & setDrmFormatModifierCount( uint32_t drmFormatModifierCount_ ) VULKAN_HPP_NOEXCEPT + { + drmFormatModifierCount = drmFormatModifierCount_; + return *this; + } + + ImageDrmFormatModifierListCreateInfoEXT & setPDrmFormatModifiers( const uint64_t* pDrmFormatModifiers_ ) VULKAN_HPP_NOEXCEPT + { + pDrmFormatModifiers = pDrmFormatModifiers_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + 
ImageDrmFormatModifierListCreateInfoEXT & setDrmFormatModifiers( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & drmFormatModifiers_ ) VULKAN_HPP_NOEXCEPT + { + drmFormatModifierCount = static_cast( drmFormatModifiers_.size() ); + pDrmFormatModifiers = drmFormatModifiers_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkImageDrmFormatModifierListCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageDrmFormatModifierListCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageDrmFormatModifierListCreateInfoEXT const& ) const = default; +#else + bool operator==( ImageDrmFormatModifierListCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( drmFormatModifierCount == rhs.drmFormatModifierCount ) + && ( pDrmFormatModifiers == rhs.pDrmFormatModifiers ); + } + + bool operator!=( ImageDrmFormatModifierListCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageDrmFormatModifierListCreateInfoEXT; + const void* pNext = {}; + uint32_t drmFormatModifierCount = {}; + const uint64_t* pDrmFormatModifiers = {}; + + }; + static_assert( sizeof( ImageDrmFormatModifierListCreateInfoEXT ) == sizeof( VkImageDrmFormatModifierListCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImageDrmFormatModifierListCreateInfoEXT; + }; + + struct ImageFormatListCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageFormatListCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageFormatListCreateInfo(uint32_t viewFormatCount_ = {}, const VULKAN_HPP_NAMESPACE::Format* pViewFormats_ = {}) VULKAN_HPP_NOEXCEPT + : viewFormatCount( viewFormatCount_ ), pViewFormats( pViewFormats_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageFormatListCreateInfo( ImageFormatListCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageFormatListCreateInfo( VkImageFormatListCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageFormatListCreateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewFormats_ ) + : viewFormatCount( static_cast( viewFormats_.size() ) ), pViewFormats( viewFormats_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageFormatListCreateInfo & operator=( VkImageFormatListCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageFormatListCreateInfo & operator=( ImageFormatListCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageFormatListCreateInfo ) ); + return *this; + } + + ImageFormatListCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageFormatListCreateInfo & setViewFormatCount( uint32_t viewFormatCount_ ) VULKAN_HPP_NOEXCEPT + { + viewFormatCount = viewFormatCount_; + return *this; + } + + 
ImageFormatListCreateInfo & setPViewFormats( const VULKAN_HPP_NAMESPACE::Format* pViewFormats_ ) VULKAN_HPP_NOEXCEPT + { + pViewFormats = pViewFormats_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ImageFormatListCreateInfo & setViewFormats( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewFormats_ ) VULKAN_HPP_NOEXCEPT + { + viewFormatCount = static_cast( viewFormats_.size() ); + pViewFormats = viewFormats_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkImageFormatListCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageFormatListCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageFormatListCreateInfo const& ) const = default; +#else + bool operator==( ImageFormatListCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( viewFormatCount == rhs.viewFormatCount ) + && ( pViewFormats == rhs.pViewFormats ); + } + + bool operator!=( ImageFormatListCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageFormatListCreateInfo; + const void* pNext = {}; + uint32_t viewFormatCount = {}; + const VULKAN_HPP_NAMESPACE::Format* pViewFormats = {}; + + }; + static_assert( sizeof( ImageFormatListCreateInfo ) == sizeof( VkImageFormatListCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImageFormatListCreateInfo; + }; + using ImageFormatListCreateInfoKHR = ImageFormatListCreateInfo; + +#ifdef VK_USE_PLATFORM_FUCHSIA + struct ImagePipeSurfaceCreateInfoFUCHSIA + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImagepipeSurfaceCreateInfoFUCHSIA; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImagePipeSurfaceCreateInfoFUCHSIA(VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateFlagsFUCHSIA flags_ = {}, zx_handle_t imagePipeHandle_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), imagePipeHandle( imagePipeHandle_ ) + {} + + VULKAN_HPP_CONSTEXPR ImagePipeSurfaceCreateInfoFUCHSIA( ImagePipeSurfaceCreateInfoFUCHSIA const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImagePipeSurfaceCreateInfoFUCHSIA( VkImagePipeSurfaceCreateInfoFUCHSIA const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImagePipeSurfaceCreateInfoFUCHSIA & operator=( VkImagePipeSurfaceCreateInfoFUCHSIA const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImagePipeSurfaceCreateInfoFUCHSIA & operator=( ImagePipeSurfaceCreateInfoFUCHSIA const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImagePipeSurfaceCreateInfoFUCHSIA ) ); + return *this; + } + + ImagePipeSurfaceCreateInfoFUCHSIA & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImagePipeSurfaceCreateInfoFUCHSIA & setFlags( VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateFlagsFUCHSIA flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ImagePipeSurfaceCreateInfoFUCHSIA & setImagePipeHandle( zx_handle_t 
imagePipeHandle_ ) VULKAN_HPP_NOEXCEPT + { + imagePipeHandle = imagePipeHandle_; + return *this; + } + + + operator VkImagePipeSurfaceCreateInfoFUCHSIA const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImagePipeSurfaceCreateInfoFUCHSIA &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImagePipeSurfaceCreateInfoFUCHSIA const& ) const = default; +#else + bool operator==( ImagePipeSurfaceCreateInfoFUCHSIA const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( memcmp( &imagePipeHandle, &rhs.imagePipeHandle, sizeof( zx_handle_t ) ) == 0 ); + } + + bool operator!=( ImagePipeSurfaceCreateInfoFUCHSIA const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImagepipeSurfaceCreateInfoFUCHSIA; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateFlagsFUCHSIA flags = {}; + zx_handle_t imagePipeHandle = {}; + + }; + static_assert( sizeof( ImagePipeSurfaceCreateInfoFUCHSIA ) == sizeof( VkImagePipeSurfaceCreateInfoFUCHSIA ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImagePipeSurfaceCreateInfoFUCHSIA; + }; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + struct ImagePlaneMemoryRequirementsInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImagePlaneMemoryRequirementsInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImagePlaneMemoryRequirementsInfo(VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor) VULKAN_HPP_NOEXCEPT + : planeAspect( planeAspect_ ) + {} + + VULKAN_HPP_CONSTEXPR ImagePlaneMemoryRequirementsInfo( ImagePlaneMemoryRequirementsInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImagePlaneMemoryRequirementsInfo( VkImagePlaneMemoryRequirementsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImagePlaneMemoryRequirementsInfo & operator=( VkImagePlaneMemoryRequirementsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImagePlaneMemoryRequirementsInfo & operator=( ImagePlaneMemoryRequirementsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImagePlaneMemoryRequirementsInfo ) ); + return *this; + } + + ImagePlaneMemoryRequirementsInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImagePlaneMemoryRequirementsInfo & setPlaneAspect( VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect_ ) VULKAN_HPP_NOEXCEPT + { + planeAspect = planeAspect_; + return *this; + } + + + operator VkImagePlaneMemoryRequirementsInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImagePlaneMemoryRequirementsInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImagePlaneMemoryRequirementsInfo const& ) const = default; +#else + bool operator==( ImagePlaneMemoryRequirementsInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + 
return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( planeAspect == rhs.planeAspect ); + } + + bool operator!=( ImagePlaneMemoryRequirementsInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImagePlaneMemoryRequirementsInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageAspectFlagBits planeAspect = VULKAN_HPP_NAMESPACE::ImageAspectFlagBits::eColor; + + }; + static_assert( sizeof( ImagePlaneMemoryRequirementsInfo ) == sizeof( VkImagePlaneMemoryRequirementsInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ImagePlaneMemoryRequirementsInfo; + }; + using ImagePlaneMemoryRequirementsInfoKHR = ImagePlaneMemoryRequirementsInfo; + + struct ImageStencilUsageCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageStencilUsageCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageStencilUsageCreateInfo(VULKAN_HPP_NAMESPACE::ImageUsageFlags stencilUsage_ = {}) VULKAN_HPP_NOEXCEPT + : stencilUsage( stencilUsage_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageStencilUsageCreateInfo( ImageStencilUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageStencilUsageCreateInfo( VkImageStencilUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageStencilUsageCreateInfo & operator=( VkImageStencilUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageStencilUsageCreateInfo & operator=( ImageStencilUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageStencilUsageCreateInfo ) ); + return *this; + } + + ImageStencilUsageCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageStencilUsageCreateInfo & setStencilUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags stencilUsage_ ) VULKAN_HPP_NOEXCEPT + { + stencilUsage = stencilUsage_; + return *this; + } + + + operator VkImageStencilUsageCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageStencilUsageCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageStencilUsageCreateInfo const& ) const = default; +#else + bool operator==( ImageStencilUsageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( stencilUsage == rhs.stencilUsage ); + } + + bool operator!=( ImageStencilUsageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageStencilUsageCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags stencilUsage = {}; + + }; + static_assert( sizeof( ImageStencilUsageCreateInfo ) == sizeof( VkImageStencilUsageCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageStencilUsageCreateInfo; + }; + using ImageStencilUsageCreateInfoEXT = ImageStencilUsageCreateInfo; + + struct ImageSwapchainCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageSwapchainCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageSwapchainCreateInfoKHR(VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ = {}) VULKAN_HPP_NOEXCEPT + : swapchain( swapchain_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageSwapchainCreateInfoKHR( ImageSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageSwapchainCreateInfoKHR( VkImageSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageSwapchainCreateInfoKHR & operator=( VkImageSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageSwapchainCreateInfoKHR & operator=( ImageSwapchainCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageSwapchainCreateInfoKHR ) ); + return *this; + } + + ImageSwapchainCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageSwapchainCreateInfoKHR & setSwapchain( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain_ ) VULKAN_HPP_NOEXCEPT + { + swapchain = swapchain_; + return *this; + } + + + operator VkImageSwapchainCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageSwapchainCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageSwapchainCreateInfoKHR const& ) const = default; +#else + bool operator==( ImageSwapchainCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchain == rhs.swapchain ); + } + + bool operator!=( ImageSwapchainCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageSwapchainCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain = {}; + + }; + static_assert( sizeof( ImageSwapchainCreateInfoKHR ) == sizeof( VkImageSwapchainCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageSwapchainCreateInfoKHR; + }; + + struct ImageViewASTCDecodeModeEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageViewAstcDecodeModeEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageViewASTCDecodeModeEXT(VULKAN_HPP_NAMESPACE::Format decodeMode_ = VULKAN_HPP_NAMESPACE::Format::eUndefined) VULKAN_HPP_NOEXCEPT + : decodeMode( decodeMode_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageViewASTCDecodeModeEXT( ImageViewASTCDecodeModeEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageViewASTCDecodeModeEXT( VkImageViewASTCDecodeModeEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageViewASTCDecodeModeEXT & operator=( VkImageViewASTCDecodeModeEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageViewASTCDecodeModeEXT & operator=( ImageViewASTCDecodeModeEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageViewASTCDecodeModeEXT ) ); + return *this; + } + + ImageViewASTCDecodeModeEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageViewASTCDecodeModeEXT & setDecodeMode( VULKAN_HPP_NAMESPACE::Format decodeMode_ ) VULKAN_HPP_NOEXCEPT + { + decodeMode = decodeMode_; + return *this; + } + + + operator VkImageViewASTCDecodeModeEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageViewASTCDecodeModeEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageViewASTCDecodeModeEXT const& ) const = default; +#else + bool operator==( ImageViewASTCDecodeModeEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( decodeMode == rhs.decodeMode ); + } + + bool operator!=( ImageViewASTCDecodeModeEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageViewAstcDecodeModeEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Format decodeMode = VULKAN_HPP_NAMESPACE::Format::eUndefined; + + }; + static_assert( sizeof( ImageViewASTCDecodeModeEXT ) == sizeof( VkImageViewASTCDecodeModeEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageViewASTCDecodeModeEXT; + }; + + struct ImageViewUsageCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImageViewUsageCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImageViewUsageCreateInfo(VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ = {}) VULKAN_HPP_NOEXCEPT + : usage( usage_ ) + {} + + VULKAN_HPP_CONSTEXPR ImageViewUsageCreateInfo( ImageViewUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImageViewUsageCreateInfo( VkImageViewUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImageViewUsageCreateInfo & operator=( VkImageViewUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImageViewUsageCreateInfo & operator=( ImageViewUsageCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImageViewUsageCreateInfo ) ); + return *this; + } + + ImageViewUsageCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImageViewUsageCreateInfo & setUsage( VULKAN_HPP_NAMESPACE::ImageUsageFlags usage_ ) VULKAN_HPP_NOEXCEPT + { + usage = usage_; + return *this; + } + + + operator VkImageViewUsageCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImageViewUsageCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImageViewUsageCreateInfo const& ) const = default; +#else + bool operator==( ImageViewUsageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( usage == rhs.usage ); + } + + bool operator!=( ImageViewUsageCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImageViewUsageCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags usage = {}; + + }; + static_assert( sizeof( ImageViewUsageCreateInfo ) == sizeof( VkImageViewUsageCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImageViewUsageCreateInfo; + }; + using ImageViewUsageCreateInfoKHR = ImageViewUsageCreateInfo; + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + struct ImportAndroidHardwareBufferInfoANDROID + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportAndroidHardwareBufferInfoANDROID; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportAndroidHardwareBufferInfoANDROID(struct AHardwareBuffer* buffer_ = {}) VULKAN_HPP_NOEXCEPT + : buffer( buffer_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportAndroidHardwareBufferInfoANDROID( ImportAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportAndroidHardwareBufferInfoANDROID( VkImportAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportAndroidHardwareBufferInfoANDROID & operator=( VkImportAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportAndroidHardwareBufferInfoANDROID & operator=( ImportAndroidHardwareBufferInfoANDROID const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportAndroidHardwareBufferInfoANDROID ) ); + return *this; + } + + ImportAndroidHardwareBufferInfoANDROID & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportAndroidHardwareBufferInfoANDROID & setBuffer( struct AHardwareBuffer* buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + + operator VkImportAndroidHardwareBufferInfoANDROID const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportAndroidHardwareBufferInfoANDROID &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportAndroidHardwareBufferInfoANDROID const& ) const = default; +#else + bool operator==( ImportAndroidHardwareBufferInfoANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( ImportAndroidHardwareBufferInfoANDROID const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportAndroidHardwareBufferInfoANDROID; + const void* pNext = {}; + struct AHardwareBuffer* buffer = {}; + + }; + static_assert( sizeof( ImportAndroidHardwareBufferInfoANDROID ) == sizeof( VkImportAndroidHardwareBufferInfoANDROID ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImportAndroidHardwareBufferInfoANDROID; + }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + struct ImportMemoryFdInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportMemoryFdInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportMemoryFdInfoKHR(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, int fd_ = {}) VULKAN_HPP_NOEXCEPT + : handleType( handleType_ ), fd( fd_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportMemoryFdInfoKHR( ImportMemoryFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportMemoryFdInfoKHR( VkImportMemoryFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportMemoryFdInfoKHR & operator=( VkImportMemoryFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportMemoryFdInfoKHR & operator=( ImportMemoryFdInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportMemoryFdInfoKHR ) ); + return *this; + } + + ImportMemoryFdInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportMemoryFdInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportMemoryFdInfoKHR & setFd( int fd_ ) VULKAN_HPP_NOEXCEPT + { + fd = fd_; + return *this; + } + + + operator VkImportMemoryFdInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportMemoryFdInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportMemoryFdInfoKHR const& ) const = default; +#else + bool operator==( ImportMemoryFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( fd == rhs.fd ); + } + + bool operator!=( ImportMemoryFdInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportMemoryFdInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + int fd = {}; + + }; + static_assert( sizeof( ImportMemoryFdInfoKHR ) == sizeof( VkImportMemoryFdInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImportMemoryFdInfoKHR; + }; + + struct ImportMemoryHostPointerInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportMemoryHostPointerInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportMemoryHostPointerInfoEXT(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, void* pHostPointer_ = {}) VULKAN_HPP_NOEXCEPT + : handleType( handleType_ ), pHostPointer( pHostPointer_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportMemoryHostPointerInfoEXT( ImportMemoryHostPointerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportMemoryHostPointerInfoEXT( VkImportMemoryHostPointerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportMemoryHostPointerInfoEXT & operator=( VkImportMemoryHostPointerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportMemoryHostPointerInfoEXT & operator=( ImportMemoryHostPointerInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportMemoryHostPointerInfoEXT ) ); + return *this; + } + + ImportMemoryHostPointerInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportMemoryHostPointerInfoEXT & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportMemoryHostPointerInfoEXT & setPHostPointer( void* pHostPointer_ ) VULKAN_HPP_NOEXCEPT + { + pHostPointer = pHostPointer_; + return *this; + } + + + operator VkImportMemoryHostPointerInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportMemoryHostPointerInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportMemoryHostPointerInfoEXT const& ) const = default; +#else + bool operator==( ImportMemoryHostPointerInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( pHostPointer == rhs.pHostPointer ); + } + + bool operator!=( ImportMemoryHostPointerInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportMemoryHostPointerInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + void* pHostPointer = {}; + + }; + static_assert( sizeof( ImportMemoryHostPointerInfoEXT ) == sizeof( VkImportMemoryHostPointerInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImportMemoryHostPointerInfoEXT; + }; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportMemoryWin32HandleInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportMemoryWin32HandleInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportMemoryWin32HandleInfoKHR(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd, HANDLE handle_ = {}, LPCWSTR name_ = {}) VULKAN_HPP_NOEXCEPT + : handleType( handleType_ ), handle( handle_ ), name( name_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportMemoryWin32HandleInfoKHR( ImportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportMemoryWin32HandleInfoKHR( VkImportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportMemoryWin32HandleInfoKHR & operator=( VkImportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportMemoryWin32HandleInfoKHR & operator=( ImportMemoryWin32HandleInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportMemoryWin32HandleInfoKHR ) ); + return *this; + } + + ImportMemoryWin32HandleInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportMemoryWin32HandleInfoKHR & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportMemoryWin32HandleInfoKHR & setHandle( HANDLE handle_ ) VULKAN_HPP_NOEXCEPT + { + handle = handle_; + return *this; + } + + ImportMemoryWin32HandleInfoKHR & setName( LPCWSTR name_ ) VULKAN_HPP_NOEXCEPT + { + name = name_; + return *this; + } + + + operator VkImportMemoryWin32HandleInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportMemoryWin32HandleInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportMemoryWin32HandleInfoKHR const& ) const = default; +#else + bool operator==( ImportMemoryWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ) + && ( name == rhs.name ); + } + + bool operator!=( ImportMemoryWin32HandleInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportMemoryWin32HandleInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + HANDLE handle = {}; + LPCWSTR name = {}; + + }; + static_assert( sizeof( ImportMemoryWin32HandleInfoKHR ) == sizeof( VkImportMemoryWin32HandleInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImportMemoryWin32HandleInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct ImportMemoryWin32HandleInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eImportMemoryWin32HandleInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ImportMemoryWin32HandleInfoNV(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType_ = {}, HANDLE handle_ = {}) VULKAN_HPP_NOEXCEPT + : handleType( handleType_ ), handle( handle_ ) + {} + + VULKAN_HPP_CONSTEXPR ImportMemoryWin32HandleInfoNV( ImportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ImportMemoryWin32HandleInfoNV( VkImportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ImportMemoryWin32HandleInfoNV & operator=( VkImportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ImportMemoryWin32HandleInfoNV & operator=( ImportMemoryWin32HandleInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ImportMemoryWin32HandleInfoNV ) ); + return *this; + } + + ImportMemoryWin32HandleInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ImportMemoryWin32HandleInfoNV & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + ImportMemoryWin32HandleInfoNV & setHandle( HANDLE handle_ ) VULKAN_HPP_NOEXCEPT + { + handle = handle_; + return *this; + } + + + operator VkImportMemoryWin32HandleInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkImportMemoryWin32HandleInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ImportMemoryWin32HandleInfoNV const& ) const = default; +#else + bool operator==( ImportMemoryWin32HandleInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ) + && ( handle == rhs.handle ); + } + + bool operator!=( ImportMemoryWin32HandleInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eImportMemoryWin32HandleInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType = {}; + HANDLE handle = {}; + + }; + static_assert( sizeof( ImportMemoryWin32HandleInfoNV ) == sizeof( VkImportMemoryWin32HandleInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ImportMemoryWin32HandleInfoNV; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct InputAttachmentAspectReference + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR InputAttachmentAspectReference(uint32_t subpass_ = {}, uint32_t inputAttachmentIndex_ = {}, VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ = {}) VULKAN_HPP_NOEXCEPT + : subpass( subpass_ ), inputAttachmentIndex( inputAttachmentIndex_ ), aspectMask( aspectMask_ ) + {} + + VULKAN_HPP_CONSTEXPR InputAttachmentAspectReference( InputAttachmentAspectReference const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + InputAttachmentAspectReference( VkInputAttachmentAspectReference const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + InputAttachmentAspectReference & operator=( VkInputAttachmentAspectReference const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + InputAttachmentAspectReference & operator=( InputAttachmentAspectReference const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( InputAttachmentAspectReference ) ); + return *this; + } + + InputAttachmentAspectReference & setSubpass( uint32_t subpass_ ) VULKAN_HPP_NOEXCEPT + { + subpass = subpass_; + return *this; + } + + InputAttachmentAspectReference & setInputAttachmentIndex( uint32_t inputAttachmentIndex_ ) VULKAN_HPP_NOEXCEPT + { + inputAttachmentIndex = inputAttachmentIndex_; + return *this; + } + + InputAttachmentAspectReference & setAspectMask( VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask_ ) VULKAN_HPP_NOEXCEPT + { + aspectMask = aspectMask_; + return *this; + } + + + operator VkInputAttachmentAspectReference const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkInputAttachmentAspectReference &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( InputAttachmentAspectReference const& ) const = default; +#else + bool operator==( InputAttachmentAspectReference const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( subpass == rhs.subpass ) + && ( inputAttachmentIndex == rhs.inputAttachmentIndex ) + && ( aspectMask == rhs.aspectMask ); + } + + bool operator!=( InputAttachmentAspectReference const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t subpass = {}; + uint32_t inputAttachmentIndex = {}; + VULKAN_HPP_NAMESPACE::ImageAspectFlags aspectMask = {}; + + }; + static_assert( sizeof( InputAttachmentAspectReference ) == sizeof( VkInputAttachmentAspectReference ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + using InputAttachmentAspectReferenceKHR = InputAttachmentAspectReference; + + struct InstanceCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eInstanceCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR InstanceCreateInfo(VULKAN_HPP_NAMESPACE::InstanceCreateFlags flags_ = {}, const VULKAN_HPP_NAMESPACE::ApplicationInfo* pApplicationInfo_ = {}, uint32_t enabledLayerCount_ = {}, const char* const * ppEnabledLayerNames_ = {}, uint32_t enabledExtensionCount_ = {}, const char* const * ppEnabledExtensionNames_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pApplicationInfo( pApplicationInfo_ ), enabledLayerCount( enabledLayerCount_ ), ppEnabledLayerNames( ppEnabledLayerNames_ ), enabledExtensionCount( enabledExtensionCount_ ), ppEnabledExtensionNames( ppEnabledExtensionNames_ ) + {} + + VULKAN_HPP_CONSTEXPR InstanceCreateInfo( InstanceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + InstanceCreateInfo( VkInstanceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + InstanceCreateInfo( VULKAN_HPP_NAMESPACE::InstanceCreateFlags flags_, const VULKAN_HPP_NAMESPACE::ApplicationInfo* pApplicationInfo_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledLayerNames_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledExtensionNames_ = {} ) + : flags( flags_ ), pApplicationInfo( pApplicationInfo_ ), enabledLayerCount( static_cast( pEnabledLayerNames_.size() ) ), ppEnabledLayerNames( pEnabledLayerNames_.data() ), enabledExtensionCount( static_cast( pEnabledExtensionNames_.size() ) ), ppEnabledExtensionNames( pEnabledExtensionNames_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + InstanceCreateInfo & operator=( VkInstanceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + InstanceCreateInfo & operator=( InstanceCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( InstanceCreateInfo ) ); + return *this; + } + + InstanceCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + InstanceCreateInfo & setFlags( VULKAN_HPP_NAMESPACE::InstanceCreateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + InstanceCreateInfo & setPApplicationInfo( const VULKAN_HPP_NAMESPACE::ApplicationInfo* pApplicationInfo_ ) VULKAN_HPP_NOEXCEPT + { + pApplicationInfo = pApplicationInfo_; + return *this; + } + + InstanceCreateInfo & setEnabledLayerCount( uint32_t enabledLayerCount_ ) VULKAN_HPP_NOEXCEPT + { + enabledLayerCount = enabledLayerCount_; + return *this; + } + + InstanceCreateInfo & setPpEnabledLayerNames( const char* const * ppEnabledLayerNames_ ) VULKAN_HPP_NOEXCEPT + { + ppEnabledLayerNames = ppEnabledLayerNames_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + InstanceCreateInfo & setPEnabledLayerNames( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledLayerNames_ ) VULKAN_HPP_NOEXCEPT + { + enabledLayerCount = static_cast( pEnabledLayerNames_.size() ); + ppEnabledLayerNames = pEnabledLayerNames_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + InstanceCreateInfo & setEnabledExtensionCount( uint32_t enabledExtensionCount_ ) VULKAN_HPP_NOEXCEPT + { + 
enabledExtensionCount = enabledExtensionCount_; + return *this; + } + + InstanceCreateInfo & setPpEnabledExtensionNames( const char* const * ppEnabledExtensionNames_ ) VULKAN_HPP_NOEXCEPT + { + ppEnabledExtensionNames = ppEnabledExtensionNames_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + InstanceCreateInfo & setPEnabledExtensionNames( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pEnabledExtensionNames_ ) VULKAN_HPP_NOEXCEPT + { + enabledExtensionCount = static_cast( pEnabledExtensionNames_.size() ); + ppEnabledExtensionNames = pEnabledExtensionNames_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkInstanceCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkInstanceCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( InstanceCreateInfo const& ) const = default; +#else + bool operator==( InstanceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pApplicationInfo == rhs.pApplicationInfo ) + && ( enabledLayerCount == rhs.enabledLayerCount ) + && ( ppEnabledLayerNames == rhs.ppEnabledLayerNames ) + && ( enabledExtensionCount == rhs.enabledExtensionCount ) + && ( ppEnabledExtensionNames == rhs.ppEnabledExtensionNames ); + } + + bool operator!=( InstanceCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eInstanceCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::InstanceCreateFlags flags = {}; + const VULKAN_HPP_NAMESPACE::ApplicationInfo* pApplicationInfo = {}; + uint32_t enabledLayerCount = {}; + const char* const * ppEnabledLayerNames = {}; + uint32_t enabledExtensionCount = {}; + const char* const * ppEnabledExtensionNames = {}; + + }; + static_assert( sizeof( InstanceCreateInfo ) == sizeof( VkInstanceCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = InstanceCreateInfo; + }; + +#ifdef VK_USE_PLATFORM_MACOS_MVK + struct MacOSSurfaceCreateInfoMVK + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMacosSurfaceCreateInfoMVK; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MacOSSurfaceCreateInfoMVK(VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateFlagsMVK flags_ = {}, const void* pView_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pView( pView_ ) + {} + + VULKAN_HPP_CONSTEXPR MacOSSurfaceCreateInfoMVK( MacOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MacOSSurfaceCreateInfoMVK( VkMacOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MacOSSurfaceCreateInfoMVK & operator=( VkMacOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MacOSSurfaceCreateInfoMVK & operator=( MacOSSurfaceCreateInfoMVK const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MacOSSurfaceCreateInfoMVK ) ); + return *this; + } + + MacOSSurfaceCreateInfoMVK & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MacOSSurfaceCreateInfoMVK & setFlags( VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateFlagsMVK flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + MacOSSurfaceCreateInfoMVK & setPView( const void* pView_ ) VULKAN_HPP_NOEXCEPT + { + pView = pView_; + return *this; + } + + + operator VkMacOSSurfaceCreateInfoMVK const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMacOSSurfaceCreateInfoMVK &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MacOSSurfaceCreateInfoMVK const& ) const = default; +#else + bool operator==( MacOSSurfaceCreateInfoMVK const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pView == rhs.pView ); + } + + bool operator!=( MacOSSurfaceCreateInfoMVK const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMacosSurfaceCreateInfoMVK; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateFlagsMVK flags = {}; + const void* pView = {}; + + }; + static_assert( sizeof( MacOSSurfaceCreateInfoMVK ) == sizeof( VkMacOSSurfaceCreateInfoMVK ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MacOSSurfaceCreateInfoMVK; + }; +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + struct MemoryAllocateFlagsInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryAllocateFlagsInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryAllocateFlagsInfo(VULKAN_HPP_NAMESPACE::MemoryAllocateFlags flags_ = {}, uint32_t deviceMask_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), deviceMask( deviceMask_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryAllocateFlagsInfo( MemoryAllocateFlagsInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryAllocateFlagsInfo( VkMemoryAllocateFlagsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryAllocateFlagsInfo & operator=( VkMemoryAllocateFlagsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryAllocateFlagsInfo & operator=( MemoryAllocateFlagsInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryAllocateFlagsInfo ) ); + return *this; + } + + MemoryAllocateFlagsInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryAllocateFlagsInfo & setFlags( VULKAN_HPP_NAMESPACE::MemoryAllocateFlags flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + MemoryAllocateFlagsInfo & setDeviceMask( uint32_t deviceMask_ ) VULKAN_HPP_NOEXCEPT + { + deviceMask = deviceMask_; + return *this; + } + + + operator VkMemoryAllocateFlagsInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryAllocateFlagsInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryAllocateFlagsInfo const& ) const = default; +#else + bool operator==( MemoryAllocateFlagsInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( deviceMask == rhs.deviceMask ); + } + + bool operator!=( MemoryAllocateFlagsInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryAllocateFlagsInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::MemoryAllocateFlags flags = {}; + uint32_t deviceMask = {}; + + }; + static_assert( sizeof( MemoryAllocateFlagsInfo ) == sizeof( VkMemoryAllocateFlagsInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MemoryAllocateFlagsInfo; + }; + using MemoryAllocateFlagsInfoKHR = MemoryAllocateFlagsInfo; + + struct MemoryDedicatedAllocateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryDedicatedAllocateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryDedicatedAllocateInfo(VULKAN_HPP_NAMESPACE::Image image_ = {}, VULKAN_HPP_NAMESPACE::Buffer buffer_ = {}) VULKAN_HPP_NOEXCEPT + : image( image_ ), buffer( buffer_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryDedicatedAllocateInfo( MemoryDedicatedAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryDedicatedAllocateInfo( VkMemoryDedicatedAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryDedicatedAllocateInfo & operator=( VkMemoryDedicatedAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + MemoryDedicatedAllocateInfo & operator=( MemoryDedicatedAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( MemoryDedicatedAllocateInfo ) ); + return *this; + } + + MemoryDedicatedAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryDedicatedAllocateInfo & setImage( VULKAN_HPP_NAMESPACE::Image image_ ) VULKAN_HPP_NOEXCEPT + { + image = image_; + return *this; + } + + MemoryDedicatedAllocateInfo & setBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer_ ) VULKAN_HPP_NOEXCEPT + { + buffer = buffer_; + return *this; + } + + + operator VkMemoryDedicatedAllocateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkMemoryDedicatedAllocateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryDedicatedAllocateInfo const& ) const = default; +#else + bool operator==( MemoryDedicatedAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( image == rhs.image ) + && ( buffer == rhs.buffer ); + } + + bool operator!=( MemoryDedicatedAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryDedicatedAllocateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Image image = {}; + VULKAN_HPP_NAMESPACE::Buffer buffer = {}; + + }; + static_assert( sizeof( MemoryDedicatedAllocateInfo ) == sizeof( VkMemoryDedicatedAllocateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eMemoryDedicatedAllocateInfo> + { + using Type = MemoryDedicatedAllocateInfo; + }; + using MemoryDedicatedAllocateInfoKHR = MemoryDedicatedAllocateInfo; + + struct MemoryDedicatedRequirements + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryDedicatedRequirements; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryDedicatedRequirements(VULKAN_HPP_NAMESPACE::Bool32 prefersDedicatedAllocation_ = {}, VULKAN_HPP_NAMESPACE::Bool32 requiresDedicatedAllocation_ = {}) VULKAN_HPP_NOEXCEPT + : prefersDedicatedAllocation( prefersDedicatedAllocation_ ), requiresDedicatedAllocation( requiresDedicatedAllocation_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryDedicatedRequirements( MemoryDedicatedRequirements const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryDedicatedRequirements( VkMemoryDedicatedRequirements const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryDedicatedRequirements & operator=( VkMemoryDedicatedRequirements const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryDedicatedRequirements const *>( &rhs ); + return *this; + } + + MemoryDedicatedRequirements & operator=( MemoryDedicatedRequirements const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( MemoryDedicatedRequirements ) ); + return *this; + } + + + operator VkMemoryDedicatedRequirements const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkMemoryDedicatedRequirements*>( this ); + } + + operator VkMemoryDedicatedRequirements &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkMemoryDedicatedRequirements*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryDedicatedRequirements const& ) const = default; +#else + bool operator==( MemoryDedicatedRequirements const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( prefersDedicatedAllocation == rhs.prefersDedicatedAllocation ) + && ( requiresDedicatedAllocation == rhs.requiresDedicatedAllocation ); + } + + bool operator!=( MemoryDedicatedRequirements const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryDedicatedRequirements; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 prefersDedicatedAllocation = {}; + VULKAN_HPP_NAMESPACE::Bool32 requiresDedicatedAllocation = {}; + + }; + static_assert( sizeof( MemoryDedicatedRequirements ) == sizeof( VkMemoryDedicatedRequirements ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<MemoryDedicatedRequirements>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eMemoryDedicatedRequirements> + { + using Type = MemoryDedicatedRequirements; + }; + using MemoryDedicatedRequirementsKHR = MemoryDedicatedRequirements; + + struct MemoryOpaqueCaptureAddressAllocateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryOpaqueCaptureAddressAllocateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryOpaqueCaptureAddressAllocateInfo(uint64_t opaqueCaptureAddress_ = {}) VULKAN_HPP_NOEXCEPT + : opaqueCaptureAddress( opaqueCaptureAddress_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryOpaqueCaptureAddressAllocateInfo( MemoryOpaqueCaptureAddressAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryOpaqueCaptureAddressAllocateInfo( VkMemoryOpaqueCaptureAddressAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryOpaqueCaptureAddressAllocateInfo & operator=( VkMemoryOpaqueCaptureAddressAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryOpaqueCaptureAddressAllocateInfo const *>( &rhs ); + return *this; + } + + MemoryOpaqueCaptureAddressAllocateInfo & operator=( MemoryOpaqueCaptureAddressAllocateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( MemoryOpaqueCaptureAddressAllocateInfo ) ); + return *this; + } + + MemoryOpaqueCaptureAddressAllocateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryOpaqueCaptureAddressAllocateInfo & setOpaqueCaptureAddress( uint64_t opaqueCaptureAddress_ ) VULKAN_HPP_NOEXCEPT + { + opaqueCaptureAddress = opaqueCaptureAddress_; + return *this; + } + + + operator VkMemoryOpaqueCaptureAddressAllocateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkMemoryOpaqueCaptureAddressAllocateInfo*>( this ); + } + + operator VkMemoryOpaqueCaptureAddressAllocateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkMemoryOpaqueCaptureAddressAllocateInfo*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryOpaqueCaptureAddressAllocateInfo const& ) const = default; +#else + bool operator==( MemoryOpaqueCaptureAddressAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( opaqueCaptureAddress == rhs.opaqueCaptureAddress ); + } + + bool operator!=( MemoryOpaqueCaptureAddressAllocateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryOpaqueCaptureAddressAllocateInfo; + const void* pNext = {}; + uint64_t opaqueCaptureAddress = {}; + + }; + static_assert( sizeof( MemoryOpaqueCaptureAddressAllocateInfo ) == sizeof( VkMemoryOpaqueCaptureAddressAllocateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<MemoryOpaqueCaptureAddressAllocateInfo>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eMemoryOpaqueCaptureAddressAllocateInfo> + { + using Type = MemoryOpaqueCaptureAddressAllocateInfo; + }; + using MemoryOpaqueCaptureAddressAllocateInfoKHR = MemoryOpaqueCaptureAddressAllocateInfo; + + struct MemoryPriorityAllocateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMemoryPriorityAllocateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MemoryPriorityAllocateInfoEXT(float priority_ = {}) VULKAN_HPP_NOEXCEPT + : priority( priority_ ) + {} + + VULKAN_HPP_CONSTEXPR MemoryPriorityAllocateInfoEXT( MemoryPriorityAllocateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MemoryPriorityAllocateInfoEXT( VkMemoryPriorityAllocateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MemoryPriorityAllocateInfoEXT & operator=( VkMemoryPriorityAllocateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MemoryPriorityAllocateInfoEXT const *>( &rhs ); + return *this; + } + + MemoryPriorityAllocateInfoEXT & operator=( MemoryPriorityAllocateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( MemoryPriorityAllocateInfoEXT ) ); + return *this; + } + + MemoryPriorityAllocateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MemoryPriorityAllocateInfoEXT & setPriority( float priority_ ) VULKAN_HPP_NOEXCEPT + { + priority = priority_; + return *this; + } + + + operator VkMemoryPriorityAllocateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkMemoryPriorityAllocateInfoEXT*>( this ); + } + + operator VkMemoryPriorityAllocateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkMemoryPriorityAllocateInfoEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MemoryPriorityAllocateInfoEXT const& ) const = default; +#else + bool operator==( MemoryPriorityAllocateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( priority == rhs.priority ); + } + + bool operator!=( MemoryPriorityAllocateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMemoryPriorityAllocateInfoEXT; + const void* pNext = {}; + float priority = {}; + + }; + static_assert( sizeof( MemoryPriorityAllocateInfoEXT ) == sizeof( VkMemoryPriorityAllocateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<MemoryPriorityAllocateInfoEXT>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType<StructureType, StructureType::eMemoryPriorityAllocateInfoEXT> + { + using Type = MemoryPriorityAllocateInfoEXT; + }; + +#ifdef VK_USE_PLATFORM_METAL_EXT + struct MetalSurfaceCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eMetalSurfaceCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR MetalSurfaceCreateInfoEXT(VULKAN_HPP_NAMESPACE::MetalSurfaceCreateFlagsEXT flags_ = {}, const CAMetalLayer* pLayer_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), pLayer( pLayer_ ) + {} + + VULKAN_HPP_CONSTEXPR MetalSurfaceCreateInfoEXT( MetalSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + MetalSurfaceCreateInfoEXT( VkMetalSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + MetalSurfaceCreateInfoEXT & operator=( VkMetalSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT const *>( &rhs ); + return *this; + } + + MetalSurfaceCreateInfoEXT & operator=( MetalSurfaceCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( MetalSurfaceCreateInfoEXT ) ); + return *this; + } + + MetalSurfaceCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + MetalSurfaceCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::MetalSurfaceCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + MetalSurfaceCreateInfoEXT & setPLayer( const CAMetalLayer* pLayer_ ) VULKAN_HPP_NOEXCEPT + { + pLayer = pLayer_; + return *this; + } + + + operator VkMetalSurfaceCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkMetalSurfaceCreateInfoEXT*>( this ); + } + + operator VkMetalSurfaceCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkMetalSurfaceCreateInfoEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( MetalSurfaceCreateInfoEXT const& ) const = default; +#else + bool operator==( MetalSurfaceCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( pLayer == rhs.pLayer ); + } + + bool operator!=( MetalSurfaceCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eMetalSurfaceCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::MetalSurfaceCreateFlagsEXT flags = {}; + const CAMetalLayer* pLayer = {}; + + }; + static_assert( sizeof( MetalSurfaceCreateInfoEXT ) == sizeof( VkMetalSurfaceCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<MetalSurfaceCreateInfoEXT>::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = MetalSurfaceCreateInfoEXT; + }; +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + union PerformanceCounterResultKHR + { + PerformanceCounterResultKHR( VULKAN_HPP_NAMESPACE::PerformanceCounterResultKHR const& rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::PerformanceCounterResultKHR ) ); + } + + PerformanceCounterResultKHR( int32_t int32_ = {} ) + : int32( int32_ ) + {} + + PerformanceCounterResultKHR( int64_t int64_ ) + : int64( int64_ ) + {} + + PerformanceCounterResultKHR( uint32_t uint32_ ) + : uint32( uint32_ ) + {} + + PerformanceCounterResultKHR( uint64_t uint64_ ) + : uint64( uint64_ ) + {} + + PerformanceCounterResultKHR( float float32_ ) + : float32( float32_ ) + {} + + PerformanceCounterResultKHR( double float64_ ) + : float64( float64_ ) + {} + + PerformanceCounterResultKHR & setInt32( int32_t int32_ ) VULKAN_HPP_NOEXCEPT + { + int32 = int32_; + return *this; + } + + PerformanceCounterResultKHR & setInt64( int64_t int64_ ) VULKAN_HPP_NOEXCEPT + { + int64 = int64_; + return *this; + } + + PerformanceCounterResultKHR & setUint32( uint32_t uint32_ ) VULKAN_HPP_NOEXCEPT + { + uint32 = uint32_; + return *this; + } + + PerformanceCounterResultKHR & setUint64( uint64_t uint64_ ) VULKAN_HPP_NOEXCEPT + { + uint64 = uint64_; + return *this; + } + + PerformanceCounterResultKHR & setFloat32( float float32_ ) VULKAN_HPP_NOEXCEPT + { + float32 = float32_; + return *this; + } + + PerformanceCounterResultKHR & setFloat64( double float64_ ) VULKAN_HPP_NOEXCEPT + { + float64 = float64_; + return *this; + } + + VULKAN_HPP_NAMESPACE::PerformanceCounterResultKHR & operator=( VULKAN_HPP_NAMESPACE::PerformanceCounterResultKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast(this), &rhs, sizeof( VULKAN_HPP_NAMESPACE::PerformanceCounterResultKHR ) ); + return *this; + } + + operator VkPerformanceCounterResultKHR const&() const + { + return *reinterpret_cast(this); + } + + operator VkPerformanceCounterResultKHR &() + { + return *reinterpret_cast(this); + } + + int32_t int32; + int64_t int64; + uint32_t uint32; + uint64_t uint64; + float float32; + double float64; + }; + + struct PerformanceQuerySubmitInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePerformanceQuerySubmitInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PerformanceQuerySubmitInfoKHR(uint32_t counterPassIndex_ = {}) VULKAN_HPP_NOEXCEPT + : counterPassIndex( counterPassIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR PerformanceQuerySubmitInfoKHR( PerformanceQuerySubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PerformanceQuerySubmitInfoKHR( VkPerformanceQuerySubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PerformanceQuerySubmitInfoKHR & operator=( VkPerformanceQuerySubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PerformanceQuerySubmitInfoKHR & operator=( PerformanceQuerySubmitInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PerformanceQuerySubmitInfoKHR ) ); + return *this; + } + + PerformanceQuerySubmitInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PerformanceQuerySubmitInfoKHR & setCounterPassIndex( uint32_t counterPassIndex_ ) VULKAN_HPP_NOEXCEPT + { + 
counterPassIndex = counterPassIndex_; + return *this; + } + + + operator VkPerformanceQuerySubmitInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPerformanceQuerySubmitInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PerformanceQuerySubmitInfoKHR const& ) const = default; +#else + bool operator==( PerformanceQuerySubmitInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( counterPassIndex == rhs.counterPassIndex ); + } + + bool operator!=( PerformanceQuerySubmitInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePerformanceQuerySubmitInfoKHR; + const void* pNext = {}; + uint32_t counterPassIndex = {}; + + }; + static_assert( sizeof( PerformanceQuerySubmitInfoKHR ) == sizeof( VkPerformanceQuerySubmitInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PerformanceQuerySubmitInfoKHR; + }; + + struct PhysicalDevice16BitStorageFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevice16BitStorageFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevice16BitStorageFeatures(VULKAN_HPP_NAMESPACE::Bool32 storageBuffer16BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer16BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16_ = {}) VULKAN_HPP_NOEXCEPT + : storageBuffer16BitAccess( storageBuffer16BitAccess_ ), uniformAndStorageBuffer16BitAccess( uniformAndStorageBuffer16BitAccess_ ), storagePushConstant16( storagePushConstant16_ ), storageInputOutput16( storageInputOutput16_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevice16BitStorageFeatures( PhysicalDevice16BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevice16BitStorageFeatures( VkPhysicalDevice16BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevice16BitStorageFeatures & operator=( VkPhysicalDevice16BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevice16BitStorageFeatures & operator=( PhysicalDevice16BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevice16BitStorageFeatures ) ); + return *this; + } + + PhysicalDevice16BitStorageFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevice16BitStorageFeatures & setStorageBuffer16BitAccess( VULKAN_HPP_NAMESPACE::Bool32 storageBuffer16BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + storageBuffer16BitAccess = storageBuffer16BitAccess_; + return *this; + } + + PhysicalDevice16BitStorageFeatures & setUniformAndStorageBuffer16BitAccess( VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer16BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + uniformAndStorageBuffer16BitAccess = uniformAndStorageBuffer16BitAccess_; + return *this; + } + + PhysicalDevice16BitStorageFeatures & setStoragePushConstant16( 
VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16_ ) VULKAN_HPP_NOEXCEPT + { + storagePushConstant16 = storagePushConstant16_; + return *this; + } + + PhysicalDevice16BitStorageFeatures & setStorageInputOutput16( VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16_ ) VULKAN_HPP_NOEXCEPT + { + storageInputOutput16 = storageInputOutput16_; + return *this; + } + + + operator VkPhysicalDevice16BitStorageFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevice16BitStorageFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevice16BitStorageFeatures const& ) const = default; +#else + bool operator==( PhysicalDevice16BitStorageFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( storageBuffer16BitAccess == rhs.storageBuffer16BitAccess ) + && ( uniformAndStorageBuffer16BitAccess == rhs.uniformAndStorageBuffer16BitAccess ) + && ( storagePushConstant16 == rhs.storagePushConstant16 ) + && ( storageInputOutput16 == rhs.storageInputOutput16 ); + } + + bool operator!=( PhysicalDevice16BitStorageFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevice16BitStorageFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageBuffer16BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer16BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16 = {}; + + }; + static_assert( sizeof( PhysicalDevice16BitStorageFeatures ) == sizeof( VkPhysicalDevice16BitStorageFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevice16BitStorageFeatures; + }; + using PhysicalDevice16BitStorageFeaturesKHR = PhysicalDevice16BitStorageFeatures; + + struct PhysicalDevice4444FormatsFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevice4444FormatsFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevice4444FormatsFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 formatA4R4G4B4_ = {}, VULKAN_HPP_NAMESPACE::Bool32 formatA4B4G4R4_ = {}) VULKAN_HPP_NOEXCEPT + : formatA4R4G4B4( formatA4R4G4B4_ ), formatA4B4G4R4( formatA4B4G4R4_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevice4444FormatsFeaturesEXT( PhysicalDevice4444FormatsFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevice4444FormatsFeaturesEXT( VkPhysicalDevice4444FormatsFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevice4444FormatsFeaturesEXT & operator=( VkPhysicalDevice4444FormatsFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevice4444FormatsFeaturesEXT & operator=( PhysicalDevice4444FormatsFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevice4444FormatsFeaturesEXT ) ); + return *this; + } + + PhysicalDevice4444FormatsFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevice4444FormatsFeaturesEXT & setFormatA4R4G4B4( VULKAN_HPP_NAMESPACE::Bool32 formatA4R4G4B4_ ) VULKAN_HPP_NOEXCEPT + { + formatA4R4G4B4 = formatA4R4G4B4_; + return *this; + } + + PhysicalDevice4444FormatsFeaturesEXT & setFormatA4B4G4R4( VULKAN_HPP_NAMESPACE::Bool32 formatA4B4G4R4_ ) VULKAN_HPP_NOEXCEPT + { + formatA4B4G4R4 = formatA4B4G4R4_; + return *this; + } + + + operator VkPhysicalDevice4444FormatsFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevice4444FormatsFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevice4444FormatsFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDevice4444FormatsFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( formatA4R4G4B4 == rhs.formatA4R4G4B4 ) + && ( formatA4B4G4R4 == rhs.formatA4B4G4R4 ); + } + + bool operator!=( PhysicalDevice4444FormatsFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevice4444FormatsFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 formatA4R4G4B4 = {}; + VULKAN_HPP_NAMESPACE::Bool32 formatA4B4G4R4 = {}; + + }; + static_assert( sizeof( PhysicalDevice4444FormatsFeaturesEXT ) == sizeof( VkPhysicalDevice4444FormatsFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevice4444FormatsFeaturesEXT; + }; + + struct PhysicalDevice8BitStorageFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevice8BitStorageFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevice8BitStorageFeatures(VULKAN_HPP_NAMESPACE::Bool32 storageBuffer8BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8_ = {}) VULKAN_HPP_NOEXCEPT + : storageBuffer8BitAccess( storageBuffer8BitAccess_ ), uniformAndStorageBuffer8BitAccess( uniformAndStorageBuffer8BitAccess_ ), storagePushConstant8( storagePushConstant8_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevice8BitStorageFeatures( PhysicalDevice8BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevice8BitStorageFeatures( VkPhysicalDevice8BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevice8BitStorageFeatures & operator=( VkPhysicalDevice8BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevice8BitStorageFeatures & operator=( PhysicalDevice8BitStorageFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevice8BitStorageFeatures ) ); + return *this; + } + + PhysicalDevice8BitStorageFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevice8BitStorageFeatures & setStorageBuffer8BitAccess( VULKAN_HPP_NAMESPACE::Bool32 storageBuffer8BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + storageBuffer8BitAccess = storageBuffer8BitAccess_; + return *this; + } + + PhysicalDevice8BitStorageFeatures & setUniformAndStorageBuffer8BitAccess( VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + uniformAndStorageBuffer8BitAccess = uniformAndStorageBuffer8BitAccess_; + return *this; + } + + PhysicalDevice8BitStorageFeatures & setStoragePushConstant8( VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8_ ) VULKAN_HPP_NOEXCEPT + { + storagePushConstant8 = storagePushConstant8_; + return *this; + } + + + operator VkPhysicalDevice8BitStorageFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevice8BitStorageFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevice8BitStorageFeatures const& ) const = default; +#else + bool operator==( PhysicalDevice8BitStorageFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( storageBuffer8BitAccess == rhs.storageBuffer8BitAccess ) + && ( uniformAndStorageBuffer8BitAccess == rhs.uniformAndStorageBuffer8BitAccess ) + && ( storagePushConstant8 == rhs.storagePushConstant8 ); + } + + bool operator!=( PhysicalDevice8BitStorageFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevice8BitStorageFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageBuffer8BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess = {}; + 
VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8 = {}; + + }; + static_assert( sizeof( PhysicalDevice8BitStorageFeatures ) == sizeof( VkPhysicalDevice8BitStorageFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDevice8BitStorageFeatures; + }; + using PhysicalDevice8BitStorageFeaturesKHR = PhysicalDevice8BitStorageFeatures; + + struct PhysicalDeviceASTCDecodeFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceAstcDecodeFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceASTCDecodeFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 decodeModeSharedExponent_ = {}) VULKAN_HPP_NOEXCEPT + : decodeModeSharedExponent( decodeModeSharedExponent_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceASTCDecodeFeaturesEXT( PhysicalDeviceASTCDecodeFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceASTCDecodeFeaturesEXT( VkPhysicalDeviceASTCDecodeFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceASTCDecodeFeaturesEXT & operator=( VkPhysicalDeviceASTCDecodeFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceASTCDecodeFeaturesEXT & operator=( PhysicalDeviceASTCDecodeFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceASTCDecodeFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceASTCDecodeFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceASTCDecodeFeaturesEXT & setDecodeModeSharedExponent( VULKAN_HPP_NAMESPACE::Bool32 decodeModeSharedExponent_ ) VULKAN_HPP_NOEXCEPT + { + decodeModeSharedExponent = decodeModeSharedExponent_; + return *this; + } + + + operator VkPhysicalDeviceASTCDecodeFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceASTCDecodeFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceASTCDecodeFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceASTCDecodeFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( decodeModeSharedExponent == rhs.decodeModeSharedExponent ); + } + + bool operator!=( PhysicalDeviceASTCDecodeFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceAstcDecodeFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 decodeModeSharedExponent = {}; + + }; + static_assert( sizeof( PhysicalDeviceASTCDecodeFeaturesEXT ) == sizeof( VkPhysicalDeviceASTCDecodeFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceASTCDecodeFeaturesEXT; + }; + + struct PhysicalDeviceAccelerationStructureFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructureFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 accelerationStructure_ = {}, VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureIndirectBuild_ = {}, VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructure( accelerationStructure_ ), accelerationStructureCaptureReplay( accelerationStructureCaptureReplay_ ), accelerationStructureIndirectBuild( accelerationStructureIndirectBuild_ ), accelerationStructureHostCommands( accelerationStructureHostCommands_ ), descriptorBindingAccelerationStructureUpdateAfterBind( descriptorBindingAccelerationStructureUpdateAfterBind_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructureFeaturesKHR( PhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceAccelerationStructureFeaturesKHR( VkPhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceAccelerationStructureFeaturesKHR & operator=( VkPhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & operator=( PhysicalDeviceAccelerationStructureFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceAccelerationStructureFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructure( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructure_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructure = accelerationStructure_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructureCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCaptureReplay = accelerationStructureCaptureReplay_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructureIndirectBuild( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureIndirectBuild_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureIndirectBuild = accelerationStructureIndirectBuild_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setAccelerationStructureHostCommands( VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureHostCommands = accelerationStructureHostCommands_; + return *this; + } + + PhysicalDeviceAccelerationStructureFeaturesKHR & setDescriptorBindingAccelerationStructureUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + 
descriptorBindingAccelerationStructureUpdateAfterBind = descriptorBindingAccelerationStructureUpdateAfterBind_; + return *this; + } + + + operator VkPhysicalDeviceAccelerationStructureFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceAccelerationStructureFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceAccelerationStructureFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceAccelerationStructureFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructure == rhs.accelerationStructure ) + && ( accelerationStructureCaptureReplay == rhs.accelerationStructureCaptureReplay ) + && ( accelerationStructureIndirectBuild == rhs.accelerationStructureIndirectBuild ) + && ( accelerationStructureHostCommands == rhs.accelerationStructureHostCommands ) + && ( descriptorBindingAccelerationStructureUpdateAfterBind == rhs.descriptorBindingAccelerationStructureUpdateAfterBind ); + } + + bool operator!=( PhysicalDeviceAccelerationStructureFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceAccelerationStructureFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructure = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureIndirectBuild = {}; + VULKAN_HPP_NAMESPACE::Bool32 accelerationStructureHostCommands = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingAccelerationStructureUpdateAfterBind = {}; + + }; + static_assert( sizeof( PhysicalDeviceAccelerationStructureFeaturesKHR ) == sizeof( VkPhysicalDeviceAccelerationStructureFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceAccelerationStructureFeaturesKHR; + }; + + struct PhysicalDeviceAccelerationStructurePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructurePropertiesKHR(uint64_t maxGeometryCount_ = {}, uint64_t maxInstanceCount_ = {}, uint64_t maxPrimitiveCount_ = {}, uint32_t maxPerStageDescriptorAccelerationStructures_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures_ = {}, uint32_t maxDescriptorSetAccelerationStructures_ = {}, uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures_ = {}, uint32_t minAccelerationStructureScratchOffsetAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : maxGeometryCount( maxGeometryCount_ ), maxInstanceCount( maxInstanceCount_ ), maxPrimitiveCount( maxPrimitiveCount_ ), maxPerStageDescriptorAccelerationStructures( maxPerStageDescriptorAccelerationStructures_ ), maxPerStageDescriptorUpdateAfterBindAccelerationStructures( maxPerStageDescriptorUpdateAfterBindAccelerationStructures_ ), maxDescriptorSetAccelerationStructures( maxDescriptorSetAccelerationStructures_ ), maxDescriptorSetUpdateAfterBindAccelerationStructures( maxDescriptorSetUpdateAfterBindAccelerationStructures_ ), minAccelerationStructureScratchOffsetAlignment( minAccelerationStructureScratchOffsetAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceAccelerationStructurePropertiesKHR( PhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceAccelerationStructurePropertiesKHR( VkPhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceAccelerationStructurePropertiesKHR & operator=( VkPhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceAccelerationStructurePropertiesKHR & operator=( PhysicalDeviceAccelerationStructurePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceAccelerationStructurePropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceAccelerationStructurePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceAccelerationStructurePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceAccelerationStructurePropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceAccelerationStructurePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxGeometryCount == rhs.maxGeometryCount ) + && ( maxInstanceCount == rhs.maxInstanceCount ) + && ( maxPrimitiveCount == rhs.maxPrimitiveCount ) + && ( maxPerStageDescriptorAccelerationStructures == rhs.maxPerStageDescriptorAccelerationStructures ) + && ( maxPerStageDescriptorUpdateAfterBindAccelerationStructures == rhs.maxPerStageDescriptorUpdateAfterBindAccelerationStructures ) + && ( maxDescriptorSetAccelerationStructures == rhs.maxDescriptorSetAccelerationStructures ) + && ( 
maxDescriptorSetUpdateAfterBindAccelerationStructures == rhs.maxDescriptorSetUpdateAfterBindAccelerationStructures ) + && ( minAccelerationStructureScratchOffsetAlignment == rhs.minAccelerationStructureScratchOffsetAlignment ); + } + + bool operator!=( PhysicalDeviceAccelerationStructurePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceAccelerationStructurePropertiesKHR; + void* pNext = {}; + uint64_t maxGeometryCount = {}; + uint64_t maxInstanceCount = {}; + uint64_t maxPrimitiveCount = {}; + uint32_t maxPerStageDescriptorAccelerationStructures = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures = {}; + uint32_t maxDescriptorSetAccelerationStructures = {}; + uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures = {}; + uint32_t minAccelerationStructureScratchOffsetAlignment = {}; + + }; + static_assert( sizeof( PhysicalDeviceAccelerationStructurePropertiesKHR ) == sizeof( VkPhysicalDeviceAccelerationStructurePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceAccelerationStructurePropertiesKHR; + }; + + struct PhysicalDeviceBlendOperationAdvancedFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceBlendOperationAdvancedFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCoherentOperations_ = {}) VULKAN_HPP_NOEXCEPT + : advancedBlendCoherentOperations( advancedBlendCoherentOperations_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceBlendOperationAdvancedFeaturesEXT( PhysicalDeviceBlendOperationAdvancedFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT( VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT & operator=( VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT & operator=( PhysicalDeviceBlendOperationAdvancedFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceBlendOperationAdvancedFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceBlendOperationAdvancedFeaturesEXT & setAdvancedBlendCoherentOperations( VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCoherentOperations_ ) VULKAN_HPP_NOEXCEPT + { + advancedBlendCoherentOperations = advancedBlendCoherentOperations_; + return *this; + } + + + operator VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( 
PhysicalDeviceBlendOperationAdvancedFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceBlendOperationAdvancedFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( advancedBlendCoherentOperations == rhs.advancedBlendCoherentOperations ); + } + + bool operator!=( PhysicalDeviceBlendOperationAdvancedFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceBlendOperationAdvancedFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCoherentOperations = {}; + + }; + static_assert( sizeof( PhysicalDeviceBlendOperationAdvancedFeaturesEXT ) == sizeof( VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceBlendOperationAdvancedFeaturesEXT; + }; + + struct PhysicalDeviceBlendOperationAdvancedPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceBlendOperationAdvancedPropertiesEXT(uint32_t advancedBlendMaxColorAttachments_ = {}, VULKAN_HPP_NAMESPACE::Bool32 advancedBlendIndependentBlend_ = {}, VULKAN_HPP_NAMESPACE::Bool32 advancedBlendNonPremultipliedSrcColor_ = {}, VULKAN_HPP_NAMESPACE::Bool32 advancedBlendNonPremultipliedDstColor_ = {}, VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCorrelatedOverlap_ = {}, VULKAN_HPP_NAMESPACE::Bool32 advancedBlendAllOperations_ = {}) VULKAN_HPP_NOEXCEPT + : advancedBlendMaxColorAttachments( advancedBlendMaxColorAttachments_ ), advancedBlendIndependentBlend( advancedBlendIndependentBlend_ ), advancedBlendNonPremultipliedSrcColor( advancedBlendNonPremultipliedSrcColor_ ), advancedBlendNonPremultipliedDstColor( advancedBlendNonPremultipliedDstColor_ ), advancedBlendCorrelatedOverlap( advancedBlendCorrelatedOverlap_ ), advancedBlendAllOperations( advancedBlendAllOperations_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceBlendOperationAdvancedPropertiesEXT( PhysicalDeviceBlendOperationAdvancedPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceBlendOperationAdvancedPropertiesEXT( VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceBlendOperationAdvancedPropertiesEXT & operator=( VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceBlendOperationAdvancedPropertiesEXT & operator=( PhysicalDeviceBlendOperationAdvancedPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceBlendOperationAdvancedPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( 
PhysicalDeviceBlendOperationAdvancedPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceBlendOperationAdvancedPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( advancedBlendMaxColorAttachments == rhs.advancedBlendMaxColorAttachments ) + && ( advancedBlendIndependentBlend == rhs.advancedBlendIndependentBlend ) + && ( advancedBlendNonPremultipliedSrcColor == rhs.advancedBlendNonPremultipliedSrcColor ) + && ( advancedBlendNonPremultipliedDstColor == rhs.advancedBlendNonPremultipliedDstColor ) + && ( advancedBlendCorrelatedOverlap == rhs.advancedBlendCorrelatedOverlap ) + && ( advancedBlendAllOperations == rhs.advancedBlendAllOperations ); + } + + bool operator!=( PhysicalDeviceBlendOperationAdvancedPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceBlendOperationAdvancedPropertiesEXT; + void* pNext = {}; + uint32_t advancedBlendMaxColorAttachments = {}; + VULKAN_HPP_NAMESPACE::Bool32 advancedBlendIndependentBlend = {}; + VULKAN_HPP_NAMESPACE::Bool32 advancedBlendNonPremultipliedSrcColor = {}; + VULKAN_HPP_NAMESPACE::Bool32 advancedBlendNonPremultipliedDstColor = {}; + VULKAN_HPP_NAMESPACE::Bool32 advancedBlendCorrelatedOverlap = {}; + VULKAN_HPP_NAMESPACE::Bool32 advancedBlendAllOperations = {}; + + }; + static_assert( sizeof( PhysicalDeviceBlendOperationAdvancedPropertiesEXT ) == sizeof( VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceBlendOperationAdvancedPropertiesEXT; + }; + + struct PhysicalDeviceBufferDeviceAddressFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceBufferDeviceAddressFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceBufferDeviceAddressFeatures(VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ = {}) VULKAN_HPP_NOEXCEPT + : bufferDeviceAddress( bufferDeviceAddress_ ), bufferDeviceAddressCaptureReplay( bufferDeviceAddressCaptureReplay_ ), bufferDeviceAddressMultiDevice( bufferDeviceAddressMultiDevice_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceBufferDeviceAddressFeatures( PhysicalDeviceBufferDeviceAddressFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceBufferDeviceAddressFeatures( VkPhysicalDeviceBufferDeviceAddressFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceBufferDeviceAddressFeatures & operator=( VkPhysicalDeviceBufferDeviceAddressFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeatures & operator=( PhysicalDeviceBufferDeviceAddressFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceBufferDeviceAddressFeatures ) ); + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + 
PhysicalDeviceBufferDeviceAddressFeatures & setBufferDeviceAddress( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddress = bufferDeviceAddress_; + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeatures & setBufferDeviceAddressCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddressCaptureReplay = bufferDeviceAddressCaptureReplay_; + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeatures & setBufferDeviceAddressMultiDevice( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddressMultiDevice = bufferDeviceAddressMultiDevice_; + return *this; + } + + + operator VkPhysicalDeviceBufferDeviceAddressFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceBufferDeviceAddressFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceBufferDeviceAddressFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceBufferDeviceAddressFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( bufferDeviceAddress == rhs.bufferDeviceAddress ) + && ( bufferDeviceAddressCaptureReplay == rhs.bufferDeviceAddressCaptureReplay ) + && ( bufferDeviceAddressMultiDevice == rhs.bufferDeviceAddressMultiDevice ); + } + + bool operator!=( PhysicalDeviceBufferDeviceAddressFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceBufferDeviceAddressFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice = {}; + + }; + static_assert( sizeof( PhysicalDeviceBufferDeviceAddressFeatures ) == sizeof( VkPhysicalDeviceBufferDeviceAddressFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceBufferDeviceAddressFeatures; + }; + using PhysicalDeviceBufferDeviceAddressFeaturesKHR = PhysicalDeviceBufferDeviceAddressFeatures; + + struct PhysicalDeviceBufferDeviceAddressFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceBufferDeviceAddressFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceBufferDeviceAddressFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ = {}) VULKAN_HPP_NOEXCEPT + : bufferDeviceAddress( bufferDeviceAddress_ ), bufferDeviceAddressCaptureReplay( bufferDeviceAddressCaptureReplay_ ), bufferDeviceAddressMultiDevice( bufferDeviceAddressMultiDevice_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceBufferDeviceAddressFeaturesEXT( PhysicalDeviceBufferDeviceAddressFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceBufferDeviceAddressFeaturesEXT( VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceBufferDeviceAddressFeaturesEXT & operator=( VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeaturesEXT & operator=( PhysicalDeviceBufferDeviceAddressFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceBufferDeviceAddressFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeaturesEXT & setBufferDeviceAddress( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddress = bufferDeviceAddress_; + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeaturesEXT & setBufferDeviceAddressCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddressCaptureReplay = bufferDeviceAddressCaptureReplay_; + return *this; + } + + PhysicalDeviceBufferDeviceAddressFeaturesEXT & setBufferDeviceAddressMultiDevice( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddressMultiDevice = bufferDeviceAddressMultiDevice_; + return *this; + } + + + operator VkPhysicalDeviceBufferDeviceAddressFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceBufferDeviceAddressFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceBufferDeviceAddressFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceBufferDeviceAddressFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( bufferDeviceAddress == rhs.bufferDeviceAddress ) + && ( bufferDeviceAddressCaptureReplay == rhs.bufferDeviceAddressCaptureReplay ) + && ( bufferDeviceAddressMultiDevice == rhs.bufferDeviceAddressMultiDevice ); + } + + bool operator!=( PhysicalDeviceBufferDeviceAddressFeaturesEXT 
const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceBufferDeviceAddressFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice = {}; + + }; + static_assert( sizeof( PhysicalDeviceBufferDeviceAddressFeaturesEXT ) == sizeof( VkPhysicalDeviceBufferDeviceAddressFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceBufferDeviceAddressFeaturesEXT; + }; + using PhysicalDeviceBufferAddressFeaturesEXT = PhysicalDeviceBufferDeviceAddressFeaturesEXT; + + struct PhysicalDeviceCoherentMemoryFeaturesAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCoherentMemoryFeaturesAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceCoherentMemoryFeaturesAMD(VULKAN_HPP_NAMESPACE::Bool32 deviceCoherentMemory_ = {}) VULKAN_HPP_NOEXCEPT + : deviceCoherentMemory( deviceCoherentMemory_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceCoherentMemoryFeaturesAMD( PhysicalDeviceCoherentMemoryFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceCoherentMemoryFeaturesAMD( VkPhysicalDeviceCoherentMemoryFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceCoherentMemoryFeaturesAMD & operator=( VkPhysicalDeviceCoherentMemoryFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceCoherentMemoryFeaturesAMD & operator=( PhysicalDeviceCoherentMemoryFeaturesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceCoherentMemoryFeaturesAMD ) ); + return *this; + } + + PhysicalDeviceCoherentMemoryFeaturesAMD & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceCoherentMemoryFeaturesAMD & setDeviceCoherentMemory( VULKAN_HPP_NAMESPACE::Bool32 deviceCoherentMemory_ ) VULKAN_HPP_NOEXCEPT + { + deviceCoherentMemory = deviceCoherentMemory_; + return *this; + } + + + operator VkPhysicalDeviceCoherentMemoryFeaturesAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceCoherentMemoryFeaturesAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceCoherentMemoryFeaturesAMD const& ) const = default; +#else + bool operator==( PhysicalDeviceCoherentMemoryFeaturesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceCoherentMemory == rhs.deviceCoherentMemory ); + } + + bool operator!=( PhysicalDeviceCoherentMemoryFeaturesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCoherentMemoryFeaturesAMD; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 deviceCoherentMemory = {}; + + }; + static_assert( sizeof( 
PhysicalDeviceCoherentMemoryFeaturesAMD ) == sizeof( VkPhysicalDeviceCoherentMemoryFeaturesAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceCoherentMemoryFeaturesAMD; + }; + + struct PhysicalDeviceComputeShaderDerivativesFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceComputeShaderDerivativesFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads_ = {}, VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear_ = {}) VULKAN_HPP_NOEXCEPT + : computeDerivativeGroupQuads( computeDerivativeGroupQuads_ ), computeDerivativeGroupLinear( computeDerivativeGroupLinear_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceComputeShaderDerivativesFeaturesNV( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceComputeShaderDerivativesFeaturesNV( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceComputeShaderDerivativesFeaturesNV & operator=( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV & operator=( PhysicalDeviceComputeShaderDerivativesFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceComputeShaderDerivativesFeaturesNV ) ); + return *this; + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV & setComputeDerivativeGroupQuads( VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads_ ) VULKAN_HPP_NOEXCEPT + { + computeDerivativeGroupQuads = computeDerivativeGroupQuads_; + return *this; + } + + PhysicalDeviceComputeShaderDerivativesFeaturesNV & setComputeDerivativeGroupLinear( VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear_ ) VULKAN_HPP_NOEXCEPT + { + computeDerivativeGroupLinear = computeDerivativeGroupLinear_; + return *this; + } + + + operator VkPhysicalDeviceComputeShaderDerivativesFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceComputeShaderDerivativesFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceComputeShaderDerivativesFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceComputeShaderDerivativesFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( computeDerivativeGroupQuads == rhs.computeDerivativeGroupQuads ) + && ( computeDerivativeGroupLinear == rhs.computeDerivativeGroupLinear ); + } + + bool operator!=( PhysicalDeviceComputeShaderDerivativesFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceComputeShaderDerivativesFeaturesNV; + 
void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupQuads = {}; + VULKAN_HPP_NAMESPACE::Bool32 computeDerivativeGroupLinear = {}; + + }; + static_assert( sizeof( PhysicalDeviceComputeShaderDerivativesFeaturesNV ) == sizeof( VkPhysicalDeviceComputeShaderDerivativesFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceComputeShaderDerivativesFeaturesNV; + }; + + struct PhysicalDeviceConditionalRenderingFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceConditionalRenderingFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceConditionalRenderingFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 conditionalRendering_ = {}, VULKAN_HPP_NAMESPACE::Bool32 inheritedConditionalRendering_ = {}) VULKAN_HPP_NOEXCEPT + : conditionalRendering( conditionalRendering_ ), inheritedConditionalRendering( inheritedConditionalRendering_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceConditionalRenderingFeaturesEXT( PhysicalDeviceConditionalRenderingFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceConditionalRenderingFeaturesEXT( VkPhysicalDeviceConditionalRenderingFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceConditionalRenderingFeaturesEXT & operator=( VkPhysicalDeviceConditionalRenderingFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceConditionalRenderingFeaturesEXT & operator=( PhysicalDeviceConditionalRenderingFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceConditionalRenderingFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceConditionalRenderingFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceConditionalRenderingFeaturesEXT & setConditionalRendering( VULKAN_HPP_NAMESPACE::Bool32 conditionalRendering_ ) VULKAN_HPP_NOEXCEPT + { + conditionalRendering = conditionalRendering_; + return *this; + } + + PhysicalDeviceConditionalRenderingFeaturesEXT & setInheritedConditionalRendering( VULKAN_HPP_NAMESPACE::Bool32 inheritedConditionalRendering_ ) VULKAN_HPP_NOEXCEPT + { + inheritedConditionalRendering = inheritedConditionalRendering_; + return *this; + } + + + operator VkPhysicalDeviceConditionalRenderingFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceConditionalRenderingFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceConditionalRenderingFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceConditionalRenderingFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( conditionalRendering == rhs.conditionalRendering ) + && ( inheritedConditionalRendering == rhs.inheritedConditionalRendering ); + } + + bool operator!=( PhysicalDeviceConditionalRenderingFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const 
VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceConditionalRenderingFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 conditionalRendering = {}; + VULKAN_HPP_NAMESPACE::Bool32 inheritedConditionalRendering = {}; + + }; + static_assert( sizeof( PhysicalDeviceConditionalRenderingFeaturesEXT ) == sizeof( VkPhysicalDeviceConditionalRenderingFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceConditionalRenderingFeaturesEXT; + }; + + struct PhysicalDeviceConservativeRasterizationPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceConservativeRasterizationPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceConservativeRasterizationPropertiesEXT(float primitiveOverestimationSize_ = {}, float maxExtraPrimitiveOverestimationSize_ = {}, float extraPrimitiveOverestimationSizeGranularity_ = {}, VULKAN_HPP_NAMESPACE::Bool32 primitiveUnderestimation_ = {}, VULKAN_HPP_NAMESPACE::Bool32 conservativePointAndLineRasterization_ = {}, VULKAN_HPP_NAMESPACE::Bool32 degenerateTrianglesRasterized_ = {}, VULKAN_HPP_NAMESPACE::Bool32 degenerateLinesRasterized_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fullyCoveredFragmentShaderInputVariable_ = {}, VULKAN_HPP_NAMESPACE::Bool32 conservativeRasterizationPostDepthCoverage_ = {}) VULKAN_HPP_NOEXCEPT + : primitiveOverestimationSize( primitiveOverestimationSize_ ), maxExtraPrimitiveOverestimationSize( maxExtraPrimitiveOverestimationSize_ ), extraPrimitiveOverestimationSizeGranularity( extraPrimitiveOverestimationSizeGranularity_ ), primitiveUnderestimation( primitiveUnderestimation_ ), conservativePointAndLineRasterization( conservativePointAndLineRasterization_ ), degenerateTrianglesRasterized( degenerateTrianglesRasterized_ ), degenerateLinesRasterized( degenerateLinesRasterized_ ), fullyCoveredFragmentShaderInputVariable( fullyCoveredFragmentShaderInputVariable_ ), conservativeRasterizationPostDepthCoverage( conservativeRasterizationPostDepthCoverage_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceConservativeRasterizationPropertiesEXT( PhysicalDeviceConservativeRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceConservativeRasterizationPropertiesEXT( VkPhysicalDeviceConservativeRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceConservativeRasterizationPropertiesEXT & operator=( VkPhysicalDeviceConservativeRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceConservativeRasterizationPropertiesEXT & operator=( PhysicalDeviceConservativeRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceConservativeRasterizationPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceConservativeRasterizationPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceConservativeRasterizationPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( 
PhysicalDeviceConservativeRasterizationPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceConservativeRasterizationPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( primitiveOverestimationSize == rhs.primitiveOverestimationSize ) + && ( maxExtraPrimitiveOverestimationSize == rhs.maxExtraPrimitiveOverestimationSize ) + && ( extraPrimitiveOverestimationSizeGranularity == rhs.extraPrimitiveOverestimationSizeGranularity ) + && ( primitiveUnderestimation == rhs.primitiveUnderestimation ) + && ( conservativePointAndLineRasterization == rhs.conservativePointAndLineRasterization ) + && ( degenerateTrianglesRasterized == rhs.degenerateTrianglesRasterized ) + && ( degenerateLinesRasterized == rhs.degenerateLinesRasterized ) + && ( fullyCoveredFragmentShaderInputVariable == rhs.fullyCoveredFragmentShaderInputVariable ) + && ( conservativeRasterizationPostDepthCoverage == rhs.conservativeRasterizationPostDepthCoverage ); + } + + bool operator!=( PhysicalDeviceConservativeRasterizationPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceConservativeRasterizationPropertiesEXT; + void* pNext = {}; + float primitiveOverestimationSize = {}; + float maxExtraPrimitiveOverestimationSize = {}; + float extraPrimitiveOverestimationSizeGranularity = {}; + VULKAN_HPP_NAMESPACE::Bool32 primitiveUnderestimation = {}; + VULKAN_HPP_NAMESPACE::Bool32 conservativePointAndLineRasterization = {}; + VULKAN_HPP_NAMESPACE::Bool32 degenerateTrianglesRasterized = {}; + VULKAN_HPP_NAMESPACE::Bool32 degenerateLinesRasterized = {}; + VULKAN_HPP_NAMESPACE::Bool32 fullyCoveredFragmentShaderInputVariable = {}; + VULKAN_HPP_NAMESPACE::Bool32 conservativeRasterizationPostDepthCoverage = {}; + + }; + static_assert( sizeof( PhysicalDeviceConservativeRasterizationPropertiesEXT ) == sizeof( VkPhysicalDeviceConservativeRasterizationPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceConservativeRasterizationPropertiesEXT; + }; + + struct PhysicalDeviceCooperativeMatrixFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCooperativeMatrixFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrix_ = {}, VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrixRobustBufferAccess_ = {}) VULKAN_HPP_NOEXCEPT + : cooperativeMatrix( cooperativeMatrix_ ), cooperativeMatrixRobustBufferAccess( cooperativeMatrixRobustBufferAccess_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixFeaturesNV( PhysicalDeviceCooperativeMatrixFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceCooperativeMatrixFeaturesNV( VkPhysicalDeviceCooperativeMatrixFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceCooperativeMatrixFeaturesNV & operator=( VkPhysicalDeviceCooperativeMatrixFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceCooperativeMatrixFeaturesNV & operator=( PhysicalDeviceCooperativeMatrixFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceCooperativeMatrixFeaturesNV ) ); + return *this; + } + + PhysicalDeviceCooperativeMatrixFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceCooperativeMatrixFeaturesNV & setCooperativeMatrix( VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrix_ ) VULKAN_HPP_NOEXCEPT + { + cooperativeMatrix = cooperativeMatrix_; + return *this; + } + + PhysicalDeviceCooperativeMatrixFeaturesNV & setCooperativeMatrixRobustBufferAccess( VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrixRobustBufferAccess_ ) VULKAN_HPP_NOEXCEPT + { + cooperativeMatrixRobustBufferAccess = cooperativeMatrixRobustBufferAccess_; + return *this; + } + + + operator VkPhysicalDeviceCooperativeMatrixFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceCooperativeMatrixFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceCooperativeMatrixFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceCooperativeMatrixFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( cooperativeMatrix == rhs.cooperativeMatrix ) + && ( cooperativeMatrixRobustBufferAccess == rhs.cooperativeMatrixRobustBufferAccess ); + } + + bool operator!=( PhysicalDeviceCooperativeMatrixFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCooperativeMatrixFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrix = {}; + VULKAN_HPP_NAMESPACE::Bool32 cooperativeMatrixRobustBufferAccess = {}; + + }; + static_assert( sizeof( PhysicalDeviceCooperativeMatrixFeaturesNV ) == sizeof( VkPhysicalDeviceCooperativeMatrixFeaturesNV ), "struct and wrapper have different size!" 
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceCooperativeMatrixFeaturesNV; + }; + + struct PhysicalDeviceCooperativeMatrixPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCooperativeMatrixPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixPropertiesNV(VULKAN_HPP_NAMESPACE::ShaderStageFlags cooperativeMatrixSupportedStages_ = {}) VULKAN_HPP_NOEXCEPT + : cooperativeMatrixSupportedStages( cooperativeMatrixSupportedStages_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceCooperativeMatrixPropertiesNV( PhysicalDeviceCooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceCooperativeMatrixPropertiesNV( VkPhysicalDeviceCooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceCooperativeMatrixPropertiesNV & operator=( VkPhysicalDeviceCooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceCooperativeMatrixPropertiesNV & operator=( PhysicalDeviceCooperativeMatrixPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceCooperativeMatrixPropertiesNV ) ); + return *this; + } + + + operator VkPhysicalDeviceCooperativeMatrixPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceCooperativeMatrixPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceCooperativeMatrixPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceCooperativeMatrixPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( cooperativeMatrixSupportedStages == rhs.cooperativeMatrixSupportedStages ); + } + + bool operator!=( PhysicalDeviceCooperativeMatrixPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCooperativeMatrixPropertiesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlags cooperativeMatrixSupportedStages = {}; + + }; + static_assert( sizeof( PhysicalDeviceCooperativeMatrixPropertiesNV ) == sizeof( VkPhysicalDeviceCooperativeMatrixPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceCooperativeMatrixPropertiesNV>
+  {
+    using Type = PhysicalDeviceCooperativeMatrixPropertiesNV;
+  };
+
+  struct PhysicalDeviceCornerSampledImageFeaturesNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCornerSampledImageFeaturesNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceCornerSampledImageFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 cornerSampledImage_ = {}) VULKAN_HPP_NOEXCEPT
+    : cornerSampledImage( cornerSampledImage_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceCornerSampledImageFeaturesNV( PhysicalDeviceCornerSampledImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceCornerSampledImageFeaturesNV( VkPhysicalDeviceCornerSampledImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceCornerSampledImageFeaturesNV & operator=( VkPhysicalDeviceCornerSampledImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceCornerSampledImageFeaturesNV const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceCornerSampledImageFeaturesNV & operator=( PhysicalDeviceCornerSampledImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceCornerSampledImageFeaturesNV ) );
+      return *this;
+    }
+
+    PhysicalDeviceCornerSampledImageFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceCornerSampledImageFeaturesNV & setCornerSampledImage( VULKAN_HPP_NAMESPACE::Bool32 cornerSampledImage_ ) VULKAN_HPP_NOEXCEPT
+    {
+      cornerSampledImage = cornerSampledImage_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceCornerSampledImageFeaturesNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceCornerSampledImageFeaturesNV*>( this );
+    }
+
+    operator VkPhysicalDeviceCornerSampledImageFeaturesNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceCornerSampledImageFeaturesNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceCornerSampledImageFeaturesNV const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceCornerSampledImageFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( cornerSampledImage == rhs.cornerSampledImage );
+    }
+
+    bool operator!=( PhysicalDeviceCornerSampledImageFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCornerSampledImageFeaturesNV;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 cornerSampledImage = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceCornerSampledImageFeaturesNV ) == sizeof( VkPhysicalDeviceCornerSampledImageFeaturesNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceCornerSampledImageFeaturesNV>::value, "struct wrapper is not a standard layout!"
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceCornerSampledImageFeaturesNV>
+  {
+    using Type = PhysicalDeviceCornerSampledImageFeaturesNV;
+  };
+
+  struct PhysicalDeviceCoverageReductionModeFeaturesNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCoverageReductionModeFeaturesNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceCoverageReductionModeFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 coverageReductionMode_ = {}) VULKAN_HPP_NOEXCEPT
+    : coverageReductionMode( coverageReductionMode_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceCoverageReductionModeFeaturesNV( PhysicalDeviceCoverageReductionModeFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceCoverageReductionModeFeaturesNV( VkPhysicalDeviceCoverageReductionModeFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceCoverageReductionModeFeaturesNV & operator=( VkPhysicalDeviceCoverageReductionModeFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceCoverageReductionModeFeaturesNV const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceCoverageReductionModeFeaturesNV & operator=( PhysicalDeviceCoverageReductionModeFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceCoverageReductionModeFeaturesNV ) );
+      return *this;
+    }
+
+    PhysicalDeviceCoverageReductionModeFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceCoverageReductionModeFeaturesNV & setCoverageReductionMode( VULKAN_HPP_NAMESPACE::Bool32 coverageReductionMode_ ) VULKAN_HPP_NOEXCEPT
+    {
+      coverageReductionMode = coverageReductionMode_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceCoverageReductionModeFeaturesNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceCoverageReductionModeFeaturesNV*>( this );
+    }
+
+    operator VkPhysicalDeviceCoverageReductionModeFeaturesNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceCoverageReductionModeFeaturesNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceCoverageReductionModeFeaturesNV const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceCoverageReductionModeFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( coverageReductionMode == rhs.coverageReductionMode );
+    }
+
+    bool operator!=( PhysicalDeviceCoverageReductionModeFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCoverageReductionModeFeaturesNV;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 coverageReductionMode = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceCoverageReductionModeFeaturesNV ) == sizeof( VkPhysicalDeviceCoverageReductionModeFeaturesNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceCoverageReductionModeFeaturesNV>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceCoverageReductionModeFeaturesNV; + }; + + struct PhysicalDeviceCustomBorderColorFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCustomBorderColorFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceCustomBorderColorFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 customBorderColors_ = {}, VULKAN_HPP_NAMESPACE::Bool32 customBorderColorWithoutFormat_ = {}) VULKAN_HPP_NOEXCEPT + : customBorderColors( customBorderColors_ ), customBorderColorWithoutFormat( customBorderColorWithoutFormat_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceCustomBorderColorFeaturesEXT( PhysicalDeviceCustomBorderColorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceCustomBorderColorFeaturesEXT( VkPhysicalDeviceCustomBorderColorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceCustomBorderColorFeaturesEXT & operator=( VkPhysicalDeviceCustomBorderColorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceCustomBorderColorFeaturesEXT & operator=( PhysicalDeviceCustomBorderColorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceCustomBorderColorFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceCustomBorderColorFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceCustomBorderColorFeaturesEXT & setCustomBorderColors( VULKAN_HPP_NAMESPACE::Bool32 customBorderColors_ ) VULKAN_HPP_NOEXCEPT + { + customBorderColors = customBorderColors_; + return *this; + } + + PhysicalDeviceCustomBorderColorFeaturesEXT & setCustomBorderColorWithoutFormat( VULKAN_HPP_NAMESPACE::Bool32 customBorderColorWithoutFormat_ ) VULKAN_HPP_NOEXCEPT + { + customBorderColorWithoutFormat = customBorderColorWithoutFormat_; + return *this; + } + + + operator VkPhysicalDeviceCustomBorderColorFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceCustomBorderColorFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceCustomBorderColorFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceCustomBorderColorFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( customBorderColors == rhs.customBorderColors ) + && ( customBorderColorWithoutFormat == rhs.customBorderColorWithoutFormat ); + } + + bool operator!=( PhysicalDeviceCustomBorderColorFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCustomBorderColorFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 customBorderColors = {}; + VULKAN_HPP_NAMESPACE::Bool32 customBorderColorWithoutFormat = {}; + + }; + static_assert( sizeof( PhysicalDeviceCustomBorderColorFeaturesEXT ) == sizeof( VkPhysicalDeviceCustomBorderColorFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceCustomBorderColorFeaturesEXT>
+  {
+    using Type = PhysicalDeviceCustomBorderColorFeaturesEXT;
+  };
+
+  struct PhysicalDeviceCustomBorderColorPropertiesEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceCustomBorderColorPropertiesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceCustomBorderColorPropertiesEXT(uint32_t maxCustomBorderColorSamplers_ = {}) VULKAN_HPP_NOEXCEPT
+    : maxCustomBorderColorSamplers( maxCustomBorderColorSamplers_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceCustomBorderColorPropertiesEXT( PhysicalDeviceCustomBorderColorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceCustomBorderColorPropertiesEXT( VkPhysicalDeviceCustomBorderColorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceCustomBorderColorPropertiesEXT & operator=( VkPhysicalDeviceCustomBorderColorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceCustomBorderColorPropertiesEXT const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceCustomBorderColorPropertiesEXT & operator=( PhysicalDeviceCustomBorderColorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceCustomBorderColorPropertiesEXT ) );
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceCustomBorderColorPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceCustomBorderColorPropertiesEXT*>( this );
+    }
+
+    operator VkPhysicalDeviceCustomBorderColorPropertiesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceCustomBorderColorPropertiesEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceCustomBorderColorPropertiesEXT const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceCustomBorderColorPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( maxCustomBorderColorSamplers == rhs.maxCustomBorderColorSamplers );
+    }
+
+    bool operator!=( PhysicalDeviceCustomBorderColorPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceCustomBorderColorPropertiesEXT;
+    void* pNext = {};
+    uint32_t maxCustomBorderColorSamplers = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceCustomBorderColorPropertiesEXT ) == sizeof( VkPhysicalDeviceCustomBorderColorPropertiesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceCustomBorderColorPropertiesEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceCustomBorderColorPropertiesEXT; + }; + + struct PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocationImageAliasing_ = {}) VULKAN_HPP_NOEXCEPT + : dedicatedAllocationImageAliasing( dedicatedAllocationImageAliasing_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV( VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV & operator=( VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV & operator=( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV ) ); + return *this; + } + + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV & setDedicatedAllocationImageAliasing( VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocationImageAliasing_ ) VULKAN_HPP_NOEXCEPT + { + dedicatedAllocationImageAliasing = dedicatedAllocationImageAliasing_; + return *this; + } + + + operator VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( dedicatedAllocationImageAliasing == rhs.dedicatedAllocationImageAliasing ); + } + + bool operator!=( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 dedicatedAllocationImageAliasing = {}; + + }; + static_assert( sizeof( PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV ) == sizeof( VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + }; + + struct PhysicalDeviceDepthClipEnableFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDepthClipEnableFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthClipEnableFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable_ = {}) VULKAN_HPP_NOEXCEPT + : depthClipEnable( depthClipEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthClipEnableFeaturesEXT( PhysicalDeviceDepthClipEnableFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDepthClipEnableFeaturesEXT( VkPhysicalDeviceDepthClipEnableFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDepthClipEnableFeaturesEXT & operator=( VkPhysicalDeviceDepthClipEnableFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDepthClipEnableFeaturesEXT & operator=( PhysicalDeviceDepthClipEnableFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDepthClipEnableFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceDepthClipEnableFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDepthClipEnableFeaturesEXT & setDepthClipEnable( VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthClipEnable = depthClipEnable_; + return *this; + } + + + operator VkPhysicalDeviceDepthClipEnableFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDepthClipEnableFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDepthClipEnableFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceDepthClipEnableFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( depthClipEnable == rhs.depthClipEnable ); + } + + bool operator!=( PhysicalDeviceDepthClipEnableFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDepthClipEnableFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable = {}; + + }; + static_assert( sizeof( PhysicalDeviceDepthClipEnableFeaturesEXT ) == sizeof( VkPhysicalDeviceDepthClipEnableFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDepthClipEnableFeaturesEXT; + }; + + struct PhysicalDeviceDepthStencilResolveProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDepthStencilResolveProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthStencilResolveProperties(VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedDepthResolveModes_ = {}, VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedStencilResolveModes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 independentResolveNone_ = {}, VULKAN_HPP_NAMESPACE::Bool32 independentResolve_ = {}) VULKAN_HPP_NOEXCEPT + : supportedDepthResolveModes( supportedDepthResolveModes_ ), supportedStencilResolveModes( supportedStencilResolveModes_ ), independentResolveNone( independentResolveNone_ ), independentResolve( independentResolve_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDepthStencilResolveProperties( PhysicalDeviceDepthStencilResolveProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDepthStencilResolveProperties( VkPhysicalDeviceDepthStencilResolveProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDepthStencilResolveProperties & operator=( VkPhysicalDeviceDepthStencilResolveProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDepthStencilResolveProperties & operator=( PhysicalDeviceDepthStencilResolveProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDepthStencilResolveProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceDepthStencilResolveProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDepthStencilResolveProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDepthStencilResolveProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceDepthStencilResolveProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( supportedDepthResolveModes == rhs.supportedDepthResolveModes ) + && ( supportedStencilResolveModes == rhs.supportedStencilResolveModes ) + && ( independentResolveNone == rhs.independentResolveNone ) + && ( independentResolve == rhs.independentResolve ); + } + + bool operator!=( PhysicalDeviceDepthStencilResolveProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDepthStencilResolveProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedDepthResolveModes = {}; + VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedStencilResolveModes = {}; + VULKAN_HPP_NAMESPACE::Bool32 independentResolveNone = {}; + VULKAN_HPP_NAMESPACE::Bool32 independentResolve = {}; + + }; + static_assert( sizeof( PhysicalDeviceDepthStencilResolveProperties ) == sizeof( VkPhysicalDeviceDepthStencilResolveProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDepthStencilResolveProperties; + }; + using PhysicalDeviceDepthStencilResolvePropertiesKHR = PhysicalDeviceDepthStencilResolveProperties; + + struct PhysicalDeviceDescriptorIndexingFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDescriptorIndexingFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorIndexingFeatures(VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingSampledImageUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageImageUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUpdateUnusedWhilePending_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingPartiallyBound_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount_ = {}, VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray_ = {}) VULKAN_HPP_NOEXCEPT + : shaderInputAttachmentArrayDynamicIndexing( shaderInputAttachmentArrayDynamicIndexing_ ), shaderUniformTexelBufferArrayDynamicIndexing( shaderUniformTexelBufferArrayDynamicIndexing_ ), shaderStorageTexelBufferArrayDynamicIndexing( shaderStorageTexelBufferArrayDynamicIndexing_ ), shaderUniformBufferArrayNonUniformIndexing( shaderUniformBufferArrayNonUniformIndexing_ ), shaderSampledImageArrayNonUniformIndexing( shaderSampledImageArrayNonUniformIndexing_ ), shaderStorageBufferArrayNonUniformIndexing( shaderStorageBufferArrayNonUniformIndexing_ ), shaderStorageImageArrayNonUniformIndexing( shaderStorageImageArrayNonUniformIndexing_ ), shaderInputAttachmentArrayNonUniformIndexing( shaderInputAttachmentArrayNonUniformIndexing_ ), shaderUniformTexelBufferArrayNonUniformIndexing( shaderUniformTexelBufferArrayNonUniformIndexing_ ), shaderStorageTexelBufferArrayNonUniformIndexing( shaderStorageTexelBufferArrayNonUniformIndexing_ ), descriptorBindingUniformBufferUpdateAfterBind( descriptorBindingUniformBufferUpdateAfterBind_ ), descriptorBindingSampledImageUpdateAfterBind( descriptorBindingSampledImageUpdateAfterBind_ ), descriptorBindingStorageImageUpdateAfterBind( descriptorBindingStorageImageUpdateAfterBind_ ), descriptorBindingStorageBufferUpdateAfterBind( descriptorBindingStorageBufferUpdateAfterBind_ ), descriptorBindingUniformTexelBufferUpdateAfterBind( 
descriptorBindingUniformTexelBufferUpdateAfterBind_ ), descriptorBindingStorageTexelBufferUpdateAfterBind( descriptorBindingStorageTexelBufferUpdateAfterBind_ ), descriptorBindingUpdateUnusedWhilePending( descriptorBindingUpdateUnusedWhilePending_ ), descriptorBindingPartiallyBound( descriptorBindingPartiallyBound_ ), descriptorBindingVariableDescriptorCount( descriptorBindingVariableDescriptorCount_ ), runtimeDescriptorArray( runtimeDescriptorArray_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorIndexingFeatures( PhysicalDeviceDescriptorIndexingFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDescriptorIndexingFeatures( VkPhysicalDeviceDescriptorIndexingFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDescriptorIndexingFeatures & operator=( VkPhysicalDeviceDescriptorIndexingFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & operator=( PhysicalDeviceDescriptorIndexingFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDescriptorIndexingFeatures ) ); + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderInputAttachmentArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderInputAttachmentArrayDynamicIndexing = shaderInputAttachmentArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderUniformTexelBufferArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformTexelBufferArrayDynamicIndexing = shaderUniformTexelBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderStorageTexelBufferArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageTexelBufferArrayDynamicIndexing = shaderStorageTexelBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderUniformBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformBufferArrayNonUniformIndexing = shaderUniformBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderSampledImageArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderSampledImageArrayNonUniformIndexing = shaderSampledImageArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderStorageBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageBufferArrayNonUniformIndexing = shaderStorageBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderStorageImageArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageArrayNonUniformIndexing = shaderStorageImageArrayNonUniformIndexing_; + return *this; + } + + 
PhysicalDeviceDescriptorIndexingFeatures & setShaderInputAttachmentArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderInputAttachmentArrayNonUniformIndexing = shaderInputAttachmentArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderUniformTexelBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformTexelBufferArrayNonUniformIndexing = shaderUniformTexelBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setShaderStorageTexelBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageTexelBufferArrayNonUniformIndexing = shaderStorageTexelBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingUniformBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingUniformBufferUpdateAfterBind = descriptorBindingUniformBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingSampledImageUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingSampledImageUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingSampledImageUpdateAfterBind = descriptorBindingSampledImageUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingStorageImageUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageImageUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingStorageImageUpdateAfterBind = descriptorBindingStorageImageUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingStorageBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingStorageBufferUpdateAfterBind = descriptorBindingStorageBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingUniformTexelBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingUniformTexelBufferUpdateAfterBind = descriptorBindingUniformTexelBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingStorageTexelBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingStorageTexelBufferUpdateAfterBind = descriptorBindingStorageTexelBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingUpdateUnusedWhilePending( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUpdateUnusedWhilePending_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingUpdateUnusedWhilePending = descriptorBindingUpdateUnusedWhilePending_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingPartiallyBound( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingPartiallyBound_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingPartiallyBound = descriptorBindingPartiallyBound_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setDescriptorBindingVariableDescriptorCount( 
VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingVariableDescriptorCount = descriptorBindingVariableDescriptorCount_; + return *this; + } + + PhysicalDeviceDescriptorIndexingFeatures & setRuntimeDescriptorArray( VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray_ ) VULKAN_HPP_NOEXCEPT + { + runtimeDescriptorArray = runtimeDescriptorArray_; + return *this; + } + + + operator VkPhysicalDeviceDescriptorIndexingFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDescriptorIndexingFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDescriptorIndexingFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceDescriptorIndexingFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderInputAttachmentArrayDynamicIndexing == rhs.shaderInputAttachmentArrayDynamicIndexing ) + && ( shaderUniformTexelBufferArrayDynamicIndexing == rhs.shaderUniformTexelBufferArrayDynamicIndexing ) + && ( shaderStorageTexelBufferArrayDynamicIndexing == rhs.shaderStorageTexelBufferArrayDynamicIndexing ) + && ( shaderUniformBufferArrayNonUniformIndexing == rhs.shaderUniformBufferArrayNonUniformIndexing ) + && ( shaderSampledImageArrayNonUniformIndexing == rhs.shaderSampledImageArrayNonUniformIndexing ) + && ( shaderStorageBufferArrayNonUniformIndexing == rhs.shaderStorageBufferArrayNonUniformIndexing ) + && ( shaderStorageImageArrayNonUniformIndexing == rhs.shaderStorageImageArrayNonUniformIndexing ) + && ( shaderInputAttachmentArrayNonUniformIndexing == rhs.shaderInputAttachmentArrayNonUniformIndexing ) + && ( shaderUniformTexelBufferArrayNonUniformIndexing == rhs.shaderUniformTexelBufferArrayNonUniformIndexing ) + && ( shaderStorageTexelBufferArrayNonUniformIndexing == rhs.shaderStorageTexelBufferArrayNonUniformIndexing ) + && ( descriptorBindingUniformBufferUpdateAfterBind == rhs.descriptorBindingUniformBufferUpdateAfterBind ) + && ( descriptorBindingSampledImageUpdateAfterBind == rhs.descriptorBindingSampledImageUpdateAfterBind ) + && ( descriptorBindingStorageImageUpdateAfterBind == rhs.descriptorBindingStorageImageUpdateAfterBind ) + && ( descriptorBindingStorageBufferUpdateAfterBind == rhs.descriptorBindingStorageBufferUpdateAfterBind ) + && ( descriptorBindingUniformTexelBufferUpdateAfterBind == rhs.descriptorBindingUniformTexelBufferUpdateAfterBind ) + && ( descriptorBindingStorageTexelBufferUpdateAfterBind == rhs.descriptorBindingStorageTexelBufferUpdateAfterBind ) + && ( descriptorBindingUpdateUnusedWhilePending == rhs.descriptorBindingUpdateUnusedWhilePending ) + && ( descriptorBindingPartiallyBound == rhs.descriptorBindingPartiallyBound ) + && ( descriptorBindingVariableDescriptorCount == rhs.descriptorBindingVariableDescriptorCount ) + && ( runtimeDescriptorArray == rhs.runtimeDescriptorArray ); + } + + bool operator!=( PhysicalDeviceDescriptorIndexingFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDescriptorIndexingFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 
shaderStorageTexelBufferArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingSampledImageUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageImageUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUpdateUnusedWhilePending = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingPartiallyBound = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount = {}; + VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray = {}; + + }; + static_assert( sizeof( PhysicalDeviceDescriptorIndexingFeatures ) == sizeof( VkPhysicalDeviceDescriptorIndexingFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceDescriptorIndexingFeatures; + }; + using PhysicalDeviceDescriptorIndexingFeaturesEXT = PhysicalDeviceDescriptorIndexingFeatures; + + struct PhysicalDeviceDescriptorIndexingProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDescriptorIndexingProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorIndexingProperties(uint32_t maxUpdateAfterBindDescriptorsInAllPools_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccessUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 quadDivergentImplicitLod_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindSamplers_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments_ = {}, uint32_t maxPerStageUpdateAfterBindResources_ = {}, uint32_t maxDescriptorSetUpdateAfterBindSamplers_ = {}, uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers_ = {}, uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ = {}, uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers_ = {}, uint32_t 
maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ = {}, uint32_t maxDescriptorSetUpdateAfterBindSampledImages_ = {}, uint32_t maxDescriptorSetUpdateAfterBindStorageImages_ = {}, uint32_t maxDescriptorSetUpdateAfterBindInputAttachments_ = {}) VULKAN_HPP_NOEXCEPT + : maxUpdateAfterBindDescriptorsInAllPools( maxUpdateAfterBindDescriptorsInAllPools_ ), shaderUniformBufferArrayNonUniformIndexingNative( shaderUniformBufferArrayNonUniformIndexingNative_ ), shaderSampledImageArrayNonUniformIndexingNative( shaderSampledImageArrayNonUniformIndexingNative_ ), shaderStorageBufferArrayNonUniformIndexingNative( shaderStorageBufferArrayNonUniformIndexingNative_ ), shaderStorageImageArrayNonUniformIndexingNative( shaderStorageImageArrayNonUniformIndexingNative_ ), shaderInputAttachmentArrayNonUniformIndexingNative( shaderInputAttachmentArrayNonUniformIndexingNative_ ), robustBufferAccessUpdateAfterBind( robustBufferAccessUpdateAfterBind_ ), quadDivergentImplicitLod( quadDivergentImplicitLod_ ), maxPerStageDescriptorUpdateAfterBindSamplers( maxPerStageDescriptorUpdateAfterBindSamplers_ ), maxPerStageDescriptorUpdateAfterBindUniformBuffers( maxPerStageDescriptorUpdateAfterBindUniformBuffers_ ), maxPerStageDescriptorUpdateAfterBindStorageBuffers( maxPerStageDescriptorUpdateAfterBindStorageBuffers_ ), maxPerStageDescriptorUpdateAfterBindSampledImages( maxPerStageDescriptorUpdateAfterBindSampledImages_ ), maxPerStageDescriptorUpdateAfterBindStorageImages( maxPerStageDescriptorUpdateAfterBindStorageImages_ ), maxPerStageDescriptorUpdateAfterBindInputAttachments( maxPerStageDescriptorUpdateAfterBindInputAttachments_ ), maxPerStageUpdateAfterBindResources( maxPerStageUpdateAfterBindResources_ ), maxDescriptorSetUpdateAfterBindSamplers( maxDescriptorSetUpdateAfterBindSamplers_ ), maxDescriptorSetUpdateAfterBindUniformBuffers( maxDescriptorSetUpdateAfterBindUniformBuffers_ ), maxDescriptorSetUpdateAfterBindUniformBuffersDynamic( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ ), maxDescriptorSetUpdateAfterBindStorageBuffers( maxDescriptorSetUpdateAfterBindStorageBuffers_ ), maxDescriptorSetUpdateAfterBindStorageBuffersDynamic( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ ), maxDescriptorSetUpdateAfterBindSampledImages( maxDescriptorSetUpdateAfterBindSampledImages_ ), maxDescriptorSetUpdateAfterBindStorageImages( maxDescriptorSetUpdateAfterBindStorageImages_ ), maxDescriptorSetUpdateAfterBindInputAttachments( maxDescriptorSetUpdateAfterBindInputAttachments_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDescriptorIndexingProperties( PhysicalDeviceDescriptorIndexingProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDescriptorIndexingProperties( VkPhysicalDeviceDescriptorIndexingProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDescriptorIndexingProperties & operator=( VkPhysicalDeviceDescriptorIndexingProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDescriptorIndexingProperties & operator=( PhysicalDeviceDescriptorIndexingProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDescriptorIndexingProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceDescriptorIndexingProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDescriptorIndexingProperties &() VULKAN_HPP_NOEXCEPT + { + return 
*reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDescriptorIndexingProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceDescriptorIndexingProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxUpdateAfterBindDescriptorsInAllPools == rhs.maxUpdateAfterBindDescriptorsInAllPools ) + && ( shaderUniformBufferArrayNonUniformIndexingNative == rhs.shaderUniformBufferArrayNonUniformIndexingNative ) + && ( shaderSampledImageArrayNonUniformIndexingNative == rhs.shaderSampledImageArrayNonUniformIndexingNative ) + && ( shaderStorageBufferArrayNonUniformIndexingNative == rhs.shaderStorageBufferArrayNonUniformIndexingNative ) + && ( shaderStorageImageArrayNonUniformIndexingNative == rhs.shaderStorageImageArrayNonUniformIndexingNative ) + && ( shaderInputAttachmentArrayNonUniformIndexingNative == rhs.shaderInputAttachmentArrayNonUniformIndexingNative ) + && ( robustBufferAccessUpdateAfterBind == rhs.robustBufferAccessUpdateAfterBind ) + && ( quadDivergentImplicitLod == rhs.quadDivergentImplicitLod ) + && ( maxPerStageDescriptorUpdateAfterBindSamplers == rhs.maxPerStageDescriptorUpdateAfterBindSamplers ) + && ( maxPerStageDescriptorUpdateAfterBindUniformBuffers == rhs.maxPerStageDescriptorUpdateAfterBindUniformBuffers ) + && ( maxPerStageDescriptorUpdateAfterBindStorageBuffers == rhs.maxPerStageDescriptorUpdateAfterBindStorageBuffers ) + && ( maxPerStageDescriptorUpdateAfterBindSampledImages == rhs.maxPerStageDescriptorUpdateAfterBindSampledImages ) + && ( maxPerStageDescriptorUpdateAfterBindStorageImages == rhs.maxPerStageDescriptorUpdateAfterBindStorageImages ) + && ( maxPerStageDescriptorUpdateAfterBindInputAttachments == rhs.maxPerStageDescriptorUpdateAfterBindInputAttachments ) + && ( maxPerStageUpdateAfterBindResources == rhs.maxPerStageUpdateAfterBindResources ) + && ( maxDescriptorSetUpdateAfterBindSamplers == rhs.maxDescriptorSetUpdateAfterBindSamplers ) + && ( maxDescriptorSetUpdateAfterBindUniformBuffers == rhs.maxDescriptorSetUpdateAfterBindUniformBuffers ) + && ( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic ) + && ( maxDescriptorSetUpdateAfterBindStorageBuffers == rhs.maxDescriptorSetUpdateAfterBindStorageBuffers ) + && ( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic ) + && ( maxDescriptorSetUpdateAfterBindSampledImages == rhs.maxDescriptorSetUpdateAfterBindSampledImages ) + && ( maxDescriptorSetUpdateAfterBindStorageImages == rhs.maxDescriptorSetUpdateAfterBindStorageImages ) + && ( maxDescriptorSetUpdateAfterBindInputAttachments == rhs.maxDescriptorSetUpdateAfterBindInputAttachments ); + } + + bool operator!=( PhysicalDeviceDescriptorIndexingProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDescriptorIndexingProperties; + void* pNext = {}; + uint32_t maxUpdateAfterBindDescriptorsInAllPools = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 
shaderInputAttachmentArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccessUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 quadDivergentImplicitLod = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments = {}; + uint32_t maxPerStageUpdateAfterBindResources = {}; + uint32_t maxDescriptorSetUpdateAfterBindSamplers = {}; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers = {}; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = {}; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers = {}; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = {}; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages = {}; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages = {}; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments = {}; + + }; + static_assert( sizeof( PhysicalDeviceDescriptorIndexingProperties ) == sizeof( VkPhysicalDeviceDescriptorIndexingProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceDescriptorIndexingProperties; + }; + using PhysicalDeviceDescriptorIndexingPropertiesEXT = PhysicalDeviceDescriptorIndexingProperties; + + struct PhysicalDeviceDeviceGeneratedCommandsFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDeviceGeneratedCommandsFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceGeneratedCommandsFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 deviceGeneratedCommands_ = {}) VULKAN_HPP_NOEXCEPT + : deviceGeneratedCommands( deviceGeneratedCommands_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceGeneratedCommandsFeaturesNV( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDeviceGeneratedCommandsFeaturesNV( VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDeviceGeneratedCommandsFeaturesNV & operator=( VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDeviceGeneratedCommandsFeaturesNV & operator=( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV ) ); + return *this; + } + + PhysicalDeviceDeviceGeneratedCommandsFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDeviceGeneratedCommandsFeaturesNV & setDeviceGeneratedCommands( VULKAN_HPP_NAMESPACE::Bool32 deviceGeneratedCommands_ ) VULKAN_HPP_NOEXCEPT + { + deviceGeneratedCommands = deviceGeneratedCommands_; + return *this; + } + + + operator VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
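// --- Editor's illustrative sketch (not part of the vendored vulkan.hpp or of this patch) ---
// PhysicalDeviceDescriptorIndexingProperties above is an output struct: it is filled by
// chaining it behind PhysicalDeviceProperties2. A minimal sketch reading one of the
// update-after-bind limits; the helper name and `gpu` are hypothetical.
inline uint32_t exampleMaxUpdateAfterBindSampledImages( vk::PhysicalDevice gpu )
{
  auto chain = gpu.getProperties2<vk::PhysicalDeviceProperties2,
                                  vk::PhysicalDeviceDescriptorIndexingProperties>();
  // Read the driver-reported limit for update-after-bind sampled images per descriptor set.
  return chain.get<vk::PhysicalDeviceDescriptorIndexingProperties>()
              .maxDescriptorSetUpdateAfterBindSampledImages;
}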
VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceGeneratedCommands == rhs.deviceGeneratedCommands ); + } + + bool operator!=( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDeviceGeneratedCommandsFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 deviceGeneratedCommands = {}; + + }; + static_assert( sizeof( PhysicalDeviceDeviceGeneratedCommandsFeaturesNV ) == sizeof( VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceDeviceGeneratedCommandsFeaturesNV; + }; + + struct PhysicalDeviceDeviceGeneratedCommandsPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDeviceGeneratedCommandsPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceGeneratedCommandsPropertiesNV(uint32_t maxGraphicsShaderGroupCount_ = {}, uint32_t maxIndirectSequenceCount_ = {}, uint32_t maxIndirectCommandsTokenCount_ = {}, uint32_t maxIndirectCommandsStreamCount_ = {}, uint32_t maxIndirectCommandsTokenOffset_ = {}, uint32_t maxIndirectCommandsStreamStride_ = {}, uint32_t minSequencesCountBufferOffsetAlignment_ = {}, uint32_t minSequencesIndexBufferOffsetAlignment_ = {}, uint32_t minIndirectCommandsBufferOffsetAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : maxGraphicsShaderGroupCount( maxGraphicsShaderGroupCount_ ), maxIndirectSequenceCount( maxIndirectSequenceCount_ ), maxIndirectCommandsTokenCount( maxIndirectCommandsTokenCount_ ), maxIndirectCommandsStreamCount( maxIndirectCommandsStreamCount_ ), maxIndirectCommandsTokenOffset( maxIndirectCommandsTokenOffset_ ), maxIndirectCommandsStreamStride( maxIndirectCommandsStreamStride_ ), minSequencesCountBufferOffsetAlignment( minSequencesCountBufferOffsetAlignment_ ), minSequencesIndexBufferOffsetAlignment( minSequencesIndexBufferOffsetAlignment_ ), minIndirectCommandsBufferOffsetAlignment( minIndirectCommandsBufferOffsetAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceGeneratedCommandsPropertiesNV( PhysicalDeviceDeviceGeneratedCommandsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDeviceGeneratedCommandsPropertiesNV( VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDeviceGeneratedCommandsPropertiesNV & operator=( VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDeviceGeneratedCommandsPropertiesNV & operator=( PhysicalDeviceDeviceGeneratedCommandsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( 
PhysicalDeviceDeviceGeneratedCommandsPropertiesNV ) ); + return *this; + } + + + operator VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDeviceGeneratedCommandsPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceDeviceGeneratedCommandsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxGraphicsShaderGroupCount == rhs.maxGraphicsShaderGroupCount ) + && ( maxIndirectSequenceCount == rhs.maxIndirectSequenceCount ) + && ( maxIndirectCommandsTokenCount == rhs.maxIndirectCommandsTokenCount ) + && ( maxIndirectCommandsStreamCount == rhs.maxIndirectCommandsStreamCount ) + && ( maxIndirectCommandsTokenOffset == rhs.maxIndirectCommandsTokenOffset ) + && ( maxIndirectCommandsStreamStride == rhs.maxIndirectCommandsStreamStride ) + && ( minSequencesCountBufferOffsetAlignment == rhs.minSequencesCountBufferOffsetAlignment ) + && ( minSequencesIndexBufferOffsetAlignment == rhs.minSequencesIndexBufferOffsetAlignment ) + && ( minIndirectCommandsBufferOffsetAlignment == rhs.minIndirectCommandsBufferOffsetAlignment ); + } + + bool operator!=( PhysicalDeviceDeviceGeneratedCommandsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDeviceGeneratedCommandsPropertiesNV; + void* pNext = {}; + uint32_t maxGraphicsShaderGroupCount = {}; + uint32_t maxIndirectSequenceCount = {}; + uint32_t maxIndirectCommandsTokenCount = {}; + uint32_t maxIndirectCommandsStreamCount = {}; + uint32_t maxIndirectCommandsTokenOffset = {}; + uint32_t maxIndirectCommandsStreamStride = {}; + uint32_t minSequencesCountBufferOffsetAlignment = {}; + uint32_t minSequencesIndexBufferOffsetAlignment = {}; + uint32_t minIndirectCommandsBufferOffsetAlignment = {}; + + }; + static_assert( sizeof( PhysicalDeviceDeviceGeneratedCommandsPropertiesNV ) == sizeof( VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceDeviceGeneratedCommandsPropertiesNV>
+  {
+    using Type = PhysicalDeviceDeviceGeneratedCommandsPropertiesNV;
+  };
+
+  struct PhysicalDeviceDeviceMemoryReportFeaturesEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceMemoryReportFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport_ = {}) VULKAN_HPP_NOEXCEPT
+    : deviceMemoryReport( deviceMemoryReport_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceDeviceMemoryReportFeaturesEXT( PhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceDeviceMemoryReportFeaturesEXT( VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceDeviceMemoryReportFeaturesEXT & operator=( VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceDeviceMemoryReportFeaturesEXT const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceDeviceMemoryReportFeaturesEXT & operator=( PhysicalDeviceDeviceMemoryReportFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceDeviceMemoryReportFeaturesEXT ) );
+      return *this;
+    }
+
+    PhysicalDeviceDeviceMemoryReportFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceDeviceMemoryReportFeaturesEXT & setDeviceMemoryReport( VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport_ ) VULKAN_HPP_NOEXCEPT
+    {
+      deviceMemoryReport = deviceMemoryReport_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceDeviceMemoryReportFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>( this );
+    }
+
+    operator VkPhysicalDeviceDeviceMemoryReportFeaturesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceDeviceMemoryReportFeaturesEXT const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceDeviceMemoryReportFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( deviceMemoryReport == rhs.deviceMemoryReport );
+    }
+
+    bool operator!=( PhysicalDeviceDeviceMemoryReportFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDeviceMemoryReportFeaturesEXT;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 deviceMemoryReport = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceDeviceMemoryReportFeaturesEXT ) == sizeof( VkPhysicalDeviceDeviceMemoryReportFeaturesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceDeviceMemoryReportFeaturesEXT>::value, "struct wrapper is not a standard layout!"
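// --- Editor's illustrative sketch (not part of the vendored vulkan.hpp or of this patch) ---
// Feature structs such as PhysicalDeviceDeviceMemoryReportFeaturesEXT above are filled by
// hooking them into a PhysicalDeviceFeatures2 pNext chain; this is the manual wiring that
// vk::StructureChain automates. The helper name and `gpu` are hypothetical.
inline bool exampleSupportsDeviceMemoryReport( vk::PhysicalDevice gpu )
{
  vk::PhysicalDeviceDeviceMemoryReportFeaturesEXT memoryReport;  // sType is preset by the wrapper
  vk::PhysicalDeviceFeatures2 features2;
  features2.pNext = &memoryReport;   // chain the extension struct behind the core struct
  gpu.getFeatures2( &features2 );    // one call fills both structs
  return memoryReport.deviceMemoryReport == VK_TRUE;
}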
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDeviceMemoryReportFeaturesEXT; + }; + + struct PhysicalDeviceDiagnosticsConfigFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDiagnosticsConfigFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDiagnosticsConfigFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 diagnosticsConfig_ = {}) VULKAN_HPP_NOEXCEPT + : diagnosticsConfig( diagnosticsConfig_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDiagnosticsConfigFeaturesNV( PhysicalDeviceDiagnosticsConfigFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDiagnosticsConfigFeaturesNV( VkPhysicalDeviceDiagnosticsConfigFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDiagnosticsConfigFeaturesNV & operator=( VkPhysicalDeviceDiagnosticsConfigFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDiagnosticsConfigFeaturesNV & operator=( PhysicalDeviceDiagnosticsConfigFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDiagnosticsConfigFeaturesNV ) ); + return *this; + } + + PhysicalDeviceDiagnosticsConfigFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceDiagnosticsConfigFeaturesNV & setDiagnosticsConfig( VULKAN_HPP_NAMESPACE::Bool32 diagnosticsConfig_ ) VULKAN_HPP_NOEXCEPT + { + diagnosticsConfig = diagnosticsConfig_; + return *this; + } + + + operator VkPhysicalDeviceDiagnosticsConfigFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDiagnosticsConfigFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDiagnosticsConfigFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceDiagnosticsConfigFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( diagnosticsConfig == rhs.diagnosticsConfig ); + } + + bool operator!=( PhysicalDeviceDiagnosticsConfigFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDiagnosticsConfigFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 diagnosticsConfig = {}; + + }; + static_assert( sizeof( PhysicalDeviceDiagnosticsConfigFeaturesNV ) == sizeof( VkPhysicalDeviceDiagnosticsConfigFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDiagnosticsConfigFeaturesNV; + }; + + struct PhysicalDeviceDiscardRectanglePropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceDiscardRectanglePropertiesEXT(uint32_t maxDiscardRectangles_ = {}) VULKAN_HPP_NOEXCEPT + : maxDiscardRectangles( maxDiscardRectangles_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceDiscardRectanglePropertiesEXT( PhysicalDeviceDiscardRectanglePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDiscardRectanglePropertiesEXT( VkPhysicalDeviceDiscardRectanglePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDiscardRectanglePropertiesEXT & operator=( VkPhysicalDeviceDiscardRectanglePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDiscardRectanglePropertiesEXT & operator=( PhysicalDeviceDiscardRectanglePropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDiscardRectanglePropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceDiscardRectanglePropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDiscardRectanglePropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDiscardRectanglePropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceDiscardRectanglePropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxDiscardRectangles == rhs.maxDiscardRectangles ); + } + + bool operator!=( PhysicalDeviceDiscardRectanglePropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDiscardRectanglePropertiesEXT; + void* pNext = {}; + uint32_t maxDiscardRectangles = {}; + + }; + static_assert( sizeof( PhysicalDeviceDiscardRectanglePropertiesEXT ) == sizeof( VkPhysicalDeviceDiscardRectanglePropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDiscardRectanglePropertiesEXT; + }; + + struct PhysicalDeviceDriverProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceDriverProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceDriverProperties(VULKAN_HPP_NAMESPACE::DriverId driverID_ = VULKAN_HPP_NAMESPACE::DriverId::eAmdProprietary, std::array const& driverName_ = {}, std::array const& driverInfo_ = {}, VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion_ = {}) VULKAN_HPP_NOEXCEPT + : driverID( driverID_ ), driverName( driverName_ ), driverInfo( driverInfo_ ), conformanceVersion( conformanceVersion_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceDriverProperties( PhysicalDeviceDriverProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceDriverProperties( VkPhysicalDeviceDriverProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceDriverProperties & operator=( VkPhysicalDeviceDriverProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceDriverProperties & operator=( PhysicalDeviceDriverProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceDriverProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceDriverProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceDriverProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceDriverProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceDriverProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( driverID == rhs.driverID ) + && ( driverName == rhs.driverName ) + && ( driverInfo == rhs.driverInfo ) + && ( conformanceVersion == rhs.conformanceVersion ); + } + + bool operator!=( PhysicalDeviceDriverProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceDriverProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DriverId driverID = VULKAN_HPP_NAMESPACE::DriverId::eAmdProprietary; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D driverName = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D driverInfo = {}; + VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion = {}; + + }; + static_assert( sizeof( PhysicalDeviceDriverProperties ) == sizeof( VkPhysicalDeviceDriverProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
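// --- Editor's illustrative sketch (not part of the vendored vulkan.hpp or of this patch) ---
// PhysicalDeviceDriverProperties above exposes driverName and driverInfo as fixed-size char
// arrays (ArrayWrapper1D); the strings are null-terminated, so data() can feed std::string.
// The helper name and `gpu` are hypothetical.
inline std::string exampleDriverDescription( vk::PhysicalDevice gpu )
{
  auto chain = gpu.getProperties2<vk::PhysicalDeviceProperties2,
                                  vk::PhysicalDeviceDriverProperties>();
  auto const & driver = chain.get<vk::PhysicalDeviceDriverProperties>();
  // e.g. "NVIDIA (465.xx)" style output for logging which driver the device runs on.
  return std::string( driver.driverName.data() ) + " (" + std::string( driver.driverInfo.data() ) + ")";
}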
); + + template <> + struct CppType + { + using Type = PhysicalDeviceDriverProperties; + }; + using PhysicalDeviceDriverPropertiesKHR = PhysicalDeviceDriverProperties; + + struct PhysicalDeviceExclusiveScissorFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExclusiveScissorFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceExclusiveScissorFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 exclusiveScissor_ = {}) VULKAN_HPP_NOEXCEPT + : exclusiveScissor( exclusiveScissor_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceExclusiveScissorFeaturesNV( PhysicalDeviceExclusiveScissorFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceExclusiveScissorFeaturesNV( VkPhysicalDeviceExclusiveScissorFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceExclusiveScissorFeaturesNV & operator=( VkPhysicalDeviceExclusiveScissorFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceExclusiveScissorFeaturesNV & operator=( PhysicalDeviceExclusiveScissorFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceExclusiveScissorFeaturesNV ) ); + return *this; + } + + PhysicalDeviceExclusiveScissorFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExclusiveScissorFeaturesNV & setExclusiveScissor( VULKAN_HPP_NAMESPACE::Bool32 exclusiveScissor_ ) VULKAN_HPP_NOEXCEPT + { + exclusiveScissor = exclusiveScissor_; + return *this; + } + + + operator VkPhysicalDeviceExclusiveScissorFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceExclusiveScissorFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceExclusiveScissorFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceExclusiveScissorFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( exclusiveScissor == rhs.exclusiveScissor ); + } + + bool operator!=( PhysicalDeviceExclusiveScissorFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExclusiveScissorFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 exclusiveScissor = {}; + + }; + static_assert( sizeof( PhysicalDeviceExclusiveScissorFeaturesNV ) == sizeof( VkPhysicalDeviceExclusiveScissorFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExclusiveScissorFeaturesNV; + }; + + struct PhysicalDeviceExtendedDynamicStateFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExtendedDynamicStateFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceExtendedDynamicStateFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState_ = {}) VULKAN_HPP_NOEXCEPT + : extendedDynamicState( extendedDynamicState_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceExtendedDynamicStateFeaturesEXT( PhysicalDeviceExtendedDynamicStateFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceExtendedDynamicStateFeaturesEXT( VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceExtendedDynamicStateFeaturesEXT & operator=( VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceExtendedDynamicStateFeaturesEXT & operator=( PhysicalDeviceExtendedDynamicStateFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceExtendedDynamicStateFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceExtendedDynamicStateFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExtendedDynamicStateFeaturesEXT & setExtendedDynamicState( VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState_ ) VULKAN_HPP_NOEXCEPT + { + extendedDynamicState = extendedDynamicState_; + return *this; + } + + + operator VkPhysicalDeviceExtendedDynamicStateFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceExtendedDynamicStateFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceExtendedDynamicStateFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceExtendedDynamicStateFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( extendedDynamicState == rhs.extendedDynamicState ); + } + + bool operator!=( PhysicalDeviceExtendedDynamicStateFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExtendedDynamicStateFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 extendedDynamicState = {}; + + }; + static_assert( sizeof( PhysicalDeviceExtendedDynamicStateFeaturesEXT ) == sizeof( VkPhysicalDeviceExtendedDynamicStateFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExtendedDynamicStateFeaturesEXT; + }; + + struct PhysicalDeviceExternalImageFormatInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExternalImageFormatInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalImageFormatInfo(VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd) VULKAN_HPP_NOEXCEPT + : handleType( handleType_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalImageFormatInfo( PhysicalDeviceExternalImageFormatInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceExternalImageFormatInfo( VkPhysicalDeviceExternalImageFormatInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceExternalImageFormatInfo & operator=( VkPhysicalDeviceExternalImageFormatInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceExternalImageFormatInfo & operator=( PhysicalDeviceExternalImageFormatInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceExternalImageFormatInfo ) ); + return *this; + } + + PhysicalDeviceExternalImageFormatInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceExternalImageFormatInfo & setHandleType( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType_ ) VULKAN_HPP_NOEXCEPT + { + handleType = handleType_; + return *this; + } + + + operator VkPhysicalDeviceExternalImageFormatInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceExternalImageFormatInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceExternalImageFormatInfo const& ) const = default; +#else + bool operator==( PhysicalDeviceExternalImageFormatInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( handleType == rhs.handleType ); + } + + bool operator!=( PhysicalDeviceExternalImageFormatInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExternalImageFormatInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType = VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits::eOpaqueFd; + + }; + static_assert( sizeof( PhysicalDeviceExternalImageFormatInfo ) == sizeof( VkPhysicalDeviceExternalImageFormatInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
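// --- Editor's illustrative sketch (not part of the vendored vulkan.hpp or of this patch) ---
// PhysicalDeviceExternalImageFormatInfo above is an *input* struct: it is chained into
// PhysicalDeviceImageFormatInfo2 to ask whether an image of a given format/usage can be
// exported through a particular handle type. A sketch assuming the StructureChain-returning
// getImageFormatProperties2 overload (which throws if the combination is unsupported); the
// helper name and `gpu` are hypothetical.
inline bool exampleSupportsOpaqueFdExport( vk::PhysicalDevice gpu )
{
  vk::PhysicalDeviceExternalImageFormatInfo externalInfo;
  externalInfo.handleType = vk::ExternalMemoryHandleTypeFlagBits::eOpaqueFd;

  vk::PhysicalDeviceImageFormatInfo2 imageInfo;
  imageInfo.format = vk::Format::eR8G8B8A8Unorm;
  imageInfo.type   = vk::ImageType::e2D;
  imageInfo.tiling = vk::ImageTiling::eOptimal;
  imageInfo.usage  = vk::ImageUsageFlagBits::eSampled;
  imageInfo.pNext  = &externalInfo;               // chain the external-memory query input

  auto chain = gpu.getImageFormatProperties2<vk::ImageFormatProperties2,
                                             vk::ExternalImageFormatProperties>( imageInfo );
  auto features = chain.get<vk::ExternalImageFormatProperties>()
                       .externalMemoryProperties.externalMemoryFeatures;
  return static_cast<bool>( features & vk::ExternalMemoryFeatureFlagBits::eExportable );
}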
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExternalImageFormatInfo; + }; + using PhysicalDeviceExternalImageFormatInfoKHR = PhysicalDeviceExternalImageFormatInfo; + + struct PhysicalDeviceExternalMemoryHostPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceExternalMemoryHostPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalMemoryHostPropertiesEXT(VULKAN_HPP_NAMESPACE::DeviceSize minImportedHostPointerAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : minImportedHostPointerAlignment( minImportedHostPointerAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceExternalMemoryHostPropertiesEXT( PhysicalDeviceExternalMemoryHostPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceExternalMemoryHostPropertiesEXT( VkPhysicalDeviceExternalMemoryHostPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceExternalMemoryHostPropertiesEXT & operator=( VkPhysicalDeviceExternalMemoryHostPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceExternalMemoryHostPropertiesEXT & operator=( PhysicalDeviceExternalMemoryHostPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceExternalMemoryHostPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceExternalMemoryHostPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceExternalMemoryHostPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceExternalMemoryHostPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceExternalMemoryHostPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minImportedHostPointerAlignment == rhs.minImportedHostPointerAlignment ); + } + + bool operator!=( PhysicalDeviceExternalMemoryHostPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceExternalMemoryHostPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize minImportedHostPointerAlignment = {}; + + }; + static_assert( sizeof( PhysicalDeviceExternalMemoryHostPropertiesEXT ) == sizeof( VkPhysicalDeviceExternalMemoryHostPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceExternalMemoryHostPropertiesEXT; + }; + + struct PhysicalDeviceFloatControlsProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFloatControlsProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFloatControlsProperties(VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence denormBehaviorIndependence_ = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly, VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence roundingModeIndependence_ = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly, VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat64_ = {}) VULKAN_HPP_NOEXCEPT + : denormBehaviorIndependence( denormBehaviorIndependence_ ), roundingModeIndependence( roundingModeIndependence_ ), shaderSignedZeroInfNanPreserveFloat16( shaderSignedZeroInfNanPreserveFloat16_ ), shaderSignedZeroInfNanPreserveFloat32( shaderSignedZeroInfNanPreserveFloat32_ ), shaderSignedZeroInfNanPreserveFloat64( shaderSignedZeroInfNanPreserveFloat64_ ), shaderDenormPreserveFloat16( shaderDenormPreserveFloat16_ ), shaderDenormPreserveFloat32( shaderDenormPreserveFloat32_ ), shaderDenormPreserveFloat64( shaderDenormPreserveFloat64_ ), shaderDenormFlushToZeroFloat16( shaderDenormFlushToZeroFloat16_ ), shaderDenormFlushToZeroFloat32( shaderDenormFlushToZeroFloat32_ ), shaderDenormFlushToZeroFloat64( shaderDenormFlushToZeroFloat64_ ), shaderRoundingModeRTEFloat16( shaderRoundingModeRTEFloat16_ ), shaderRoundingModeRTEFloat32( shaderRoundingModeRTEFloat32_ ), shaderRoundingModeRTEFloat64( shaderRoundingModeRTEFloat64_ ), shaderRoundingModeRTZFloat16( shaderRoundingModeRTZFloat16_ ), shaderRoundingModeRTZFloat32( shaderRoundingModeRTZFloat32_ ), shaderRoundingModeRTZFloat64( shaderRoundingModeRTZFloat64_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFloatControlsProperties( PhysicalDeviceFloatControlsProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFloatControlsProperties( VkPhysicalDeviceFloatControlsProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFloatControlsProperties & operator=( VkPhysicalDeviceFloatControlsProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFloatControlsProperties & operator=( PhysicalDeviceFloatControlsProperties const & rhs ) VULKAN_HPP_NOEXCEPT 
+ { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFloatControlsProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceFloatControlsProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFloatControlsProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFloatControlsProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceFloatControlsProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( denormBehaviorIndependence == rhs.denormBehaviorIndependence ) + && ( roundingModeIndependence == rhs.roundingModeIndependence ) + && ( shaderSignedZeroInfNanPreserveFloat16 == rhs.shaderSignedZeroInfNanPreserveFloat16 ) + && ( shaderSignedZeroInfNanPreserveFloat32 == rhs.shaderSignedZeroInfNanPreserveFloat32 ) + && ( shaderSignedZeroInfNanPreserveFloat64 == rhs.shaderSignedZeroInfNanPreserveFloat64 ) + && ( shaderDenormPreserveFloat16 == rhs.shaderDenormPreserveFloat16 ) + && ( shaderDenormPreserveFloat32 == rhs.shaderDenormPreserveFloat32 ) + && ( shaderDenormPreserveFloat64 == rhs.shaderDenormPreserveFloat64 ) + && ( shaderDenormFlushToZeroFloat16 == rhs.shaderDenormFlushToZeroFloat16 ) + && ( shaderDenormFlushToZeroFloat32 == rhs.shaderDenormFlushToZeroFloat32 ) + && ( shaderDenormFlushToZeroFloat64 == rhs.shaderDenormFlushToZeroFloat64 ) + && ( shaderRoundingModeRTEFloat16 == rhs.shaderRoundingModeRTEFloat16 ) + && ( shaderRoundingModeRTEFloat32 == rhs.shaderRoundingModeRTEFloat32 ) + && ( shaderRoundingModeRTEFloat64 == rhs.shaderRoundingModeRTEFloat64 ) + && ( shaderRoundingModeRTZFloat16 == rhs.shaderRoundingModeRTZFloat16 ) + && ( shaderRoundingModeRTZFloat32 == rhs.shaderRoundingModeRTZFloat32 ) + && ( shaderRoundingModeRTZFloat64 == rhs.shaderRoundingModeRTZFloat64 ); + } + + bool operator!=( PhysicalDeviceFloatControlsProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFloatControlsProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence denormBehaviorIndependence = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly; + VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence roundingModeIndependence = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly; + VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 
shaderRoundingModeRTZFloat64 = {}; + + }; + static_assert( sizeof( PhysicalDeviceFloatControlsProperties ) == sizeof( VkPhysicalDeviceFloatControlsProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFloatControlsProperties; + }; + using PhysicalDeviceFloatControlsPropertiesKHR = PhysicalDeviceFloatControlsProperties; + + struct PhysicalDeviceFragmentDensityMap2FeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentDensityMap2FeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMap2FeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDeferred_ = {}) VULKAN_HPP_NOEXCEPT + : fragmentDensityMapDeferred( fragmentDensityMapDeferred_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMap2FeaturesEXT( PhysicalDeviceFragmentDensityMap2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentDensityMap2FeaturesEXT( VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentDensityMap2FeaturesEXT & operator=( VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentDensityMap2FeaturesEXT & operator=( PhysicalDeviceFragmentDensityMap2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentDensityMap2FeaturesEXT ) ); + return *this; + } + + PhysicalDeviceFragmentDensityMap2FeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentDensityMap2FeaturesEXT & setFragmentDensityMapDeferred( VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDeferred_ ) VULKAN_HPP_NOEXCEPT + { + fragmentDensityMapDeferred = fragmentDensityMapDeferred_; + return *this; + } + + + operator VkPhysicalDeviceFragmentDensityMap2FeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentDensityMap2FeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentDensityMap2FeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentDensityMap2FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentDensityMapDeferred == rhs.fragmentDensityMapDeferred ); + } + + bool operator!=( PhysicalDeviceFragmentDensityMap2FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentDensityMap2FeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDeferred = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentDensityMap2FeaturesEXT ) == sizeof( VkPhysicalDeviceFragmentDensityMap2FeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
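// --- Editor's illustrative sketch (not part of the vendored vulkan.hpp or of this patch) ---
// The generated set*() members on feature structs such as
// PhysicalDeviceFragmentDensityMap2FeaturesEXT above return *this, so an enable-struct for
// device creation can be built fluently and then chained through pNext. The helper name,
// `gpu`, and `deviceInfo` are hypothetical; the caller is assumed to have filled in the rest
// of the create info and to have nothing else chained on it.
inline vk::Device exampleCreateDeviceWithDeferredFdm( vk::PhysicalDevice gpu,
                                                      vk::DeviceCreateInfo deviceInfo )
{
  auto fdm2 = vk::PhysicalDeviceFragmentDensityMap2FeaturesEXT()
                  .setFragmentDensityMapDeferred( VK_TRUE );
  deviceInfo.setPNext( &fdm2 );           // enable the feature on the device being created
  return gpu.createDevice( deviceInfo );
}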
); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentDensityMap2FeaturesEXT; + }; + + struct PhysicalDeviceFragmentDensityMap2PropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentDensityMap2PropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMap2PropertiesEXT(VULKAN_HPP_NAMESPACE::Bool32 subsampledLoads_ = {}, VULKAN_HPP_NAMESPACE::Bool32 subsampledCoarseReconstructionEarlyAccess_ = {}, uint32_t maxSubsampledArrayLayers_ = {}, uint32_t maxDescriptorSetSubsampledSamplers_ = {}) VULKAN_HPP_NOEXCEPT + : subsampledLoads( subsampledLoads_ ), subsampledCoarseReconstructionEarlyAccess( subsampledCoarseReconstructionEarlyAccess_ ), maxSubsampledArrayLayers( maxSubsampledArrayLayers_ ), maxDescriptorSetSubsampledSamplers( maxDescriptorSetSubsampledSamplers_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMap2PropertiesEXT( PhysicalDeviceFragmentDensityMap2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentDensityMap2PropertiesEXT( VkPhysicalDeviceFragmentDensityMap2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentDensityMap2PropertiesEXT & operator=( VkPhysicalDeviceFragmentDensityMap2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentDensityMap2PropertiesEXT & operator=( PhysicalDeviceFragmentDensityMap2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentDensityMap2PropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceFragmentDensityMap2PropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentDensityMap2PropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentDensityMap2PropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentDensityMap2PropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( subsampledLoads == rhs.subsampledLoads ) + && ( subsampledCoarseReconstructionEarlyAccess == rhs.subsampledCoarseReconstructionEarlyAccess ) + && ( maxSubsampledArrayLayers == rhs.maxSubsampledArrayLayers ) + && ( maxDescriptorSetSubsampledSamplers == rhs.maxDescriptorSetSubsampledSamplers ); + } + + bool operator!=( PhysicalDeviceFragmentDensityMap2PropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentDensityMap2PropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 subsampledLoads = {}; + VULKAN_HPP_NAMESPACE::Bool32 subsampledCoarseReconstructionEarlyAccess = {}; + uint32_t maxSubsampledArrayLayers = {}; + uint32_t maxDescriptorSetSubsampledSamplers = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentDensityMap2PropertiesEXT ) == sizeof( VkPhysicalDeviceFragmentDensityMap2PropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentDensityMap2PropertiesEXT; + }; + + struct PhysicalDeviceFragmentDensityMapFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentDensityMapFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMapFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMap_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDynamic_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapNonSubsampledImages_ = {}) VULKAN_HPP_NOEXCEPT + : fragmentDensityMap( fragmentDensityMap_ ), fragmentDensityMapDynamic( fragmentDensityMapDynamic_ ), fragmentDensityMapNonSubsampledImages( fragmentDensityMapNonSubsampledImages_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMapFeaturesEXT( PhysicalDeviceFragmentDensityMapFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentDensityMapFeaturesEXT( VkPhysicalDeviceFragmentDensityMapFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentDensityMapFeaturesEXT & operator=( VkPhysicalDeviceFragmentDensityMapFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentDensityMapFeaturesEXT & operator=( PhysicalDeviceFragmentDensityMapFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentDensityMapFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceFragmentDensityMapFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentDensityMapFeaturesEXT & setFragmentDensityMap( VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMap_ ) VULKAN_HPP_NOEXCEPT + { + fragmentDensityMap = fragmentDensityMap_; + return *this; + } + + PhysicalDeviceFragmentDensityMapFeaturesEXT & setFragmentDensityMapDynamic( VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDynamic_ ) VULKAN_HPP_NOEXCEPT + { + fragmentDensityMapDynamic = fragmentDensityMapDynamic_; + return *this; + } + + PhysicalDeviceFragmentDensityMapFeaturesEXT & setFragmentDensityMapNonSubsampledImages( VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapNonSubsampledImages_ ) VULKAN_HPP_NOEXCEPT + { + fragmentDensityMapNonSubsampledImages = fragmentDensityMapNonSubsampledImages_; + return *this; + } + + + operator VkPhysicalDeviceFragmentDensityMapFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentDensityMapFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentDensityMapFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentDensityMapFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentDensityMap == rhs.fragmentDensityMap ) + && ( fragmentDensityMapDynamic == rhs.fragmentDensityMapDynamic ) + && ( fragmentDensityMapNonSubsampledImages == rhs.fragmentDensityMapNonSubsampledImages ); + } + + bool operator!=( PhysicalDeviceFragmentDensityMapFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const 
VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentDensityMapFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMap = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapDynamic = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityMapNonSubsampledImages = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentDensityMapFeaturesEXT ) == sizeof( VkPhysicalDeviceFragmentDensityMapFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentDensityMapFeaturesEXT; + }; + + struct PhysicalDeviceFragmentDensityMapPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentDensityMapPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMapPropertiesEXT(VULKAN_HPP_NAMESPACE::Extent2D minFragmentDensityTexelSize_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxFragmentDensityTexelSize_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityInvocations_ = {}) VULKAN_HPP_NOEXCEPT + : minFragmentDensityTexelSize( minFragmentDensityTexelSize_ ), maxFragmentDensityTexelSize( maxFragmentDensityTexelSize_ ), fragmentDensityInvocations( fragmentDensityInvocations_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentDensityMapPropertiesEXT( PhysicalDeviceFragmentDensityMapPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentDensityMapPropertiesEXT( VkPhysicalDeviceFragmentDensityMapPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentDensityMapPropertiesEXT & operator=( VkPhysicalDeviceFragmentDensityMapPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentDensityMapPropertiesEXT & operator=( PhysicalDeviceFragmentDensityMapPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentDensityMapPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceFragmentDensityMapPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentDensityMapPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentDensityMapPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentDensityMapPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minFragmentDensityTexelSize == rhs.minFragmentDensityTexelSize ) + && ( maxFragmentDensityTexelSize == rhs.maxFragmentDensityTexelSize ) + && ( fragmentDensityInvocations == rhs.fragmentDensityInvocations ); + } + + bool operator!=( PhysicalDeviceFragmentDensityMapPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentDensityMapPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D minFragmentDensityTexelSize = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxFragmentDensityTexelSize = {}; + 
VULKAN_HPP_NAMESPACE::Bool32 fragmentDensityInvocations = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceFragmentDensityMapPropertiesEXT ) == sizeof( VkPhysicalDeviceFragmentDensityMapPropertiesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceFragmentDensityMapPropertiesEXT>::value, "struct wrapper is not a standard layout!" );
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceFragmentDensityMapPropertiesEXT>
+  {
+    using Type = PhysicalDeviceFragmentDensityMapPropertiesEXT;
+  };
+
+  struct PhysicalDeviceFragmentShaderBarycentricFeaturesNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShaderBarycentricFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderBarycentric_ = {}) VULKAN_HPP_NOEXCEPT
+    : fragmentShaderBarycentric( fragmentShaderBarycentric_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShaderBarycentricFeaturesNV( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceFragmentShaderBarycentricFeaturesNV( VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceFragmentShaderBarycentricFeaturesNV & operator=( VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShaderBarycentricFeaturesNV const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceFragmentShaderBarycentricFeaturesNV & operator=( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceFragmentShaderBarycentricFeaturesNV ) );
+      return *this;
+    }
+
+    PhysicalDeviceFragmentShaderBarycentricFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceFragmentShaderBarycentricFeaturesNV & setFragmentShaderBarycentric( VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderBarycentric_ ) VULKAN_HPP_NOEXCEPT
+    {
+      fragmentShaderBarycentric = fragmentShaderBarycentric_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>( this );
+    }
+
+    operator VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( fragmentShaderBarycentric == rhs.fragmentShaderBarycentric );
+    }
+
+    bool operator!=( PhysicalDeviceFragmentShaderBarycentricFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShaderBarycentricFeaturesNV;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderBarycentric = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceFragmentShaderBarycentricFeaturesNV ) == sizeof( VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV ), "struct and wrapper have different size!"
); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShaderBarycentricFeaturesNV; + }; + + struct PhysicalDeviceFragmentShaderInterlockFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShaderInterlockFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShaderInterlockFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderSampleInterlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderPixelInterlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderShadingRateInterlock_ = {}) VULKAN_HPP_NOEXCEPT + : fragmentShaderSampleInterlock( fragmentShaderSampleInterlock_ ), fragmentShaderPixelInterlock( fragmentShaderPixelInterlock_ ), fragmentShaderShadingRateInterlock( fragmentShaderShadingRateInterlock_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShaderInterlockFeaturesEXT( PhysicalDeviceFragmentShaderInterlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT( VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT & operator=( VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT & operator=( PhysicalDeviceFragmentShaderInterlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentShaderInterlockFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT & setFragmentShaderSampleInterlock( VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderSampleInterlock_ ) VULKAN_HPP_NOEXCEPT + { + fragmentShaderSampleInterlock = fragmentShaderSampleInterlock_; + return *this; + } + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT & setFragmentShaderPixelInterlock( VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderPixelInterlock_ ) VULKAN_HPP_NOEXCEPT + { + fragmentShaderPixelInterlock = fragmentShaderPixelInterlock_; + return *this; + } + + PhysicalDeviceFragmentShaderInterlockFeaturesEXT & setFragmentShaderShadingRateInterlock( VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderShadingRateInterlock_ ) VULKAN_HPP_NOEXCEPT + { + fragmentShaderShadingRateInterlock = fragmentShaderShadingRateInterlock_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShaderInterlockFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShaderInterlockFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentShaderSampleInterlock == rhs.fragmentShaderSampleInterlock ) + && ( fragmentShaderPixelInterlock == 
rhs.fragmentShaderPixelInterlock ) + && ( fragmentShaderShadingRateInterlock == rhs.fragmentShaderShadingRateInterlock ); + } + + bool operator!=( PhysicalDeviceFragmentShaderInterlockFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShaderInterlockFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderSampleInterlock = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderPixelInterlock = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShaderShadingRateInterlock = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShaderInterlockFeaturesEXT ) == sizeof( VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShaderInterlockFeaturesEXT; + }; + + struct PhysicalDeviceFragmentShadingRateEnumsFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateEnums_ = {}, VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates_ = {}, VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates_ = {}) VULKAN_HPP_NOEXCEPT + : fragmentShadingRateEnums( fragmentShadingRateEnums_ ), supersampleFragmentShadingRates( supersampleFragmentShadingRates_ ), noInvocationFragmentShadingRates( noInvocationFragmentShadingRates_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsFeaturesNV( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV( VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & operator=( VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & operator=( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV ) ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setFragmentShadingRateEnums( VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateEnums_ ) VULKAN_HPP_NOEXCEPT + { + fragmentShadingRateEnums = fragmentShadingRateEnums_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setSupersampleFragmentShadingRates( VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates_ ) VULKAN_HPP_NOEXCEPT + { + supersampleFragmentShadingRates = supersampleFragmentShadingRates_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsFeaturesNV & setNoInvocationFragmentShadingRates( VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates_ ) VULKAN_HPP_NOEXCEPT + { + 
noInvocationFragmentShadingRates = noInvocationFragmentShadingRates_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentShadingRateEnums == rhs.fragmentShadingRateEnums ) + && ( supersampleFragmentShadingRates == rhs.supersampleFragmentShadingRates ) + && ( noInvocationFragmentShadingRates == rhs.noInvocationFragmentShadingRates ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateEnums = {}; + VULKAN_HPP_NAMESPACE::Bool32 supersampleFragmentShadingRates = {}; + VULKAN_HPP_NAMESPACE::Bool32 noInvocationFragmentShadingRates = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateEnumsFeaturesNV ) == sizeof( VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + }; + + struct PhysicalDeviceFragmentShadingRateEnumsPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsPropertiesNV(VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1) VULKAN_HPP_NOEXCEPT + : maxFragmentShadingRateInvocationCount( maxFragmentShadingRateInvocationCount_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateEnumsPropertiesNV( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV( VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & operator=( VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & operator=( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV ) ); + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceFragmentShadingRateEnumsPropertiesNV & 
setMaxFragmentShadingRateInvocationCount( VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount_ ) VULKAN_HPP_NOEXCEPT + { + maxFragmentShadingRateInvocationCount = maxFragmentShadingRateInvocationCount_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxFragmentShadingRateInvocationCount == rhs.maxFragmentShadingRateInvocationCount ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateInvocationCount = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateEnumsPropertiesNV ) == sizeof( VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + }; + + struct PhysicalDeviceFragmentShadingRateFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 pipelineFragmentShadingRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate_ = {}, VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate_ = {}) VULKAN_HPP_NOEXCEPT + : pipelineFragmentShadingRate( pipelineFragmentShadingRate_ ), primitiveFragmentShadingRate( primitiveFragmentShadingRate_ ), attachmentFragmentShadingRate( attachmentFragmentShadingRate_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRateFeaturesKHR( PhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRateFeaturesKHR( VkPhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRateFeaturesKHR & operator=( VkPhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & operator=( PhysicalDeviceFragmentShadingRateFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRateFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + 
PhysicalDeviceFragmentShadingRateFeaturesKHR & setPipelineFragmentShadingRate( VULKAN_HPP_NAMESPACE::Bool32 pipelineFragmentShadingRate_ ) VULKAN_HPP_NOEXCEPT + { + pipelineFragmentShadingRate = pipelineFragmentShadingRate_; + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setPrimitiveFragmentShadingRate( VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate_ ) VULKAN_HPP_NOEXCEPT + { + primitiveFragmentShadingRate = primitiveFragmentShadingRate_; + return *this; + } + + PhysicalDeviceFragmentShadingRateFeaturesKHR & setAttachmentFragmentShadingRate( VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate_ ) VULKAN_HPP_NOEXCEPT + { + attachmentFragmentShadingRate = attachmentFragmentShadingRate_; + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRateFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentShadingRateFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRateFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRateFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineFragmentShadingRate == rhs.pipelineFragmentShadingRate ) + && ( primitiveFragmentShadingRate == rhs.primitiveFragmentShadingRate ) + && ( attachmentFragmentShadingRate == rhs.attachmentFragmentShadingRate ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRateFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRateFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineFragmentShadingRate = {}; + VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRate = {}; + VULKAN_HPP_NAMESPACE::Bool32 attachmentFragmentShadingRate = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRateFeaturesKHR ) == sizeof( VkPhysicalDeviceFragmentShadingRateFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShadingRateFeaturesKHR; + }; + + struct PhysicalDeviceFragmentShadingRatePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRatePropertiesKHR(VULKAN_HPP_NAMESPACE::Extent2D minFragmentShadingRateAttachmentTexelSize_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxFragmentShadingRateAttachmentTexelSize_ = {}, uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio_ = {}, VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRateWithMultipleViewports_ = {}, VULKAN_HPP_NAMESPACE::Bool32 layeredShadingRateAttachments_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateNonTrivialCombinerOps_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxFragmentSize_ = {}, uint32_t maxFragmentSizeAspectRatio_ = {}, uint32_t maxFragmentShadingRateCoverageSamples_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateRasterizationSamples_ = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderDepthStencilWrites_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithSampleMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderSampleMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithConservativeRasterization_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithFragmentShaderInterlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithCustomSampleLocations_ = {}, VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateStrictMultiplyCombiner_ = {}) VULKAN_HPP_NOEXCEPT + : minFragmentShadingRateAttachmentTexelSize( minFragmentShadingRateAttachmentTexelSize_ ), maxFragmentShadingRateAttachmentTexelSize( maxFragmentShadingRateAttachmentTexelSize_ ), maxFragmentShadingRateAttachmentTexelSizeAspectRatio( maxFragmentShadingRateAttachmentTexelSizeAspectRatio_ ), primitiveFragmentShadingRateWithMultipleViewports( primitiveFragmentShadingRateWithMultipleViewports_ ), layeredShadingRateAttachments( layeredShadingRateAttachments_ ), fragmentShadingRateNonTrivialCombinerOps( fragmentShadingRateNonTrivialCombinerOps_ ), maxFragmentSize( maxFragmentSize_ ), maxFragmentSizeAspectRatio( maxFragmentSizeAspectRatio_ ), maxFragmentShadingRateCoverageSamples( maxFragmentShadingRateCoverageSamples_ ), maxFragmentShadingRateRasterizationSamples( maxFragmentShadingRateRasterizationSamples_ ), fragmentShadingRateWithShaderDepthStencilWrites( fragmentShadingRateWithShaderDepthStencilWrites_ ), fragmentShadingRateWithSampleMask( fragmentShadingRateWithSampleMask_ ), fragmentShadingRateWithShaderSampleMask( fragmentShadingRateWithShaderSampleMask_ ), fragmentShadingRateWithConservativeRasterization( fragmentShadingRateWithConservativeRasterization_ ), fragmentShadingRateWithFragmentShaderInterlock( fragmentShadingRateWithFragmentShaderInterlock_ ), fragmentShadingRateWithCustomSampleLocations( fragmentShadingRateWithCustomSampleLocations_ ), fragmentShadingRateStrictMultiplyCombiner( fragmentShadingRateStrictMultiplyCombiner_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceFragmentShadingRatePropertiesKHR( PhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceFragmentShadingRatePropertiesKHR( VkPhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) 
VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceFragmentShadingRatePropertiesKHR & operator=( VkPhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceFragmentShadingRatePropertiesKHR & operator=( PhysicalDeviceFragmentShadingRatePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceFragmentShadingRatePropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceFragmentShadingRatePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceFragmentShadingRatePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceFragmentShadingRatePropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceFragmentShadingRatePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minFragmentShadingRateAttachmentTexelSize == rhs.minFragmentShadingRateAttachmentTexelSize ) + && ( maxFragmentShadingRateAttachmentTexelSize == rhs.maxFragmentShadingRateAttachmentTexelSize ) + && ( maxFragmentShadingRateAttachmentTexelSizeAspectRatio == rhs.maxFragmentShadingRateAttachmentTexelSizeAspectRatio ) + && ( primitiveFragmentShadingRateWithMultipleViewports == rhs.primitiveFragmentShadingRateWithMultipleViewports ) + && ( layeredShadingRateAttachments == rhs.layeredShadingRateAttachments ) + && ( fragmentShadingRateNonTrivialCombinerOps == rhs.fragmentShadingRateNonTrivialCombinerOps ) + && ( maxFragmentSize == rhs.maxFragmentSize ) + && ( maxFragmentSizeAspectRatio == rhs.maxFragmentSizeAspectRatio ) + && ( maxFragmentShadingRateCoverageSamples == rhs.maxFragmentShadingRateCoverageSamples ) + && ( maxFragmentShadingRateRasterizationSamples == rhs.maxFragmentShadingRateRasterizationSamples ) + && ( fragmentShadingRateWithShaderDepthStencilWrites == rhs.fragmentShadingRateWithShaderDepthStencilWrites ) + && ( fragmentShadingRateWithSampleMask == rhs.fragmentShadingRateWithSampleMask ) + && ( fragmentShadingRateWithShaderSampleMask == rhs.fragmentShadingRateWithShaderSampleMask ) + && ( fragmentShadingRateWithConservativeRasterization == rhs.fragmentShadingRateWithConservativeRasterization ) + && ( fragmentShadingRateWithFragmentShaderInterlock == rhs.fragmentShadingRateWithFragmentShaderInterlock ) + && ( fragmentShadingRateWithCustomSampleLocations == rhs.fragmentShadingRateWithCustomSampleLocations ) + && ( fragmentShadingRateStrictMultiplyCombiner == rhs.fragmentShadingRateStrictMultiplyCombiner ); + } + + bool operator!=( PhysicalDeviceFragmentShadingRatePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceFragmentShadingRatePropertiesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D minFragmentShadingRateAttachmentTexelSize = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxFragmentShadingRateAttachmentTexelSize = {}; + uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio = {}; + VULKAN_HPP_NAMESPACE::Bool32 primitiveFragmentShadingRateWithMultipleViewports = {}; + VULKAN_HPP_NAMESPACE::Bool32 layeredShadingRateAttachments = {}; + VULKAN_HPP_NAMESPACE::Bool32 
fragmentShadingRateNonTrivialCombinerOps = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxFragmentSize = {}; + uint32_t maxFragmentSizeAspectRatio = {}; + uint32_t maxFragmentShadingRateCoverageSamples = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlagBits maxFragmentShadingRateRasterizationSamples = VULKAN_HPP_NAMESPACE::SampleCountFlagBits::e1; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderDepthStencilWrites = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithSampleMask = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithShaderSampleMask = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithConservativeRasterization = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithFragmentShaderInterlock = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateWithCustomSampleLocations = {}; + VULKAN_HPP_NAMESPACE::Bool32 fragmentShadingRateStrictMultiplyCombiner = {}; + + }; + static_assert( sizeof( PhysicalDeviceFragmentShadingRatePropertiesKHR ) == sizeof( VkPhysicalDeviceFragmentShadingRatePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceFragmentShadingRatePropertiesKHR; + }; + + struct PhysicalDeviceGroupProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceGroupProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceGroupProperties(uint32_t physicalDeviceCount_ = {}, std::array const& physicalDevices_ = {}, VULKAN_HPP_NAMESPACE::Bool32 subsetAllocation_ = {}) VULKAN_HPP_NOEXCEPT + : physicalDeviceCount( physicalDeviceCount_ ), physicalDevices( physicalDevices_ ), subsetAllocation( subsetAllocation_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceGroupProperties( PhysicalDeviceGroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceGroupProperties( VkPhysicalDeviceGroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceGroupProperties & operator=( VkPhysicalDeviceGroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceGroupProperties & operator=( PhysicalDeviceGroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceGroupProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceGroupProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceGroupProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceGroupProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceGroupProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( physicalDeviceCount == rhs.physicalDeviceCount ) + && ( physicalDevices == rhs.physicalDevices ) + && ( subsetAllocation == rhs.subsetAllocation ); + } + + bool operator!=( PhysicalDeviceGroupProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceGroupProperties; + void* pNext = {}; + uint32_t 
physicalDeviceCount = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D physicalDevices = {}; + VULKAN_HPP_NAMESPACE::Bool32 subsetAllocation = {}; + + }; + static_assert( sizeof( PhysicalDeviceGroupProperties ) == sizeof( VkPhysicalDeviceGroupProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceGroupProperties; + }; + using PhysicalDeviceGroupPropertiesKHR = PhysicalDeviceGroupProperties; + + struct PhysicalDeviceHostQueryResetFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceHostQueryResetFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceHostQueryResetFeatures(VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset_ = {}) VULKAN_HPP_NOEXCEPT + : hostQueryReset( hostQueryReset_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceHostQueryResetFeatures( PhysicalDeviceHostQueryResetFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceHostQueryResetFeatures( VkPhysicalDeviceHostQueryResetFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceHostQueryResetFeatures & operator=( VkPhysicalDeviceHostQueryResetFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceHostQueryResetFeatures & operator=( PhysicalDeviceHostQueryResetFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceHostQueryResetFeatures ) ); + return *this; + } + + PhysicalDeviceHostQueryResetFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceHostQueryResetFeatures & setHostQueryReset( VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset_ ) VULKAN_HPP_NOEXCEPT + { + hostQueryReset = hostQueryReset_; + return *this; + } + + + operator VkPhysicalDeviceHostQueryResetFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceHostQueryResetFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceHostQueryResetFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceHostQueryResetFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( hostQueryReset == rhs.hostQueryReset ); + } + + bool operator!=( PhysicalDeviceHostQueryResetFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceHostQueryResetFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset = {}; + + }; + static_assert( sizeof( PhysicalDeviceHostQueryResetFeatures ) == sizeof( VkPhysicalDeviceHostQueryResetFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceHostQueryResetFeatures>
+  {
+    using Type = PhysicalDeviceHostQueryResetFeatures;
+  };
+  using PhysicalDeviceHostQueryResetFeaturesEXT = PhysicalDeviceHostQueryResetFeatures;
+
+  struct PhysicalDeviceIDProperties
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceIdProperties;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceIDProperties(std::array<uint8_t,VK_UUID_SIZE> const& deviceUUID_ = {}, std::array<uint8_t,VK_UUID_SIZE> const& driverUUID_ = {}, std::array<uint8_t,VK_LUID_SIZE> const& deviceLUID_ = {}, uint32_t deviceNodeMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 deviceLUIDValid_ = {}) VULKAN_HPP_NOEXCEPT
+    : deviceUUID( deviceUUID_ ), driverUUID( driverUUID_ ), deviceLUID( deviceLUID_ ), deviceNodeMask( deviceNodeMask_ ), deviceLUIDValid( deviceLUIDValid_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceIDProperties( PhysicalDeviceIDProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceIDProperties( VkPhysicalDeviceIDProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceIDProperties & operator=( VkPhysicalDeviceIDProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceIDProperties const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceIDProperties & operator=( PhysicalDeviceIDProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceIDProperties ) );
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceIDProperties const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceIDProperties*>( this );
+    }
+
+    operator VkPhysicalDeviceIDProperties &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceIDProperties*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceIDProperties const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceIDProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( deviceUUID == rhs.deviceUUID )
+          && ( driverUUID == rhs.driverUUID )
+          && ( deviceLUID == rhs.deviceLUID )
+          && ( deviceNodeMask == rhs.deviceNodeMask )
+          && ( deviceLUIDValid == rhs.deviceLUIDValid );
+    }
+
+    bool operator!=( PhysicalDeviceIDProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceIdProperties;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint8_t, VK_UUID_SIZE> deviceUUID = {};
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint8_t, VK_UUID_SIZE> driverUUID = {};
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint8_t, VK_LUID_SIZE> deviceLUID = {};
+    uint32_t deviceNodeMask = {};
+    VULKAN_HPP_NAMESPACE::Bool32 deviceLUIDValid = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceIDProperties ) == sizeof( VkPhysicalDeviceIDProperties ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceIDProperties>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceIDProperties; + }; + using PhysicalDeviceIDPropertiesKHR = PhysicalDeviceIDProperties; + + struct PhysicalDeviceImageDrmFormatModifierInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImageDrmFormatModifierInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageDrmFormatModifierInfoEXT(uint64_t drmFormatModifier_ = {}, VULKAN_HPP_NAMESPACE::SharingMode sharingMode_ = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive, uint32_t queueFamilyIndexCount_ = {}, const uint32_t* pQueueFamilyIndices_ = {}) VULKAN_HPP_NOEXCEPT + : drmFormatModifier( drmFormatModifier_ ), sharingMode( sharingMode_ ), queueFamilyIndexCount( queueFamilyIndexCount_ ), pQueueFamilyIndices( pQueueFamilyIndices_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageDrmFormatModifierInfoEXT( PhysicalDeviceImageDrmFormatModifierInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceImageDrmFormatModifierInfoEXT( VkPhysicalDeviceImageDrmFormatModifierInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PhysicalDeviceImageDrmFormatModifierInfoEXT( uint64_t drmFormatModifier_, VULKAN_HPP_NAMESPACE::SharingMode sharingMode_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_ ) + : drmFormatModifier( drmFormatModifier_ ), sharingMode( sharingMode_ ), queueFamilyIndexCount( static_cast( queueFamilyIndices_.size() ) ), pQueueFamilyIndices( queueFamilyIndices_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceImageDrmFormatModifierInfoEXT & operator=( VkPhysicalDeviceImageDrmFormatModifierInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceImageDrmFormatModifierInfoEXT & operator=( PhysicalDeviceImageDrmFormatModifierInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceImageDrmFormatModifierInfoEXT ) ); + return *this; + } + + PhysicalDeviceImageDrmFormatModifierInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceImageDrmFormatModifierInfoEXT & setDrmFormatModifier( uint64_t drmFormatModifier_ ) VULKAN_HPP_NOEXCEPT + { + drmFormatModifier = drmFormatModifier_; + return *this; + } + + PhysicalDeviceImageDrmFormatModifierInfoEXT & setSharingMode( VULKAN_HPP_NAMESPACE::SharingMode sharingMode_ ) VULKAN_HPP_NOEXCEPT + { + sharingMode = sharingMode_; + return *this; + } + + PhysicalDeviceImageDrmFormatModifierInfoEXT & setQueueFamilyIndexCount( uint32_t queueFamilyIndexCount_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = queueFamilyIndexCount_; + return *this; + } + + PhysicalDeviceImageDrmFormatModifierInfoEXT & setPQueueFamilyIndices( const uint32_t* pQueueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + pQueueFamilyIndices = pQueueFamilyIndices_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PhysicalDeviceImageDrmFormatModifierInfoEXT & setQueueFamilyIndices( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & queueFamilyIndices_ ) VULKAN_HPP_NOEXCEPT + { + queueFamilyIndexCount = static_cast( queueFamilyIndices_.size() ); + pQueueFamilyIndices = queueFamilyIndices_.data(); + return *this; + } +#endif // 
!defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPhysicalDeviceImageDrmFormatModifierInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceImageDrmFormatModifierInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceImageDrmFormatModifierInfoEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceImageDrmFormatModifierInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( drmFormatModifier == rhs.drmFormatModifier ) + && ( sharingMode == rhs.sharingMode ) + && ( queueFamilyIndexCount == rhs.queueFamilyIndexCount ) + && ( pQueueFamilyIndices == rhs.pQueueFamilyIndices ); + } + + bool operator!=( PhysicalDeviceImageDrmFormatModifierInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceImageDrmFormatModifierInfoEXT; + const void* pNext = {}; + uint64_t drmFormatModifier = {}; + VULKAN_HPP_NAMESPACE::SharingMode sharingMode = VULKAN_HPP_NAMESPACE::SharingMode::eExclusive; + uint32_t queueFamilyIndexCount = {}; + const uint32_t* pQueueFamilyIndices = {}; + + }; + static_assert( sizeof( PhysicalDeviceImageDrmFormatModifierInfoEXT ) == sizeof( VkPhysicalDeviceImageDrmFormatModifierInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceImageDrmFormatModifierInfoEXT; + }; + + struct PhysicalDeviceImageRobustnessFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImageRobustnessFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageRobustnessFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess_ = {}) VULKAN_HPP_NOEXCEPT + : robustImageAccess( robustImageAccess_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageRobustnessFeaturesEXT( PhysicalDeviceImageRobustnessFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceImageRobustnessFeaturesEXT( VkPhysicalDeviceImageRobustnessFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceImageRobustnessFeaturesEXT & operator=( VkPhysicalDeviceImageRobustnessFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceImageRobustnessFeaturesEXT & operator=( PhysicalDeviceImageRobustnessFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceImageRobustnessFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceImageRobustnessFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceImageRobustnessFeaturesEXT & setRobustImageAccess( VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess_ ) VULKAN_HPP_NOEXCEPT + { + robustImageAccess = robustImageAccess_; + return *this; + } + + + operator VkPhysicalDeviceImageRobustnessFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceImageRobustnessFeaturesEXT &() 
VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceImageRobustnessFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceImageRobustnessFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( robustImageAccess == rhs.robustImageAccess ); + } + + bool operator!=( PhysicalDeviceImageRobustnessFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceImageRobustnessFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess = {}; + + }; + static_assert( sizeof( PhysicalDeviceImageRobustnessFeaturesEXT ) == sizeof( VkPhysicalDeviceImageRobustnessFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceImageRobustnessFeaturesEXT; + }; + + struct PhysicalDeviceImageViewImageFormatInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImageViewImageFormatInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageViewImageFormatInfoEXT(VULKAN_HPP_NAMESPACE::ImageViewType imageViewType_ = VULKAN_HPP_NAMESPACE::ImageViewType::e1D) VULKAN_HPP_NOEXCEPT + : imageViewType( imageViewType_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceImageViewImageFormatInfoEXT( PhysicalDeviceImageViewImageFormatInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceImageViewImageFormatInfoEXT( VkPhysicalDeviceImageViewImageFormatInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceImageViewImageFormatInfoEXT & operator=( VkPhysicalDeviceImageViewImageFormatInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceImageViewImageFormatInfoEXT & operator=( PhysicalDeviceImageViewImageFormatInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceImageViewImageFormatInfoEXT ) ); + return *this; + } + + PhysicalDeviceImageViewImageFormatInfoEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceImageViewImageFormatInfoEXT & setImageViewType( VULKAN_HPP_NAMESPACE::ImageViewType imageViewType_ ) VULKAN_HPP_NOEXCEPT + { + imageViewType = imageViewType_; + return *this; + } + + + operator VkPhysicalDeviceImageViewImageFormatInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceImageViewImageFormatInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceImageViewImageFormatInfoEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceImageViewImageFormatInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( imageViewType == rhs.imageViewType ); + } + + bool operator!=( PhysicalDeviceImageViewImageFormatInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } 
+#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceImageViewImageFormatInfoEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageViewType imageViewType = VULKAN_HPP_NAMESPACE::ImageViewType::e1D; + + }; + static_assert( sizeof( PhysicalDeviceImageViewImageFormatInfoEXT ) == sizeof( VkPhysicalDeviceImageViewImageFormatInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceImageViewImageFormatInfoEXT; + }; + + struct PhysicalDeviceImagelessFramebufferFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceImagelessFramebufferFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceImagelessFramebufferFeatures(VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer_ = {}) VULKAN_HPP_NOEXCEPT + : imagelessFramebuffer( imagelessFramebuffer_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceImagelessFramebufferFeatures( PhysicalDeviceImagelessFramebufferFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceImagelessFramebufferFeatures( VkPhysicalDeviceImagelessFramebufferFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceImagelessFramebufferFeatures & operator=( VkPhysicalDeviceImagelessFramebufferFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceImagelessFramebufferFeatures & operator=( PhysicalDeviceImagelessFramebufferFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceImagelessFramebufferFeatures ) ); + return *this; + } + + PhysicalDeviceImagelessFramebufferFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceImagelessFramebufferFeatures & setImagelessFramebuffer( VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer_ ) VULKAN_HPP_NOEXCEPT + { + imagelessFramebuffer = imagelessFramebuffer_; + return *this; + } + + + operator VkPhysicalDeviceImagelessFramebufferFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceImagelessFramebufferFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceImagelessFramebufferFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceImagelessFramebufferFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( imagelessFramebuffer == rhs.imagelessFramebuffer ); + } + + bool operator!=( PhysicalDeviceImagelessFramebufferFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceImagelessFramebufferFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer = {}; + + }; + static_assert( sizeof( PhysicalDeviceImagelessFramebufferFeatures ) == sizeof( VkPhysicalDeviceImagelessFramebufferFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceImagelessFramebufferFeatures>
+  {
+    using Type = PhysicalDeviceImagelessFramebufferFeatures;
+  };
+  using PhysicalDeviceImagelessFramebufferFeaturesKHR = PhysicalDeviceImagelessFramebufferFeatures;
+
+  struct PhysicalDeviceIndexTypeUint8FeaturesEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceIndexTypeUint8FeaturesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceIndexTypeUint8FeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8_ = {}) VULKAN_HPP_NOEXCEPT
+    : indexTypeUint8( indexTypeUint8_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceIndexTypeUint8FeaturesEXT( PhysicalDeviceIndexTypeUint8FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceIndexTypeUint8FeaturesEXT( VkPhysicalDeviceIndexTypeUint8FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceIndexTypeUint8FeaturesEXT & operator=( VkPhysicalDeviceIndexTypeUint8FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceIndexTypeUint8FeaturesEXT const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceIndexTypeUint8FeaturesEXT & operator=( PhysicalDeviceIndexTypeUint8FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceIndexTypeUint8FeaturesEXT ) );
+      return *this;
+    }
+
+    PhysicalDeviceIndexTypeUint8FeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceIndexTypeUint8FeaturesEXT & setIndexTypeUint8( VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8_ ) VULKAN_HPP_NOEXCEPT
+    {
+      indexTypeUint8 = indexTypeUint8_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceIndexTypeUint8FeaturesEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>( this );
+    }
+
+    operator VkPhysicalDeviceIndexTypeUint8FeaturesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceIndexTypeUint8FeaturesEXT const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceIndexTypeUint8FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( indexTypeUint8 == rhs.indexTypeUint8 );
+    }
+
+    bool operator!=( PhysicalDeviceIndexTypeUint8FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceIndexTypeUint8FeaturesEXT;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 indexTypeUint8 = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceIndexTypeUint8FeaturesEXT ) == sizeof( VkPhysicalDeviceIndexTypeUint8FeaturesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceIndexTypeUint8FeaturesEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceIndexTypeUint8FeaturesEXT; + }; + + struct PhysicalDeviceInlineUniformBlockFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceInlineUniformBlockFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceInlineUniformBlockFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 inlineUniformBlock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingInlineUniformBlockUpdateAfterBind_ = {}) VULKAN_HPP_NOEXCEPT + : inlineUniformBlock( inlineUniformBlock_ ), descriptorBindingInlineUniformBlockUpdateAfterBind( descriptorBindingInlineUniformBlockUpdateAfterBind_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceInlineUniformBlockFeaturesEXT( PhysicalDeviceInlineUniformBlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceInlineUniformBlockFeaturesEXT( VkPhysicalDeviceInlineUniformBlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceInlineUniformBlockFeaturesEXT & operator=( VkPhysicalDeviceInlineUniformBlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceInlineUniformBlockFeaturesEXT & operator=( PhysicalDeviceInlineUniformBlockFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceInlineUniformBlockFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceInlineUniformBlockFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceInlineUniformBlockFeaturesEXT & setInlineUniformBlock( VULKAN_HPP_NAMESPACE::Bool32 inlineUniformBlock_ ) VULKAN_HPP_NOEXCEPT + { + inlineUniformBlock = inlineUniformBlock_; + return *this; + } + + PhysicalDeviceInlineUniformBlockFeaturesEXT & setDescriptorBindingInlineUniformBlockUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingInlineUniformBlockUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingInlineUniformBlockUpdateAfterBind = descriptorBindingInlineUniformBlockUpdateAfterBind_; + return *this; + } + + + operator VkPhysicalDeviceInlineUniformBlockFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceInlineUniformBlockFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceInlineUniformBlockFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceInlineUniformBlockFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( inlineUniformBlock == rhs.inlineUniformBlock ) + && ( descriptorBindingInlineUniformBlockUpdateAfterBind == rhs.descriptorBindingInlineUniformBlockUpdateAfterBind ); + } + + bool operator!=( PhysicalDeviceInlineUniformBlockFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceInlineUniformBlockFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 inlineUniformBlock = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingInlineUniformBlockUpdateAfterBind = {}; + + }; + static_assert( sizeof( 
PhysicalDeviceInlineUniformBlockFeaturesEXT ) == sizeof( VkPhysicalDeviceInlineUniformBlockFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceInlineUniformBlockFeaturesEXT; + }; + + struct PhysicalDeviceInlineUniformBlockPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceInlineUniformBlockPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceInlineUniformBlockPropertiesEXT(uint32_t maxInlineUniformBlockSize_ = {}, uint32_t maxPerStageDescriptorInlineUniformBlocks_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks_ = {}, uint32_t maxDescriptorSetInlineUniformBlocks_ = {}, uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ = {}) VULKAN_HPP_NOEXCEPT + : maxInlineUniformBlockSize( maxInlineUniformBlockSize_ ), maxPerStageDescriptorInlineUniformBlocks( maxPerStageDescriptorInlineUniformBlocks_ ), maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks( maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks_ ), maxDescriptorSetInlineUniformBlocks( maxDescriptorSetInlineUniformBlocks_ ), maxDescriptorSetUpdateAfterBindInlineUniformBlocks( maxDescriptorSetUpdateAfterBindInlineUniformBlocks_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceInlineUniformBlockPropertiesEXT( PhysicalDeviceInlineUniformBlockPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceInlineUniformBlockPropertiesEXT( VkPhysicalDeviceInlineUniformBlockPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceInlineUniformBlockPropertiesEXT & operator=( VkPhysicalDeviceInlineUniformBlockPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceInlineUniformBlockPropertiesEXT & operator=( PhysicalDeviceInlineUniformBlockPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceInlineUniformBlockPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceInlineUniformBlockPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceInlineUniformBlockPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceInlineUniformBlockPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceInlineUniformBlockPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxInlineUniformBlockSize == rhs.maxInlineUniformBlockSize ) + && ( maxPerStageDescriptorInlineUniformBlocks == rhs.maxPerStageDescriptorInlineUniformBlocks ) + && ( maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks == rhs.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks ) + && ( maxDescriptorSetInlineUniformBlocks == rhs.maxDescriptorSetInlineUniformBlocks ) + && ( maxDescriptorSetUpdateAfterBindInlineUniformBlocks == rhs.maxDescriptorSetUpdateAfterBindInlineUniformBlocks ); + } + + bool operator!=( PhysicalDeviceInlineUniformBlockPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } 
+#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceInlineUniformBlockPropertiesEXT; + void* pNext = {}; + uint32_t maxInlineUniformBlockSize = {}; + uint32_t maxPerStageDescriptorInlineUniformBlocks = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks = {}; + uint32_t maxDescriptorSetInlineUniformBlocks = {}; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks = {}; + + }; + static_assert( sizeof( PhysicalDeviceInlineUniformBlockPropertiesEXT ) == sizeof( VkPhysicalDeviceInlineUniformBlockPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceInlineUniformBlockPropertiesEXT; + }; + + struct PhysicalDeviceLineRasterizationFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLineRasterizationFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceLineRasterizationFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 rectangularLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bresenhamLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 smoothLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 stippledRectangularLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines_ = {}) VULKAN_HPP_NOEXCEPT + : rectangularLines( rectangularLines_ ), bresenhamLines( bresenhamLines_ ), smoothLines( smoothLines_ ), stippledRectangularLines( stippledRectangularLines_ ), stippledBresenhamLines( stippledBresenhamLines_ ), stippledSmoothLines( stippledSmoothLines_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceLineRasterizationFeaturesEXT( PhysicalDeviceLineRasterizationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceLineRasterizationFeaturesEXT( VkPhysicalDeviceLineRasterizationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceLineRasterizationFeaturesEXT & operator=( VkPhysicalDeviceLineRasterizationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & operator=( PhysicalDeviceLineRasterizationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceLineRasterizationFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setRectangularLines( VULKAN_HPP_NAMESPACE::Bool32 rectangularLines_ ) VULKAN_HPP_NOEXCEPT + { + rectangularLines = rectangularLines_; + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setBresenhamLines( VULKAN_HPP_NAMESPACE::Bool32 bresenhamLines_ ) VULKAN_HPP_NOEXCEPT + { + bresenhamLines = bresenhamLines_; + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setSmoothLines( VULKAN_HPP_NAMESPACE::Bool32 smoothLines_ ) VULKAN_HPP_NOEXCEPT + { + smoothLines = smoothLines_; + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setStippledRectangularLines( VULKAN_HPP_NAMESPACE::Bool32 stippledRectangularLines_ ) VULKAN_HPP_NOEXCEPT + { + stippledRectangularLines = stippledRectangularLines_; + 
return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setStippledBresenhamLines( VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines_ ) VULKAN_HPP_NOEXCEPT + { + stippledBresenhamLines = stippledBresenhamLines_; + return *this; + } + + PhysicalDeviceLineRasterizationFeaturesEXT & setStippledSmoothLines( VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines_ ) VULKAN_HPP_NOEXCEPT + { + stippledSmoothLines = stippledSmoothLines_; + return *this; + } + + + operator VkPhysicalDeviceLineRasterizationFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceLineRasterizationFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceLineRasterizationFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceLineRasterizationFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( rectangularLines == rhs.rectangularLines ) + && ( bresenhamLines == rhs.bresenhamLines ) + && ( smoothLines == rhs.smoothLines ) + && ( stippledRectangularLines == rhs.stippledRectangularLines ) + && ( stippledBresenhamLines == rhs.stippledBresenhamLines ) + && ( stippledSmoothLines == rhs.stippledSmoothLines ); + } + + bool operator!=( PhysicalDeviceLineRasterizationFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLineRasterizationFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 rectangularLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 bresenhamLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 smoothLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 stippledRectangularLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 stippledBresenhamLines = {}; + VULKAN_HPP_NAMESPACE::Bool32 stippledSmoothLines = {}; + + }; + static_assert( sizeof( PhysicalDeviceLineRasterizationFeaturesEXT ) == sizeof( VkPhysicalDeviceLineRasterizationFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceLineRasterizationFeaturesEXT>
+  {
+    using Type = PhysicalDeviceLineRasterizationFeaturesEXT;
+  };
+
+  struct PhysicalDeviceLineRasterizationPropertiesEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceLineRasterizationPropertiesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLineRasterizationPropertiesEXT(uint32_t lineSubPixelPrecisionBits_ = {}) VULKAN_HPP_NOEXCEPT
+    : lineSubPixelPrecisionBits( lineSubPixelPrecisionBits_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceLineRasterizationPropertiesEXT( PhysicalDeviceLineRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceLineRasterizationPropertiesEXT( VkPhysicalDeviceLineRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceLineRasterizationPropertiesEXT & operator=( VkPhysicalDeviceLineRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceLineRasterizationPropertiesEXT const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceLineRasterizationPropertiesEXT & operator=( PhysicalDeviceLineRasterizationPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceLineRasterizationPropertiesEXT ) );
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceLineRasterizationPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceLineRasterizationPropertiesEXT*>( this );
+    }
+
+    operator VkPhysicalDeviceLineRasterizationPropertiesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceLineRasterizationPropertiesEXT const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceLineRasterizationPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( lineSubPixelPrecisionBits == rhs.lineSubPixelPrecisionBits );
+    }
+
+    bool operator!=( PhysicalDeviceLineRasterizationPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceLineRasterizationPropertiesEXT;
+    void* pNext = {};
+    uint32_t lineSubPixelPrecisionBits = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceLineRasterizationPropertiesEXT ) == sizeof( VkPhysicalDeviceLineRasterizationPropertiesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceLineRasterizationPropertiesEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceLineRasterizationPropertiesEXT; + }; + + struct PhysicalDeviceMaintenance3Properties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMaintenance3Properties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance3Properties(uint32_t maxPerSetDescriptors_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxMemoryAllocationSize_ = {}) VULKAN_HPP_NOEXCEPT + : maxPerSetDescriptors( maxPerSetDescriptors_ ), maxMemoryAllocationSize( maxMemoryAllocationSize_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceMaintenance3Properties( PhysicalDeviceMaintenance3Properties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMaintenance3Properties( VkPhysicalDeviceMaintenance3Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMaintenance3Properties & operator=( VkPhysicalDeviceMaintenance3Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMaintenance3Properties & operator=( PhysicalDeviceMaintenance3Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMaintenance3Properties ) ); + return *this; + } + + + operator VkPhysicalDeviceMaintenance3Properties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMaintenance3Properties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMaintenance3Properties const& ) const = default; +#else + bool operator==( PhysicalDeviceMaintenance3Properties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxPerSetDescriptors == rhs.maxPerSetDescriptors ) + && ( maxMemoryAllocationSize == rhs.maxMemoryAllocationSize ); + } + + bool operator!=( PhysicalDeviceMaintenance3Properties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMaintenance3Properties; + void* pNext = {}; + uint32_t maxPerSetDescriptors = {}; + VULKAN_HPP_NAMESPACE::DeviceSize maxMemoryAllocationSize = {}; + + }; + static_assert( sizeof( PhysicalDeviceMaintenance3Properties ) == sizeof( VkPhysicalDeviceMaintenance3Properties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceMaintenance3Properties; + }; + using PhysicalDeviceMaintenance3PropertiesKHR = PhysicalDeviceMaintenance3Properties; + + struct PhysicalDeviceMemoryBudgetPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMemoryBudgetPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryBudgetPropertiesEXT(std::array const& heapBudget_ = {}, std::array const& heapUsage_ = {}) VULKAN_HPP_NOEXCEPT + : heapBudget( heapBudget_ ), heapUsage( heapUsage_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMemoryBudgetPropertiesEXT( PhysicalDeviceMemoryBudgetPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMemoryBudgetPropertiesEXT( VkPhysicalDeviceMemoryBudgetPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMemoryBudgetPropertiesEXT & operator=( VkPhysicalDeviceMemoryBudgetPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMemoryBudgetPropertiesEXT & operator=( PhysicalDeviceMemoryBudgetPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMemoryBudgetPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceMemoryBudgetPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMemoryBudgetPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMemoryBudgetPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceMemoryBudgetPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( heapBudget == rhs.heapBudget ) + && ( heapUsage == rhs.heapUsage ); + } + + bool operator!=( PhysicalDeviceMemoryBudgetPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMemoryBudgetPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D heapBudget = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D heapUsage = {}; + + }; + static_assert( sizeof( PhysicalDeviceMemoryBudgetPropertiesEXT ) == sizeof( VkPhysicalDeviceMemoryBudgetPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceMemoryBudgetPropertiesEXT>
+  {
+    using Type = PhysicalDeviceMemoryBudgetPropertiesEXT;
+  };
+
+  struct PhysicalDeviceMemoryPriorityFeaturesEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMemoryPriorityFeaturesEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceMemoryPriorityFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 memoryPriority_ = {}) VULKAN_HPP_NOEXCEPT
+    : memoryPriority( memoryPriority_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceMemoryPriorityFeaturesEXT( PhysicalDeviceMemoryPriorityFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceMemoryPriorityFeaturesEXT( VkPhysicalDeviceMemoryPriorityFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceMemoryPriorityFeaturesEXT & operator=( VkPhysicalDeviceMemoryPriorityFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryPriorityFeaturesEXT const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceMemoryPriorityFeaturesEXT & operator=( PhysicalDeviceMemoryPriorityFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceMemoryPriorityFeaturesEXT ) );
+      return *this;
+    }
+
+    PhysicalDeviceMemoryPriorityFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceMemoryPriorityFeaturesEXT & setMemoryPriority( VULKAN_HPP_NAMESPACE::Bool32 memoryPriority_ ) VULKAN_HPP_NOEXCEPT
+    {
+      memoryPriority = memoryPriority_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceMemoryPriorityFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceMemoryPriorityFeaturesEXT*>( this );
+    }
+
+    operator VkPhysicalDeviceMemoryPriorityFeaturesEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceMemoryPriorityFeaturesEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceMemoryPriorityFeaturesEXT const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceMemoryPriorityFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( memoryPriority == rhs.memoryPriority );
+    }
+
+    bool operator!=( PhysicalDeviceMemoryPriorityFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMemoryPriorityFeaturesEXT;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 memoryPriority = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceMemoryPriorityFeaturesEXT ) == sizeof( VkPhysicalDeviceMemoryPriorityFeaturesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceMemoryPriorityFeaturesEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceMemoryPriorityFeaturesEXT; + }; + + struct PhysicalDeviceMeshShaderFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMeshShaderFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceMeshShaderFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 taskShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 meshShader_ = {}) VULKAN_HPP_NOEXCEPT + : taskShader( taskShader_ ), meshShader( meshShader_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceMeshShaderFeaturesNV( PhysicalDeviceMeshShaderFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMeshShaderFeaturesNV( VkPhysicalDeviceMeshShaderFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMeshShaderFeaturesNV & operator=( VkPhysicalDeviceMeshShaderFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMeshShaderFeaturesNV & operator=( PhysicalDeviceMeshShaderFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMeshShaderFeaturesNV ) ); + return *this; + } + + PhysicalDeviceMeshShaderFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceMeshShaderFeaturesNV & setTaskShader( VULKAN_HPP_NAMESPACE::Bool32 taskShader_ ) VULKAN_HPP_NOEXCEPT + { + taskShader = taskShader_; + return *this; + } + + PhysicalDeviceMeshShaderFeaturesNV & setMeshShader( VULKAN_HPP_NAMESPACE::Bool32 meshShader_ ) VULKAN_HPP_NOEXCEPT + { + meshShader = meshShader_; + return *this; + } + + + operator VkPhysicalDeviceMeshShaderFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMeshShaderFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMeshShaderFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceMeshShaderFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( taskShader == rhs.taskShader ) + && ( meshShader == rhs.meshShader ); + } + + bool operator!=( PhysicalDeviceMeshShaderFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMeshShaderFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 taskShader = {}; + VULKAN_HPP_NAMESPACE::Bool32 meshShader = {}; + + }; + static_assert( sizeof( PhysicalDeviceMeshShaderFeaturesNV ) == sizeof( VkPhysicalDeviceMeshShaderFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceMeshShaderFeaturesNV; + }; + + struct PhysicalDeviceMeshShaderPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMeshShaderPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMeshShaderPropertiesNV(uint32_t maxDrawMeshTasksCount_ = {}, uint32_t maxTaskWorkGroupInvocations_ = {}, std::array const& maxTaskWorkGroupSize_ = {}, uint32_t maxTaskTotalMemorySize_ = {}, uint32_t maxTaskOutputCount_ = {}, uint32_t maxMeshWorkGroupInvocations_ = {}, std::array const& maxMeshWorkGroupSize_ = {}, uint32_t maxMeshTotalMemorySize_ = {}, uint32_t maxMeshOutputVertices_ = {}, uint32_t maxMeshOutputPrimitives_ = {}, uint32_t maxMeshMultiviewViewCount_ = {}, uint32_t meshOutputPerVertexGranularity_ = {}, uint32_t meshOutputPerPrimitiveGranularity_ = {}) VULKAN_HPP_NOEXCEPT + : maxDrawMeshTasksCount( maxDrawMeshTasksCount_ ), maxTaskWorkGroupInvocations( maxTaskWorkGroupInvocations_ ), maxTaskWorkGroupSize( maxTaskWorkGroupSize_ ), maxTaskTotalMemorySize( maxTaskTotalMemorySize_ ), maxTaskOutputCount( maxTaskOutputCount_ ), maxMeshWorkGroupInvocations( maxMeshWorkGroupInvocations_ ), maxMeshWorkGroupSize( maxMeshWorkGroupSize_ ), maxMeshTotalMemorySize( maxMeshTotalMemorySize_ ), maxMeshOutputVertices( maxMeshOutputVertices_ ), maxMeshOutputPrimitives( maxMeshOutputPrimitives_ ), maxMeshMultiviewViewCount( maxMeshMultiviewViewCount_ ), meshOutputPerVertexGranularity( meshOutputPerVertexGranularity_ ), meshOutputPerPrimitiveGranularity( meshOutputPerPrimitiveGranularity_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceMeshShaderPropertiesNV( PhysicalDeviceMeshShaderPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMeshShaderPropertiesNV( VkPhysicalDeviceMeshShaderPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMeshShaderPropertiesNV & operator=( VkPhysicalDeviceMeshShaderPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMeshShaderPropertiesNV & operator=( PhysicalDeviceMeshShaderPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMeshShaderPropertiesNV ) ); + return *this; + } + + + operator VkPhysicalDeviceMeshShaderPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMeshShaderPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMeshShaderPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceMeshShaderPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxDrawMeshTasksCount == rhs.maxDrawMeshTasksCount ) + && ( maxTaskWorkGroupInvocations == rhs.maxTaskWorkGroupInvocations ) + && ( maxTaskWorkGroupSize == rhs.maxTaskWorkGroupSize ) + && ( maxTaskTotalMemorySize == rhs.maxTaskTotalMemorySize ) + && ( maxTaskOutputCount == rhs.maxTaskOutputCount ) + && ( maxMeshWorkGroupInvocations == rhs.maxMeshWorkGroupInvocations ) + && ( maxMeshWorkGroupSize == rhs.maxMeshWorkGroupSize ) + && ( maxMeshTotalMemorySize == rhs.maxMeshTotalMemorySize ) + && ( 
maxMeshOutputVertices == rhs.maxMeshOutputVertices ) + && ( maxMeshOutputPrimitives == rhs.maxMeshOutputPrimitives ) + && ( maxMeshMultiviewViewCount == rhs.maxMeshMultiviewViewCount ) + && ( meshOutputPerVertexGranularity == rhs.meshOutputPerVertexGranularity ) + && ( meshOutputPerPrimitiveGranularity == rhs.meshOutputPerPrimitiveGranularity ); + } + + bool operator!=( PhysicalDeviceMeshShaderPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMeshShaderPropertiesNV; + void* pNext = {}; + uint32_t maxDrawMeshTasksCount = {}; + uint32_t maxTaskWorkGroupInvocations = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D maxTaskWorkGroupSize = {}; + uint32_t maxTaskTotalMemorySize = {}; + uint32_t maxTaskOutputCount = {}; + uint32_t maxMeshWorkGroupInvocations = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D maxMeshWorkGroupSize = {}; + uint32_t maxMeshTotalMemorySize = {}; + uint32_t maxMeshOutputVertices = {}; + uint32_t maxMeshOutputPrimitives = {}; + uint32_t maxMeshMultiviewViewCount = {}; + uint32_t meshOutputPerVertexGranularity = {}; + uint32_t meshOutputPerPrimitiveGranularity = {}; + + }; + static_assert( sizeof( PhysicalDeviceMeshShaderPropertiesNV ) == sizeof( VkPhysicalDeviceMeshShaderPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceMeshShaderPropertiesNV; + }; + + struct PhysicalDeviceMultiviewFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMultiviewFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewFeatures(VULKAN_HPP_NAMESPACE::Bool32 multiview_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiviewTessellationShader_ = {}) VULKAN_HPP_NOEXCEPT + : multiview( multiview_ ), multiviewGeometryShader( multiviewGeometryShader_ ), multiviewTessellationShader( multiviewTessellationShader_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewFeatures( PhysicalDeviceMultiviewFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMultiviewFeatures( VkPhysicalDeviceMultiviewFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMultiviewFeatures & operator=( VkPhysicalDeviceMultiviewFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMultiviewFeatures & operator=( PhysicalDeviceMultiviewFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMultiviewFeatures ) ); + return *this; + } + + PhysicalDeviceMultiviewFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceMultiviewFeatures & setMultiview( VULKAN_HPP_NAMESPACE::Bool32 multiview_ ) VULKAN_HPP_NOEXCEPT + { + multiview = multiview_; + return *this; + } + + PhysicalDeviceMultiviewFeatures & setMultiviewGeometryShader( VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader_ ) VULKAN_HPP_NOEXCEPT + { + multiviewGeometryShader = multiviewGeometryShader_; + return *this; + } + + PhysicalDeviceMultiviewFeatures & setMultiviewTessellationShader( 
VULKAN_HPP_NAMESPACE::Bool32 multiviewTessellationShader_ ) VULKAN_HPP_NOEXCEPT + { + multiviewTessellationShader = multiviewTessellationShader_; + return *this; + } + + + operator VkPhysicalDeviceMultiviewFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMultiviewFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMultiviewFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceMultiviewFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( multiview == rhs.multiview ) + && ( multiviewGeometryShader == rhs.multiviewGeometryShader ) + && ( multiviewTessellationShader == rhs.multiviewTessellationShader ); + } + + bool operator!=( PhysicalDeviceMultiviewFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMultiviewFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiview = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiviewTessellationShader = {}; + + }; + static_assert( sizeof( PhysicalDeviceMultiviewFeatures ) == sizeof( VkPhysicalDeviceMultiviewFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceMultiviewFeatures; + }; + using PhysicalDeviceMultiviewFeaturesKHR = PhysicalDeviceMultiviewFeatures; + + struct PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(VULKAN_HPP_NAMESPACE::Bool32 perViewPositionAllComponents_ = {}) VULKAN_HPP_NOEXCEPT + : perViewPositionAllComponents( perViewPositionAllComponents_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX( VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX & operator=( VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX & operator=( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ) ); + return *this; + } + + + operator VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto 
operator<=>( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& ) const = default; +#else + bool operator==( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( perViewPositionAllComponents == rhs.perViewPositionAllComponents ); + } + + bool operator!=( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 perViewPositionAllComponents = {}; + + }; + static_assert( sizeof( PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ) == sizeof( VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + }; + + struct PhysicalDeviceMultiviewProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceMultiviewProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewProperties(uint32_t maxMultiviewViewCount_ = {}, uint32_t maxMultiviewInstanceIndex_ = {}) VULKAN_HPP_NOEXCEPT + : maxMultiviewViewCount( maxMultiviewViewCount_ ), maxMultiviewInstanceIndex( maxMultiviewInstanceIndex_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceMultiviewProperties( PhysicalDeviceMultiviewProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceMultiviewProperties( VkPhysicalDeviceMultiviewProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceMultiviewProperties & operator=( VkPhysicalDeviceMultiviewProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceMultiviewProperties & operator=( PhysicalDeviceMultiviewProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceMultiviewProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceMultiviewProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceMultiviewProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceMultiviewProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceMultiviewProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxMultiviewViewCount == rhs.maxMultiviewViewCount ) + && ( maxMultiviewInstanceIndex == rhs.maxMultiviewInstanceIndex ); + } + + bool operator!=( PhysicalDeviceMultiviewProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceMultiviewProperties; + void* pNext = {}; + uint32_t maxMultiviewViewCount = {}; + uint32_t maxMultiviewInstanceIndex = {}; + + }; + static_assert( sizeof( PhysicalDeviceMultiviewProperties ) == sizeof( 
VkPhysicalDeviceMultiviewProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceMultiviewProperties; + }; + using PhysicalDeviceMultiviewPropertiesKHR = PhysicalDeviceMultiviewProperties; + + struct PhysicalDevicePCIBusInfoPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePciBusInfoPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePCIBusInfoPropertiesEXT(uint32_t pciDomain_ = {}, uint32_t pciBus_ = {}, uint32_t pciDevice_ = {}, uint32_t pciFunction_ = {}) VULKAN_HPP_NOEXCEPT + : pciDomain( pciDomain_ ), pciBus( pciBus_ ), pciDevice( pciDevice_ ), pciFunction( pciFunction_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePCIBusInfoPropertiesEXT( PhysicalDevicePCIBusInfoPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePCIBusInfoPropertiesEXT( VkPhysicalDevicePCIBusInfoPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePCIBusInfoPropertiesEXT & operator=( VkPhysicalDevicePCIBusInfoPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePCIBusInfoPropertiesEXT & operator=( PhysicalDevicePCIBusInfoPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePCIBusInfoPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDevicePCIBusInfoPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePCIBusInfoPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePCIBusInfoPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDevicePCIBusInfoPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pciDomain == rhs.pciDomain ) + && ( pciBus == rhs.pciBus ) + && ( pciDevice == rhs.pciDevice ) + && ( pciFunction == rhs.pciFunction ); + } + + bool operator!=( PhysicalDevicePCIBusInfoPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePciBusInfoPropertiesEXT; + void* pNext = {}; + uint32_t pciDomain = {}; + uint32_t pciBus = {}; + uint32_t pciDevice = {}; + uint32_t pciFunction = {}; + + }; + static_assert( sizeof( PhysicalDevicePCIBusInfoPropertiesEXT ) == sizeof( VkPhysicalDevicePCIBusInfoPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePCIBusInfoPropertiesEXT; + }; + + struct PhysicalDevicePerformanceQueryFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePerformanceQueryFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePerformanceQueryFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 performanceCounterQueryPools_ = {}, VULKAN_HPP_NAMESPACE::Bool32 performanceCounterMultipleQueryPools_ = {}) VULKAN_HPP_NOEXCEPT + : performanceCounterQueryPools( performanceCounterQueryPools_ ), performanceCounterMultipleQueryPools( performanceCounterMultipleQueryPools_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePerformanceQueryFeaturesKHR( PhysicalDevicePerformanceQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePerformanceQueryFeaturesKHR( VkPhysicalDevicePerformanceQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePerformanceQueryFeaturesKHR & operator=( VkPhysicalDevicePerformanceQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePerformanceQueryFeaturesKHR & operator=( PhysicalDevicePerformanceQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePerformanceQueryFeaturesKHR ) ); + return *this; + } + + PhysicalDevicePerformanceQueryFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePerformanceQueryFeaturesKHR & setPerformanceCounterQueryPools( VULKAN_HPP_NAMESPACE::Bool32 performanceCounterQueryPools_ ) VULKAN_HPP_NOEXCEPT + { + performanceCounterQueryPools = performanceCounterQueryPools_; + return *this; + } + + PhysicalDevicePerformanceQueryFeaturesKHR & setPerformanceCounterMultipleQueryPools( VULKAN_HPP_NAMESPACE::Bool32 performanceCounterMultipleQueryPools_ ) VULKAN_HPP_NOEXCEPT + { + performanceCounterMultipleQueryPools = performanceCounterMultipleQueryPools_; + return *this; + } + + + operator VkPhysicalDevicePerformanceQueryFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePerformanceQueryFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePerformanceQueryFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDevicePerformanceQueryFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( performanceCounterQueryPools == rhs.performanceCounterQueryPools ) + && ( performanceCounterMultipleQueryPools == rhs.performanceCounterMultipleQueryPools ); + } + + bool operator!=( PhysicalDevicePerformanceQueryFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePerformanceQueryFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 performanceCounterQueryPools = {}; + VULKAN_HPP_NAMESPACE::Bool32 performanceCounterMultipleQueryPools = {}; + + }; + static_assert( sizeof( PhysicalDevicePerformanceQueryFeaturesKHR ) == sizeof( VkPhysicalDevicePerformanceQueryFeaturesKHR ), "struct and 
wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDevicePerformanceQueryFeaturesKHR; + }; + + struct PhysicalDevicePerformanceQueryPropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePerformanceQueryPropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePerformanceQueryPropertiesKHR(VULKAN_HPP_NAMESPACE::Bool32 allowCommandBufferQueryCopies_ = {}) VULKAN_HPP_NOEXCEPT + : allowCommandBufferQueryCopies( allowCommandBufferQueryCopies_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePerformanceQueryPropertiesKHR( PhysicalDevicePerformanceQueryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePerformanceQueryPropertiesKHR( VkPhysicalDevicePerformanceQueryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePerformanceQueryPropertiesKHR & operator=( VkPhysicalDevicePerformanceQueryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePerformanceQueryPropertiesKHR & operator=( PhysicalDevicePerformanceQueryPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePerformanceQueryPropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDevicePerformanceQueryPropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePerformanceQueryPropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePerformanceQueryPropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDevicePerformanceQueryPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( allowCommandBufferQueryCopies == rhs.allowCommandBufferQueryCopies ); + } + + bool operator!=( PhysicalDevicePerformanceQueryPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePerformanceQueryPropertiesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 allowCommandBufferQueryCopies = {}; + + }; + static_assert( sizeof( PhysicalDevicePerformanceQueryPropertiesKHR ) == sizeof( VkPhysicalDevicePerformanceQueryPropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePerformanceQueryPropertiesKHR; + }; + + struct PhysicalDevicePipelineCreationCacheControlFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePipelineCreationCacheControlFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineCreationCacheControlFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 pipelineCreationCacheControl_ = {}) VULKAN_HPP_NOEXCEPT + : pipelineCreationCacheControl( pipelineCreationCacheControl_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineCreationCacheControlFeaturesEXT( PhysicalDevicePipelineCreationCacheControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePipelineCreationCacheControlFeaturesEXT( VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePipelineCreationCacheControlFeaturesEXT & operator=( VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePipelineCreationCacheControlFeaturesEXT & operator=( PhysicalDevicePipelineCreationCacheControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePipelineCreationCacheControlFeaturesEXT ) ); + return *this; + } + + PhysicalDevicePipelineCreationCacheControlFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePipelineCreationCacheControlFeaturesEXT & setPipelineCreationCacheControl( VULKAN_HPP_NAMESPACE::Bool32 pipelineCreationCacheControl_ ) VULKAN_HPP_NOEXCEPT + { + pipelineCreationCacheControl = pipelineCreationCacheControl_; + return *this; + } + + + operator VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePipelineCreationCacheControlFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDevicePipelineCreationCacheControlFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineCreationCacheControl == rhs.pipelineCreationCacheControl ); + } + + bool operator!=( PhysicalDevicePipelineCreationCacheControlFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePipelineCreationCacheControlFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineCreationCacheControl = {}; + + }; + static_assert( sizeof( PhysicalDevicePipelineCreationCacheControlFeaturesEXT ) == sizeof( VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePipelineCreationCacheControlFeaturesEXT; + }; + + struct PhysicalDevicePipelineExecutablePropertiesFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineExecutablePropertiesFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 pipelineExecutableInfo_ = {}) VULKAN_HPP_NOEXCEPT + : pipelineExecutableInfo( pipelineExecutableInfo_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePipelineExecutablePropertiesFeaturesKHR( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePipelineExecutablePropertiesFeaturesKHR( VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePipelineExecutablePropertiesFeaturesKHR & operator=( VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePipelineExecutablePropertiesFeaturesKHR & operator=( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR ) ); + return *this; + } + + PhysicalDevicePipelineExecutablePropertiesFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePipelineExecutablePropertiesFeaturesKHR & setPipelineExecutableInfo( VULKAN_HPP_NAMESPACE::Bool32 pipelineExecutableInfo_ ) VULKAN_HPP_NOEXCEPT + { + pipelineExecutableInfo = pipelineExecutableInfo_; + return *this; + } + + + operator VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pipelineExecutableInfo == rhs.pipelineExecutableInfo ); + } + + bool operator!=( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 pipelineExecutableInfo = {}; + + }; + static_assert( sizeof( PhysicalDevicePipelineExecutablePropertiesFeaturesKHR ) == sizeof( VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + }; + + struct PhysicalDevicePointClippingProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePointClippingProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePointClippingProperties(VULKAN_HPP_NAMESPACE::PointClippingBehavior pointClippingBehavior_ = VULKAN_HPP_NAMESPACE::PointClippingBehavior::eAllClipPlanes) VULKAN_HPP_NOEXCEPT + : pointClippingBehavior( pointClippingBehavior_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePointClippingProperties( PhysicalDevicePointClippingProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePointClippingProperties( VkPhysicalDevicePointClippingProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePointClippingProperties & operator=( VkPhysicalDevicePointClippingProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePointClippingProperties & operator=( PhysicalDevicePointClippingProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePointClippingProperties ) ); + return *this; + } + + + operator VkPhysicalDevicePointClippingProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePointClippingProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePointClippingProperties const& ) const = default; +#else + bool operator==( PhysicalDevicePointClippingProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pointClippingBehavior == rhs.pointClippingBehavior ); + } + + bool operator!=( PhysicalDevicePointClippingProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePointClippingProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::PointClippingBehavior pointClippingBehavior = VULKAN_HPP_NAMESPACE::PointClippingBehavior::eAllClipPlanes; + + }; + static_assert( sizeof( PhysicalDevicePointClippingProperties ) == sizeof( VkPhysicalDevicePointClippingProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePointClippingProperties; + }; + using PhysicalDevicePointClippingPropertiesKHR = PhysicalDevicePointClippingProperties; + +#ifdef VK_ENABLE_BETA_EXTENSIONS + struct PhysicalDevicePortabilitySubsetFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePortabilitySubsetFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePortabilitySubsetFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 constantAlphaColorBlendFactors_ = {}, VULKAN_HPP_NAMESPACE::Bool32 events_ = {}, VULKAN_HPP_NAMESPACE::Bool32 imageViewFormatReinterpretation_ = {}, VULKAN_HPP_NAMESPACE::Bool32 imageViewFormatSwizzle_ = {}, VULKAN_HPP_NAMESPACE::Bool32 imageView2DOn3DImage_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multisampleArrayImage_ = {}, VULKAN_HPP_NAMESPACE::Bool32 mutableComparisonSamplers_ = {}, VULKAN_HPP_NAMESPACE::Bool32 pointPolygons_ = {}, VULKAN_HPP_NAMESPACE::Bool32 samplerMipLodBias_ = {}, VULKAN_HPP_NAMESPACE::Bool32 separateStencilMaskRef_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSampleRateInterpolationFunctions_ = {}, VULKAN_HPP_NAMESPACE::Bool32 tessellationIsolines_ = {}, VULKAN_HPP_NAMESPACE::Bool32 tessellationPointMode_ = {}, VULKAN_HPP_NAMESPACE::Bool32 triangleFans_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeAccessBeyondStride_ = {}) VULKAN_HPP_NOEXCEPT + : constantAlphaColorBlendFactors( constantAlphaColorBlendFactors_ ), events( events_ ), imageViewFormatReinterpretation( imageViewFormatReinterpretation_ ), imageViewFormatSwizzle( imageViewFormatSwizzle_ ), imageView2DOn3DImage( imageView2DOn3DImage_ ), multisampleArrayImage( multisampleArrayImage_ ), mutableComparisonSamplers( mutableComparisonSamplers_ ), pointPolygons( pointPolygons_ ), samplerMipLodBias( samplerMipLodBias_ ), separateStencilMaskRef( separateStencilMaskRef_ ), shaderSampleRateInterpolationFunctions( shaderSampleRateInterpolationFunctions_ ), tessellationIsolines( tessellationIsolines_ ), tessellationPointMode( tessellationPointMode_ ), triangleFans( triangleFans_ ), vertexAttributeAccessBeyondStride( vertexAttributeAccessBeyondStride_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePortabilitySubsetFeaturesKHR( PhysicalDevicePortabilitySubsetFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePortabilitySubsetFeaturesKHR( VkPhysicalDevicePortabilitySubsetFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePortabilitySubsetFeaturesKHR & operator=( VkPhysicalDevicePortabilitySubsetFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & operator=( PhysicalDevicePortabilitySubsetFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePortabilitySubsetFeaturesKHR ) ); + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setConstantAlphaColorBlendFactors( VULKAN_HPP_NAMESPACE::Bool32 constantAlphaColorBlendFactors_ ) VULKAN_HPP_NOEXCEPT + { + constantAlphaColorBlendFactors = constantAlphaColorBlendFactors_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setEvents( 
VULKAN_HPP_NAMESPACE::Bool32 events_ ) VULKAN_HPP_NOEXCEPT + { + events = events_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setImageViewFormatReinterpretation( VULKAN_HPP_NAMESPACE::Bool32 imageViewFormatReinterpretation_ ) VULKAN_HPP_NOEXCEPT + { + imageViewFormatReinterpretation = imageViewFormatReinterpretation_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setImageViewFormatSwizzle( VULKAN_HPP_NAMESPACE::Bool32 imageViewFormatSwizzle_ ) VULKAN_HPP_NOEXCEPT + { + imageViewFormatSwizzle = imageViewFormatSwizzle_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setImageView2DOn3DImage( VULKAN_HPP_NAMESPACE::Bool32 imageView2DOn3DImage_ ) VULKAN_HPP_NOEXCEPT + { + imageView2DOn3DImage = imageView2DOn3DImage_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setMultisampleArrayImage( VULKAN_HPP_NAMESPACE::Bool32 multisampleArrayImage_ ) VULKAN_HPP_NOEXCEPT + { + multisampleArrayImage = multisampleArrayImage_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setMutableComparisonSamplers( VULKAN_HPP_NAMESPACE::Bool32 mutableComparisonSamplers_ ) VULKAN_HPP_NOEXCEPT + { + mutableComparisonSamplers = mutableComparisonSamplers_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setPointPolygons( VULKAN_HPP_NAMESPACE::Bool32 pointPolygons_ ) VULKAN_HPP_NOEXCEPT + { + pointPolygons = pointPolygons_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setSamplerMipLodBias( VULKAN_HPP_NAMESPACE::Bool32 samplerMipLodBias_ ) VULKAN_HPP_NOEXCEPT + { + samplerMipLodBias = samplerMipLodBias_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setSeparateStencilMaskRef( VULKAN_HPP_NAMESPACE::Bool32 separateStencilMaskRef_ ) VULKAN_HPP_NOEXCEPT + { + separateStencilMaskRef = separateStencilMaskRef_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setShaderSampleRateInterpolationFunctions( VULKAN_HPP_NAMESPACE::Bool32 shaderSampleRateInterpolationFunctions_ ) VULKAN_HPP_NOEXCEPT + { + shaderSampleRateInterpolationFunctions = shaderSampleRateInterpolationFunctions_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setTessellationIsolines( VULKAN_HPP_NAMESPACE::Bool32 tessellationIsolines_ ) VULKAN_HPP_NOEXCEPT + { + tessellationIsolines = tessellationIsolines_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setTessellationPointMode( VULKAN_HPP_NAMESPACE::Bool32 tessellationPointMode_ ) VULKAN_HPP_NOEXCEPT + { + tessellationPointMode = tessellationPointMode_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setTriangleFans( VULKAN_HPP_NAMESPACE::Bool32 triangleFans_ ) VULKAN_HPP_NOEXCEPT + { + triangleFans = triangleFans_; + return *this; + } + + PhysicalDevicePortabilitySubsetFeaturesKHR & setVertexAttributeAccessBeyondStride( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeAccessBeyondStride_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeAccessBeyondStride = vertexAttributeAccessBeyondStride_; + return *this; + } + + + operator VkPhysicalDevicePortabilitySubsetFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePortabilitySubsetFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePortabilitySubsetFeaturesKHR const& ) const = default; +#else + bool operator==( 
PhysicalDevicePortabilitySubsetFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( constantAlphaColorBlendFactors == rhs.constantAlphaColorBlendFactors ) + && ( events == rhs.events ) + && ( imageViewFormatReinterpretation == rhs.imageViewFormatReinterpretation ) + && ( imageViewFormatSwizzle == rhs.imageViewFormatSwizzle ) + && ( imageView2DOn3DImage == rhs.imageView2DOn3DImage ) + && ( multisampleArrayImage == rhs.multisampleArrayImage ) + && ( mutableComparisonSamplers == rhs.mutableComparisonSamplers ) + && ( pointPolygons == rhs.pointPolygons ) + && ( samplerMipLodBias == rhs.samplerMipLodBias ) + && ( separateStencilMaskRef == rhs.separateStencilMaskRef ) + && ( shaderSampleRateInterpolationFunctions == rhs.shaderSampleRateInterpolationFunctions ) + && ( tessellationIsolines == rhs.tessellationIsolines ) + && ( tessellationPointMode == rhs.tessellationPointMode ) + && ( triangleFans == rhs.triangleFans ) + && ( vertexAttributeAccessBeyondStride == rhs.vertexAttributeAccessBeyondStride ); + } + + bool operator!=( PhysicalDevicePortabilitySubsetFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePortabilitySubsetFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 constantAlphaColorBlendFactors = {}; + VULKAN_HPP_NAMESPACE::Bool32 events = {}; + VULKAN_HPP_NAMESPACE::Bool32 imageViewFormatReinterpretation = {}; + VULKAN_HPP_NAMESPACE::Bool32 imageViewFormatSwizzle = {}; + VULKAN_HPP_NAMESPACE::Bool32 imageView2DOn3DImage = {}; + VULKAN_HPP_NAMESPACE::Bool32 multisampleArrayImage = {}; + VULKAN_HPP_NAMESPACE::Bool32 mutableComparisonSamplers = {}; + VULKAN_HPP_NAMESPACE::Bool32 pointPolygons = {}; + VULKAN_HPP_NAMESPACE::Bool32 samplerMipLodBias = {}; + VULKAN_HPP_NAMESPACE::Bool32 separateStencilMaskRef = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSampleRateInterpolationFunctions = {}; + VULKAN_HPP_NAMESPACE::Bool32 tessellationIsolines = {}; + VULKAN_HPP_NAMESPACE::Bool32 tessellationPointMode = {}; + VULKAN_HPP_NAMESPACE::Bool32 triangleFans = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeAccessBeyondStride = {}; + + }; + static_assert( sizeof( PhysicalDevicePortabilitySubsetFeaturesKHR ) == sizeof( VkPhysicalDevicePortabilitySubsetFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePortabilitySubsetFeaturesKHR; + }; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + +#ifdef VK_ENABLE_BETA_EXTENSIONS + struct PhysicalDevicePortabilitySubsetPropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePortabilitySubsetPropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePortabilitySubsetPropertiesKHR(uint32_t minVertexInputBindingStrideAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : minVertexInputBindingStrideAlignment( minVertexInputBindingStrideAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePortabilitySubsetPropertiesKHR( PhysicalDevicePortabilitySubsetPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePortabilitySubsetPropertiesKHR( VkPhysicalDevicePortabilitySubsetPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePortabilitySubsetPropertiesKHR & operator=( VkPhysicalDevicePortabilitySubsetPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePortabilitySubsetPropertiesKHR & operator=( PhysicalDevicePortabilitySubsetPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePortabilitySubsetPropertiesKHR ) ); + return *this; + } + + PhysicalDevicePortabilitySubsetPropertiesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePortabilitySubsetPropertiesKHR & setMinVertexInputBindingStrideAlignment( uint32_t minVertexInputBindingStrideAlignment_ ) VULKAN_HPP_NOEXCEPT + { + minVertexInputBindingStrideAlignment = minVertexInputBindingStrideAlignment_; + return *this; + } + + + operator VkPhysicalDevicePortabilitySubsetPropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePortabilitySubsetPropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePortabilitySubsetPropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDevicePortabilitySubsetPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minVertexInputBindingStrideAlignment == rhs.minVertexInputBindingStrideAlignment ); + } + + bool operator!=( PhysicalDevicePortabilitySubsetPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePortabilitySubsetPropertiesKHR; + void* pNext = {}; + uint32_t minVertexInputBindingStrideAlignment = {}; + + }; + static_assert( sizeof( PhysicalDevicePortabilitySubsetPropertiesKHR ) == sizeof( VkPhysicalDevicePortabilitySubsetPropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePortabilitySubsetPropertiesKHR; + }; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + + struct PhysicalDevicePrivateDataFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePrivateDataFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePrivateDataFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 privateData_ = {}) VULKAN_HPP_NOEXCEPT + : privateData( privateData_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePrivateDataFeaturesEXT( PhysicalDevicePrivateDataFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePrivateDataFeaturesEXT( VkPhysicalDevicePrivateDataFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePrivateDataFeaturesEXT & operator=( VkPhysicalDevicePrivateDataFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePrivateDataFeaturesEXT & operator=( PhysicalDevicePrivateDataFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePrivateDataFeaturesEXT ) ); + return *this; + } + + PhysicalDevicePrivateDataFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDevicePrivateDataFeaturesEXT & setPrivateData( VULKAN_HPP_NAMESPACE::Bool32 privateData_ ) VULKAN_HPP_NOEXCEPT + { + privateData = privateData_; + return *this; + } + + + operator VkPhysicalDevicePrivateDataFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePrivateDataFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePrivateDataFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDevicePrivateDataFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( privateData == rhs.privateData ); + } + + bool operator!=( PhysicalDevicePrivateDataFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePrivateDataFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 privateData = {}; + + }; + static_assert( sizeof( PhysicalDevicePrivateDataFeaturesEXT ) == sizeof( VkPhysicalDevicePrivateDataFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePrivateDataFeaturesEXT; + }; + + struct PhysicalDeviceProtectedMemoryFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceProtectedMemoryFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceProtectedMemoryFeatures(VULKAN_HPP_NAMESPACE::Bool32 protectedMemory_ = {}) VULKAN_HPP_NOEXCEPT + : protectedMemory( protectedMemory_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceProtectedMemoryFeatures( PhysicalDeviceProtectedMemoryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceProtectedMemoryFeatures( VkPhysicalDeviceProtectedMemoryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceProtectedMemoryFeatures & operator=( VkPhysicalDeviceProtectedMemoryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceProtectedMemoryFeatures & operator=( PhysicalDeviceProtectedMemoryFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceProtectedMemoryFeatures ) ); + return *this; + } + + PhysicalDeviceProtectedMemoryFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceProtectedMemoryFeatures & setProtectedMemory( VULKAN_HPP_NAMESPACE::Bool32 protectedMemory_ ) VULKAN_HPP_NOEXCEPT + { + protectedMemory = protectedMemory_; + return *this; + } + + + operator VkPhysicalDeviceProtectedMemoryFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceProtectedMemoryFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceProtectedMemoryFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceProtectedMemoryFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( protectedMemory == rhs.protectedMemory ); + } + + bool operator!=( PhysicalDeviceProtectedMemoryFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceProtectedMemoryFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 protectedMemory = {}; + + }; + static_assert( sizeof( PhysicalDeviceProtectedMemoryFeatures ) == sizeof( VkPhysicalDeviceProtectedMemoryFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceProtectedMemoryFeatures; + }; + + struct PhysicalDeviceProtectedMemoryProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceProtectedMemoryProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceProtectedMemoryProperties(VULKAN_HPP_NAMESPACE::Bool32 protectedNoFault_ = {}) VULKAN_HPP_NOEXCEPT + : protectedNoFault( protectedNoFault_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceProtectedMemoryProperties( PhysicalDeviceProtectedMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceProtectedMemoryProperties( VkPhysicalDeviceProtectedMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceProtectedMemoryProperties & operator=( VkPhysicalDeviceProtectedMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceProtectedMemoryProperties & operator=( PhysicalDeviceProtectedMemoryProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceProtectedMemoryProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceProtectedMemoryProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceProtectedMemoryProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceProtectedMemoryProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceProtectedMemoryProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( protectedNoFault == rhs.protectedNoFault ); + } + + bool operator!=( PhysicalDeviceProtectedMemoryProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceProtectedMemoryProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 protectedNoFault = {}; + + }; + static_assert( sizeof( PhysicalDeviceProtectedMemoryProperties ) == sizeof( VkPhysicalDeviceProtectedMemoryProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceProtectedMemoryProperties; + }; + + struct PhysicalDevicePushDescriptorPropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDevicePushDescriptorPropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDevicePushDescriptorPropertiesKHR(uint32_t maxPushDescriptors_ = {}) VULKAN_HPP_NOEXCEPT + : maxPushDescriptors( maxPushDescriptors_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDevicePushDescriptorPropertiesKHR( PhysicalDevicePushDescriptorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDevicePushDescriptorPropertiesKHR( VkPhysicalDevicePushDescriptorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDevicePushDescriptorPropertiesKHR & operator=( VkPhysicalDevicePushDescriptorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDevicePushDescriptorPropertiesKHR & operator=( PhysicalDevicePushDescriptorPropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDevicePushDescriptorPropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDevicePushDescriptorPropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDevicePushDescriptorPropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDevicePushDescriptorPropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDevicePushDescriptorPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxPushDescriptors == rhs.maxPushDescriptors ); + } + + bool operator!=( PhysicalDevicePushDescriptorPropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDevicePushDescriptorPropertiesKHR; + void* pNext = {}; + uint32_t maxPushDescriptors = {}; + + }; + static_assert( sizeof( PhysicalDevicePushDescriptorPropertiesKHR ) == sizeof( VkPhysicalDevicePushDescriptorPropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDevicePushDescriptorPropertiesKHR; + }; + + struct PhysicalDeviceRayQueryFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayQueryFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayQueryFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ = {}) VULKAN_HPP_NOEXCEPT + : rayQuery( rayQuery_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayQueryFeaturesKHR( PhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRayQueryFeaturesKHR( VkPhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRayQueryFeaturesKHR & operator=( VkPhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceRayQueryFeaturesKHR & operator=( PhysicalDeviceRayQueryFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceRayQueryFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceRayQueryFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceRayQueryFeaturesKHR & setRayQuery( VULKAN_HPP_NAMESPACE::Bool32 rayQuery_ ) VULKAN_HPP_NOEXCEPT + { + rayQuery = rayQuery_; + return *this; + } + + + operator VkPhysicalDeviceRayQueryFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceRayQueryFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRayQueryFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceRayQueryFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( rayQuery == rhs.rayQuery ); + } + + bool operator!=( PhysicalDeviceRayQueryFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayQueryFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayQuery = {}; + + }; + static_assert( sizeof( PhysicalDeviceRayQueryFeaturesKHR ) == sizeof( VkPhysicalDeviceRayQueryFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceRayQueryFeaturesKHR; + }; + + struct PhysicalDeviceRayTracingPipelineFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelineFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipeline_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling_ = {}) VULKAN_HPP_NOEXCEPT + : rayTracingPipeline( rayTracingPipeline_ ), rayTracingPipelineShaderGroupHandleCaptureReplay( rayTracingPipelineShaderGroupHandleCaptureReplay_ ), rayTracingPipelineShaderGroupHandleCaptureReplayMixed( rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ ), rayTracingPipelineTraceRaysIndirect( rayTracingPipelineTraceRaysIndirect_ ), rayTraversalPrimitiveCulling( rayTraversalPrimitiveCulling_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelineFeaturesKHR( PhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRayTracingPipelineFeaturesKHR( VkPhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRayTracingPipelineFeaturesKHR & operator=( VkPhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & operator=( PhysicalDeviceRayTracingPipelineFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceRayTracingPipelineFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipeline( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipeline_ ) VULKAN_HPP_NOEXCEPT + { + rayTracingPipeline = rayTracingPipeline_; + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipelineShaderGroupHandleCaptureReplay( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + { + rayTracingPipelineShaderGroupHandleCaptureReplay = rayTracingPipelineShaderGroupHandleCaptureReplay_; + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipelineShaderGroupHandleCaptureReplayMixed( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed_ ) VULKAN_HPP_NOEXCEPT + { + rayTracingPipelineShaderGroupHandleCaptureReplayMixed = rayTracingPipelineShaderGroupHandleCaptureReplayMixed_; + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTracingPipelineTraceRaysIndirect( VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect_ ) VULKAN_HPP_NOEXCEPT + { + rayTracingPipelineTraceRaysIndirect = rayTracingPipelineTraceRaysIndirect_; + return *this; + } + + PhysicalDeviceRayTracingPipelineFeaturesKHR & setRayTraversalPrimitiveCulling( VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling_ ) VULKAN_HPP_NOEXCEPT + { + 
rayTraversalPrimitiveCulling = rayTraversalPrimitiveCulling_; + return *this; + } + + + operator VkPhysicalDeviceRayTracingPipelineFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceRayTracingPipelineFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRayTracingPipelineFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceRayTracingPipelineFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( rayTracingPipeline == rhs.rayTracingPipeline ) + && ( rayTracingPipelineShaderGroupHandleCaptureReplay == rhs.rayTracingPipelineShaderGroupHandleCaptureReplay ) + && ( rayTracingPipelineShaderGroupHandleCaptureReplayMixed == rhs.rayTracingPipelineShaderGroupHandleCaptureReplayMixed ) + && ( rayTracingPipelineTraceRaysIndirect == rhs.rayTracingPipelineTraceRaysIndirect ) + && ( rayTraversalPrimitiveCulling == rhs.rayTraversalPrimitiveCulling ); + } + + bool operator!=( PhysicalDeviceRayTracingPipelineFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingPipelineFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipeline = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTracingPipelineTraceRaysIndirect = {}; + VULKAN_HPP_NAMESPACE::Bool32 rayTraversalPrimitiveCulling = {}; + + }; + static_assert( sizeof( PhysicalDeviceRayTracingPipelineFeaturesKHR ) == sizeof( VkPhysicalDeviceRayTracingPipelineFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceRayTracingPipelineFeaturesKHR; + }; + + struct PhysicalDeviceRayTracingPipelinePropertiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelinePropertiesKHR(uint32_t shaderGroupHandleSize_ = {}, uint32_t maxRayRecursionDepth_ = {}, uint32_t maxShaderGroupStride_ = {}, uint32_t shaderGroupBaseAlignment_ = {}, uint32_t shaderGroupHandleCaptureReplaySize_ = {}, uint32_t maxRayDispatchInvocationCount_ = {}, uint32_t shaderGroupHandleAlignment_ = {}, uint32_t maxRayHitAttributeSize_ = {}) VULKAN_HPP_NOEXCEPT + : shaderGroupHandleSize( shaderGroupHandleSize_ ), maxRayRecursionDepth( maxRayRecursionDepth_ ), maxShaderGroupStride( maxShaderGroupStride_ ), shaderGroupBaseAlignment( shaderGroupBaseAlignment_ ), shaderGroupHandleCaptureReplaySize( shaderGroupHandleCaptureReplaySize_ ), maxRayDispatchInvocationCount( maxRayDispatchInvocationCount_ ), shaderGroupHandleAlignment( shaderGroupHandleAlignment_ ), maxRayHitAttributeSize( maxRayHitAttributeSize_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPipelinePropertiesKHR( PhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRayTracingPipelinePropertiesKHR( VkPhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRayTracingPipelinePropertiesKHR & operator=( VkPhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceRayTracingPipelinePropertiesKHR & operator=( PhysicalDeviceRayTracingPipelinePropertiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceRayTracingPipelinePropertiesKHR ) ); + return *this; + } + + + operator VkPhysicalDeviceRayTracingPipelinePropertiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceRayTracingPipelinePropertiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRayTracingPipelinePropertiesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceRayTracingPipelinePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderGroupHandleSize == rhs.shaderGroupHandleSize ) + && ( maxRayRecursionDepth == rhs.maxRayRecursionDepth ) + && ( maxShaderGroupStride == rhs.maxShaderGroupStride ) + && ( shaderGroupBaseAlignment == rhs.shaderGroupBaseAlignment ) + && ( shaderGroupHandleCaptureReplaySize == rhs.shaderGroupHandleCaptureReplaySize ) + && ( maxRayDispatchInvocationCount == rhs.maxRayDispatchInvocationCount ) + && ( shaderGroupHandleAlignment == rhs.shaderGroupHandleAlignment ) + && ( maxRayHitAttributeSize == rhs.maxRayHitAttributeSize ); + } + + bool operator!=( PhysicalDeviceRayTracingPipelinePropertiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR; + void* pNext = {}; + 
uint32_t shaderGroupHandleSize = {}; + uint32_t maxRayRecursionDepth = {}; + uint32_t maxShaderGroupStride = {}; + uint32_t shaderGroupBaseAlignment = {}; + uint32_t shaderGroupHandleCaptureReplaySize = {}; + uint32_t maxRayDispatchInvocationCount = {}; + uint32_t shaderGroupHandleAlignment = {}; + uint32_t maxRayHitAttributeSize = {}; + + }; + static_assert( sizeof( PhysicalDeviceRayTracingPipelinePropertiesKHR ) == sizeof( VkPhysicalDeviceRayTracingPipelinePropertiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceRayTracingPipelinePropertiesKHR>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceRayTracingPipelinePropertiesKHR> + { + using Type = PhysicalDeviceRayTracingPipelinePropertiesKHR; + }; + + struct PhysicalDeviceRayTracingPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRayTracingPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPropertiesNV(uint32_t shaderGroupHandleSize_ = {}, uint32_t maxRecursionDepth_ = {}, uint32_t maxShaderGroupStride_ = {}, uint32_t shaderGroupBaseAlignment_ = {}, uint64_t maxGeometryCount_ = {}, uint64_t maxInstanceCount_ = {}, uint64_t maxTriangleCount_ = {}, uint32_t maxDescriptorSetAccelerationStructures_ = {}) VULKAN_HPP_NOEXCEPT + : shaderGroupHandleSize( shaderGroupHandleSize_ ), maxRecursionDepth( maxRecursionDepth_ ), maxShaderGroupStride( maxShaderGroupStride_ ), shaderGroupBaseAlignment( shaderGroupBaseAlignment_ ), maxGeometryCount( maxGeometryCount_ ), maxInstanceCount( maxInstanceCount_ ), maxTriangleCount( maxTriangleCount_ ), maxDescriptorSetAccelerationStructures( maxDescriptorSetAccelerationStructures_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRayTracingPropertiesNV( PhysicalDeviceRayTracingPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRayTracingPropertiesNV( VkPhysicalDeviceRayTracingPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRayTracingPropertiesNV & operator=( VkPhysicalDeviceRayTracingPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceRayTracingPropertiesNV const *>( &rhs ); + return *this; + } + + PhysicalDeviceRayTracingPropertiesNV & operator=( PhysicalDeviceRayTracingPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceRayTracingPropertiesNV ) ); + return *this; + } + + + operator VkPhysicalDeviceRayTracingPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceRayTracingPropertiesNV*>( this ); + } + + operator VkPhysicalDeviceRayTracingPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesNV*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRayTracingPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceRayTracingPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderGroupHandleSize == rhs.shaderGroupHandleSize ) + && ( maxRecursionDepth == rhs.maxRecursionDepth ) + && ( maxShaderGroupStride == rhs.maxShaderGroupStride ) + && ( shaderGroupBaseAlignment == rhs.shaderGroupBaseAlignment ) + && ( maxGeometryCount == rhs.maxGeometryCount ) + && ( maxInstanceCount == rhs.maxInstanceCount ) + && ( maxTriangleCount == rhs.maxTriangleCount ) + && ( maxDescriptorSetAccelerationStructures ==
rhs.maxDescriptorSetAccelerationStructures ); + } + + bool operator!=( PhysicalDeviceRayTracingPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRayTracingPropertiesNV; + void* pNext = {}; + uint32_t shaderGroupHandleSize = {}; + uint32_t maxRecursionDepth = {}; + uint32_t maxShaderGroupStride = {}; + uint32_t shaderGroupBaseAlignment = {}; + uint64_t maxGeometryCount = {}; + uint64_t maxInstanceCount = {}; + uint64_t maxTriangleCount = {}; + uint32_t maxDescriptorSetAccelerationStructures = {}; + + }; + static_assert( sizeof( PhysicalDeviceRayTracingPropertiesNV ) == sizeof( VkPhysicalDeviceRayTracingPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceRayTracingPropertiesNV; + }; + + struct PhysicalDeviceRepresentativeFragmentTestFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRepresentativeFragmentTestFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRepresentativeFragmentTestFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTest_ = {}) VULKAN_HPP_NOEXCEPT + : representativeFragmentTest( representativeFragmentTest_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRepresentativeFragmentTestFeaturesNV( PhysicalDeviceRepresentativeFragmentTestFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV( VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV & operator=( VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV & operator=( PhysicalDeviceRepresentativeFragmentTestFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceRepresentativeFragmentTestFeaturesNV ) ); + return *this; + } + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceRepresentativeFragmentTestFeaturesNV & setRepresentativeFragmentTest( VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTest_ ) VULKAN_HPP_NOEXCEPT + { + representativeFragmentTest = representativeFragmentTest_; + return *this; + } + + + operator VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRepresentativeFragmentTestFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceRepresentativeFragmentTestFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( representativeFragmentTest == rhs.representativeFragmentTest ); + } + + bool operator!=( 
PhysicalDeviceRepresentativeFragmentTestFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRepresentativeFragmentTestFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTest = {}; + + }; + static_assert( sizeof( PhysicalDeviceRepresentativeFragmentTestFeaturesNV ) == sizeof( VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceRepresentativeFragmentTestFeaturesNV; + }; + + struct PhysicalDeviceRobustness2FeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRobustness2FeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRobustness2FeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccess2_ = {}, VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess2_ = {}, VULKAN_HPP_NAMESPACE::Bool32 nullDescriptor_ = {}) VULKAN_HPP_NOEXCEPT + : robustBufferAccess2( robustBufferAccess2_ ), robustImageAccess2( robustImageAccess2_ ), nullDescriptor( nullDescriptor_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRobustness2FeaturesEXT( PhysicalDeviceRobustness2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRobustness2FeaturesEXT( VkPhysicalDeviceRobustness2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRobustness2FeaturesEXT & operator=( VkPhysicalDeviceRobustness2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceRobustness2FeaturesEXT & operator=( PhysicalDeviceRobustness2FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceRobustness2FeaturesEXT ) ); + return *this; + } + + PhysicalDeviceRobustness2FeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceRobustness2FeaturesEXT & setRobustBufferAccess2( VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccess2_ ) VULKAN_HPP_NOEXCEPT + { + robustBufferAccess2 = robustBufferAccess2_; + return *this; + } + + PhysicalDeviceRobustness2FeaturesEXT & setRobustImageAccess2( VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess2_ ) VULKAN_HPP_NOEXCEPT + { + robustImageAccess2 = robustImageAccess2_; + return *this; + } + + PhysicalDeviceRobustness2FeaturesEXT & setNullDescriptor( VULKAN_HPP_NAMESPACE::Bool32 nullDescriptor_ ) VULKAN_HPP_NOEXCEPT + { + nullDescriptor = nullDescriptor_; + return *this; + } + + + operator VkPhysicalDeviceRobustness2FeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceRobustness2FeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRobustness2FeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceRobustness2FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( robustBufferAccess2 == rhs.robustBufferAccess2 ) + && ( robustImageAccess2 == 
rhs.robustImageAccess2 ) + && ( nullDescriptor == rhs.nullDescriptor ); + } + + bool operator!=( PhysicalDeviceRobustness2FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRobustness2FeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccess2 = {}; + VULKAN_HPP_NAMESPACE::Bool32 robustImageAccess2 = {}; + VULKAN_HPP_NAMESPACE::Bool32 nullDescriptor = {}; + + }; + static_assert( sizeof( PhysicalDeviceRobustness2FeaturesEXT ) == sizeof( VkPhysicalDeviceRobustness2FeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceRobustness2FeaturesEXT; + }; + + struct PhysicalDeviceRobustness2PropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceRobustness2PropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceRobustness2PropertiesEXT(VULKAN_HPP_NAMESPACE::DeviceSize robustStorageBufferAccessSizeAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize robustUniformBufferAccessSizeAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : robustStorageBufferAccessSizeAlignment( robustStorageBufferAccessSizeAlignment_ ), robustUniformBufferAccessSizeAlignment( robustUniformBufferAccessSizeAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceRobustness2PropertiesEXT( PhysicalDeviceRobustness2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceRobustness2PropertiesEXT( VkPhysicalDeviceRobustness2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceRobustness2PropertiesEXT & operator=( VkPhysicalDeviceRobustness2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceRobustness2PropertiesEXT & operator=( PhysicalDeviceRobustness2PropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceRobustness2PropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceRobustness2PropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceRobustness2PropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceRobustness2PropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceRobustness2PropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( robustStorageBufferAccessSizeAlignment == rhs.robustStorageBufferAccessSizeAlignment ) + && ( robustUniformBufferAccessSizeAlignment == rhs.robustUniformBufferAccessSizeAlignment ); + } + + bool operator!=( PhysicalDeviceRobustness2PropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceRobustness2PropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize robustStorageBufferAccessSizeAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize robustUniformBufferAccessSizeAlignment = 
{}; + + }; + static_assert( sizeof( PhysicalDeviceRobustness2PropertiesEXT ) == sizeof( VkPhysicalDeviceRobustness2PropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceRobustness2PropertiesEXT>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceRobustness2PropertiesEXT> + { + using Type = PhysicalDeviceRobustness2PropertiesEXT; + }; + + struct PhysicalDeviceSampleLocationsPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSampleLocationsPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceSampleLocationsPropertiesEXT(VULKAN_HPP_NAMESPACE::SampleCountFlags sampleLocationSampleCounts_ = {}, VULKAN_HPP_NAMESPACE::Extent2D maxSampleLocationGridSize_ = {}, std::array<float, 2> const& sampleLocationCoordinateRange_ = {}, uint32_t sampleLocationSubPixelBits_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variableSampleLocations_ = {}) VULKAN_HPP_NOEXCEPT + : sampleLocationSampleCounts( sampleLocationSampleCounts_ ), maxSampleLocationGridSize( maxSampleLocationGridSize_ ), sampleLocationCoordinateRange( sampleLocationCoordinateRange_ ), sampleLocationSubPixelBits( sampleLocationSubPixelBits_ ), variableSampleLocations( variableSampleLocations_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceSampleLocationsPropertiesEXT( PhysicalDeviceSampleLocationsPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSampleLocationsPropertiesEXT( VkPhysicalDeviceSampleLocationsPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSampleLocationsPropertiesEXT & operator=( VkPhysicalDeviceSampleLocationsPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceSampleLocationsPropertiesEXT const *>( &rhs ); + return *this; + } + + PhysicalDeviceSampleLocationsPropertiesEXT & operator=( PhysicalDeviceSampleLocationsPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceSampleLocationsPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceSampleLocationsPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>( this ); + } + + operator VkPhysicalDeviceSampleLocationsPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceSampleLocationsPropertiesEXT*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSampleLocationsPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceSampleLocationsPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleLocationSampleCounts == rhs.sampleLocationSampleCounts ) + && ( maxSampleLocationGridSize == rhs.maxSampleLocationGridSize ) + && ( sampleLocationCoordinateRange == rhs.sampleLocationCoordinateRange ) + && ( sampleLocationSubPixelBits == rhs.sampleLocationSubPixelBits ) + && ( variableSampleLocations == rhs.variableSampleLocations ); + } + + bool operator!=( PhysicalDeviceSampleLocationsPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSampleLocationsPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags sampleLocationSampleCounts = {}; + VULKAN_HPP_NAMESPACE::Extent2D maxSampleLocationGridSize = {}; +
VULKAN_HPP_NAMESPACE::ArrayWrapper1D<float, 2> sampleLocationCoordinateRange = {}; + uint32_t sampleLocationSubPixelBits = {}; + VULKAN_HPP_NAMESPACE::Bool32 variableSampleLocations = {}; + + }; + static_assert( sizeof( PhysicalDeviceSampleLocationsPropertiesEXT ) == sizeof( VkPhysicalDeviceSampleLocationsPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceSampleLocationsPropertiesEXT>::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType<StructureType, StructureType::ePhysicalDeviceSampleLocationsPropertiesEXT> + { + using Type = PhysicalDeviceSampleLocationsPropertiesEXT; + }; + + struct PhysicalDeviceSamplerFilterMinmaxProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSamplerFilterMinmaxProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSamplerFilterMinmaxProperties(VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxSingleComponentFormats_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxImageComponentMapping_ = {}) VULKAN_HPP_NOEXCEPT + : filterMinmaxSingleComponentFormats( filterMinmaxSingleComponentFormats_ ), filterMinmaxImageComponentMapping( filterMinmaxImageComponentMapping_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSamplerFilterMinmaxProperties( PhysicalDeviceSamplerFilterMinmaxProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSamplerFilterMinmaxProperties( VkPhysicalDeviceSamplerFilterMinmaxProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSamplerFilterMinmaxProperties & operator=( VkPhysicalDeviceSamplerFilterMinmaxProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceSamplerFilterMinmaxProperties const *>( &rhs ); + return *this; + } + + PhysicalDeviceSamplerFilterMinmaxProperties & operator=( PhysicalDeviceSamplerFilterMinmaxProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast<void *>( this ), &rhs, sizeof( PhysicalDeviceSamplerFilterMinmaxProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceSamplerFilterMinmaxProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxProperties*>( this ); + } + + operator VkPhysicalDeviceSamplerFilterMinmaxProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties*>( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSamplerFilterMinmaxProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceSamplerFilterMinmaxProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( filterMinmaxSingleComponentFormats == rhs.filterMinmaxSingleComponentFormats ) + && ( filterMinmaxImageComponentMapping == rhs.filterMinmaxImageComponentMapping ); + } + + bool operator!=( PhysicalDeviceSamplerFilterMinmaxProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSamplerFilterMinmaxProperties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxSingleComponentFormats = {}; + VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxImageComponentMapping = {}; + + }; + static_assert( sizeof( PhysicalDeviceSamplerFilterMinmaxProperties ) == sizeof( VkPhysicalDeviceSamplerFilterMinmaxProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout<PhysicalDeviceSamplerFilterMinmaxProperties>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSamplerFilterMinmaxProperties; + }; + using PhysicalDeviceSamplerFilterMinmaxPropertiesEXT = PhysicalDeviceSamplerFilterMinmaxProperties; + + struct PhysicalDeviceSamplerYcbcrConversionFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSamplerYcbcrConversionFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSamplerYcbcrConversionFeatures(VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion_ = {}) VULKAN_HPP_NOEXCEPT + : samplerYcbcrConversion( samplerYcbcrConversion_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSamplerYcbcrConversionFeatures( PhysicalDeviceSamplerYcbcrConversionFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSamplerYcbcrConversionFeatures( VkPhysicalDeviceSamplerYcbcrConversionFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSamplerYcbcrConversionFeatures & operator=( VkPhysicalDeviceSamplerYcbcrConversionFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSamplerYcbcrConversionFeatures & operator=( PhysicalDeviceSamplerYcbcrConversionFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSamplerYcbcrConversionFeatures ) ); + return *this; + } + + PhysicalDeviceSamplerYcbcrConversionFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSamplerYcbcrConversionFeatures & setSamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion_ ) VULKAN_HPP_NOEXCEPT + { + samplerYcbcrConversion = samplerYcbcrConversion_; + return *this; + } + + + operator VkPhysicalDeviceSamplerYcbcrConversionFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSamplerYcbcrConversionFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSamplerYcbcrConversionFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceSamplerYcbcrConversionFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( samplerYcbcrConversion == rhs.samplerYcbcrConversion ); + } + + bool operator!=( PhysicalDeviceSamplerYcbcrConversionFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSamplerYcbcrConversionFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion = {}; + + }; + static_assert( sizeof( PhysicalDeviceSamplerYcbcrConversionFeatures ) == sizeof( VkPhysicalDeviceSamplerYcbcrConversionFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSamplerYcbcrConversionFeatures; + }; + using PhysicalDeviceSamplerYcbcrConversionFeaturesKHR = PhysicalDeviceSamplerYcbcrConversionFeatures; + + struct PhysicalDeviceScalarBlockLayoutFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceScalarBlockLayoutFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceScalarBlockLayoutFeatures(VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout_ = {}) VULKAN_HPP_NOEXCEPT + : scalarBlockLayout( scalarBlockLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceScalarBlockLayoutFeatures( PhysicalDeviceScalarBlockLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceScalarBlockLayoutFeatures( VkPhysicalDeviceScalarBlockLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceScalarBlockLayoutFeatures & operator=( VkPhysicalDeviceScalarBlockLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceScalarBlockLayoutFeatures & operator=( PhysicalDeviceScalarBlockLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceScalarBlockLayoutFeatures ) ); + return *this; + } + + PhysicalDeviceScalarBlockLayoutFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceScalarBlockLayoutFeatures & setScalarBlockLayout( VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout_ ) VULKAN_HPP_NOEXCEPT + { + scalarBlockLayout = scalarBlockLayout_; + return *this; + } + + + operator VkPhysicalDeviceScalarBlockLayoutFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceScalarBlockLayoutFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceScalarBlockLayoutFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceScalarBlockLayoutFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( scalarBlockLayout == rhs.scalarBlockLayout ); + } + + bool operator!=( PhysicalDeviceScalarBlockLayoutFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceScalarBlockLayoutFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout = {}; + + }; + static_assert( sizeof( PhysicalDeviceScalarBlockLayoutFeatures ) == sizeof( VkPhysicalDeviceScalarBlockLayoutFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceScalarBlockLayoutFeatures; + }; + using PhysicalDeviceScalarBlockLayoutFeaturesEXT = PhysicalDeviceScalarBlockLayoutFeatures; + + struct PhysicalDeviceSeparateDepthStencilLayoutsFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSeparateDepthStencilLayoutsFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSeparateDepthStencilLayoutsFeatures(VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts_ = {}) VULKAN_HPP_NOEXCEPT + : separateDepthStencilLayouts( separateDepthStencilLayouts_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSeparateDepthStencilLayoutsFeatures( PhysicalDeviceSeparateDepthStencilLayoutsFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSeparateDepthStencilLayoutsFeatures( VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSeparateDepthStencilLayoutsFeatures & operator=( VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSeparateDepthStencilLayoutsFeatures & operator=( PhysicalDeviceSeparateDepthStencilLayoutsFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSeparateDepthStencilLayoutsFeatures ) ); + return *this; + } + + PhysicalDeviceSeparateDepthStencilLayoutsFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSeparateDepthStencilLayoutsFeatures & setSeparateDepthStencilLayouts( VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts_ ) VULKAN_HPP_NOEXCEPT + { + separateDepthStencilLayouts = separateDepthStencilLayouts_; + return *this; + } + + + operator VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSeparateDepthStencilLayoutsFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceSeparateDepthStencilLayoutsFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( separateDepthStencilLayouts == rhs.separateDepthStencilLayouts ); + } + + bool operator!=( PhysicalDeviceSeparateDepthStencilLayoutsFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSeparateDepthStencilLayoutsFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts = {}; + + }; + static_assert( sizeof( PhysicalDeviceSeparateDepthStencilLayoutsFeatures ) == sizeof( VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSeparateDepthStencilLayoutsFeatures; + }; + using PhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR = PhysicalDeviceSeparateDepthStencilLayoutsFeatures; + + struct PhysicalDeviceShaderAtomicFloatFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderAtomicFloatFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderAtomicFloatFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat32Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat32AtomicAdd_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat64AtomicAdd_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat32Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat32AtomicAdd_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat64AtomicAdd_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32AtomicAdd_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32AtomicAdd_ = {}) VULKAN_HPP_NOEXCEPT + : shaderBufferFloat32Atomics( shaderBufferFloat32Atomics_ ), shaderBufferFloat32AtomicAdd( shaderBufferFloat32AtomicAdd_ ), shaderBufferFloat64Atomics( shaderBufferFloat64Atomics_ ), shaderBufferFloat64AtomicAdd( shaderBufferFloat64AtomicAdd_ ), shaderSharedFloat32Atomics( shaderSharedFloat32Atomics_ ), shaderSharedFloat32AtomicAdd( shaderSharedFloat32AtomicAdd_ ), shaderSharedFloat64Atomics( shaderSharedFloat64Atomics_ ), shaderSharedFloat64AtomicAdd( shaderSharedFloat64AtomicAdd_ ), shaderImageFloat32Atomics( shaderImageFloat32Atomics_ ), shaderImageFloat32AtomicAdd( shaderImageFloat32AtomicAdd_ ), sparseImageFloat32Atomics( sparseImageFloat32Atomics_ ), sparseImageFloat32AtomicAdd( sparseImageFloat32AtomicAdd_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderAtomicFloatFeaturesEXT( PhysicalDeviceShaderAtomicFloatFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderAtomicFloatFeaturesEXT( VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & operator=( VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & operator=( PhysicalDeviceShaderAtomicFloatFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderAtomicFloatFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderBufferFloat32Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat32Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderBufferFloat32Atomics = shaderBufferFloat32Atomics_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderBufferFloat32AtomicAdd( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat32AtomicAdd_ ) VULKAN_HPP_NOEXCEPT + { + shaderBufferFloat32AtomicAdd = shaderBufferFloat32AtomicAdd_; + return *this; + } + + 
PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderBufferFloat64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderBufferFloat64Atomics = shaderBufferFloat64Atomics_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderBufferFloat64AtomicAdd( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat64AtomicAdd_ ) VULKAN_HPP_NOEXCEPT + { + shaderBufferFloat64AtomicAdd = shaderBufferFloat64AtomicAdd_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderSharedFloat32Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat32Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderSharedFloat32Atomics = shaderSharedFloat32Atomics_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderSharedFloat32AtomicAdd( VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat32AtomicAdd_ ) VULKAN_HPP_NOEXCEPT + { + shaderSharedFloat32AtomicAdd = shaderSharedFloat32AtomicAdd_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderSharedFloat64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderSharedFloat64Atomics = shaderSharedFloat64Atomics_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderSharedFloat64AtomicAdd( VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat64AtomicAdd_ ) VULKAN_HPP_NOEXCEPT + { + shaderSharedFloat64AtomicAdd = shaderSharedFloat64AtomicAdd_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderImageFloat32Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderImageFloat32Atomics = shaderImageFloat32Atomics_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setShaderImageFloat32AtomicAdd( VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32AtomicAdd_ ) VULKAN_HPP_NOEXCEPT + { + shaderImageFloat32AtomicAdd = shaderImageFloat32AtomicAdd_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setSparseImageFloat32Atomics( VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32Atomics_ ) VULKAN_HPP_NOEXCEPT + { + sparseImageFloat32Atomics = sparseImageFloat32Atomics_; + return *this; + } + + PhysicalDeviceShaderAtomicFloatFeaturesEXT & setSparseImageFloat32AtomicAdd( VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32AtomicAdd_ ) VULKAN_HPP_NOEXCEPT + { + sparseImageFloat32AtomicAdd = sparseImageFloat32AtomicAdd_; + return *this; + } + + + operator VkPhysicalDeviceShaderAtomicFloatFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderAtomicFloatFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderAtomicFloatFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderAtomicFloatFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderBufferFloat32Atomics == rhs.shaderBufferFloat32Atomics ) + && ( shaderBufferFloat32AtomicAdd == rhs.shaderBufferFloat32AtomicAdd ) + && ( shaderBufferFloat64Atomics == rhs.shaderBufferFloat64Atomics ) + && ( shaderBufferFloat64AtomicAdd == rhs.shaderBufferFloat64AtomicAdd ) + && ( shaderSharedFloat32Atomics == rhs.shaderSharedFloat32Atomics ) + && ( shaderSharedFloat32AtomicAdd == rhs.shaderSharedFloat32AtomicAdd ) + && ( shaderSharedFloat64Atomics == rhs.shaderSharedFloat64Atomics ) + && ( 
shaderSharedFloat64AtomicAdd == rhs.shaderSharedFloat64AtomicAdd ) + && ( shaderImageFloat32Atomics == rhs.shaderImageFloat32Atomics ) + && ( shaderImageFloat32AtomicAdd == rhs.shaderImageFloat32AtomicAdd ) + && ( sparseImageFloat32Atomics == rhs.sparseImageFloat32Atomics ) + && ( sparseImageFloat32AtomicAdd == rhs.sparseImageFloat32AtomicAdd ); + } + + bool operator!=( PhysicalDeviceShaderAtomicFloatFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderAtomicFloatFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat32Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat32AtomicAdd = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderBufferFloat64AtomicAdd = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat32Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat32AtomicAdd = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSharedFloat64AtomicAdd = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderImageFloat32AtomicAdd = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseImageFloat32AtomicAdd = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderAtomicFloatFeaturesEXT ) == sizeof( VkPhysicalDeviceShaderAtomicFloatFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderAtomicFloatFeaturesEXT; + }; + + struct PhysicalDeviceShaderAtomicInt64Features + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderAtomicInt64Features; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderAtomicInt64Features(VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics_ = {}) VULKAN_HPP_NOEXCEPT + : shaderBufferInt64Atomics( shaderBufferInt64Atomics_ ), shaderSharedInt64Atomics( shaderSharedInt64Atomics_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderAtomicInt64Features( PhysicalDeviceShaderAtomicInt64Features const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderAtomicInt64Features( VkPhysicalDeviceShaderAtomicInt64Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderAtomicInt64Features & operator=( VkPhysicalDeviceShaderAtomicInt64Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderAtomicInt64Features & operator=( PhysicalDeviceShaderAtomicInt64Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderAtomicInt64Features ) ); + return *this; + } + + PhysicalDeviceShaderAtomicInt64Features & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderAtomicInt64Features & setShaderBufferInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderBufferInt64Atomics = shaderBufferInt64Atomics_; + return *this; 
+ } + + PhysicalDeviceShaderAtomicInt64Features & setShaderSharedInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderSharedInt64Atomics = shaderSharedInt64Atomics_; + return *this; + } + + + operator VkPhysicalDeviceShaderAtomicInt64Features const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderAtomicInt64Features &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderAtomicInt64Features const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderAtomicInt64Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderBufferInt64Atomics == rhs.shaderBufferInt64Atomics ) + && ( shaderSharedInt64Atomics == rhs.shaderSharedInt64Atomics ); + } + + bool operator!=( PhysicalDeviceShaderAtomicInt64Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderAtomicInt64Features; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderAtomicInt64Features ) == sizeof( VkPhysicalDeviceShaderAtomicInt64Features ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderAtomicInt64Features; + }; + using PhysicalDeviceShaderAtomicInt64FeaturesKHR = PhysicalDeviceShaderAtomicInt64Features; + + struct PhysicalDeviceShaderClockFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderClockFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderClockFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupClock_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDeviceClock_ = {}) VULKAN_HPP_NOEXCEPT + : shaderSubgroupClock( shaderSubgroupClock_ ), shaderDeviceClock( shaderDeviceClock_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderClockFeaturesKHR( PhysicalDeviceShaderClockFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderClockFeaturesKHR( VkPhysicalDeviceShaderClockFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderClockFeaturesKHR & operator=( VkPhysicalDeviceShaderClockFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderClockFeaturesKHR & operator=( PhysicalDeviceShaderClockFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderClockFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceShaderClockFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderClockFeaturesKHR & setShaderSubgroupClock( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupClock_ ) VULKAN_HPP_NOEXCEPT + { + shaderSubgroupClock = shaderSubgroupClock_; + return *this; + } + + PhysicalDeviceShaderClockFeaturesKHR & setShaderDeviceClock( 
VULKAN_HPP_NAMESPACE::Bool32 shaderDeviceClock_ ) VULKAN_HPP_NOEXCEPT + { + shaderDeviceClock = shaderDeviceClock_; + return *this; + } + + + operator VkPhysicalDeviceShaderClockFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderClockFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderClockFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderClockFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderSubgroupClock == rhs.shaderSubgroupClock ) + && ( shaderDeviceClock == rhs.shaderDeviceClock ); + } + + bool operator!=( PhysicalDeviceShaderClockFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderClockFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupClock = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDeviceClock = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderClockFeaturesKHR ) == sizeof( VkPhysicalDeviceShaderClockFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderClockFeaturesKHR; + }; + + struct PhysicalDeviceShaderCoreProperties2AMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderCoreProperties2AMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderCoreProperties2AMD(VULKAN_HPP_NAMESPACE::ShaderCorePropertiesFlagsAMD shaderCoreFeatures_ = {}, uint32_t activeComputeUnitCount_ = {}) VULKAN_HPP_NOEXCEPT + : shaderCoreFeatures( shaderCoreFeatures_ ), activeComputeUnitCount( activeComputeUnitCount_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderCoreProperties2AMD( PhysicalDeviceShaderCoreProperties2AMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderCoreProperties2AMD( VkPhysicalDeviceShaderCoreProperties2AMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderCoreProperties2AMD & operator=( VkPhysicalDeviceShaderCoreProperties2AMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderCoreProperties2AMD & operator=( PhysicalDeviceShaderCoreProperties2AMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderCoreProperties2AMD ) ); + return *this; + } + + + operator VkPhysicalDeviceShaderCoreProperties2AMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderCoreProperties2AMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderCoreProperties2AMD const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderCoreProperties2AMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderCoreFeatures == rhs.shaderCoreFeatures ) + && ( 
activeComputeUnitCount == rhs.activeComputeUnitCount ); + } + + bool operator!=( PhysicalDeviceShaderCoreProperties2AMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderCoreProperties2AMD; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ShaderCorePropertiesFlagsAMD shaderCoreFeatures = {}; + uint32_t activeComputeUnitCount = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderCoreProperties2AMD ) == sizeof( VkPhysicalDeviceShaderCoreProperties2AMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderCoreProperties2AMD; + }; + + struct PhysicalDeviceShaderCorePropertiesAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderCorePropertiesAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderCorePropertiesAMD(uint32_t shaderEngineCount_ = {}, uint32_t shaderArraysPerEngineCount_ = {}, uint32_t computeUnitsPerShaderArray_ = {}, uint32_t simdPerComputeUnit_ = {}, uint32_t wavefrontsPerSimd_ = {}, uint32_t wavefrontSize_ = {}, uint32_t sgprsPerSimd_ = {}, uint32_t minSgprAllocation_ = {}, uint32_t maxSgprAllocation_ = {}, uint32_t sgprAllocationGranularity_ = {}, uint32_t vgprsPerSimd_ = {}, uint32_t minVgprAllocation_ = {}, uint32_t maxVgprAllocation_ = {}, uint32_t vgprAllocationGranularity_ = {}) VULKAN_HPP_NOEXCEPT + : shaderEngineCount( shaderEngineCount_ ), shaderArraysPerEngineCount( shaderArraysPerEngineCount_ ), computeUnitsPerShaderArray( computeUnitsPerShaderArray_ ), simdPerComputeUnit( simdPerComputeUnit_ ), wavefrontsPerSimd( wavefrontsPerSimd_ ), wavefrontSize( wavefrontSize_ ), sgprsPerSimd( sgprsPerSimd_ ), minSgprAllocation( minSgprAllocation_ ), maxSgprAllocation( maxSgprAllocation_ ), sgprAllocationGranularity( sgprAllocationGranularity_ ), vgprsPerSimd( vgprsPerSimd_ ), minVgprAllocation( minVgprAllocation_ ), maxVgprAllocation( maxVgprAllocation_ ), vgprAllocationGranularity( vgprAllocationGranularity_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderCorePropertiesAMD( PhysicalDeviceShaderCorePropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderCorePropertiesAMD( VkPhysicalDeviceShaderCorePropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderCorePropertiesAMD & operator=( VkPhysicalDeviceShaderCorePropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderCorePropertiesAMD & operator=( PhysicalDeviceShaderCorePropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderCorePropertiesAMD ) ); + return *this; + } + + + operator VkPhysicalDeviceShaderCorePropertiesAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderCorePropertiesAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderCorePropertiesAMD const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderCorePropertiesAMD const& rhs ) const 
VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderEngineCount == rhs.shaderEngineCount ) + && ( shaderArraysPerEngineCount == rhs.shaderArraysPerEngineCount ) + && ( computeUnitsPerShaderArray == rhs.computeUnitsPerShaderArray ) + && ( simdPerComputeUnit == rhs.simdPerComputeUnit ) + && ( wavefrontsPerSimd == rhs.wavefrontsPerSimd ) + && ( wavefrontSize == rhs.wavefrontSize ) + && ( sgprsPerSimd == rhs.sgprsPerSimd ) + && ( minSgprAllocation == rhs.minSgprAllocation ) + && ( maxSgprAllocation == rhs.maxSgprAllocation ) + && ( sgprAllocationGranularity == rhs.sgprAllocationGranularity ) + && ( vgprsPerSimd == rhs.vgprsPerSimd ) + && ( minVgprAllocation == rhs.minVgprAllocation ) + && ( maxVgprAllocation == rhs.maxVgprAllocation ) + && ( vgprAllocationGranularity == rhs.vgprAllocationGranularity ); + } + + bool operator!=( PhysicalDeviceShaderCorePropertiesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderCorePropertiesAMD; + void* pNext = {}; + uint32_t shaderEngineCount = {}; + uint32_t shaderArraysPerEngineCount = {}; + uint32_t computeUnitsPerShaderArray = {}; + uint32_t simdPerComputeUnit = {}; + uint32_t wavefrontsPerSimd = {}; + uint32_t wavefrontSize = {}; + uint32_t sgprsPerSimd = {}; + uint32_t minSgprAllocation = {}; + uint32_t maxSgprAllocation = {}; + uint32_t sgprAllocationGranularity = {}; + uint32_t vgprsPerSimd = {}; + uint32_t minVgprAllocation = {}; + uint32_t maxVgprAllocation = {}; + uint32_t vgprAllocationGranularity = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderCorePropertiesAMD ) == sizeof( VkPhysicalDeviceShaderCorePropertiesAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderCorePropertiesAMD; + }; + + struct PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 shaderDemoteToHelperInvocation_ = {}) VULKAN_HPP_NOEXCEPT + : shaderDemoteToHelperInvocation( shaderDemoteToHelperInvocation_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT( VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT & operator=( VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT & operator=( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT & setShaderDemoteToHelperInvocation( VULKAN_HPP_NAMESPACE::Bool32 shaderDemoteToHelperInvocation_ ) VULKAN_HPP_NOEXCEPT + { + shaderDemoteToHelperInvocation = shaderDemoteToHelperInvocation_; + return *this; + } + + + operator VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderDemoteToHelperInvocation == rhs.shaderDemoteToHelperInvocation ); + } + + bool operator!=( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDemoteToHelperInvocation = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT ) == sizeof( VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + }; + + struct PhysicalDeviceShaderDrawParametersFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderDrawParametersFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderDrawParametersFeatures(VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters_ = {}) VULKAN_HPP_NOEXCEPT + : shaderDrawParameters( shaderDrawParameters_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderDrawParametersFeatures( PhysicalDeviceShaderDrawParametersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderDrawParametersFeatures( VkPhysicalDeviceShaderDrawParametersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderDrawParametersFeatures & operator=( VkPhysicalDeviceShaderDrawParametersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderDrawParametersFeatures & operator=( PhysicalDeviceShaderDrawParametersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderDrawParametersFeatures ) ); + return *this; + } + + PhysicalDeviceShaderDrawParametersFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderDrawParametersFeatures & setShaderDrawParameters( VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters_ ) VULKAN_HPP_NOEXCEPT + { + shaderDrawParameters = shaderDrawParameters_; + return *this; + } + + + operator VkPhysicalDeviceShaderDrawParametersFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderDrawParametersFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderDrawParametersFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderDrawParametersFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderDrawParameters == rhs.shaderDrawParameters ); + } + + bool operator!=( PhysicalDeviceShaderDrawParametersFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderDrawParametersFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderDrawParametersFeatures ) == sizeof( VkPhysicalDeviceShaderDrawParametersFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderDrawParametersFeatures; + }; + using PhysicalDeviceShaderDrawParameterFeatures = PhysicalDeviceShaderDrawParametersFeatures; + + struct PhysicalDeviceShaderFloat16Int8Features + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderFloat16Int8Features; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderFloat16Int8Features(VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInt8_ = {}) VULKAN_HPP_NOEXCEPT + : shaderFloat16( shaderFloat16_ ), shaderInt8( shaderInt8_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderFloat16Int8Features( PhysicalDeviceShaderFloat16Int8Features const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderFloat16Int8Features( VkPhysicalDeviceShaderFloat16Int8Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderFloat16Int8Features & operator=( VkPhysicalDeviceShaderFloat16Int8Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderFloat16Int8Features & operator=( PhysicalDeviceShaderFloat16Int8Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderFloat16Int8Features ) ); + return *this; + } + + PhysicalDeviceShaderFloat16Int8Features & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderFloat16Int8Features & setShaderFloat16( VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16_ ) VULKAN_HPP_NOEXCEPT + { + shaderFloat16 = shaderFloat16_; + return *this; + } + + PhysicalDeviceShaderFloat16Int8Features & setShaderInt8( VULKAN_HPP_NAMESPACE::Bool32 shaderInt8_ ) VULKAN_HPP_NOEXCEPT + { + shaderInt8 = shaderInt8_; + return *this; + } + + + operator VkPhysicalDeviceShaderFloat16Int8Features const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderFloat16Int8Features &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderFloat16Int8Features const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderFloat16Int8Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderFloat16 == rhs.shaderFloat16 ) + && ( shaderInt8 == rhs.shaderInt8 ); + } + + bool operator!=( PhysicalDeviceShaderFloat16Int8Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderFloat16Int8Features; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInt8 = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderFloat16Int8Features ) == sizeof( VkPhysicalDeviceShaderFloat16Int8Features ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderFloat16Int8Features; + }; + using PhysicalDeviceFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features; + using PhysicalDeviceShaderFloat16Int8FeaturesKHR = PhysicalDeviceShaderFloat16Int8Features; + + struct PhysicalDeviceShaderImageAtomicInt64FeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageAtomicInt64FeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics_ = {}) VULKAN_HPP_NOEXCEPT + : shaderImageInt64Atomics( shaderImageInt64Atomics_ ), sparseImageInt64Atomics( sparseImageInt64Atomics_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageAtomicInt64FeaturesEXT( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT( VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & operator=( VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & operator=( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT ) ); + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & setShaderImageInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderImageInt64Atomics = shaderImageInt64Atomics_; + return *this; + } + + PhysicalDeviceShaderImageAtomicInt64FeaturesEXT & setSparseImageInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + sparseImageInt64Atomics = sparseImageInt64Atomics_; + return *this; + } + + + operator VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderImageInt64Atomics == rhs.shaderImageInt64Atomics ) + && ( sparseImageInt64Atomics == rhs.sparseImageInt64Atomics ); + } + + bool operator!=( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderImageInt64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 sparseImageInt64Atomics = {}; + + }; + 
static_assert( sizeof( PhysicalDeviceShaderImageAtomicInt64FeaturesEXT ) == sizeof( VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceShaderImageAtomicInt64FeaturesEXT>::value, "struct wrapper is not a standard layout!" );
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceShaderImageAtomicInt64FeaturesEXT>
+  {
+    using Type = PhysicalDeviceShaderImageAtomicInt64FeaturesEXT;
+  };
+
+  struct PhysicalDeviceShaderImageFootprintFeaturesNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderImageFootprintFeaturesNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageFootprintFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 imageFootprint_ = {}) VULKAN_HPP_NOEXCEPT
+    : imageFootprint( imageFootprint_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderImageFootprintFeaturesNV( PhysicalDeviceShaderImageFootprintFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceShaderImageFootprintFeaturesNV( VkPhysicalDeviceShaderImageFootprintFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceShaderImageFootprintFeaturesNV & operator=( VkPhysicalDeviceShaderImageFootprintFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceShaderImageFootprintFeaturesNV const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceShaderImageFootprintFeaturesNV & operator=( PhysicalDeviceShaderImageFootprintFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceShaderImageFootprintFeaturesNV ) );
+      return *this;
+    }
+
+    PhysicalDeviceShaderImageFootprintFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PhysicalDeviceShaderImageFootprintFeaturesNV & setImageFootprint( VULKAN_HPP_NAMESPACE::Bool32 imageFootprint_ ) VULKAN_HPP_NOEXCEPT
+    {
+      imageFootprint = imageFootprint_;
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceShaderImageFootprintFeaturesNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceShaderImageFootprintFeaturesNV*>( this );
+    }
+
+    operator VkPhysicalDeviceShaderImageFootprintFeaturesNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceShaderImageFootprintFeaturesNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceShaderImageFootprintFeaturesNV const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceShaderImageFootprintFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( imageFootprint == rhs.imageFootprint );
+    }
+
+    bool operator!=( PhysicalDeviceShaderImageFootprintFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderImageFootprintFeaturesNV;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 imageFootprint = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceShaderImageFootprintFeaturesNV ) == sizeof( VkPhysicalDeviceShaderImageFootprintFeaturesNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceShaderImageFootprintFeaturesNV>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderImageFootprintFeaturesNV; + }; + + struct PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(VULKAN_HPP_NAMESPACE::Bool32 shaderIntegerFunctions2_ = {}) VULKAN_HPP_NOEXCEPT + : shaderIntegerFunctions2( shaderIntegerFunctions2_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL( VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL & operator=( VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL & operator=( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL ) ); + return *this; + } + + PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL & setShaderIntegerFunctions2( VULKAN_HPP_NAMESPACE::Bool32 shaderIntegerFunctions2_ ) VULKAN_HPP_NOEXCEPT + { + shaderIntegerFunctions2 = shaderIntegerFunctions2_; + return *this; + } + + + operator VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderIntegerFunctions2 == rhs.shaderIntegerFunctions2 ); + } + + bool operator!=( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderIntegerFunctions2 = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL ) == sizeof( VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + }; + + struct PhysicalDeviceShaderSMBuiltinsFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderSmBuiltinsFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSMBuiltinsFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 shaderSMBuiltins_ = {}) VULKAN_HPP_NOEXCEPT + : shaderSMBuiltins( shaderSMBuiltins_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSMBuiltinsFeaturesNV( PhysicalDeviceShaderSMBuiltinsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderSMBuiltinsFeaturesNV( VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderSMBuiltinsFeaturesNV & operator=( VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderSMBuiltinsFeaturesNV & operator=( PhysicalDeviceShaderSMBuiltinsFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderSMBuiltinsFeaturesNV ) ); + return *this; + } + + PhysicalDeviceShaderSMBuiltinsFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderSMBuiltinsFeaturesNV & setShaderSMBuiltins( VULKAN_HPP_NAMESPACE::Bool32 shaderSMBuiltins_ ) VULKAN_HPP_NOEXCEPT + { + shaderSMBuiltins = shaderSMBuiltins_; + return *this; + } + + + operator VkPhysicalDeviceShaderSMBuiltinsFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderSMBuiltinsFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderSMBuiltinsFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderSMBuiltinsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderSMBuiltins == rhs.shaderSMBuiltins ); + } + + bool operator!=( PhysicalDeviceShaderSMBuiltinsFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderSmBuiltinsFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSMBuiltins = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderSMBuiltinsFeaturesNV ) == sizeof( VkPhysicalDeviceShaderSMBuiltinsFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderSMBuiltinsFeaturesNV; + }; + + struct PhysicalDeviceShaderSMBuiltinsPropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderSmBuiltinsPropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSMBuiltinsPropertiesNV(uint32_t shaderSMCount_ = {}, uint32_t shaderWarpsPerSM_ = {}) VULKAN_HPP_NOEXCEPT + : shaderSMCount( shaderSMCount_ ), shaderWarpsPerSM( shaderWarpsPerSM_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSMBuiltinsPropertiesNV( PhysicalDeviceShaderSMBuiltinsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderSMBuiltinsPropertiesNV( VkPhysicalDeviceShaderSMBuiltinsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderSMBuiltinsPropertiesNV & operator=( VkPhysicalDeviceShaderSMBuiltinsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderSMBuiltinsPropertiesNV & operator=( PhysicalDeviceShaderSMBuiltinsPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderSMBuiltinsPropertiesNV ) ); + return *this; + } + + + operator VkPhysicalDeviceShaderSMBuiltinsPropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderSMBuiltinsPropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderSMBuiltinsPropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderSMBuiltinsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderSMCount == rhs.shaderSMCount ) + && ( shaderWarpsPerSM == rhs.shaderWarpsPerSM ); + } + + bool operator!=( PhysicalDeviceShaderSMBuiltinsPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderSmBuiltinsPropertiesNV; + void* pNext = {}; + uint32_t shaderSMCount = {}; + uint32_t shaderWarpsPerSM = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderSMBuiltinsPropertiesNV ) == sizeof( VkPhysicalDeviceShaderSMBuiltinsPropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderSMBuiltinsPropertiesNV; + }; + + struct PhysicalDeviceShaderSubgroupExtendedTypesFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderSubgroupExtendedTypesFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupExtendedTypesFeatures(VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes_ = {}) VULKAN_HPP_NOEXCEPT + : shaderSubgroupExtendedTypes( shaderSubgroupExtendedTypes_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderSubgroupExtendedTypesFeatures( PhysicalDeviceShaderSubgroupExtendedTypesFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderSubgroupExtendedTypesFeatures( VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderSubgroupExtendedTypesFeatures & operator=( VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderSubgroupExtendedTypesFeatures & operator=( PhysicalDeviceShaderSubgroupExtendedTypesFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderSubgroupExtendedTypesFeatures ) ); + return *this; + } + + PhysicalDeviceShaderSubgroupExtendedTypesFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderSubgroupExtendedTypesFeatures & setShaderSubgroupExtendedTypes( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes_ ) VULKAN_HPP_NOEXCEPT + { + shaderSubgroupExtendedTypes = shaderSubgroupExtendedTypes_; + return *this; + } + + + operator VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderSubgroupExtendedTypesFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderSubgroupExtendedTypesFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderSubgroupExtendedTypes == rhs.shaderSubgroupExtendedTypes ); + } + + bool operator!=( PhysicalDeviceShaderSubgroupExtendedTypesFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderSubgroupExtendedTypesFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderSubgroupExtendedTypesFeatures ) == sizeof( VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderSubgroupExtendedTypesFeatures; + }; + using PhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR = PhysicalDeviceShaderSubgroupExtendedTypesFeatures; + + struct PhysicalDeviceShaderTerminateInvocationFeaturesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderTerminateInvocationFeaturesKHR(VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation_ = {}) VULKAN_HPP_NOEXCEPT + : shaderTerminateInvocation( shaderTerminateInvocation_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShaderTerminateInvocationFeaturesKHR( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR( VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & operator=( VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & operator=( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShaderTerminateInvocationFeaturesKHR ) ); + return *this; + } + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShaderTerminateInvocationFeaturesKHR & setShaderTerminateInvocation( VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation_ ) VULKAN_HPP_NOEXCEPT + { + shaderTerminateInvocation = shaderTerminateInvocation_; + return *this; + } + + + operator VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const& ) const = default; +#else + bool operator==( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shaderTerminateInvocation == rhs.shaderTerminateInvocation ); + } + + bool operator!=( PhysicalDeviceShaderTerminateInvocationFeaturesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShaderTerminateInvocationFeaturesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderTerminateInvocation = {}; + + }; + static_assert( sizeof( PhysicalDeviceShaderTerminateInvocationFeaturesKHR ) == sizeof( VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShaderTerminateInvocationFeaturesKHR; + }; + + struct PhysicalDeviceShadingRateImageFeaturesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShadingRateImageFeaturesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShadingRateImageFeaturesNV(VULKAN_HPP_NAMESPACE::Bool32 shadingRateImage_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shadingRateCoarseSampleOrder_ = {}) VULKAN_HPP_NOEXCEPT + : shadingRateImage( shadingRateImage_ ), shadingRateCoarseSampleOrder( shadingRateCoarseSampleOrder_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShadingRateImageFeaturesNV( PhysicalDeviceShadingRateImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShadingRateImageFeaturesNV( VkPhysicalDeviceShadingRateImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShadingRateImageFeaturesNV & operator=( VkPhysicalDeviceShadingRateImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShadingRateImageFeaturesNV & operator=( PhysicalDeviceShadingRateImageFeaturesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShadingRateImageFeaturesNV ) ); + return *this; + } + + PhysicalDeviceShadingRateImageFeaturesNV & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceShadingRateImageFeaturesNV & setShadingRateImage( VULKAN_HPP_NAMESPACE::Bool32 shadingRateImage_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateImage = shadingRateImage_; + return *this; + } + + PhysicalDeviceShadingRateImageFeaturesNV & setShadingRateCoarseSampleOrder( VULKAN_HPP_NAMESPACE::Bool32 shadingRateCoarseSampleOrder_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateCoarseSampleOrder = shadingRateCoarseSampleOrder_; + return *this; + } + + + operator VkPhysicalDeviceShadingRateImageFeaturesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShadingRateImageFeaturesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShadingRateImageFeaturesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceShadingRateImageFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateImage == rhs.shadingRateImage ) + && ( shadingRateCoarseSampleOrder == rhs.shadingRateCoarseSampleOrder ); + } + + bool operator!=( PhysicalDeviceShadingRateImageFeaturesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShadingRateImageFeaturesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shadingRateImage = {}; + VULKAN_HPP_NAMESPACE::Bool32 shadingRateCoarseSampleOrder = {}; + + }; + static_assert( sizeof( PhysicalDeviceShadingRateImageFeaturesNV ) == sizeof( VkPhysicalDeviceShadingRateImageFeaturesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShadingRateImageFeaturesNV; + }; + + struct PhysicalDeviceShadingRateImagePropertiesNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceShadingRateImagePropertiesNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceShadingRateImagePropertiesNV(VULKAN_HPP_NAMESPACE::Extent2D shadingRateTexelSize_ = {}, uint32_t shadingRatePaletteSize_ = {}, uint32_t shadingRateMaxCoarseSamples_ = {}) VULKAN_HPP_NOEXCEPT + : shadingRateTexelSize( shadingRateTexelSize_ ), shadingRatePaletteSize( shadingRatePaletteSize_ ), shadingRateMaxCoarseSamples( shadingRateMaxCoarseSamples_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceShadingRateImagePropertiesNV( PhysicalDeviceShadingRateImagePropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceShadingRateImagePropertiesNV( VkPhysicalDeviceShadingRateImagePropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceShadingRateImagePropertiesNV & operator=( VkPhysicalDeviceShadingRateImagePropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceShadingRateImagePropertiesNV & operator=( PhysicalDeviceShadingRateImagePropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceShadingRateImagePropertiesNV ) ); + return *this; + } + + + operator VkPhysicalDeviceShadingRateImagePropertiesNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceShadingRateImagePropertiesNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceShadingRateImagePropertiesNV const& ) const = default; +#else + bool operator==( PhysicalDeviceShadingRateImagePropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateTexelSize == rhs.shadingRateTexelSize ) + && ( shadingRatePaletteSize == rhs.shadingRatePaletteSize ) + && ( shadingRateMaxCoarseSamples == rhs.shadingRateMaxCoarseSamples ); + } + + bool operator!=( PhysicalDeviceShadingRateImagePropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceShadingRateImagePropertiesNV; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D shadingRateTexelSize = {}; + uint32_t shadingRatePaletteSize = {}; + uint32_t shadingRateMaxCoarseSamples = {}; + + }; + static_assert( sizeof( PhysicalDeviceShadingRateImagePropertiesNV ) == sizeof( VkPhysicalDeviceShadingRateImagePropertiesNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceShadingRateImagePropertiesNV; + }; + + struct PhysicalDeviceSubgroupProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSubgroupProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupProperties(uint32_t subgroupSize_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags supportedStages_ = {}, VULKAN_HPP_NAMESPACE::SubgroupFeatureFlags supportedOperations_ = {}, VULKAN_HPP_NAMESPACE::Bool32 quadOperationsInAllStages_ = {}) VULKAN_HPP_NOEXCEPT + : subgroupSize( subgroupSize_ ), supportedStages( supportedStages_ ), supportedOperations( supportedOperations_ ), quadOperationsInAllStages( quadOperationsInAllStages_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupProperties( PhysicalDeviceSubgroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSubgroupProperties( VkPhysicalDeviceSubgroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSubgroupProperties & operator=( VkPhysicalDeviceSubgroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSubgroupProperties & operator=( PhysicalDeviceSubgroupProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSubgroupProperties ) ); + return *this; + } + + + operator VkPhysicalDeviceSubgroupProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSubgroupProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSubgroupProperties const& ) const = default; +#else + bool operator==( PhysicalDeviceSubgroupProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( subgroupSize == rhs.subgroupSize ) + && ( supportedStages == rhs.supportedStages ) + && ( supportedOperations == rhs.supportedOperations ) + && ( quadOperationsInAllStages == rhs.quadOperationsInAllStages ); + } + + bool operator!=( PhysicalDeviceSubgroupProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSubgroupProperties; + void* pNext = {}; + uint32_t subgroupSize = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlags supportedStages = {}; + VULKAN_HPP_NAMESPACE::SubgroupFeatureFlags supportedOperations = {}; + VULKAN_HPP_NAMESPACE::Bool32 quadOperationsInAllStages = {}; + + }; + static_assert( sizeof( PhysicalDeviceSubgroupProperties ) == sizeof( VkPhysicalDeviceSubgroupProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSubgroupProperties; + }; + + struct PhysicalDeviceSubgroupSizeControlFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSubgroupSizeControlFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupSizeControlFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 subgroupSizeControl_ = {}, VULKAN_HPP_NAMESPACE::Bool32 computeFullSubgroups_ = {}) VULKAN_HPP_NOEXCEPT + : subgroupSizeControl( subgroupSizeControl_ ), computeFullSubgroups( computeFullSubgroups_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupSizeControlFeaturesEXT( PhysicalDeviceSubgroupSizeControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSubgroupSizeControlFeaturesEXT( VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSubgroupSizeControlFeaturesEXT & operator=( VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSubgroupSizeControlFeaturesEXT & operator=( PhysicalDeviceSubgroupSizeControlFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSubgroupSizeControlFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceSubgroupSizeControlFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceSubgroupSizeControlFeaturesEXT & setSubgroupSizeControl( VULKAN_HPP_NAMESPACE::Bool32 subgroupSizeControl_ ) VULKAN_HPP_NOEXCEPT + { + subgroupSizeControl = subgroupSizeControl_; + return *this; + } + + PhysicalDeviceSubgroupSizeControlFeaturesEXT & setComputeFullSubgroups( VULKAN_HPP_NAMESPACE::Bool32 computeFullSubgroups_ ) VULKAN_HPP_NOEXCEPT + { + computeFullSubgroups = computeFullSubgroups_; + return *this; + } + + + operator VkPhysicalDeviceSubgroupSizeControlFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSubgroupSizeControlFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSubgroupSizeControlFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceSubgroupSizeControlFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( subgroupSizeControl == rhs.subgroupSizeControl ) + && ( computeFullSubgroups == rhs.computeFullSubgroups ); + } + + bool operator!=( PhysicalDeviceSubgroupSizeControlFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSubgroupSizeControlFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 subgroupSizeControl = {}; + VULKAN_HPP_NAMESPACE::Bool32 computeFullSubgroups = {}; + + }; + static_assert( sizeof( PhysicalDeviceSubgroupSizeControlFeaturesEXT ) == sizeof( VkPhysicalDeviceSubgroupSizeControlFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSubgroupSizeControlFeaturesEXT; + }; + + struct PhysicalDeviceSubgroupSizeControlPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceSubgroupSizeControlPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupSizeControlPropertiesEXT(uint32_t minSubgroupSize_ = {}, uint32_t maxSubgroupSize_ = {}, uint32_t maxComputeWorkgroupSubgroups_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags requiredSubgroupSizeStages_ = {}) VULKAN_HPP_NOEXCEPT + : minSubgroupSize( minSubgroupSize_ ), maxSubgroupSize( maxSubgroupSize_ ), maxComputeWorkgroupSubgroups( maxComputeWorkgroupSubgroups_ ), requiredSubgroupSizeStages( requiredSubgroupSizeStages_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceSubgroupSizeControlPropertiesEXT( PhysicalDeviceSubgroupSizeControlPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceSubgroupSizeControlPropertiesEXT( VkPhysicalDeviceSubgroupSizeControlPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceSubgroupSizeControlPropertiesEXT & operator=( VkPhysicalDeviceSubgroupSizeControlPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceSubgroupSizeControlPropertiesEXT & operator=( PhysicalDeviceSubgroupSizeControlPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceSubgroupSizeControlPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceSubgroupSizeControlPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceSubgroupSizeControlPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceSubgroupSizeControlPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceSubgroupSizeControlPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( minSubgroupSize == rhs.minSubgroupSize ) + && ( maxSubgroupSize == rhs.maxSubgroupSize ) + && ( maxComputeWorkgroupSubgroups == rhs.maxComputeWorkgroupSubgroups ) + && ( requiredSubgroupSizeStages == rhs.requiredSubgroupSizeStages ); + } + + bool operator!=( PhysicalDeviceSubgroupSizeControlPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceSubgroupSizeControlPropertiesEXT; + void* pNext = {}; + uint32_t minSubgroupSize = {}; + uint32_t maxSubgroupSize = {}; + uint32_t maxComputeWorkgroupSubgroups = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlags requiredSubgroupSizeStages = {}; + + }; + static_assert( sizeof( PhysicalDeviceSubgroupSizeControlPropertiesEXT ) == sizeof( VkPhysicalDeviceSubgroupSizeControlPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceSubgroupSizeControlPropertiesEXT; + }; + + struct PhysicalDeviceTexelBufferAlignmentFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTexelBufferAlignmentFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceTexelBufferAlignmentFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 texelBufferAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : texelBufferAlignment( texelBufferAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceTexelBufferAlignmentFeaturesEXT( PhysicalDeviceTexelBufferAlignmentFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceTexelBufferAlignmentFeaturesEXT( VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceTexelBufferAlignmentFeaturesEXT & operator=( VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceTexelBufferAlignmentFeaturesEXT & operator=( PhysicalDeviceTexelBufferAlignmentFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceTexelBufferAlignmentFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceTexelBufferAlignmentFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceTexelBufferAlignmentFeaturesEXT & setTexelBufferAlignment( VULKAN_HPP_NAMESPACE::Bool32 texelBufferAlignment_ ) VULKAN_HPP_NOEXCEPT + { + texelBufferAlignment = texelBufferAlignment_; + return *this; + } + + + operator VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceTexelBufferAlignmentFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceTexelBufferAlignmentFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( texelBufferAlignment == rhs.texelBufferAlignment ); + } + + bool operator!=( PhysicalDeviceTexelBufferAlignmentFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTexelBufferAlignmentFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 texelBufferAlignment = {}; + + }; + static_assert( sizeof( PhysicalDeviceTexelBufferAlignmentFeaturesEXT ) == sizeof( VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceTexelBufferAlignmentFeaturesEXT; + }; + + struct PhysicalDeviceTexelBufferAlignmentPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTexelBufferAlignmentPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceTexelBufferAlignmentPropertiesEXT(VULKAN_HPP_NAMESPACE::DeviceSize storageTexelBufferOffsetAlignmentBytes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storageTexelBufferOffsetSingleTexelAlignment_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize uniformTexelBufferOffsetAlignmentBytes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformTexelBufferOffsetSingleTexelAlignment_ = {}) VULKAN_HPP_NOEXCEPT + : storageTexelBufferOffsetAlignmentBytes( storageTexelBufferOffsetAlignmentBytes_ ), storageTexelBufferOffsetSingleTexelAlignment( storageTexelBufferOffsetSingleTexelAlignment_ ), uniformTexelBufferOffsetAlignmentBytes( uniformTexelBufferOffsetAlignmentBytes_ ), uniformTexelBufferOffsetSingleTexelAlignment( uniformTexelBufferOffsetSingleTexelAlignment_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceTexelBufferAlignmentPropertiesEXT( PhysicalDeviceTexelBufferAlignmentPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceTexelBufferAlignmentPropertiesEXT( VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceTexelBufferAlignmentPropertiesEXT & operator=( VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceTexelBufferAlignmentPropertiesEXT & operator=( PhysicalDeviceTexelBufferAlignmentPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceTexelBufferAlignmentPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceTexelBufferAlignmentPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceTexelBufferAlignmentPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( storageTexelBufferOffsetAlignmentBytes == rhs.storageTexelBufferOffsetAlignmentBytes ) + && ( storageTexelBufferOffsetSingleTexelAlignment == rhs.storageTexelBufferOffsetSingleTexelAlignment ) + && ( uniformTexelBufferOffsetAlignmentBytes == rhs.uniformTexelBufferOffsetAlignmentBytes ) + && ( uniformTexelBufferOffsetSingleTexelAlignment == rhs.uniformTexelBufferOffsetSingleTexelAlignment ); + } + + bool operator!=( PhysicalDeviceTexelBufferAlignmentPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTexelBufferAlignmentPropertiesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::DeviceSize storageTexelBufferOffsetAlignmentBytes = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageTexelBufferOffsetSingleTexelAlignment = {}; + VULKAN_HPP_NAMESPACE::DeviceSize 
uniformTexelBufferOffsetAlignmentBytes = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformTexelBufferOffsetSingleTexelAlignment = {}; + + }; + static_assert( sizeof( PhysicalDeviceTexelBufferAlignmentPropertiesEXT ) == sizeof( VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceTexelBufferAlignmentPropertiesEXT; + }; + + struct PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTextureCompressionAstcHdrFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_HDR_ = {}) VULKAN_HPP_NOEXCEPT + : textureCompressionASTC_HDR( textureCompressionASTC_HDR_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT( VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT & operator=( VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT & operator=( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT & setTextureCompressionASTC_HDR( VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_HDR_ ) VULKAN_HPP_NOEXCEPT + { + textureCompressionASTC_HDR = textureCompressionASTC_HDR_; + return *this; + } + + + operator VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( textureCompressionASTC_HDR == rhs.textureCompressionASTC_HDR ); + } + + bool operator!=( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTextureCompressionAstcHdrFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 textureCompressionASTC_HDR = {}; + + }; + static_assert( sizeof( PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT ) == sizeof( VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT ), "struct and 
wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + }; + + struct PhysicalDeviceTimelineSemaphoreFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTimelineSemaphoreFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceTimelineSemaphoreFeatures(VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore_ = {}) VULKAN_HPP_NOEXCEPT + : timelineSemaphore( timelineSemaphore_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceTimelineSemaphoreFeatures( PhysicalDeviceTimelineSemaphoreFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceTimelineSemaphoreFeatures( VkPhysicalDeviceTimelineSemaphoreFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceTimelineSemaphoreFeatures & operator=( VkPhysicalDeviceTimelineSemaphoreFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceTimelineSemaphoreFeatures & operator=( PhysicalDeviceTimelineSemaphoreFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceTimelineSemaphoreFeatures ) ); + return *this; + } + + PhysicalDeviceTimelineSemaphoreFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceTimelineSemaphoreFeatures & setTimelineSemaphore( VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore_ ) VULKAN_HPP_NOEXCEPT + { + timelineSemaphore = timelineSemaphore_; + return *this; + } + + + operator VkPhysicalDeviceTimelineSemaphoreFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceTimelineSemaphoreFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceTimelineSemaphoreFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceTimelineSemaphoreFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( timelineSemaphore == rhs.timelineSemaphore ); + } + + bool operator!=( PhysicalDeviceTimelineSemaphoreFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTimelineSemaphoreFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore = {}; + + }; + static_assert( sizeof( PhysicalDeviceTimelineSemaphoreFeatures ) == sizeof( VkPhysicalDeviceTimelineSemaphoreFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePhysicalDeviceTimelineSemaphoreFeatures>
+  {
+    using Type = PhysicalDeviceTimelineSemaphoreFeatures;
+  };
+  using PhysicalDeviceTimelineSemaphoreFeaturesKHR = PhysicalDeviceTimelineSemaphoreFeatures;
+
+  struct PhysicalDeviceTimelineSemaphoreProperties
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTimelineSemaphoreProperties;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceTimelineSemaphoreProperties(uint64_t maxTimelineSemaphoreValueDifference_ = {}) VULKAN_HPP_NOEXCEPT
+      : maxTimelineSemaphoreValueDifference( maxTimelineSemaphoreValueDifference_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PhysicalDeviceTimelineSemaphoreProperties( PhysicalDeviceTimelineSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PhysicalDeviceTimelineSemaphoreProperties( VkPhysicalDeviceTimelineSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PhysicalDeviceTimelineSemaphoreProperties & operator=( VkPhysicalDeviceTimelineSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PhysicalDeviceTimelineSemaphoreProperties const *>( &rhs );
+      return *this;
+    }
+
+    PhysicalDeviceTimelineSemaphoreProperties & operator=( PhysicalDeviceTimelineSemaphoreProperties const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PhysicalDeviceTimelineSemaphoreProperties ) );
+      return *this;
+    }
+
+
+    operator VkPhysicalDeviceTimelineSemaphoreProperties const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreProperties*>( this );
+    }
+
+    operator VkPhysicalDeviceTimelineSemaphoreProperties &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PhysicalDeviceTimelineSemaphoreProperties const& ) const = default;
+#else
+    bool operator==( PhysicalDeviceTimelineSemaphoreProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( maxTimelineSemaphoreValueDifference == rhs.maxTimelineSemaphoreValueDifference );
+    }
+
+    bool operator!=( PhysicalDeviceTimelineSemaphoreProperties const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTimelineSemaphoreProperties;
+    void* pNext = {};
+    uint64_t maxTimelineSemaphoreValueDifference = {};
+
+  };
+  static_assert( sizeof( PhysicalDeviceTimelineSemaphoreProperties ) == sizeof( VkPhysicalDeviceTimelineSemaphoreProperties ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PhysicalDeviceTimelineSemaphoreProperties>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PhysicalDeviceTimelineSemaphoreProperties; + }; + using PhysicalDeviceTimelineSemaphorePropertiesKHR = PhysicalDeviceTimelineSemaphoreProperties; + + struct PhysicalDeviceTransformFeedbackFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTransformFeedbackFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceTransformFeedbackFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 transformFeedback_ = {}, VULKAN_HPP_NAMESPACE::Bool32 geometryStreams_ = {}) VULKAN_HPP_NOEXCEPT + : transformFeedback( transformFeedback_ ), geometryStreams( geometryStreams_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceTransformFeedbackFeaturesEXT( PhysicalDeviceTransformFeedbackFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceTransformFeedbackFeaturesEXT( VkPhysicalDeviceTransformFeedbackFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceTransformFeedbackFeaturesEXT & operator=( VkPhysicalDeviceTransformFeedbackFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceTransformFeedbackFeaturesEXT & operator=( PhysicalDeviceTransformFeedbackFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceTransformFeedbackFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceTransformFeedbackFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceTransformFeedbackFeaturesEXT & setTransformFeedback( VULKAN_HPP_NAMESPACE::Bool32 transformFeedback_ ) VULKAN_HPP_NOEXCEPT + { + transformFeedback = transformFeedback_; + return *this; + } + + PhysicalDeviceTransformFeedbackFeaturesEXT & setGeometryStreams( VULKAN_HPP_NAMESPACE::Bool32 geometryStreams_ ) VULKAN_HPP_NOEXCEPT + { + geometryStreams = geometryStreams_; + return *this; + } + + + operator VkPhysicalDeviceTransformFeedbackFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceTransformFeedbackFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceTransformFeedbackFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceTransformFeedbackFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( transformFeedback == rhs.transformFeedback ) + && ( geometryStreams == rhs.geometryStreams ); + } + + bool operator!=( PhysicalDeviceTransformFeedbackFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTransformFeedbackFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 transformFeedback = {}; + VULKAN_HPP_NAMESPACE::Bool32 geometryStreams = {}; + + }; + static_assert( sizeof( PhysicalDeviceTransformFeedbackFeaturesEXT ) == sizeof( VkPhysicalDeviceTransformFeedbackFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceTransformFeedbackFeaturesEXT; + }; + + struct PhysicalDeviceTransformFeedbackPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceTransformFeedbackPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceTransformFeedbackPropertiesEXT(uint32_t maxTransformFeedbackStreams_ = {}, uint32_t maxTransformFeedbackBuffers_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxTransformFeedbackBufferSize_ = {}, uint32_t maxTransformFeedbackStreamDataSize_ = {}, uint32_t maxTransformFeedbackBufferDataSize_ = {}, uint32_t maxTransformFeedbackBufferDataStride_ = {}, VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackQueries_ = {}, VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackStreamsLinesTriangles_ = {}, VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackRasterizationStreamSelect_ = {}, VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackDraw_ = {}) VULKAN_HPP_NOEXCEPT + : maxTransformFeedbackStreams( maxTransformFeedbackStreams_ ), maxTransformFeedbackBuffers( maxTransformFeedbackBuffers_ ), maxTransformFeedbackBufferSize( maxTransformFeedbackBufferSize_ ), maxTransformFeedbackStreamDataSize( maxTransformFeedbackStreamDataSize_ ), maxTransformFeedbackBufferDataSize( maxTransformFeedbackBufferDataSize_ ), maxTransformFeedbackBufferDataStride( maxTransformFeedbackBufferDataStride_ ), transformFeedbackQueries( transformFeedbackQueries_ ), transformFeedbackStreamsLinesTriangles( transformFeedbackStreamsLinesTriangles_ ), transformFeedbackRasterizationStreamSelect( transformFeedbackRasterizationStreamSelect_ ), transformFeedbackDraw( transformFeedbackDraw_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceTransformFeedbackPropertiesEXT( PhysicalDeviceTransformFeedbackPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceTransformFeedbackPropertiesEXT( VkPhysicalDeviceTransformFeedbackPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceTransformFeedbackPropertiesEXT & operator=( VkPhysicalDeviceTransformFeedbackPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceTransformFeedbackPropertiesEXT & operator=( PhysicalDeviceTransformFeedbackPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceTransformFeedbackPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceTransformFeedbackPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceTransformFeedbackPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceTransformFeedbackPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceTransformFeedbackPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxTransformFeedbackStreams == rhs.maxTransformFeedbackStreams ) + && ( maxTransformFeedbackBuffers == rhs.maxTransformFeedbackBuffers ) + && ( maxTransformFeedbackBufferSize == rhs.maxTransformFeedbackBufferSize ) + && ( maxTransformFeedbackStreamDataSize == rhs.maxTransformFeedbackStreamDataSize ) + && ( maxTransformFeedbackBufferDataSize == 
rhs.maxTransformFeedbackBufferDataSize ) + && ( maxTransformFeedbackBufferDataStride == rhs.maxTransformFeedbackBufferDataStride ) + && ( transformFeedbackQueries == rhs.transformFeedbackQueries ) + && ( transformFeedbackStreamsLinesTriangles == rhs.transformFeedbackStreamsLinesTriangles ) + && ( transformFeedbackRasterizationStreamSelect == rhs.transformFeedbackRasterizationStreamSelect ) + && ( transformFeedbackDraw == rhs.transformFeedbackDraw ); + } + + bool operator!=( PhysicalDeviceTransformFeedbackPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceTransformFeedbackPropertiesEXT; + void* pNext = {}; + uint32_t maxTransformFeedbackStreams = {}; + uint32_t maxTransformFeedbackBuffers = {}; + VULKAN_HPP_NAMESPACE::DeviceSize maxTransformFeedbackBufferSize = {}; + uint32_t maxTransformFeedbackStreamDataSize = {}; + uint32_t maxTransformFeedbackBufferDataSize = {}; + uint32_t maxTransformFeedbackBufferDataStride = {}; + VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackQueries = {}; + VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackStreamsLinesTriangles = {}; + VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackRasterizationStreamSelect = {}; + VULKAN_HPP_NAMESPACE::Bool32 transformFeedbackDraw = {}; + + }; + static_assert( sizeof( PhysicalDeviceTransformFeedbackPropertiesEXT ) == sizeof( VkPhysicalDeviceTransformFeedbackPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceTransformFeedbackPropertiesEXT; + }; + + struct PhysicalDeviceUniformBufferStandardLayoutFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceUniformBufferStandardLayoutFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceUniformBufferStandardLayoutFeatures(VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout_ = {}) VULKAN_HPP_NOEXCEPT + : uniformBufferStandardLayout( uniformBufferStandardLayout_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceUniformBufferStandardLayoutFeatures( PhysicalDeviceUniformBufferStandardLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceUniformBufferStandardLayoutFeatures( VkPhysicalDeviceUniformBufferStandardLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceUniformBufferStandardLayoutFeatures & operator=( VkPhysicalDeviceUniformBufferStandardLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceUniformBufferStandardLayoutFeatures & operator=( PhysicalDeviceUniformBufferStandardLayoutFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceUniformBufferStandardLayoutFeatures ) ); + return *this; + } + + PhysicalDeviceUniformBufferStandardLayoutFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceUniformBufferStandardLayoutFeatures & setUniformBufferStandardLayout( VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout_ ) VULKAN_HPP_NOEXCEPT + { + uniformBufferStandardLayout = uniformBufferStandardLayout_; + return *this; + } + + + operator 
VkPhysicalDeviceUniformBufferStandardLayoutFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceUniformBufferStandardLayoutFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceUniformBufferStandardLayoutFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceUniformBufferStandardLayoutFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( uniformBufferStandardLayout == rhs.uniformBufferStandardLayout ); + } + + bool operator!=( PhysicalDeviceUniformBufferStandardLayoutFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceUniformBufferStandardLayoutFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout = {}; + + }; + static_assert( sizeof( PhysicalDeviceUniformBufferStandardLayoutFeatures ) == sizeof( VkPhysicalDeviceUniformBufferStandardLayoutFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceUniformBufferStandardLayoutFeatures; + }; + using PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR = PhysicalDeviceUniformBufferStandardLayoutFeatures; + + struct PhysicalDeviceVariablePointersFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVariablePointersFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVariablePointersFeatures(VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variablePointers_ = {}) VULKAN_HPP_NOEXCEPT + : variablePointersStorageBuffer( variablePointersStorageBuffer_ ), variablePointers( variablePointers_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVariablePointersFeatures( PhysicalDeviceVariablePointersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVariablePointersFeatures( VkPhysicalDeviceVariablePointersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVariablePointersFeatures & operator=( VkPhysicalDeviceVariablePointersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVariablePointersFeatures & operator=( PhysicalDeviceVariablePointersFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVariablePointersFeatures ) ); + return *this; + } + + PhysicalDeviceVariablePointersFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVariablePointersFeatures & setVariablePointersStorageBuffer( VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer_ ) VULKAN_HPP_NOEXCEPT + { + variablePointersStorageBuffer = variablePointersStorageBuffer_; + return *this; + } + + PhysicalDeviceVariablePointersFeatures & setVariablePointers( VULKAN_HPP_NAMESPACE::Bool32 variablePointers_ ) VULKAN_HPP_NOEXCEPT + { + variablePointers = variablePointers_; + return *this; + } + + + operator 
VkPhysicalDeviceVariablePointersFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVariablePointersFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVariablePointersFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceVariablePointersFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( variablePointersStorageBuffer == rhs.variablePointersStorageBuffer ) + && ( variablePointers == rhs.variablePointers ); + } + + bool operator!=( PhysicalDeviceVariablePointersFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVariablePointersFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer = {}; + VULKAN_HPP_NAMESPACE::Bool32 variablePointers = {}; + + }; + static_assert( sizeof( PhysicalDeviceVariablePointersFeatures ) == sizeof( VkPhysicalDeviceVariablePointersFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceVariablePointersFeatures; + }; + using PhysicalDeviceVariablePointerFeatures = PhysicalDeviceVariablePointersFeatures; + using PhysicalDeviceVariablePointerFeaturesKHR = PhysicalDeviceVariablePointersFeatures; + using PhysicalDeviceVariablePointersFeaturesKHR = PhysicalDeviceVariablePointersFeatures; + + struct PhysicalDeviceVertexAttributeDivisorFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ = {}) VULKAN_HPP_NOEXCEPT + : vertexAttributeInstanceRateDivisor( vertexAttributeInstanceRateDivisor_ ), vertexAttributeInstanceRateZeroDivisor( vertexAttributeInstanceRateZeroDivisor_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorFeaturesEXT( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT( VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT & operator=( VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT & operator=( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVertexAttributeDivisorFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT & setVertexAttributeInstanceRateDivisor( VULKAN_HPP_NAMESPACE::Bool32 
vertexAttributeInstanceRateDivisor_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeInstanceRateDivisor = vertexAttributeInstanceRateDivisor_; + return *this; + } + + PhysicalDeviceVertexAttributeDivisorFeaturesEXT & setVertexAttributeInstanceRateZeroDivisor( VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor_ ) VULKAN_HPP_NOEXCEPT + { + vertexAttributeInstanceRateZeroDivisor = vertexAttributeInstanceRateZeroDivisor_; + return *this; + } + + + operator VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vertexAttributeInstanceRateDivisor == rhs.vertexAttributeInstanceRateDivisor ) + && ( vertexAttributeInstanceRateZeroDivisor == rhs.vertexAttributeInstanceRateZeroDivisor ); + } + + bool operator!=( PhysicalDeviceVertexAttributeDivisorFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateDivisor = {}; + VULKAN_HPP_NAMESPACE::Bool32 vertexAttributeInstanceRateZeroDivisor = {}; + + }; + static_assert( sizeof( PhysicalDeviceVertexAttributeDivisorFeaturesEXT ) == sizeof( VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceVertexAttributeDivisorFeaturesEXT; + }; + + struct PhysicalDeviceVertexAttributeDivisorPropertiesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorPropertiesEXT(uint32_t maxVertexAttribDivisor_ = {}) VULKAN_HPP_NOEXCEPT + : maxVertexAttribDivisor( maxVertexAttribDivisor_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVertexAttributeDivisorPropertiesEXT( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVertexAttributeDivisorPropertiesEXT( VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVertexAttributeDivisorPropertiesEXT & operator=( VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVertexAttributeDivisorPropertiesEXT & operator=( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVertexAttributeDivisorPropertiesEXT ) ); + return *this; + } + + + operator VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( maxVertexAttribDivisor == rhs.maxVertexAttribDivisor ); + } + + bool operator!=( PhysicalDeviceVertexAttributeDivisorPropertiesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVertexAttributeDivisorPropertiesEXT; + void* pNext = {}; + uint32_t maxVertexAttribDivisor = {}; + + }; + static_assert( sizeof( PhysicalDeviceVertexAttributeDivisorPropertiesEXT ) == sizeof( VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceVertexAttributeDivisorPropertiesEXT; + }; + + struct PhysicalDeviceVulkan11Features + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkan11Features; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkan11Features(VULKAN_HPP_NAMESPACE::Bool32 storageBuffer16BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer16BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiview_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 multiviewTessellationShader_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 variablePointers_ = {}, VULKAN_HPP_NAMESPACE::Bool32 protectedMemory_ = {}, VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters_ = {}) VULKAN_HPP_NOEXCEPT + : storageBuffer16BitAccess( storageBuffer16BitAccess_ ), uniformAndStorageBuffer16BitAccess( uniformAndStorageBuffer16BitAccess_ ), storagePushConstant16( storagePushConstant16_ ), storageInputOutput16( storageInputOutput16_ ), multiview( multiview_ ), multiviewGeometryShader( multiviewGeometryShader_ ), multiviewTessellationShader( multiviewTessellationShader_ ), variablePointersStorageBuffer( variablePointersStorageBuffer_ ), variablePointers( variablePointers_ ), protectedMemory( protectedMemory_ ), samplerYcbcrConversion( samplerYcbcrConversion_ ), shaderDrawParameters( shaderDrawParameters_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkan11Features( PhysicalDeviceVulkan11Features const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkan11Features( VkPhysicalDeviceVulkan11Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVulkan11Features & operator=( VkPhysicalDeviceVulkan11Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVulkan11Features & operator=( PhysicalDeviceVulkan11Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVulkan11Features ) ); + return *this; + } + + PhysicalDeviceVulkan11Features & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVulkan11Features & setStorageBuffer16BitAccess( VULKAN_HPP_NAMESPACE::Bool32 storageBuffer16BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + storageBuffer16BitAccess = storageBuffer16BitAccess_; + return *this; + } + + PhysicalDeviceVulkan11Features & setUniformAndStorageBuffer16BitAccess( VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer16BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + uniformAndStorageBuffer16BitAccess = uniformAndStorageBuffer16BitAccess_; + return *this; + } + + PhysicalDeviceVulkan11Features & setStoragePushConstant16( VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16_ ) VULKAN_HPP_NOEXCEPT + { + storagePushConstant16 = storagePushConstant16_; + return *this; + } + + PhysicalDeviceVulkan11Features & setStorageInputOutput16( VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16_ ) VULKAN_HPP_NOEXCEPT + { + storageInputOutput16 = storageInputOutput16_; + return *this; + } + + PhysicalDeviceVulkan11Features & setMultiview( 
VULKAN_HPP_NAMESPACE::Bool32 multiview_ ) VULKAN_HPP_NOEXCEPT + { + multiview = multiview_; + return *this; + } + + PhysicalDeviceVulkan11Features & setMultiviewGeometryShader( VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader_ ) VULKAN_HPP_NOEXCEPT + { + multiviewGeometryShader = multiviewGeometryShader_; + return *this; + } + + PhysicalDeviceVulkan11Features & setMultiviewTessellationShader( VULKAN_HPP_NAMESPACE::Bool32 multiviewTessellationShader_ ) VULKAN_HPP_NOEXCEPT + { + multiviewTessellationShader = multiviewTessellationShader_; + return *this; + } + + PhysicalDeviceVulkan11Features & setVariablePointersStorageBuffer( VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer_ ) VULKAN_HPP_NOEXCEPT + { + variablePointersStorageBuffer = variablePointersStorageBuffer_; + return *this; + } + + PhysicalDeviceVulkan11Features & setVariablePointers( VULKAN_HPP_NAMESPACE::Bool32 variablePointers_ ) VULKAN_HPP_NOEXCEPT + { + variablePointers = variablePointers_; + return *this; + } + + PhysicalDeviceVulkan11Features & setProtectedMemory( VULKAN_HPP_NAMESPACE::Bool32 protectedMemory_ ) VULKAN_HPP_NOEXCEPT + { + protectedMemory = protectedMemory_; + return *this; + } + + PhysicalDeviceVulkan11Features & setSamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion_ ) VULKAN_HPP_NOEXCEPT + { + samplerYcbcrConversion = samplerYcbcrConversion_; + return *this; + } + + PhysicalDeviceVulkan11Features & setShaderDrawParameters( VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters_ ) VULKAN_HPP_NOEXCEPT + { + shaderDrawParameters = shaderDrawParameters_; + return *this; + } + + + operator VkPhysicalDeviceVulkan11Features const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkan11Features &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVulkan11Features const& ) const = default; +#else + bool operator==( PhysicalDeviceVulkan11Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( storageBuffer16BitAccess == rhs.storageBuffer16BitAccess ) + && ( uniformAndStorageBuffer16BitAccess == rhs.uniformAndStorageBuffer16BitAccess ) + && ( storagePushConstant16 == rhs.storagePushConstant16 ) + && ( storageInputOutput16 == rhs.storageInputOutput16 ) + && ( multiview == rhs.multiview ) + && ( multiviewGeometryShader == rhs.multiviewGeometryShader ) + && ( multiviewTessellationShader == rhs.multiviewTessellationShader ) + && ( variablePointersStorageBuffer == rhs.variablePointersStorageBuffer ) + && ( variablePointers == rhs.variablePointers ) + && ( protectedMemory == rhs.protectedMemory ) + && ( samplerYcbcrConversion == rhs.samplerYcbcrConversion ) + && ( shaderDrawParameters == rhs.shaderDrawParameters ); + } + + bool operator!=( PhysicalDeviceVulkan11Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkan11Features; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageBuffer16BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer16BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageInputOutput16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiview = {}; + VULKAN_HPP_NAMESPACE::Bool32 multiviewGeometryShader = {}; + VULKAN_HPP_NAMESPACE::Bool32 
multiviewTessellationShader = {}; + VULKAN_HPP_NAMESPACE::Bool32 variablePointersStorageBuffer = {}; + VULKAN_HPP_NAMESPACE::Bool32 variablePointers = {}; + VULKAN_HPP_NAMESPACE::Bool32 protectedMemory = {}; + VULKAN_HPP_NAMESPACE::Bool32 samplerYcbcrConversion = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDrawParameters = {}; + + }; + static_assert( sizeof( PhysicalDeviceVulkan11Features ) == sizeof( VkPhysicalDeviceVulkan11Features ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkan11Features; + }; + + struct PhysicalDeviceVulkan11Properties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkan11Properties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan11Properties(std::array const& deviceUUID_ = {}, std::array const& driverUUID_ = {}, std::array const& deviceLUID_ = {}, uint32_t deviceNodeMask_ = {}, VULKAN_HPP_NAMESPACE::Bool32 deviceLUIDValid_ = {}, uint32_t subgroupSize_ = {}, VULKAN_HPP_NAMESPACE::ShaderStageFlags subgroupSupportedStages_ = {}, VULKAN_HPP_NAMESPACE::SubgroupFeatureFlags subgroupSupportedOperations_ = {}, VULKAN_HPP_NAMESPACE::Bool32 subgroupQuadOperationsInAllStages_ = {}, VULKAN_HPP_NAMESPACE::PointClippingBehavior pointClippingBehavior_ = VULKAN_HPP_NAMESPACE::PointClippingBehavior::eAllClipPlanes, uint32_t maxMultiviewViewCount_ = {}, uint32_t maxMultiviewInstanceIndex_ = {}, VULKAN_HPP_NAMESPACE::Bool32 protectedNoFault_ = {}, uint32_t maxPerSetDescriptors_ = {}, VULKAN_HPP_NAMESPACE::DeviceSize maxMemoryAllocationSize_ = {}) VULKAN_HPP_NOEXCEPT + : deviceUUID( deviceUUID_ ), driverUUID( driverUUID_ ), deviceLUID( deviceLUID_ ), deviceNodeMask( deviceNodeMask_ ), deviceLUIDValid( deviceLUIDValid_ ), subgroupSize( subgroupSize_ ), subgroupSupportedStages( subgroupSupportedStages_ ), subgroupSupportedOperations( subgroupSupportedOperations_ ), subgroupQuadOperationsInAllStages( subgroupQuadOperationsInAllStages_ ), pointClippingBehavior( pointClippingBehavior_ ), maxMultiviewViewCount( maxMultiviewViewCount_ ), maxMultiviewInstanceIndex( maxMultiviewInstanceIndex_ ), protectedNoFault( protectedNoFault_ ), maxPerSetDescriptors( maxPerSetDescriptors_ ), maxMemoryAllocationSize( maxMemoryAllocationSize_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan11Properties( PhysicalDeviceVulkan11Properties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkan11Properties( VkPhysicalDeviceVulkan11Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVulkan11Properties & operator=( VkPhysicalDeviceVulkan11Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVulkan11Properties & operator=( PhysicalDeviceVulkan11Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVulkan11Properties ) ); + return *this; + } + + + operator VkPhysicalDeviceVulkan11Properties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkan11Properties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( 
PhysicalDeviceVulkan11Properties const& ) const = default; +#else + bool operator==( PhysicalDeviceVulkan11Properties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( deviceUUID == rhs.deviceUUID ) + && ( driverUUID == rhs.driverUUID ) + && ( deviceLUID == rhs.deviceLUID ) + && ( deviceNodeMask == rhs.deviceNodeMask ) + && ( deviceLUIDValid == rhs.deviceLUIDValid ) + && ( subgroupSize == rhs.subgroupSize ) + && ( subgroupSupportedStages == rhs.subgroupSupportedStages ) + && ( subgroupSupportedOperations == rhs.subgroupSupportedOperations ) + && ( subgroupQuadOperationsInAllStages == rhs.subgroupQuadOperationsInAllStages ) + && ( pointClippingBehavior == rhs.pointClippingBehavior ) + && ( maxMultiviewViewCount == rhs.maxMultiviewViewCount ) + && ( maxMultiviewInstanceIndex == rhs.maxMultiviewInstanceIndex ) + && ( protectedNoFault == rhs.protectedNoFault ) + && ( maxPerSetDescriptors == rhs.maxPerSetDescriptors ) + && ( maxMemoryAllocationSize == rhs.maxMemoryAllocationSize ); + } + + bool operator!=( PhysicalDeviceVulkan11Properties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkan11Properties; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D deviceUUID = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D driverUUID = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D deviceLUID = {}; + uint32_t deviceNodeMask = {}; + VULKAN_HPP_NAMESPACE::Bool32 deviceLUIDValid = {}; + uint32_t subgroupSize = {}; + VULKAN_HPP_NAMESPACE::ShaderStageFlags subgroupSupportedStages = {}; + VULKAN_HPP_NAMESPACE::SubgroupFeatureFlags subgroupSupportedOperations = {}; + VULKAN_HPP_NAMESPACE::Bool32 subgroupQuadOperationsInAllStages = {}; + VULKAN_HPP_NAMESPACE::PointClippingBehavior pointClippingBehavior = VULKAN_HPP_NAMESPACE::PointClippingBehavior::eAllClipPlanes; + uint32_t maxMultiviewViewCount = {}; + uint32_t maxMultiviewInstanceIndex = {}; + VULKAN_HPP_NAMESPACE::Bool32 protectedNoFault = {}; + uint32_t maxPerSetDescriptors = {}; + VULKAN_HPP_NAMESPACE::DeviceSize maxMemoryAllocationSize = {}; + + }; + static_assert( sizeof( PhysicalDeviceVulkan11Properties ) == sizeof( VkPhysicalDeviceVulkan11Properties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkan11Properties; + }; + + struct PhysicalDeviceVulkan12Features + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkan12Features; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkan12Features(VULKAN_HPP_NAMESPACE::Bool32 samplerMirrorClampToEdge_ = {}, VULKAN_HPP_NAMESPACE::Bool32 drawIndirectCount_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storageBuffer8BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess_ = {}, VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInt8_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayDynamicIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayNonUniformIndexing_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingSampledImageUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageImageUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUpdateUnusedWhilePending_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingPartiallyBound_ = {}, VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount_ = {}, VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray_ = {}, VULKAN_HPP_NAMESPACE::Bool32 samplerFilterMinmax_ = {}, VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout_ = {}, VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer_ = {}, VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts_ = {}, VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset_ = {}, VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ = {}, VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModel_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderOutputViewportIndex_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderOutputLayer_ = {}, 
VULKAN_HPP_NAMESPACE::Bool32 subgroupBroadcastDynamicId_ = {}) VULKAN_HPP_NOEXCEPT + : samplerMirrorClampToEdge( samplerMirrorClampToEdge_ ), drawIndirectCount( drawIndirectCount_ ), storageBuffer8BitAccess( storageBuffer8BitAccess_ ), uniformAndStorageBuffer8BitAccess( uniformAndStorageBuffer8BitAccess_ ), storagePushConstant8( storagePushConstant8_ ), shaderBufferInt64Atomics( shaderBufferInt64Atomics_ ), shaderSharedInt64Atomics( shaderSharedInt64Atomics_ ), shaderFloat16( shaderFloat16_ ), shaderInt8( shaderInt8_ ), descriptorIndexing( descriptorIndexing_ ), shaderInputAttachmentArrayDynamicIndexing( shaderInputAttachmentArrayDynamicIndexing_ ), shaderUniformTexelBufferArrayDynamicIndexing( shaderUniformTexelBufferArrayDynamicIndexing_ ), shaderStorageTexelBufferArrayDynamicIndexing( shaderStorageTexelBufferArrayDynamicIndexing_ ), shaderUniformBufferArrayNonUniformIndexing( shaderUniformBufferArrayNonUniformIndexing_ ), shaderSampledImageArrayNonUniformIndexing( shaderSampledImageArrayNonUniformIndexing_ ), shaderStorageBufferArrayNonUniformIndexing( shaderStorageBufferArrayNonUniformIndexing_ ), shaderStorageImageArrayNonUniformIndexing( shaderStorageImageArrayNonUniformIndexing_ ), shaderInputAttachmentArrayNonUniformIndexing( shaderInputAttachmentArrayNonUniformIndexing_ ), shaderUniformTexelBufferArrayNonUniformIndexing( shaderUniformTexelBufferArrayNonUniformIndexing_ ), shaderStorageTexelBufferArrayNonUniformIndexing( shaderStorageTexelBufferArrayNonUniformIndexing_ ), descriptorBindingUniformBufferUpdateAfterBind( descriptorBindingUniformBufferUpdateAfterBind_ ), descriptorBindingSampledImageUpdateAfterBind( descriptorBindingSampledImageUpdateAfterBind_ ), descriptorBindingStorageImageUpdateAfterBind( descriptorBindingStorageImageUpdateAfterBind_ ), descriptorBindingStorageBufferUpdateAfterBind( descriptorBindingStorageBufferUpdateAfterBind_ ), descriptorBindingUniformTexelBufferUpdateAfterBind( descriptorBindingUniformTexelBufferUpdateAfterBind_ ), descriptorBindingStorageTexelBufferUpdateAfterBind( descriptorBindingStorageTexelBufferUpdateAfterBind_ ), descriptorBindingUpdateUnusedWhilePending( descriptorBindingUpdateUnusedWhilePending_ ), descriptorBindingPartiallyBound( descriptorBindingPartiallyBound_ ), descriptorBindingVariableDescriptorCount( descriptorBindingVariableDescriptorCount_ ), runtimeDescriptorArray( runtimeDescriptorArray_ ), samplerFilterMinmax( samplerFilterMinmax_ ), scalarBlockLayout( scalarBlockLayout_ ), imagelessFramebuffer( imagelessFramebuffer_ ), uniformBufferStandardLayout( uniformBufferStandardLayout_ ), shaderSubgroupExtendedTypes( shaderSubgroupExtendedTypes_ ), separateDepthStencilLayouts( separateDepthStencilLayouts_ ), hostQueryReset( hostQueryReset_ ), timelineSemaphore( timelineSemaphore_ ), bufferDeviceAddress( bufferDeviceAddress_ ), bufferDeviceAddressCaptureReplay( bufferDeviceAddressCaptureReplay_ ), bufferDeviceAddressMultiDevice( bufferDeviceAddressMultiDevice_ ), vulkanMemoryModel( vulkanMemoryModel_ ), vulkanMemoryModelDeviceScope( vulkanMemoryModelDeviceScope_ ), vulkanMemoryModelAvailabilityVisibilityChains( vulkanMemoryModelAvailabilityVisibilityChains_ ), shaderOutputViewportIndex( shaderOutputViewportIndex_ ), shaderOutputLayer( shaderOutputLayer_ ), subgroupBroadcastDynamicId( subgroupBroadcastDynamicId_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkan12Features( PhysicalDeviceVulkan12Features const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkan12Features( VkPhysicalDeviceVulkan12Features const & rhs ) 
VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVulkan12Features & operator=( VkPhysicalDeviceVulkan12Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVulkan12Features & operator=( PhysicalDeviceVulkan12Features const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVulkan12Features ) ); + return *this; + } + + PhysicalDeviceVulkan12Features & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVulkan12Features & setSamplerMirrorClampToEdge( VULKAN_HPP_NAMESPACE::Bool32 samplerMirrorClampToEdge_ ) VULKAN_HPP_NOEXCEPT + { + samplerMirrorClampToEdge = samplerMirrorClampToEdge_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDrawIndirectCount( VULKAN_HPP_NAMESPACE::Bool32 drawIndirectCount_ ) VULKAN_HPP_NOEXCEPT + { + drawIndirectCount = drawIndirectCount_; + return *this; + } + + PhysicalDeviceVulkan12Features & setStorageBuffer8BitAccess( VULKAN_HPP_NAMESPACE::Bool32 storageBuffer8BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + storageBuffer8BitAccess = storageBuffer8BitAccess_; + return *this; + } + + PhysicalDeviceVulkan12Features & setUniformAndStorageBuffer8BitAccess( VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess_ ) VULKAN_HPP_NOEXCEPT + { + uniformAndStorageBuffer8BitAccess = uniformAndStorageBuffer8BitAccess_; + return *this; + } + + PhysicalDeviceVulkan12Features & setStoragePushConstant8( VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8_ ) VULKAN_HPP_NOEXCEPT + { + storagePushConstant8 = storagePushConstant8_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderBufferInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderBufferInt64Atomics = shaderBufferInt64Atomics_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderSharedInt64Atomics( VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics_ ) VULKAN_HPP_NOEXCEPT + { + shaderSharedInt64Atomics = shaderSharedInt64Atomics_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderFloat16( VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16_ ) VULKAN_HPP_NOEXCEPT + { + shaderFloat16 = shaderFloat16_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderInt8( VULKAN_HPP_NAMESPACE::Bool32 shaderInt8_ ) VULKAN_HPP_NOEXCEPT + { + shaderInt8 = shaderInt8_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorIndexing( VULKAN_HPP_NAMESPACE::Bool32 descriptorIndexing_ ) VULKAN_HPP_NOEXCEPT + { + descriptorIndexing = descriptorIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderInputAttachmentArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderInputAttachmentArrayDynamicIndexing = shaderInputAttachmentArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderUniformTexelBufferArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformTexelBufferArrayDynamicIndexing = shaderUniformTexelBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderStorageTexelBufferArrayDynamicIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayDynamicIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageTexelBufferArrayDynamicIndexing = 
shaderStorageTexelBufferArrayDynamicIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderUniformBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformBufferArrayNonUniformIndexing = shaderUniformBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderSampledImageArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderSampledImageArrayNonUniformIndexing = shaderSampledImageArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderStorageBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageBufferArrayNonUniformIndexing = shaderStorageBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderStorageImageArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageImageArrayNonUniformIndexing = shaderStorageImageArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderInputAttachmentArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderInputAttachmentArrayNonUniformIndexing = shaderInputAttachmentArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderUniformTexelBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderUniformTexelBufferArrayNonUniformIndexing = shaderUniformTexelBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderStorageTexelBufferArrayNonUniformIndexing( VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayNonUniformIndexing_ ) VULKAN_HPP_NOEXCEPT + { + shaderStorageTexelBufferArrayNonUniformIndexing = shaderStorageTexelBufferArrayNonUniformIndexing_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingUniformBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingUniformBufferUpdateAfterBind = descriptorBindingUniformBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingSampledImageUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingSampledImageUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingSampledImageUpdateAfterBind = descriptorBindingSampledImageUpdateAfterBind_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingStorageImageUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageImageUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingStorageImageUpdateAfterBind = descriptorBindingStorageImageUpdateAfterBind_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingStorageBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingStorageBufferUpdateAfterBind = descriptorBindingStorageBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingUniformTexelBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 
descriptorBindingUniformTexelBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingUniformTexelBufferUpdateAfterBind = descriptorBindingUniformTexelBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingStorageTexelBufferUpdateAfterBind( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingStorageTexelBufferUpdateAfterBind = descriptorBindingStorageTexelBufferUpdateAfterBind_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingUpdateUnusedWhilePending( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUpdateUnusedWhilePending_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingUpdateUnusedWhilePending = descriptorBindingUpdateUnusedWhilePending_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingPartiallyBound( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingPartiallyBound_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingPartiallyBound = descriptorBindingPartiallyBound_; + return *this; + } + + PhysicalDeviceVulkan12Features & setDescriptorBindingVariableDescriptorCount( VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount_ ) VULKAN_HPP_NOEXCEPT + { + descriptorBindingVariableDescriptorCount = descriptorBindingVariableDescriptorCount_; + return *this; + } + + PhysicalDeviceVulkan12Features & setRuntimeDescriptorArray( VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray_ ) VULKAN_HPP_NOEXCEPT + { + runtimeDescriptorArray = runtimeDescriptorArray_; + return *this; + } + + PhysicalDeviceVulkan12Features & setSamplerFilterMinmax( VULKAN_HPP_NAMESPACE::Bool32 samplerFilterMinmax_ ) VULKAN_HPP_NOEXCEPT + { + samplerFilterMinmax = samplerFilterMinmax_; + return *this; + } + + PhysicalDeviceVulkan12Features & setScalarBlockLayout( VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout_ ) VULKAN_HPP_NOEXCEPT + { + scalarBlockLayout = scalarBlockLayout_; + return *this; + } + + PhysicalDeviceVulkan12Features & setImagelessFramebuffer( VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer_ ) VULKAN_HPP_NOEXCEPT + { + imagelessFramebuffer = imagelessFramebuffer_; + return *this; + } + + PhysicalDeviceVulkan12Features & setUniformBufferStandardLayout( VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout_ ) VULKAN_HPP_NOEXCEPT + { + uniformBufferStandardLayout = uniformBufferStandardLayout_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderSubgroupExtendedTypes( VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes_ ) VULKAN_HPP_NOEXCEPT + { + shaderSubgroupExtendedTypes = shaderSubgroupExtendedTypes_; + return *this; + } + + PhysicalDeviceVulkan12Features & setSeparateDepthStencilLayouts( VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts_ ) VULKAN_HPP_NOEXCEPT + { + separateDepthStencilLayouts = separateDepthStencilLayouts_; + return *this; + } + + PhysicalDeviceVulkan12Features & setHostQueryReset( VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset_ ) VULKAN_HPP_NOEXCEPT + { + hostQueryReset = hostQueryReset_; + return *this; + } + + PhysicalDeviceVulkan12Features & setTimelineSemaphore( VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore_ ) VULKAN_HPP_NOEXCEPT + { + timelineSemaphore = timelineSemaphore_; + return *this; + } + + PhysicalDeviceVulkan12Features & setBufferDeviceAddress( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddress = bufferDeviceAddress_; + return *this; + } + + PhysicalDeviceVulkan12Features & setBufferDeviceAddressCaptureReplay( 
VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddressCaptureReplay = bufferDeviceAddressCaptureReplay_; + return *this; + } + + PhysicalDeviceVulkan12Features & setBufferDeviceAddressMultiDevice( VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice_ ) VULKAN_HPP_NOEXCEPT + { + bufferDeviceAddressMultiDevice = bufferDeviceAddressMultiDevice_; + return *this; + } + + PhysicalDeviceVulkan12Features & setVulkanMemoryModel( VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModel_ ) VULKAN_HPP_NOEXCEPT + { + vulkanMemoryModel = vulkanMemoryModel_; + return *this; + } + + PhysicalDeviceVulkan12Features & setVulkanMemoryModelDeviceScope( VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope_ ) VULKAN_HPP_NOEXCEPT + { + vulkanMemoryModelDeviceScope = vulkanMemoryModelDeviceScope_; + return *this; + } + + PhysicalDeviceVulkan12Features & setVulkanMemoryModelAvailabilityVisibilityChains( VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains_ ) VULKAN_HPP_NOEXCEPT + { + vulkanMemoryModelAvailabilityVisibilityChains = vulkanMemoryModelAvailabilityVisibilityChains_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderOutputViewportIndex( VULKAN_HPP_NAMESPACE::Bool32 shaderOutputViewportIndex_ ) VULKAN_HPP_NOEXCEPT + { + shaderOutputViewportIndex = shaderOutputViewportIndex_; + return *this; + } + + PhysicalDeviceVulkan12Features & setShaderOutputLayer( VULKAN_HPP_NAMESPACE::Bool32 shaderOutputLayer_ ) VULKAN_HPP_NOEXCEPT + { + shaderOutputLayer = shaderOutputLayer_; + return *this; + } + + PhysicalDeviceVulkan12Features & setSubgroupBroadcastDynamicId( VULKAN_HPP_NAMESPACE::Bool32 subgroupBroadcastDynamicId_ ) VULKAN_HPP_NOEXCEPT + { + subgroupBroadcastDynamicId = subgroupBroadcastDynamicId_; + return *this; + } + + + operator VkPhysicalDeviceVulkan12Features const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkan12Features &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVulkan12Features const& ) const = default; +#else + bool operator==( PhysicalDeviceVulkan12Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( samplerMirrorClampToEdge == rhs.samplerMirrorClampToEdge ) + && ( drawIndirectCount == rhs.drawIndirectCount ) + && ( storageBuffer8BitAccess == rhs.storageBuffer8BitAccess ) + && ( uniformAndStorageBuffer8BitAccess == rhs.uniformAndStorageBuffer8BitAccess ) + && ( storagePushConstant8 == rhs.storagePushConstant8 ) + && ( shaderBufferInt64Atomics == rhs.shaderBufferInt64Atomics ) + && ( shaderSharedInt64Atomics == rhs.shaderSharedInt64Atomics ) + && ( shaderFloat16 == rhs.shaderFloat16 ) + && ( shaderInt8 == rhs.shaderInt8 ) + && ( descriptorIndexing == rhs.descriptorIndexing ) + && ( shaderInputAttachmentArrayDynamicIndexing == rhs.shaderInputAttachmentArrayDynamicIndexing ) + && ( shaderUniformTexelBufferArrayDynamicIndexing == rhs.shaderUniformTexelBufferArrayDynamicIndexing ) + && ( shaderStorageTexelBufferArrayDynamicIndexing == rhs.shaderStorageTexelBufferArrayDynamicIndexing ) + && ( shaderUniformBufferArrayNonUniformIndexing == rhs.shaderUniformBufferArrayNonUniformIndexing ) + && ( shaderSampledImageArrayNonUniformIndexing == rhs.shaderSampledImageArrayNonUniformIndexing ) + && ( shaderStorageBufferArrayNonUniformIndexing == 
rhs.shaderStorageBufferArrayNonUniformIndexing ) + && ( shaderStorageImageArrayNonUniformIndexing == rhs.shaderStorageImageArrayNonUniformIndexing ) + && ( shaderInputAttachmentArrayNonUniformIndexing == rhs.shaderInputAttachmentArrayNonUniformIndexing ) + && ( shaderUniformTexelBufferArrayNonUniformIndexing == rhs.shaderUniformTexelBufferArrayNonUniformIndexing ) + && ( shaderStorageTexelBufferArrayNonUniformIndexing == rhs.shaderStorageTexelBufferArrayNonUniformIndexing ) + && ( descriptorBindingUniformBufferUpdateAfterBind == rhs.descriptorBindingUniformBufferUpdateAfterBind ) + && ( descriptorBindingSampledImageUpdateAfterBind == rhs.descriptorBindingSampledImageUpdateAfterBind ) + && ( descriptorBindingStorageImageUpdateAfterBind == rhs.descriptorBindingStorageImageUpdateAfterBind ) + && ( descriptorBindingStorageBufferUpdateAfterBind == rhs.descriptorBindingStorageBufferUpdateAfterBind ) + && ( descriptorBindingUniformTexelBufferUpdateAfterBind == rhs.descriptorBindingUniformTexelBufferUpdateAfterBind ) + && ( descriptorBindingStorageTexelBufferUpdateAfterBind == rhs.descriptorBindingStorageTexelBufferUpdateAfterBind ) + && ( descriptorBindingUpdateUnusedWhilePending == rhs.descriptorBindingUpdateUnusedWhilePending ) + && ( descriptorBindingPartiallyBound == rhs.descriptorBindingPartiallyBound ) + && ( descriptorBindingVariableDescriptorCount == rhs.descriptorBindingVariableDescriptorCount ) + && ( runtimeDescriptorArray == rhs.runtimeDescriptorArray ) + && ( samplerFilterMinmax == rhs.samplerFilterMinmax ) + && ( scalarBlockLayout == rhs.scalarBlockLayout ) + && ( imagelessFramebuffer == rhs.imagelessFramebuffer ) + && ( uniformBufferStandardLayout == rhs.uniformBufferStandardLayout ) + && ( shaderSubgroupExtendedTypes == rhs.shaderSubgroupExtendedTypes ) + && ( separateDepthStencilLayouts == rhs.separateDepthStencilLayouts ) + && ( hostQueryReset == rhs.hostQueryReset ) + && ( timelineSemaphore == rhs.timelineSemaphore ) + && ( bufferDeviceAddress == rhs.bufferDeviceAddress ) + && ( bufferDeviceAddressCaptureReplay == rhs.bufferDeviceAddressCaptureReplay ) + && ( bufferDeviceAddressMultiDevice == rhs.bufferDeviceAddressMultiDevice ) + && ( vulkanMemoryModel == rhs.vulkanMemoryModel ) + && ( vulkanMemoryModelDeviceScope == rhs.vulkanMemoryModelDeviceScope ) + && ( vulkanMemoryModelAvailabilityVisibilityChains == rhs.vulkanMemoryModelAvailabilityVisibilityChains ) + && ( shaderOutputViewportIndex == rhs.shaderOutputViewportIndex ) + && ( shaderOutputLayer == rhs.shaderOutputLayer ) + && ( subgroupBroadcastDynamicId == rhs.subgroupBroadcastDynamicId ); + } + + bool operator!=( PhysicalDeviceVulkan12Features const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkan12Features; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 samplerMirrorClampToEdge = {}; + VULKAN_HPP_NAMESPACE::Bool32 drawIndirectCount = {}; + VULKAN_HPP_NAMESPACE::Bool32 storageBuffer8BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformAndStorageBuffer8BitAccess = {}; + VULKAN_HPP_NAMESPACE::Bool32 storagePushConstant8 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderBufferInt64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSharedInt64Atomics = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInt8 = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayDynamicIndexing = {}; + 
VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayDynamicIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformTexelBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageTexelBufferArrayNonUniformIndexing = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingSampledImageUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageImageUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUniformTexelBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingStorageTexelBufferUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingUpdateUnusedWhilePending = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingPartiallyBound = {}; + VULKAN_HPP_NAMESPACE::Bool32 descriptorBindingVariableDescriptorCount = {}; + VULKAN_HPP_NAMESPACE::Bool32 runtimeDescriptorArray = {}; + VULKAN_HPP_NAMESPACE::Bool32 samplerFilterMinmax = {}; + VULKAN_HPP_NAMESPACE::Bool32 scalarBlockLayout = {}; + VULKAN_HPP_NAMESPACE::Bool32 imagelessFramebuffer = {}; + VULKAN_HPP_NAMESPACE::Bool32 uniformBufferStandardLayout = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSubgroupExtendedTypes = {}; + VULKAN_HPP_NAMESPACE::Bool32 separateDepthStencilLayouts = {}; + VULKAN_HPP_NAMESPACE::Bool32 hostQueryReset = {}; + VULKAN_HPP_NAMESPACE::Bool32 timelineSemaphore = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddress = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressCaptureReplay = {}; + VULKAN_HPP_NAMESPACE::Bool32 bufferDeviceAddressMultiDevice = {}; + VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModel = {}; + VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope = {}; + VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderOutputViewportIndex = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderOutputLayer = {}; + VULKAN_HPP_NAMESPACE::Bool32 subgroupBroadcastDynamicId = {}; + + }; + static_assert( sizeof( PhysicalDeviceVulkan12Features ) == sizeof( VkPhysicalDeviceVulkan12Features ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkan12Features; + }; + + struct PhysicalDeviceVulkan12Properties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkan12Properties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan12Properties(VULKAN_HPP_NAMESPACE::DriverId driverID_ = VULKAN_HPP_NAMESPACE::DriverId::eAmdProprietary, std::array const& driverName_ = {}, std::array const& driverInfo_ = {}, VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion_ = {}, VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence denormBehaviorIndependence_ = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly, VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence roundingModeIndependence_ = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly, VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat64_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat16_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat32_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat64_ = {}, uint32_t maxUpdateAfterBindDescriptorsInAllPools_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexingNative_ = {}, VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccessUpdateAfterBind_ = {}, VULKAN_HPP_NAMESPACE::Bool32 quadDivergentImplicitLod_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindSamplers_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages_ = {}, uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments_ = {}, uint32_t maxPerStageUpdateAfterBindResources_ = {}, uint32_t maxDescriptorSetUpdateAfterBindSamplers_ = {}, uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers_ = {}, uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ = {}, uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers_ = {}, uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ = {}, uint32_t maxDescriptorSetUpdateAfterBindSampledImages_ = {}, uint32_t maxDescriptorSetUpdateAfterBindStorageImages_ = {}, uint32_t maxDescriptorSetUpdateAfterBindInputAttachments_ = {}, VULKAN_HPP_NAMESPACE::ResolveModeFlags 
supportedDepthResolveModes_ = {}, VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedStencilResolveModes_ = {}, VULKAN_HPP_NAMESPACE::Bool32 independentResolveNone_ = {}, VULKAN_HPP_NAMESPACE::Bool32 independentResolve_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxSingleComponentFormats_ = {}, VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxImageComponentMapping_ = {}, uint64_t maxTimelineSemaphoreValueDifference_ = {}, VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferIntegerColorSampleCounts_ = {}) VULKAN_HPP_NOEXCEPT + : driverID( driverID_ ), driverName( driverName_ ), driverInfo( driverInfo_ ), conformanceVersion( conformanceVersion_ ), denormBehaviorIndependence( denormBehaviorIndependence_ ), roundingModeIndependence( roundingModeIndependence_ ), shaderSignedZeroInfNanPreserveFloat16( shaderSignedZeroInfNanPreserveFloat16_ ), shaderSignedZeroInfNanPreserveFloat32( shaderSignedZeroInfNanPreserveFloat32_ ), shaderSignedZeroInfNanPreserveFloat64( shaderSignedZeroInfNanPreserveFloat64_ ), shaderDenormPreserveFloat16( shaderDenormPreserveFloat16_ ), shaderDenormPreserveFloat32( shaderDenormPreserveFloat32_ ), shaderDenormPreserveFloat64( shaderDenormPreserveFloat64_ ), shaderDenormFlushToZeroFloat16( shaderDenormFlushToZeroFloat16_ ), shaderDenormFlushToZeroFloat32( shaderDenormFlushToZeroFloat32_ ), shaderDenormFlushToZeroFloat64( shaderDenormFlushToZeroFloat64_ ), shaderRoundingModeRTEFloat16( shaderRoundingModeRTEFloat16_ ), shaderRoundingModeRTEFloat32( shaderRoundingModeRTEFloat32_ ), shaderRoundingModeRTEFloat64( shaderRoundingModeRTEFloat64_ ), shaderRoundingModeRTZFloat16( shaderRoundingModeRTZFloat16_ ), shaderRoundingModeRTZFloat32( shaderRoundingModeRTZFloat32_ ), shaderRoundingModeRTZFloat64( shaderRoundingModeRTZFloat64_ ), maxUpdateAfterBindDescriptorsInAllPools( maxUpdateAfterBindDescriptorsInAllPools_ ), shaderUniformBufferArrayNonUniformIndexingNative( shaderUniformBufferArrayNonUniformIndexingNative_ ), shaderSampledImageArrayNonUniformIndexingNative( shaderSampledImageArrayNonUniformIndexingNative_ ), shaderStorageBufferArrayNonUniformIndexingNative( shaderStorageBufferArrayNonUniformIndexingNative_ ), shaderStorageImageArrayNonUniformIndexingNative( shaderStorageImageArrayNonUniformIndexingNative_ ), shaderInputAttachmentArrayNonUniformIndexingNative( shaderInputAttachmentArrayNonUniformIndexingNative_ ), robustBufferAccessUpdateAfterBind( robustBufferAccessUpdateAfterBind_ ), quadDivergentImplicitLod( quadDivergentImplicitLod_ ), maxPerStageDescriptorUpdateAfterBindSamplers( maxPerStageDescriptorUpdateAfterBindSamplers_ ), maxPerStageDescriptorUpdateAfterBindUniformBuffers( maxPerStageDescriptorUpdateAfterBindUniformBuffers_ ), maxPerStageDescriptorUpdateAfterBindStorageBuffers( maxPerStageDescriptorUpdateAfterBindStorageBuffers_ ), maxPerStageDescriptorUpdateAfterBindSampledImages( maxPerStageDescriptorUpdateAfterBindSampledImages_ ), maxPerStageDescriptorUpdateAfterBindStorageImages( maxPerStageDescriptorUpdateAfterBindStorageImages_ ), maxPerStageDescriptorUpdateAfterBindInputAttachments( maxPerStageDescriptorUpdateAfterBindInputAttachments_ ), maxPerStageUpdateAfterBindResources( maxPerStageUpdateAfterBindResources_ ), maxDescriptorSetUpdateAfterBindSamplers( maxDescriptorSetUpdateAfterBindSamplers_ ), maxDescriptorSetUpdateAfterBindUniformBuffers( maxDescriptorSetUpdateAfterBindUniformBuffers_ ), maxDescriptorSetUpdateAfterBindUniformBuffersDynamic( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic_ ), maxDescriptorSetUpdateAfterBindStorageBuffers( 
maxDescriptorSetUpdateAfterBindStorageBuffers_ ), maxDescriptorSetUpdateAfterBindStorageBuffersDynamic( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic_ ), maxDescriptorSetUpdateAfterBindSampledImages( maxDescriptorSetUpdateAfterBindSampledImages_ ), maxDescriptorSetUpdateAfterBindStorageImages( maxDescriptorSetUpdateAfterBindStorageImages_ ), maxDescriptorSetUpdateAfterBindInputAttachments( maxDescriptorSetUpdateAfterBindInputAttachments_ ), supportedDepthResolveModes( supportedDepthResolveModes_ ), supportedStencilResolveModes( supportedStencilResolveModes_ ), independentResolveNone( independentResolveNone_ ), independentResolve( independentResolve_ ), filterMinmaxSingleComponentFormats( filterMinmaxSingleComponentFormats_ ), filterMinmaxImageComponentMapping( filterMinmaxImageComponentMapping_ ), maxTimelineSemaphoreValueDifference( maxTimelineSemaphoreValueDifference_ ), framebufferIntegerColorSampleCounts( framebufferIntegerColorSampleCounts_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PhysicalDeviceVulkan12Properties( PhysicalDeviceVulkan12Properties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkan12Properties( VkPhysicalDeviceVulkan12Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVulkan12Properties & operator=( VkPhysicalDeviceVulkan12Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVulkan12Properties & operator=( PhysicalDeviceVulkan12Properties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVulkan12Properties ) ); + return *this; + } + + + operator VkPhysicalDeviceVulkan12Properties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkan12Properties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVulkan12Properties const& ) const = default; +#else + bool operator==( PhysicalDeviceVulkan12Properties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( driverID == rhs.driverID ) + && ( driverName == rhs.driverName ) + && ( driverInfo == rhs.driverInfo ) + && ( conformanceVersion == rhs.conformanceVersion ) + && ( denormBehaviorIndependence == rhs.denormBehaviorIndependence ) + && ( roundingModeIndependence == rhs.roundingModeIndependence ) + && ( shaderSignedZeroInfNanPreserveFloat16 == rhs.shaderSignedZeroInfNanPreserveFloat16 ) + && ( shaderSignedZeroInfNanPreserveFloat32 == rhs.shaderSignedZeroInfNanPreserveFloat32 ) + && ( shaderSignedZeroInfNanPreserveFloat64 == rhs.shaderSignedZeroInfNanPreserveFloat64 ) + && ( shaderDenormPreserveFloat16 == rhs.shaderDenormPreserveFloat16 ) + && ( shaderDenormPreserveFloat32 == rhs.shaderDenormPreserveFloat32 ) + && ( shaderDenormPreserveFloat64 == rhs.shaderDenormPreserveFloat64 ) + && ( shaderDenormFlushToZeroFloat16 == rhs.shaderDenormFlushToZeroFloat16 ) + && ( shaderDenormFlushToZeroFloat32 == rhs.shaderDenormFlushToZeroFloat32 ) + && ( shaderDenormFlushToZeroFloat64 == rhs.shaderDenormFlushToZeroFloat64 ) + && ( shaderRoundingModeRTEFloat16 == rhs.shaderRoundingModeRTEFloat16 ) + && ( shaderRoundingModeRTEFloat32 == rhs.shaderRoundingModeRTEFloat32 ) + && ( shaderRoundingModeRTEFloat64 == rhs.shaderRoundingModeRTEFloat64 ) + && ( shaderRoundingModeRTZFloat16 == 
rhs.shaderRoundingModeRTZFloat16 ) + && ( shaderRoundingModeRTZFloat32 == rhs.shaderRoundingModeRTZFloat32 ) + && ( shaderRoundingModeRTZFloat64 == rhs.shaderRoundingModeRTZFloat64 ) + && ( maxUpdateAfterBindDescriptorsInAllPools == rhs.maxUpdateAfterBindDescriptorsInAllPools ) + && ( shaderUniformBufferArrayNonUniformIndexingNative == rhs.shaderUniformBufferArrayNonUniformIndexingNative ) + && ( shaderSampledImageArrayNonUniformIndexingNative == rhs.shaderSampledImageArrayNonUniformIndexingNative ) + && ( shaderStorageBufferArrayNonUniformIndexingNative == rhs.shaderStorageBufferArrayNonUniformIndexingNative ) + && ( shaderStorageImageArrayNonUniformIndexingNative == rhs.shaderStorageImageArrayNonUniformIndexingNative ) + && ( shaderInputAttachmentArrayNonUniformIndexingNative == rhs.shaderInputAttachmentArrayNonUniformIndexingNative ) + && ( robustBufferAccessUpdateAfterBind == rhs.robustBufferAccessUpdateAfterBind ) + && ( quadDivergentImplicitLod == rhs.quadDivergentImplicitLod ) + && ( maxPerStageDescriptorUpdateAfterBindSamplers == rhs.maxPerStageDescriptorUpdateAfterBindSamplers ) + && ( maxPerStageDescriptorUpdateAfterBindUniformBuffers == rhs.maxPerStageDescriptorUpdateAfterBindUniformBuffers ) + && ( maxPerStageDescriptorUpdateAfterBindStorageBuffers == rhs.maxPerStageDescriptorUpdateAfterBindStorageBuffers ) + && ( maxPerStageDescriptorUpdateAfterBindSampledImages == rhs.maxPerStageDescriptorUpdateAfterBindSampledImages ) + && ( maxPerStageDescriptorUpdateAfterBindStorageImages == rhs.maxPerStageDescriptorUpdateAfterBindStorageImages ) + && ( maxPerStageDescriptorUpdateAfterBindInputAttachments == rhs.maxPerStageDescriptorUpdateAfterBindInputAttachments ) + && ( maxPerStageUpdateAfterBindResources == rhs.maxPerStageUpdateAfterBindResources ) + && ( maxDescriptorSetUpdateAfterBindSamplers == rhs.maxDescriptorSetUpdateAfterBindSamplers ) + && ( maxDescriptorSetUpdateAfterBindUniformBuffers == rhs.maxDescriptorSetUpdateAfterBindUniformBuffers ) + && ( maxDescriptorSetUpdateAfterBindUniformBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic ) + && ( maxDescriptorSetUpdateAfterBindStorageBuffers == rhs.maxDescriptorSetUpdateAfterBindStorageBuffers ) + && ( maxDescriptorSetUpdateAfterBindStorageBuffersDynamic == rhs.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic ) + && ( maxDescriptorSetUpdateAfterBindSampledImages == rhs.maxDescriptorSetUpdateAfterBindSampledImages ) + && ( maxDescriptorSetUpdateAfterBindStorageImages == rhs.maxDescriptorSetUpdateAfterBindStorageImages ) + && ( maxDescriptorSetUpdateAfterBindInputAttachments == rhs.maxDescriptorSetUpdateAfterBindInputAttachments ) + && ( supportedDepthResolveModes == rhs.supportedDepthResolveModes ) + && ( supportedStencilResolveModes == rhs.supportedStencilResolveModes ) + && ( independentResolveNone == rhs.independentResolveNone ) + && ( independentResolve == rhs.independentResolve ) + && ( filterMinmaxSingleComponentFormats == rhs.filterMinmaxSingleComponentFormats ) + && ( filterMinmaxImageComponentMapping == rhs.filterMinmaxImageComponentMapping ) + && ( maxTimelineSemaphoreValueDifference == rhs.maxTimelineSemaphoreValueDifference ) + && ( framebufferIntegerColorSampleCounts == rhs.framebufferIntegerColorSampleCounts ); + } + + bool operator!=( PhysicalDeviceVulkan12Properties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkan12Properties; + void* pNext = {}; + 
VULKAN_HPP_NAMESPACE::DriverId driverID = VULKAN_HPP_NAMESPACE::DriverId::eAmdProprietary; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D driverName = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D driverInfo = {}; + VULKAN_HPP_NAMESPACE::ConformanceVersion conformanceVersion = {}; + VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence denormBehaviorIndependence = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly; + VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence roundingModeIndependence = VULKAN_HPP_NAMESPACE::ShaderFloatControlsIndependence::e32BitOnly; + VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSignedZeroInfNanPreserveFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormPreserveFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderDenormFlushToZeroFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTEFloat64 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat16 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat32 = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderRoundingModeRTZFloat64 = {}; + uint32_t maxUpdateAfterBindDescriptorsInAllPools = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderUniformBufferArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderSampledImageArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageBufferArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderStorageImageArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 shaderInputAttachmentArrayNonUniformIndexingNative = {}; + VULKAN_HPP_NAMESPACE::Bool32 robustBufferAccessUpdateAfterBind = {}; + VULKAN_HPP_NAMESPACE::Bool32 quadDivergentImplicitLod = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages = {}; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments = {}; + uint32_t maxPerStageUpdateAfterBindResources = {}; + uint32_t maxDescriptorSetUpdateAfterBindSamplers = {}; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers = {}; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = {}; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers = {}; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = {}; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages = {}; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages = {}; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments = {}; + VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedDepthResolveModes = {}; + VULKAN_HPP_NAMESPACE::ResolveModeFlags supportedStencilResolveModes = {}; + VULKAN_HPP_NAMESPACE::Bool32 independentResolveNone = {}; + VULKAN_HPP_NAMESPACE::Bool32 independentResolve = {}; + VULKAN_HPP_NAMESPACE::Bool32 filterMinmaxSingleComponentFormats = {}; + VULKAN_HPP_NAMESPACE::Bool32 
filterMinmaxImageComponentMapping = {}; + uint64_t maxTimelineSemaphoreValueDifference = {}; + VULKAN_HPP_NAMESPACE::SampleCountFlags framebufferIntegerColorSampleCounts = {}; + + }; + static_assert( sizeof( PhysicalDeviceVulkan12Properties ) == sizeof( VkPhysicalDeviceVulkan12Properties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkan12Properties; + }; + + struct PhysicalDeviceVulkanMemoryModelFeatures + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceVulkanMemoryModelFeatures; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkanMemoryModelFeatures(VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModel_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope_ = {}, VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains_ = {}) VULKAN_HPP_NOEXCEPT + : vulkanMemoryModel( vulkanMemoryModel_ ), vulkanMemoryModelDeviceScope( vulkanMemoryModelDeviceScope_ ), vulkanMemoryModelAvailabilityVisibilityChains( vulkanMemoryModelAvailabilityVisibilityChains_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceVulkanMemoryModelFeatures( PhysicalDeviceVulkanMemoryModelFeatures const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceVulkanMemoryModelFeatures( VkPhysicalDeviceVulkanMemoryModelFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceVulkanMemoryModelFeatures & operator=( VkPhysicalDeviceVulkanMemoryModelFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceVulkanMemoryModelFeatures & operator=( PhysicalDeviceVulkanMemoryModelFeatures const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceVulkanMemoryModelFeatures ) ); + return *this; + } + + PhysicalDeviceVulkanMemoryModelFeatures & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceVulkanMemoryModelFeatures & setVulkanMemoryModel( VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModel_ ) VULKAN_HPP_NOEXCEPT + { + vulkanMemoryModel = vulkanMemoryModel_; + return *this; + } + + PhysicalDeviceVulkanMemoryModelFeatures & setVulkanMemoryModelDeviceScope( VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope_ ) VULKAN_HPP_NOEXCEPT + { + vulkanMemoryModelDeviceScope = vulkanMemoryModelDeviceScope_; + return *this; + } + + PhysicalDeviceVulkanMemoryModelFeatures & setVulkanMemoryModelAvailabilityVisibilityChains( VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains_ ) VULKAN_HPP_NOEXCEPT + { + vulkanMemoryModelAvailabilityVisibilityChains = vulkanMemoryModelAvailabilityVisibilityChains_; + return *this; + } + + + operator VkPhysicalDeviceVulkanMemoryModelFeatures const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceVulkanMemoryModelFeatures &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceVulkanMemoryModelFeatures const& ) const = default; +#else + bool operator==( PhysicalDeviceVulkanMemoryModelFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + 
&& ( vulkanMemoryModel == rhs.vulkanMemoryModel ) + && ( vulkanMemoryModelDeviceScope == rhs.vulkanMemoryModelDeviceScope ) + && ( vulkanMemoryModelAvailabilityVisibilityChains == rhs.vulkanMemoryModelAvailabilityVisibilityChains ); + } + + bool operator!=( PhysicalDeviceVulkanMemoryModelFeatures const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceVulkanMemoryModelFeatures; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModel = {}; + VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelDeviceScope = {}; + VULKAN_HPP_NAMESPACE::Bool32 vulkanMemoryModelAvailabilityVisibilityChains = {}; + + }; + static_assert( sizeof( PhysicalDeviceVulkanMemoryModelFeatures ) == sizeof( VkPhysicalDeviceVulkanMemoryModelFeatures ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceVulkanMemoryModelFeatures; + }; + using PhysicalDeviceVulkanMemoryModelFeaturesKHR = PhysicalDeviceVulkanMemoryModelFeatures; + + struct PhysicalDeviceYcbcrImageArraysFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePhysicalDeviceYcbcrImageArraysFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PhysicalDeviceYcbcrImageArraysFeaturesEXT(VULKAN_HPP_NAMESPACE::Bool32 ycbcrImageArrays_ = {}) VULKAN_HPP_NOEXCEPT + : ycbcrImageArrays( ycbcrImageArrays_ ) + {} + + VULKAN_HPP_CONSTEXPR PhysicalDeviceYcbcrImageArraysFeaturesEXT( PhysicalDeviceYcbcrImageArraysFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PhysicalDeviceYcbcrImageArraysFeaturesEXT( VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PhysicalDeviceYcbcrImageArraysFeaturesEXT & operator=( VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PhysicalDeviceYcbcrImageArraysFeaturesEXT & operator=( PhysicalDeviceYcbcrImageArraysFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PhysicalDeviceYcbcrImageArraysFeaturesEXT ) ); + return *this; + } + + PhysicalDeviceYcbcrImageArraysFeaturesEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PhysicalDeviceYcbcrImageArraysFeaturesEXT & setYcbcrImageArrays( VULKAN_HPP_NAMESPACE::Bool32 ycbcrImageArrays_ ) VULKAN_HPP_NOEXCEPT + { + ycbcrImageArrays = ycbcrImageArrays_; + return *this; + } + + + operator VkPhysicalDeviceYcbcrImageArraysFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPhysicalDeviceYcbcrImageArraysFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PhysicalDeviceYcbcrImageArraysFeaturesEXT const& ) const = default; +#else + bool operator==( PhysicalDeviceYcbcrImageArraysFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( ycbcrImageArrays == rhs.ycbcrImageArrays ); + } + + bool operator!=( PhysicalDeviceYcbcrImageArraysFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return 
!operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePhysicalDeviceYcbcrImageArraysFeaturesEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 ycbcrImageArrays = {}; + + }; + static_assert( sizeof( PhysicalDeviceYcbcrImageArraysFeaturesEXT ) == sizeof( VkPhysicalDeviceYcbcrImageArraysFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PhysicalDeviceYcbcrImageArraysFeaturesEXT; + }; + + struct PipelineColorBlendAdvancedStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineColorBlendAdvancedStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineColorBlendAdvancedStateCreateInfoEXT(VULKAN_HPP_NAMESPACE::Bool32 srcPremultiplied_ = {}, VULKAN_HPP_NAMESPACE::Bool32 dstPremultiplied_ = {}, VULKAN_HPP_NAMESPACE::BlendOverlapEXT blendOverlap_ = VULKAN_HPP_NAMESPACE::BlendOverlapEXT::eUncorrelated) VULKAN_HPP_NOEXCEPT + : srcPremultiplied( srcPremultiplied_ ), dstPremultiplied( dstPremultiplied_ ), blendOverlap( blendOverlap_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineColorBlendAdvancedStateCreateInfoEXT( PipelineColorBlendAdvancedStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineColorBlendAdvancedStateCreateInfoEXT( VkPipelineColorBlendAdvancedStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineColorBlendAdvancedStateCreateInfoEXT & operator=( VkPipelineColorBlendAdvancedStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT & operator=( PipelineColorBlendAdvancedStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineColorBlendAdvancedStateCreateInfoEXT ) ); + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT & setSrcPremultiplied( VULKAN_HPP_NAMESPACE::Bool32 srcPremultiplied_ ) VULKAN_HPP_NOEXCEPT + { + srcPremultiplied = srcPremultiplied_; + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT & setDstPremultiplied( VULKAN_HPP_NAMESPACE::Bool32 dstPremultiplied_ ) VULKAN_HPP_NOEXCEPT + { + dstPremultiplied = dstPremultiplied_; + return *this; + } + + PipelineColorBlendAdvancedStateCreateInfoEXT & setBlendOverlap( VULKAN_HPP_NAMESPACE::BlendOverlapEXT blendOverlap_ ) VULKAN_HPP_NOEXCEPT + { + blendOverlap = blendOverlap_; + return *this; + } + + + operator VkPipelineColorBlendAdvancedStateCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineColorBlendAdvancedStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineColorBlendAdvancedStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineColorBlendAdvancedStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( srcPremultiplied == rhs.srcPremultiplied ) + && ( 
dstPremultiplied == rhs.dstPremultiplied ) + && ( blendOverlap == rhs.blendOverlap ); + } + + bool operator!=( PipelineColorBlendAdvancedStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineColorBlendAdvancedStateCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 srcPremultiplied = {}; + VULKAN_HPP_NAMESPACE::Bool32 dstPremultiplied = {}; + VULKAN_HPP_NAMESPACE::BlendOverlapEXT blendOverlap = VULKAN_HPP_NAMESPACE::BlendOverlapEXT::eUncorrelated; + + }; + static_assert( sizeof( PipelineColorBlendAdvancedStateCreateInfoEXT ) == sizeof( VkPipelineColorBlendAdvancedStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineColorBlendAdvancedStateCreateInfoEXT; + }; + + struct PipelineCompilerControlCreateInfoAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCompilerControlCreateInfoAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineCompilerControlCreateInfoAMD(VULKAN_HPP_NAMESPACE::PipelineCompilerControlFlagsAMD compilerControlFlags_ = {}) VULKAN_HPP_NOEXCEPT + : compilerControlFlags( compilerControlFlags_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineCompilerControlCreateInfoAMD( PipelineCompilerControlCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineCompilerControlCreateInfoAMD( VkPipelineCompilerControlCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineCompilerControlCreateInfoAMD & operator=( VkPipelineCompilerControlCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineCompilerControlCreateInfoAMD & operator=( PipelineCompilerControlCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineCompilerControlCreateInfoAMD ) ); + return *this; + } + + PipelineCompilerControlCreateInfoAMD & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineCompilerControlCreateInfoAMD & setCompilerControlFlags( VULKAN_HPP_NAMESPACE::PipelineCompilerControlFlagsAMD compilerControlFlags_ ) VULKAN_HPP_NOEXCEPT + { + compilerControlFlags = compilerControlFlags_; + return *this; + } + + + operator VkPipelineCompilerControlCreateInfoAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineCompilerControlCreateInfoAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineCompilerControlCreateInfoAMD const& ) const = default; +#else + bool operator==( PipelineCompilerControlCreateInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( compilerControlFlags == rhs.compilerControlFlags ); + } + + bool operator!=( PipelineCompilerControlCreateInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCompilerControlCreateInfoAMD; + const void* pNext = {}; + 
VULKAN_HPP_NAMESPACE::PipelineCompilerControlFlagsAMD compilerControlFlags = {}; + + }; + static_assert( sizeof( PipelineCompilerControlCreateInfoAMD ) == sizeof( VkPipelineCompilerControlCreateInfoAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineCompilerControlCreateInfoAMD; + }; + + struct PipelineCoverageModulationStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCoverageModulationStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineCoverageModulationStateCreateInfoNV(VULKAN_HPP_NAMESPACE::PipelineCoverageModulationStateCreateFlagsNV flags_ = {}, VULKAN_HPP_NAMESPACE::CoverageModulationModeNV coverageModulationMode_ = VULKAN_HPP_NAMESPACE::CoverageModulationModeNV::eNone, VULKAN_HPP_NAMESPACE::Bool32 coverageModulationTableEnable_ = {}, uint32_t coverageModulationTableCount_ = {}, const float* pCoverageModulationTable_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), coverageModulationMode( coverageModulationMode_ ), coverageModulationTableEnable( coverageModulationTableEnable_ ), coverageModulationTableCount( coverageModulationTableCount_ ), pCoverageModulationTable( pCoverageModulationTable_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineCoverageModulationStateCreateInfoNV( PipelineCoverageModulationStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineCoverageModulationStateCreateInfoNV( VkPipelineCoverageModulationStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineCoverageModulationStateCreateInfoNV( VULKAN_HPP_NAMESPACE::PipelineCoverageModulationStateCreateFlagsNV flags_, VULKAN_HPP_NAMESPACE::CoverageModulationModeNV coverageModulationMode_, VULKAN_HPP_NAMESPACE::Bool32 coverageModulationTableEnable_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & coverageModulationTable_ ) + : flags( flags_ ), coverageModulationMode( coverageModulationMode_ ), coverageModulationTableEnable( coverageModulationTableEnable_ ), coverageModulationTableCount( static_cast( coverageModulationTable_.size() ) ), pCoverageModulationTable( coverageModulationTable_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineCoverageModulationStateCreateInfoNV & operator=( VkPipelineCoverageModulationStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & operator=( PipelineCoverageModulationStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineCoverageModulationStateCreateInfoNV ) ); + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::PipelineCoverageModulationStateCreateFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & setCoverageModulationMode( VULKAN_HPP_NAMESPACE::CoverageModulationModeNV coverageModulationMode_ ) VULKAN_HPP_NOEXCEPT + { + coverageModulationMode = coverageModulationMode_; + 
return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & setCoverageModulationTableEnable( VULKAN_HPP_NAMESPACE::Bool32 coverageModulationTableEnable_ ) VULKAN_HPP_NOEXCEPT + { + coverageModulationTableEnable = coverageModulationTableEnable_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & setCoverageModulationTableCount( uint32_t coverageModulationTableCount_ ) VULKAN_HPP_NOEXCEPT + { + coverageModulationTableCount = coverageModulationTableCount_; + return *this; + } + + PipelineCoverageModulationStateCreateInfoNV & setPCoverageModulationTable( const float* pCoverageModulationTable_ ) VULKAN_HPP_NOEXCEPT + { + pCoverageModulationTable = pCoverageModulationTable_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineCoverageModulationStateCreateInfoNV & setCoverageModulationTable( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & coverageModulationTable_ ) VULKAN_HPP_NOEXCEPT + { + coverageModulationTableCount = static_cast( coverageModulationTable_.size() ); + pCoverageModulationTable = coverageModulationTable_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineCoverageModulationStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineCoverageModulationStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineCoverageModulationStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineCoverageModulationStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( coverageModulationMode == rhs.coverageModulationMode ) + && ( coverageModulationTableEnable == rhs.coverageModulationTableEnable ) + && ( coverageModulationTableCount == rhs.coverageModulationTableCount ) + && ( pCoverageModulationTable == rhs.pCoverageModulationTable ); + } + + bool operator!=( PipelineCoverageModulationStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCoverageModulationStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCoverageModulationStateCreateFlagsNV flags = {}; + VULKAN_HPP_NAMESPACE::CoverageModulationModeNV coverageModulationMode = VULKAN_HPP_NAMESPACE::CoverageModulationModeNV::eNone; + VULKAN_HPP_NAMESPACE::Bool32 coverageModulationTableEnable = {}; + uint32_t coverageModulationTableCount = {}; + const float* pCoverageModulationTable = {}; + + }; + static_assert( sizeof( PipelineCoverageModulationStateCreateInfoNV ) == sizeof( VkPipelineCoverageModulationStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineCoverageModulationStateCreateInfoNV; + }; + + struct PipelineCoverageReductionStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCoverageReductionStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineCoverageReductionStateCreateInfoNV(VULKAN_HPP_NAMESPACE::PipelineCoverageReductionStateCreateFlagsNV flags_ = {}, VULKAN_HPP_NAMESPACE::CoverageReductionModeNV coverageReductionMode_ = VULKAN_HPP_NAMESPACE::CoverageReductionModeNV::eMerge) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), coverageReductionMode( coverageReductionMode_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineCoverageReductionStateCreateInfoNV( PipelineCoverageReductionStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineCoverageReductionStateCreateInfoNV( VkPipelineCoverageReductionStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineCoverageReductionStateCreateInfoNV & operator=( VkPipelineCoverageReductionStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineCoverageReductionStateCreateInfoNV & operator=( PipelineCoverageReductionStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineCoverageReductionStateCreateInfoNV ) ); + return *this; + } + + PipelineCoverageReductionStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineCoverageReductionStateCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::PipelineCoverageReductionStateCreateFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineCoverageReductionStateCreateInfoNV & setCoverageReductionMode( VULKAN_HPP_NAMESPACE::CoverageReductionModeNV coverageReductionMode_ ) VULKAN_HPP_NOEXCEPT + { + coverageReductionMode = coverageReductionMode_; + return *this; + } + + + operator VkPipelineCoverageReductionStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineCoverageReductionStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineCoverageReductionStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineCoverageReductionStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( coverageReductionMode == rhs.coverageReductionMode ); + } + + bool operator!=( PipelineCoverageReductionStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCoverageReductionStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCoverageReductionStateCreateFlagsNV flags = {}; + VULKAN_HPP_NAMESPACE::CoverageReductionModeNV coverageReductionMode = VULKAN_HPP_NAMESPACE::CoverageReductionModeNV::eMerge; + + }; + static_assert( sizeof( PipelineCoverageReductionStateCreateInfoNV ) == sizeof( VkPipelineCoverageReductionStateCreateInfoNV ), "struct and wrapper have different size!" 
);
+  static_assert( std::is_standard_layout<PipelineCoverageReductionStateCreateInfoNV>::value, "struct wrapper is not a standard layout!" );
+
+  template <>
+  struct CppType<StructureType, StructureType::ePipelineCoverageReductionStateCreateInfoNV>
+  {
+    using Type = PipelineCoverageReductionStateCreateInfoNV;
+  };
+
+  struct PipelineCoverageToColorStateCreateInfoNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCoverageToColorStateCreateInfoNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PipelineCoverageToColorStateCreateInfoNV(VULKAN_HPP_NAMESPACE::PipelineCoverageToColorStateCreateFlagsNV flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 coverageToColorEnable_ = {}, uint32_t coverageToColorLocation_ = {}) VULKAN_HPP_NOEXCEPT
+      : flags( flags_ ), coverageToColorEnable( coverageToColorEnable_ ), coverageToColorLocation( coverageToColorLocation_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PipelineCoverageToColorStateCreateInfoNV( PipelineCoverageToColorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PipelineCoverageToColorStateCreateInfoNV( VkPipelineCoverageToColorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PipelineCoverageToColorStateCreateInfoNV & operator=( VkPipelineCoverageToColorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PipelineCoverageToColorStateCreateInfoNV const *>( &rhs );
+      return *this;
+    }
+
+    PipelineCoverageToColorStateCreateInfoNV & operator=( PipelineCoverageToColorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PipelineCoverageToColorStateCreateInfoNV ) );
+      return *this;
+    }
+
+    PipelineCoverageToColorStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    PipelineCoverageToColorStateCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::PipelineCoverageToColorStateCreateFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT
+    {
+      flags = flags_;
+      return *this;
+    }
+
+    PipelineCoverageToColorStateCreateInfoNV & setCoverageToColorEnable( VULKAN_HPP_NAMESPACE::Bool32 coverageToColorEnable_ ) VULKAN_HPP_NOEXCEPT
+    {
+      coverageToColorEnable = coverageToColorEnable_;
+      return *this;
+    }
+
+    PipelineCoverageToColorStateCreateInfoNV & setCoverageToColorLocation( uint32_t coverageToColorLocation_ ) VULKAN_HPP_NOEXCEPT
+    {
+      coverageToColorLocation = coverageToColorLocation_;
+      return *this;
+    }
+
+
+    operator VkPipelineCoverageToColorStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>( this );
+    }
+
+    operator VkPipelineCoverageToColorStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPipelineCoverageToColorStateCreateInfoNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PipelineCoverageToColorStateCreateInfoNV const& ) const = default;
+#else
+    bool operator==( PipelineCoverageToColorStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( flags == rhs.flags )
+          && ( coverageToColorEnable == rhs.coverageToColorEnable )
+          && ( coverageToColorLocation == rhs.coverageToColorLocation );
+    }
+
+    bool operator!=( PipelineCoverageToColorStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCoverageToColorStateCreateInfoNV;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::PipelineCoverageToColorStateCreateFlagsNV flags = {};
+    VULKAN_HPP_NAMESPACE::Bool32 coverageToColorEnable = {};
+    uint32_t coverageToColorLocation = {};
+
+  };
+  static_assert( sizeof( PipelineCoverageToColorStateCreateInfoNV ) == sizeof( VkPipelineCoverageToColorStateCreateInfoNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PipelineCoverageToColorStateCreateInfoNV>::value, "struct wrapper is not a standard layout!" );
+
+  template <>
+  struct CppType<StructureType, StructureType::ePipelineCoverageToColorStateCreateInfoNV>
+  {
+    using Type = PipelineCoverageToColorStateCreateInfoNV;
+  };
+
+  struct PipelineCreationFeedbackEXT
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackEXT(VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlagsEXT flags_ = {}, uint64_t duration_ = {}) VULKAN_HPP_NOEXCEPT
+      : flags( flags_ ), duration( duration_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackEXT( PipelineCreationFeedbackEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PipelineCreationFeedbackEXT( VkPipelineCreationFeedbackEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PipelineCreationFeedbackEXT & operator=( VkPipelineCreationFeedbackEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT const *>( &rhs );
+      return *this;
+    }
+
+    PipelineCreationFeedbackEXT & operator=( PipelineCreationFeedbackEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PipelineCreationFeedbackEXT ) );
+      return *this;
+    }
+
+
+    operator VkPipelineCreationFeedbackEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPipelineCreationFeedbackEXT*>( this );
+    }
+
+    operator VkPipelineCreationFeedbackEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPipelineCreationFeedbackEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PipelineCreationFeedbackEXT const& ) const = default;
+#else
+    bool operator==( PipelineCreationFeedbackEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( flags == rhs.flags )
+          && ( duration == rhs.duration );
+    }
+
+    bool operator!=( PipelineCreationFeedbackEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackFlagsEXT flags = {};
+    uint64_t duration = {};
+
+  };
+  static_assert( sizeof( PipelineCreationFeedbackEXT ) == sizeof( VkPipelineCreationFeedbackEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PipelineCreationFeedbackEXT>::value, "struct wrapper is not a standard layout!"
); + + struct PipelineCreationFeedbackCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineCreationFeedbackCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackCreateInfoEXT(VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineCreationFeedback_ = {}, uint32_t pipelineStageCreationFeedbackCount_ = {}, VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks_ = {}) VULKAN_HPP_NOEXCEPT + : pPipelineCreationFeedback( pPipelineCreationFeedback_ ), pipelineStageCreationFeedbackCount( pipelineStageCreationFeedbackCount_ ), pPipelineStageCreationFeedbacks( pPipelineStageCreationFeedbacks_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineCreationFeedbackCreateInfoEXT( PipelineCreationFeedbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineCreationFeedbackCreateInfoEXT( VkPipelineCreationFeedbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineCreationFeedbackCreateInfoEXT( VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineCreationFeedback_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineStageCreationFeedbacks_ ) + : pPipelineCreationFeedback( pPipelineCreationFeedback_ ), pipelineStageCreationFeedbackCount( static_cast( pipelineStageCreationFeedbacks_.size() ) ), pPipelineStageCreationFeedbacks( pipelineStageCreationFeedbacks_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineCreationFeedbackCreateInfoEXT & operator=( VkPipelineCreationFeedbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineCreationFeedbackCreateInfoEXT & operator=( PipelineCreationFeedbackCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineCreationFeedbackCreateInfoEXT ) ); + return *this; + } + + PipelineCreationFeedbackCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineCreationFeedbackCreateInfoEXT & setPPipelineCreationFeedback( VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineCreationFeedback_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineCreationFeedback = pPipelineCreationFeedback_; + return *this; + } + + PipelineCreationFeedbackCreateInfoEXT & setPipelineStageCreationFeedbackCount( uint32_t pipelineStageCreationFeedbackCount_ ) VULKAN_HPP_NOEXCEPT + { + pipelineStageCreationFeedbackCount = pipelineStageCreationFeedbackCount_; + return *this; + } + + PipelineCreationFeedbackCreateInfoEXT & setPPipelineStageCreationFeedbacks( VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks_ ) VULKAN_HPP_NOEXCEPT + { + pPipelineStageCreationFeedbacks = pPipelineStageCreationFeedbacks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineCreationFeedbackCreateInfoEXT & setPipelineStageCreationFeedbacks( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & pipelineStageCreationFeedbacks_ ) VULKAN_HPP_NOEXCEPT + { + pipelineStageCreationFeedbackCount = static_cast( pipelineStageCreationFeedbacks_.size() ); + pPipelineStageCreationFeedbacks = pipelineStageCreationFeedbacks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator 
VkPipelineCreationFeedbackCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineCreationFeedbackCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineCreationFeedbackCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineCreationFeedbackCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( pPipelineCreationFeedback == rhs.pPipelineCreationFeedback ) + && ( pipelineStageCreationFeedbackCount == rhs.pipelineStageCreationFeedbackCount ) + && ( pPipelineStageCreationFeedbacks == rhs.pPipelineStageCreationFeedbacks ); + } + + bool operator!=( PipelineCreationFeedbackCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineCreationFeedbackCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineCreationFeedback = {}; + uint32_t pipelineStageCreationFeedbackCount = {}; + VULKAN_HPP_NAMESPACE::PipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks = {}; + + }; + static_assert( sizeof( PipelineCreationFeedbackCreateInfoEXT ) == sizeof( VkPipelineCreationFeedbackCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineCreationFeedbackCreateInfoEXT; + }; + + struct PipelineDiscardRectangleStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineDiscardRectangleStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineDiscardRectangleStateCreateInfoEXT(VULKAN_HPP_NAMESPACE::PipelineDiscardRectangleStateCreateFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::DiscardRectangleModeEXT discardRectangleMode_ = VULKAN_HPP_NAMESPACE::DiscardRectangleModeEXT::eInclusive, uint32_t discardRectangleCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), discardRectangleMode( discardRectangleMode_ ), discardRectangleCount( discardRectangleCount_ ), pDiscardRectangles( pDiscardRectangles_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineDiscardRectangleStateCreateInfoEXT( PipelineDiscardRectangleStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineDiscardRectangleStateCreateInfoEXT( VkPipelineDiscardRectangleStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineDiscardRectangleStateCreateInfoEXT( VULKAN_HPP_NAMESPACE::PipelineDiscardRectangleStateCreateFlagsEXT flags_, VULKAN_HPP_NAMESPACE::DiscardRectangleModeEXT discardRectangleMode_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & discardRectangles_ ) + : flags( flags_ ), discardRectangleMode( discardRectangleMode_ ), discardRectangleCount( static_cast( discardRectangles_.size() ) ), pDiscardRectangles( discardRectangles_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineDiscardRectangleStateCreateInfoEXT & operator=( VkPipelineDiscardRectangleStateCreateInfoEXT const & rhs ) 
VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT & operator=( PipelineDiscardRectangleStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineDiscardRectangleStateCreateInfoEXT ) ); + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::PipelineDiscardRectangleStateCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT & setDiscardRectangleMode( VULKAN_HPP_NAMESPACE::DiscardRectangleModeEXT discardRectangleMode_ ) VULKAN_HPP_NOEXCEPT + { + discardRectangleMode = discardRectangleMode_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT & setDiscardRectangleCount( uint32_t discardRectangleCount_ ) VULKAN_HPP_NOEXCEPT + { + discardRectangleCount = discardRectangleCount_; + return *this; + } + + PipelineDiscardRectangleStateCreateInfoEXT & setPDiscardRectangles( const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles_ ) VULKAN_HPP_NOEXCEPT + { + pDiscardRectangles = pDiscardRectangles_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineDiscardRectangleStateCreateInfoEXT & setDiscardRectangles( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & discardRectangles_ ) VULKAN_HPP_NOEXCEPT + { + discardRectangleCount = static_cast( discardRectangles_.size() ); + pDiscardRectangles = discardRectangles_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineDiscardRectangleStateCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineDiscardRectangleStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineDiscardRectangleStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineDiscardRectangleStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( discardRectangleMode == rhs.discardRectangleMode ) + && ( discardRectangleCount == rhs.discardRectangleCount ) + && ( pDiscardRectangles == rhs.pDiscardRectangles ); + } + + bool operator!=( PipelineDiscardRectangleStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineDiscardRectangleStateCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineDiscardRectangleStateCreateFlagsEXT flags = {}; + VULKAN_HPP_NAMESPACE::DiscardRectangleModeEXT discardRectangleMode = VULKAN_HPP_NAMESPACE::DiscardRectangleModeEXT::eInclusive; + uint32_t discardRectangleCount = {}; + const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles = {}; + + }; + static_assert( sizeof( PipelineDiscardRectangleStateCreateInfoEXT ) == sizeof( VkPipelineDiscardRectangleStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineDiscardRectangleStateCreateInfoEXT; + }; + + struct PipelineFragmentShadingRateEnumStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateEnumStateCreateInfoNV(VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV shadingRateType_ = VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV::eFragmentSize, VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate_ = VULKAN_HPP_NAMESPACE::FragmentShadingRateNV::e1InvocationPerPixel, std::array const& combinerOps_ = { { VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep, VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep } }) VULKAN_HPP_NOEXCEPT + : shadingRateType( shadingRateType_ ), shadingRate( shadingRate_ ), combinerOps( combinerOps_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateEnumStateCreateInfoNV( PipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineFragmentShadingRateEnumStateCreateInfoNV( VkPipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineFragmentShadingRateEnumStateCreateInfoNV & operator=( VkPipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & operator=( PipelineFragmentShadingRateEnumStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineFragmentShadingRateEnumStateCreateInfoNV ) ); + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setShadingRateType( VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV shadingRateType_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateType = shadingRateType_; + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setShadingRate( VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate_ ) VULKAN_HPP_NOEXCEPT + { + shadingRate = shadingRate_; + return *this; + } + + PipelineFragmentShadingRateEnumStateCreateInfoNV & setCombinerOps( std::array combinerOps_ ) VULKAN_HPP_NOEXCEPT + { + combinerOps = combinerOps_; + return *this; + } + + + operator VkPipelineFragmentShadingRateEnumStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineFragmentShadingRateEnumStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineFragmentShadingRateEnumStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineFragmentShadingRateEnumStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateType == rhs.shadingRateType ) + && ( shadingRate == rhs.shadingRate ) + && ( combinerOps == rhs.combinerOps ); + } + + bool operator!=( PipelineFragmentShadingRateEnumStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + 
const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineFragmentShadingRateEnumStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV shadingRateType = VULKAN_HPP_NAMESPACE::FragmentShadingRateTypeNV::eFragmentSize; + VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate = VULKAN_HPP_NAMESPACE::FragmentShadingRateNV::e1InvocationPerPixel; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D combinerOps = {}; + + }; + static_assert( sizeof( PipelineFragmentShadingRateEnumStateCreateInfoNV ) == sizeof( VkPipelineFragmentShadingRateEnumStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineFragmentShadingRateEnumStateCreateInfoNV; + }; + + struct PipelineFragmentShadingRateStateCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateStateCreateInfoKHR(VULKAN_HPP_NAMESPACE::Extent2D fragmentSize_ = {}, std::array const& combinerOps_ = { { VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep, VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR::eKeep } }) VULKAN_HPP_NOEXCEPT + : fragmentSize( fragmentSize_ ), combinerOps( combinerOps_ ) + {} + + VULKAN_HPP_CONSTEXPR_14 PipelineFragmentShadingRateStateCreateInfoKHR( PipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineFragmentShadingRateStateCreateInfoKHR( VkPipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineFragmentShadingRateStateCreateInfoKHR & operator=( VkPipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & operator=( PipelineFragmentShadingRateStateCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineFragmentShadingRateStateCreateInfoKHR ) ); + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & setFragmentSize( VULKAN_HPP_NAMESPACE::Extent2D const & fragmentSize_ ) VULKAN_HPP_NOEXCEPT + { + fragmentSize = fragmentSize_; + return *this; + } + + PipelineFragmentShadingRateStateCreateInfoKHR & setCombinerOps( std::array combinerOps_ ) VULKAN_HPP_NOEXCEPT + { + combinerOps = combinerOps_; + return *this; + } + + + operator VkPipelineFragmentShadingRateStateCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineFragmentShadingRateStateCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineFragmentShadingRateStateCreateInfoKHR const& ) const = default; +#else + bool operator==( PipelineFragmentShadingRateStateCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentSize == rhs.fragmentSize ) + && ( combinerOps == rhs.combinerOps ); 
+ } + + bool operator!=( PipelineFragmentShadingRateStateCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineFragmentShadingRateStateCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Extent2D fragmentSize = {}; + VULKAN_HPP_NAMESPACE::ArrayWrapper1D combinerOps = {}; + + }; + static_assert( sizeof( PipelineFragmentShadingRateStateCreateInfoKHR ) == sizeof( VkPipelineFragmentShadingRateStateCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineFragmentShadingRateStateCreateInfoKHR; + }; + + struct PipelineRasterizationConservativeStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationConservativeStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRasterizationConservativeStateCreateInfoEXT(VULKAN_HPP_NAMESPACE::PipelineRasterizationConservativeStateCreateFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT conservativeRasterizationMode_ = VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT::eDisabled, float extraPrimitiveOverestimationSize_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), conservativeRasterizationMode( conservativeRasterizationMode_ ), extraPrimitiveOverestimationSize( extraPrimitiveOverestimationSize_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRasterizationConservativeStateCreateInfoEXT( PipelineRasterizationConservativeStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRasterizationConservativeStateCreateInfoEXT( VkPipelineRasterizationConservativeStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRasterizationConservativeStateCreateInfoEXT & operator=( VkPipelineRasterizationConservativeStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT & operator=( PipelineRasterizationConservativeStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRasterizationConservativeStateCreateInfoEXT ) ); + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::PipelineRasterizationConservativeStateCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT & setConservativeRasterizationMode( VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT conservativeRasterizationMode_ ) VULKAN_HPP_NOEXCEPT + { + conservativeRasterizationMode = conservativeRasterizationMode_; + return *this; + } + + PipelineRasterizationConservativeStateCreateInfoEXT & setExtraPrimitiveOverestimationSize( float extraPrimitiveOverestimationSize_ ) VULKAN_HPP_NOEXCEPT + { + extraPrimitiveOverestimationSize = extraPrimitiveOverestimationSize_; + return *this; + } + + + operator VkPipelineRasterizationConservativeStateCreateInfoEXT const&() const 
VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRasterizationConservativeStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRasterizationConservativeStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineRasterizationConservativeStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( conservativeRasterizationMode == rhs.conservativeRasterizationMode ) + && ( extraPrimitiveOverestimationSize == rhs.extraPrimitiveOverestimationSize ); + } + + bool operator!=( PipelineRasterizationConservativeStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationConservativeStateCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineRasterizationConservativeStateCreateFlagsEXT flags = {}; + VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT conservativeRasterizationMode = VULKAN_HPP_NAMESPACE::ConservativeRasterizationModeEXT::eDisabled; + float extraPrimitiveOverestimationSize = {}; + + }; + static_assert( sizeof( PipelineRasterizationConservativeStateCreateInfoEXT ) == sizeof( VkPipelineRasterizationConservativeStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineRasterizationConservativeStateCreateInfoEXT; + }; + + struct PipelineRasterizationDepthClipStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationDepthClipStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRasterizationDepthClipStateCreateInfoEXT(VULKAN_HPP_NAMESPACE::PipelineRasterizationDepthClipStateCreateFlagsEXT flags_ = {}, VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), depthClipEnable( depthClipEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRasterizationDepthClipStateCreateInfoEXT( PipelineRasterizationDepthClipStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRasterizationDepthClipStateCreateInfoEXT( VkPipelineRasterizationDepthClipStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRasterizationDepthClipStateCreateInfoEXT & operator=( VkPipelineRasterizationDepthClipStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRasterizationDepthClipStateCreateInfoEXT & operator=( PipelineRasterizationDepthClipStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRasterizationDepthClipStateCreateInfoEXT ) ); + return *this; + } + + PipelineRasterizationDepthClipStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationDepthClipStateCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::PipelineRasterizationDepthClipStateCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + 
+ PipelineRasterizationDepthClipStateCreateInfoEXT & setDepthClipEnable( VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable_ ) VULKAN_HPP_NOEXCEPT + { + depthClipEnable = depthClipEnable_; + return *this; + } + + + operator VkPipelineRasterizationDepthClipStateCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRasterizationDepthClipStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRasterizationDepthClipStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineRasterizationDepthClipStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( depthClipEnable == rhs.depthClipEnable ); + } + + bool operator!=( PipelineRasterizationDepthClipStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationDepthClipStateCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineRasterizationDepthClipStateCreateFlagsEXT flags = {}; + VULKAN_HPP_NAMESPACE::Bool32 depthClipEnable = {}; + + }; + static_assert( sizeof( PipelineRasterizationDepthClipStateCreateInfoEXT ) == sizeof( VkPipelineRasterizationDepthClipStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineRasterizationDepthClipStateCreateInfoEXT; + }; + + struct PipelineRasterizationLineStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationLineStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRasterizationLineStateCreateInfoEXT(VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT lineRasterizationMode_ = VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT::eDefault, VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable_ = {}, uint32_t lineStippleFactor_ = {}, uint16_t lineStipplePattern_ = {}) VULKAN_HPP_NOEXCEPT + : lineRasterizationMode( lineRasterizationMode_ ), stippledLineEnable( stippledLineEnable_ ), lineStippleFactor( lineStippleFactor_ ), lineStipplePattern( lineStipplePattern_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRasterizationLineStateCreateInfoEXT( PipelineRasterizationLineStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRasterizationLineStateCreateInfoEXT( VkPipelineRasterizationLineStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRasterizationLineStateCreateInfoEXT & operator=( VkPipelineRasterizationLineStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRasterizationLineStateCreateInfoEXT & operator=( PipelineRasterizationLineStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRasterizationLineStateCreateInfoEXT ) ); + return *this; + } + + PipelineRasterizationLineStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + 
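+    // Illustrative sketch (assumptions: default "vk" namespace and a device with
+    // VK_EXT_line_rasterization enabled): a stippled-line configuration can be
+    // built with the fluent setters defined below, e.g.
+    //   vk::PipelineRasterizationLineStateCreateInfoEXT lineState;
+    //   lineState.setLineRasterizationMode( vk::LineRasterizationModeEXT::eBresenham )
+    //            .setStippledLineEnable( VK_TRUE )
+    //            .setLineStippleFactor( 2 )
+    //            .setLineStipplePattern( 0x0F0F );
+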
PipelineRasterizationLineStateCreateInfoEXT & setLineRasterizationMode( VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT lineRasterizationMode_ ) VULKAN_HPP_NOEXCEPT + { + lineRasterizationMode = lineRasterizationMode_; + return *this; + } + + PipelineRasterizationLineStateCreateInfoEXT & setStippledLineEnable( VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable_ ) VULKAN_HPP_NOEXCEPT + { + stippledLineEnable = stippledLineEnable_; + return *this; + } + + PipelineRasterizationLineStateCreateInfoEXT & setLineStippleFactor( uint32_t lineStippleFactor_ ) VULKAN_HPP_NOEXCEPT + { + lineStippleFactor = lineStippleFactor_; + return *this; + } + + PipelineRasterizationLineStateCreateInfoEXT & setLineStipplePattern( uint16_t lineStipplePattern_ ) VULKAN_HPP_NOEXCEPT + { + lineStipplePattern = lineStipplePattern_; + return *this; + } + + + operator VkPipelineRasterizationLineStateCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRasterizationLineStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRasterizationLineStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineRasterizationLineStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( lineRasterizationMode == rhs.lineRasterizationMode ) + && ( stippledLineEnable == rhs.stippledLineEnable ) + && ( lineStippleFactor == rhs.lineStippleFactor ) + && ( lineStipplePattern == rhs.lineStipplePattern ); + } + + bool operator!=( PipelineRasterizationLineStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationLineStateCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT lineRasterizationMode = VULKAN_HPP_NAMESPACE::LineRasterizationModeEXT::eDefault; + VULKAN_HPP_NAMESPACE::Bool32 stippledLineEnable = {}; + uint32_t lineStippleFactor = {}; + uint16_t lineStipplePattern = {}; + + }; + static_assert( sizeof( PipelineRasterizationLineStateCreateInfoEXT ) == sizeof( VkPipelineRasterizationLineStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineRasterizationLineStateCreateInfoEXT; + }; + + struct PipelineRasterizationStateRasterizationOrderAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationStateRasterizationOrderAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRasterizationStateRasterizationOrderAMD(VULKAN_HPP_NAMESPACE::RasterizationOrderAMD rasterizationOrder_ = VULKAN_HPP_NAMESPACE::RasterizationOrderAMD::eStrict) VULKAN_HPP_NOEXCEPT + : rasterizationOrder( rasterizationOrder_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRasterizationStateRasterizationOrderAMD( PipelineRasterizationStateRasterizationOrderAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRasterizationStateRasterizationOrderAMD( VkPipelineRasterizationStateRasterizationOrderAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRasterizationStateRasterizationOrderAMD & operator=( VkPipelineRasterizationStateRasterizationOrderAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRasterizationStateRasterizationOrderAMD & operator=( PipelineRasterizationStateRasterizationOrderAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRasterizationStateRasterizationOrderAMD ) ); + return *this; + } + + PipelineRasterizationStateRasterizationOrderAMD & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationStateRasterizationOrderAMD & setRasterizationOrder( VULKAN_HPP_NAMESPACE::RasterizationOrderAMD rasterizationOrder_ ) VULKAN_HPP_NOEXCEPT + { + rasterizationOrder = rasterizationOrder_; + return *this; + } + + + operator VkPipelineRasterizationStateRasterizationOrderAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRasterizationStateRasterizationOrderAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRasterizationStateRasterizationOrderAMD const& ) const = default; +#else + bool operator==( PipelineRasterizationStateRasterizationOrderAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( rasterizationOrder == rhs.rasterizationOrder ); + } + + bool operator!=( PipelineRasterizationStateRasterizationOrderAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationStateRasterizationOrderAMD; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::RasterizationOrderAMD rasterizationOrder = VULKAN_HPP_NAMESPACE::RasterizationOrderAMD::eStrict; + + }; + static_assert( sizeof( PipelineRasterizationStateRasterizationOrderAMD ) == sizeof( VkPipelineRasterizationStateRasterizationOrderAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineRasterizationStateRasterizationOrderAMD; + }; + + struct PipelineRasterizationStateStreamCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRasterizationStateStreamCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRasterizationStateStreamCreateInfoEXT(VULKAN_HPP_NAMESPACE::PipelineRasterizationStateStreamCreateFlagsEXT flags_ = {}, uint32_t rasterizationStream_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), rasterizationStream( rasterizationStream_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRasterizationStateStreamCreateInfoEXT( PipelineRasterizationStateStreamCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRasterizationStateStreamCreateInfoEXT( VkPipelineRasterizationStateStreamCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRasterizationStateStreamCreateInfoEXT & operator=( VkPipelineRasterizationStateStreamCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRasterizationStateStreamCreateInfoEXT & operator=( PipelineRasterizationStateStreamCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRasterizationStateStreamCreateInfoEXT ) ); + return *this; + } + + PipelineRasterizationStateStreamCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineRasterizationStateStreamCreateInfoEXT & setFlags( VULKAN_HPP_NAMESPACE::PipelineRasterizationStateStreamCreateFlagsEXT flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineRasterizationStateStreamCreateInfoEXT & setRasterizationStream( uint32_t rasterizationStream_ ) VULKAN_HPP_NOEXCEPT + { + rasterizationStream = rasterizationStream_; + return *this; + } + + + operator VkPipelineRasterizationStateStreamCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRasterizationStateStreamCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRasterizationStateStreamCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineRasterizationStateStreamCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( rasterizationStream == rhs.rasterizationStream ); + } + + bool operator!=( PipelineRasterizationStateStreamCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRasterizationStateStreamCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineRasterizationStateStreamCreateFlagsEXT flags = {}; + uint32_t rasterizationStream = {}; + + }; + static_assert( sizeof( PipelineRasterizationStateStreamCreateInfoEXT ) == sizeof( VkPipelineRasterizationStateStreamCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineRasterizationStateStreamCreateInfoEXT; + }; + + struct PipelineRepresentativeFragmentTestStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineRepresentativeFragmentTestStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineRepresentativeFragmentTestStateCreateInfoNV(VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTestEnable_ = {}) VULKAN_HPP_NOEXCEPT + : representativeFragmentTestEnable( representativeFragmentTestEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineRepresentativeFragmentTestStateCreateInfoNV( PipelineRepresentativeFragmentTestStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineRepresentativeFragmentTestStateCreateInfoNV( VkPipelineRepresentativeFragmentTestStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineRepresentativeFragmentTestStateCreateInfoNV & operator=( VkPipelineRepresentativeFragmentTestStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineRepresentativeFragmentTestStateCreateInfoNV & operator=( PipelineRepresentativeFragmentTestStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineRepresentativeFragmentTestStateCreateInfoNV ) ); + return *this; + } + + PipelineRepresentativeFragmentTestStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineRepresentativeFragmentTestStateCreateInfoNV & setRepresentativeFragmentTestEnable( VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTestEnable_ ) VULKAN_HPP_NOEXCEPT + { + representativeFragmentTestEnable = representativeFragmentTestEnable_; + return *this; + } + + + operator VkPipelineRepresentativeFragmentTestStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineRepresentativeFragmentTestStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineRepresentativeFragmentTestStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineRepresentativeFragmentTestStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( representativeFragmentTestEnable == rhs.representativeFragmentTestEnable ); + } + + bool operator!=( PipelineRepresentativeFragmentTestStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineRepresentativeFragmentTestStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 representativeFragmentTestEnable = {}; + + }; + static_assert( sizeof( PipelineRepresentativeFragmentTestStateCreateInfoNV ) == sizeof( VkPipelineRepresentativeFragmentTestStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineRepresentativeFragmentTestStateCreateInfoNV; + }; + + struct PipelineSampleLocationsStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineSampleLocationsStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineSampleLocationsStateCreateInfoEXT(VULKAN_HPP_NAMESPACE::Bool32 sampleLocationsEnable_ = {}, VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo_ = {}) VULKAN_HPP_NOEXCEPT + : sampleLocationsEnable( sampleLocationsEnable_ ), sampleLocationsInfo( sampleLocationsInfo_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineSampleLocationsStateCreateInfoEXT( PipelineSampleLocationsStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineSampleLocationsStateCreateInfoEXT( VkPipelineSampleLocationsStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineSampleLocationsStateCreateInfoEXT & operator=( VkPipelineSampleLocationsStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineSampleLocationsStateCreateInfoEXT & operator=( PipelineSampleLocationsStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineSampleLocationsStateCreateInfoEXT ) ); + return *this; + } + + PipelineSampleLocationsStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineSampleLocationsStateCreateInfoEXT & setSampleLocationsEnable( VULKAN_HPP_NAMESPACE::Bool32 sampleLocationsEnable_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsEnable = sampleLocationsEnable_; + return *this; + } + + PipelineSampleLocationsStateCreateInfoEXT & setSampleLocationsInfo( VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT const & sampleLocationsInfo_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsInfo = sampleLocationsInfo_; + return *this; + } + + + operator VkPipelineSampleLocationsStateCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineSampleLocationsStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineSampleLocationsStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineSampleLocationsStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleLocationsEnable == rhs.sampleLocationsEnable ) + && ( sampleLocationsInfo == rhs.sampleLocationsInfo ); + } + + bool operator!=( PipelineSampleLocationsStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineSampleLocationsStateCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 sampleLocationsEnable = {}; + VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo = {}; + + }; + static_assert( sizeof( PipelineSampleLocationsStateCreateInfoEXT ) == sizeof( VkPipelineSampleLocationsStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
+    );
+
+  template <>
+  struct CppType<StructureType, StructureType::ePipelineSampleLocationsStateCreateInfoEXT>
+  {
+    using Type = PipelineSampleLocationsStateCreateInfoEXT;
+  };
+
+  struct PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineShaderStageRequiredSubgroupSizeCreateInfoEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(uint32_t requiredSubgroupSize_ = {}) VULKAN_HPP_NOEXCEPT
+    : requiredSubgroupSize( requiredSubgroupSize_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT( VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT & operator=( VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const *>( &rhs );
+      return *this;
+    }
+
+    PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT & operator=( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT ) );
+      return *this;
+    }
+
+
+    operator VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>( this );
+    }
+
+    operator VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const& ) const = default;
+#else
+    bool operator==( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( requiredSubgroupSize == rhs.requiredSubgroupSize );
+    }
+
+    bool operator!=( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineShaderStageRequiredSubgroupSizeCreateInfoEXT;
+    void* pNext = {};
+    uint32_t requiredSubgroupSize = {};
+
+  };
+  static_assert( sizeof( PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT ) == sizeof( VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + }; + + struct PipelineTessellationDomainOriginStateCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineTessellationDomainOriginStateCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineTessellationDomainOriginStateCreateInfo(VULKAN_HPP_NAMESPACE::TessellationDomainOrigin domainOrigin_ = VULKAN_HPP_NAMESPACE::TessellationDomainOrigin::eUpperLeft) VULKAN_HPP_NOEXCEPT + : domainOrigin( domainOrigin_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineTessellationDomainOriginStateCreateInfo( PipelineTessellationDomainOriginStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineTessellationDomainOriginStateCreateInfo( VkPipelineTessellationDomainOriginStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineTessellationDomainOriginStateCreateInfo & operator=( VkPipelineTessellationDomainOriginStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineTessellationDomainOriginStateCreateInfo & operator=( PipelineTessellationDomainOriginStateCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineTessellationDomainOriginStateCreateInfo ) ); + return *this; + } + + PipelineTessellationDomainOriginStateCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineTessellationDomainOriginStateCreateInfo & setDomainOrigin( VULKAN_HPP_NAMESPACE::TessellationDomainOrigin domainOrigin_ ) VULKAN_HPP_NOEXCEPT + { + domainOrigin = domainOrigin_; + return *this; + } + + + operator VkPipelineTessellationDomainOriginStateCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineTessellationDomainOriginStateCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineTessellationDomainOriginStateCreateInfo const& ) const = default; +#else + bool operator==( PipelineTessellationDomainOriginStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( domainOrigin == rhs.domainOrigin ); + } + + bool operator!=( PipelineTessellationDomainOriginStateCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineTessellationDomainOriginStateCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::TessellationDomainOrigin domainOrigin = VULKAN_HPP_NAMESPACE::TessellationDomainOrigin::eUpperLeft; + + }; + static_assert( sizeof( PipelineTessellationDomainOriginStateCreateInfo ) == sizeof( VkPipelineTessellationDomainOriginStateCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
+    );
+
+  template <>
+  struct CppType<StructureType, StructureType::ePipelineTessellationDomainOriginStateCreateInfo>
+  {
+    using Type = PipelineTessellationDomainOriginStateCreateInfo;
+  };
+  using PipelineTessellationDomainOriginStateCreateInfoKHR = PipelineTessellationDomainOriginStateCreateInfo;
+
+  struct VertexInputBindingDivisorDescriptionEXT
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescriptionEXT(uint32_t binding_ = {}, uint32_t divisor_ = {}) VULKAN_HPP_NOEXCEPT
+    : binding( binding_ ), divisor( divisor_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR VertexInputBindingDivisorDescriptionEXT( VertexInputBindingDivisorDescriptionEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    VertexInputBindingDivisorDescriptionEXT( VkVertexInputBindingDivisorDescriptionEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    VertexInputBindingDivisorDescriptionEXT & operator=( VkVertexInputBindingDivisorDescriptionEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionEXT const *>( &rhs );
+      return *this;
+    }
+
+    VertexInputBindingDivisorDescriptionEXT & operator=( VertexInputBindingDivisorDescriptionEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void*>( this ), &rhs, sizeof( VertexInputBindingDivisorDescriptionEXT ) );
+      return *this;
+    }
+
+    VertexInputBindingDivisorDescriptionEXT & setBinding( uint32_t binding_ ) VULKAN_HPP_NOEXCEPT
+    {
+      binding = binding_;
+      return *this;
+    }
+
+    VertexInputBindingDivisorDescriptionEXT & setDivisor( uint32_t divisor_ ) VULKAN_HPP_NOEXCEPT
+    {
+      divisor = divisor_;
+      return *this;
+    }
+
+
+    operator VkVertexInputBindingDivisorDescriptionEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkVertexInputBindingDivisorDescriptionEXT*>( this );
+    }
+
+    operator VkVertexInputBindingDivisorDescriptionEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkVertexInputBindingDivisorDescriptionEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( VertexInputBindingDivisorDescriptionEXT const& ) const = default;
+#else
+    bool operator==( VertexInputBindingDivisorDescriptionEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( binding == rhs.binding )
+          && ( divisor == rhs.divisor );
+    }
+
+    bool operator!=( VertexInputBindingDivisorDescriptionEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t binding = {};
+    uint32_t divisor = {};
+
+  };
+  static_assert( sizeof( VertexInputBindingDivisorDescriptionEXT ) == sizeof( VkVertexInputBindingDivisorDescriptionEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<VertexInputBindingDivisorDescriptionEXT>::value, "struct wrapper is not a standard layout!"
); + + struct PipelineVertexInputDivisorStateCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineVertexInputDivisorStateCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineVertexInputDivisorStateCreateInfoEXT(uint32_t vertexBindingDivisorCount_ = {}, const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors_ = {}) VULKAN_HPP_NOEXCEPT + : vertexBindingDivisorCount( vertexBindingDivisorCount_ ), pVertexBindingDivisors( pVertexBindingDivisors_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineVertexInputDivisorStateCreateInfoEXT( PipelineVertexInputDivisorStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineVertexInputDivisorStateCreateInfoEXT( VkPipelineVertexInputDivisorStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineVertexInputDivisorStateCreateInfoEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDivisors_ ) + : vertexBindingDivisorCount( static_cast( vertexBindingDivisors_.size() ) ), pVertexBindingDivisors( vertexBindingDivisors_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineVertexInputDivisorStateCreateInfoEXT & operator=( VkPipelineVertexInputDivisorStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineVertexInputDivisorStateCreateInfoEXT & operator=( PipelineVertexInputDivisorStateCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineVertexInputDivisorStateCreateInfoEXT ) ); + return *this; + } + + PipelineVertexInputDivisorStateCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineVertexInputDivisorStateCreateInfoEXT & setVertexBindingDivisorCount( uint32_t vertexBindingDivisorCount_ ) VULKAN_HPP_NOEXCEPT + { + vertexBindingDivisorCount = vertexBindingDivisorCount_; + return *this; + } + + PipelineVertexInputDivisorStateCreateInfoEXT & setPVertexBindingDivisors( const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors_ ) VULKAN_HPP_NOEXCEPT + { + pVertexBindingDivisors = pVertexBindingDivisors_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineVertexInputDivisorStateCreateInfoEXT & setVertexBindingDivisors( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & vertexBindingDivisors_ ) VULKAN_HPP_NOEXCEPT + { + vertexBindingDivisorCount = static_cast( vertexBindingDivisors_.size() ); + pVertexBindingDivisors = vertexBindingDivisors_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineVertexInputDivisorStateCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineVertexInputDivisorStateCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineVertexInputDivisorStateCreateInfoEXT const& ) const = default; +#else + bool operator==( PipelineVertexInputDivisorStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( vertexBindingDivisorCount == 
rhs.vertexBindingDivisorCount ) + && ( pVertexBindingDivisors == rhs.pVertexBindingDivisors ); + } + + bool operator!=( PipelineVertexInputDivisorStateCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineVertexInputDivisorStateCreateInfoEXT; + const void* pNext = {}; + uint32_t vertexBindingDivisorCount = {}; + const VULKAN_HPP_NAMESPACE::VertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors = {}; + + }; + static_assert( sizeof( PipelineVertexInputDivisorStateCreateInfoEXT ) == sizeof( VkPipelineVertexInputDivisorStateCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineVertexInputDivisorStateCreateInfoEXT; + }; + + struct PipelineViewportCoarseSampleOrderStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineViewportCoarseSampleOrderStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineViewportCoarseSampleOrderStateCreateInfoNV(VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType_ = VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV::eDefault, uint32_t customSampleOrderCount_ = {}, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders_ = {}) VULKAN_HPP_NOEXCEPT + : sampleOrderType( sampleOrderType_ ), customSampleOrderCount( customSampleOrderCount_ ), pCustomSampleOrders( pCustomSampleOrders_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineViewportCoarseSampleOrderStateCreateInfoNV( PipelineViewportCoarseSampleOrderStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineViewportCoarseSampleOrderStateCreateInfoNV( VkPipelineViewportCoarseSampleOrderStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportCoarseSampleOrderStateCreateInfoNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & customSampleOrders_ ) + : sampleOrderType( sampleOrderType_ ), customSampleOrderCount( static_cast( customSampleOrders_.size() ) ), pCustomSampleOrders( customSampleOrders_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineViewportCoarseSampleOrderStateCreateInfoNV & operator=( VkPipelineViewportCoarseSampleOrderStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV & operator=( PipelineViewportCoarseSampleOrderStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineViewportCoarseSampleOrderStateCreateInfoNV ) ); + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV & setSampleOrderType( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType_ ) VULKAN_HPP_NOEXCEPT + { + sampleOrderType = sampleOrderType_; + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV & setCustomSampleOrderCount( uint32_t customSampleOrderCount_ ) 
VULKAN_HPP_NOEXCEPT + { + customSampleOrderCount = customSampleOrderCount_; + return *this; + } + + PipelineViewportCoarseSampleOrderStateCreateInfoNV & setPCustomSampleOrders( const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders_ ) VULKAN_HPP_NOEXCEPT + { + pCustomSampleOrders = pCustomSampleOrders_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportCoarseSampleOrderStateCreateInfoNV & setCustomSampleOrders( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & customSampleOrders_ ) VULKAN_HPP_NOEXCEPT + { + customSampleOrderCount = static_cast( customSampleOrders_.size() ); + pCustomSampleOrders = customSampleOrders_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineViewportCoarseSampleOrderStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineViewportCoarseSampleOrderStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineViewportCoarseSampleOrderStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineViewportCoarseSampleOrderStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sampleOrderType == rhs.sampleOrderType ) + && ( customSampleOrderCount == rhs.customSampleOrderCount ) + && ( pCustomSampleOrders == rhs.pCustomSampleOrders ); + } + + bool operator!=( PipelineViewportCoarseSampleOrderStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineViewportCoarseSampleOrderStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType = VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV::eDefault; + uint32_t customSampleOrderCount = {}; + const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders = {}; + + }; + static_assert( sizeof( PipelineViewportCoarseSampleOrderStateCreateInfoNV ) == sizeof( VkPipelineViewportCoarseSampleOrderStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineViewportCoarseSampleOrderStateCreateInfoNV; + }; + + struct PipelineViewportExclusiveScissorStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineViewportExclusiveScissorStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineViewportExclusiveScissorStateCreateInfoNV(uint32_t exclusiveScissorCount_ = {}, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors_ = {}) VULKAN_HPP_NOEXCEPT + : exclusiveScissorCount( exclusiveScissorCount_ ), pExclusiveScissors( pExclusiveScissors_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineViewportExclusiveScissorStateCreateInfoNV( PipelineViewportExclusiveScissorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineViewportExclusiveScissorStateCreateInfoNV( VkPipelineViewportExclusiveScissorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportExclusiveScissorStateCreateInfoNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & exclusiveScissors_ ) + : exclusiveScissorCount( static_cast( exclusiveScissors_.size() ) ), pExclusiveScissors( exclusiveScissors_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineViewportExclusiveScissorStateCreateInfoNV & operator=( VkPipelineViewportExclusiveScissorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineViewportExclusiveScissorStateCreateInfoNV & operator=( PipelineViewportExclusiveScissorStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineViewportExclusiveScissorStateCreateInfoNV ) ); + return *this; + } + + PipelineViewportExclusiveScissorStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineViewportExclusiveScissorStateCreateInfoNV & setExclusiveScissorCount( uint32_t exclusiveScissorCount_ ) VULKAN_HPP_NOEXCEPT + { + exclusiveScissorCount = exclusiveScissorCount_; + return *this; + } + + PipelineViewportExclusiveScissorStateCreateInfoNV & setPExclusiveScissors( const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors_ ) VULKAN_HPP_NOEXCEPT + { + pExclusiveScissors = pExclusiveScissors_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportExclusiveScissorStateCreateInfoNV & setExclusiveScissors( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & exclusiveScissors_ ) VULKAN_HPP_NOEXCEPT + { + exclusiveScissorCount = static_cast( exclusiveScissors_.size() ); + pExclusiveScissors = exclusiveScissors_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineViewportExclusiveScissorStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineViewportExclusiveScissorStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineViewportExclusiveScissorStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineViewportExclusiveScissorStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( 
exclusiveScissorCount == rhs.exclusiveScissorCount ) + && ( pExclusiveScissors == rhs.pExclusiveScissors ); + } + + bool operator!=( PipelineViewportExclusiveScissorStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineViewportExclusiveScissorStateCreateInfoNV; + const void* pNext = {}; + uint32_t exclusiveScissorCount = {}; + const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors = {}; + + }; + static_assert( sizeof( PipelineViewportExclusiveScissorStateCreateInfoNV ) == sizeof( VkPipelineViewportExclusiveScissorStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineViewportExclusiveScissorStateCreateInfoNV; + }; + + struct PipelineViewportShadingRateImageStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineViewportShadingRateImageStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineViewportShadingRateImageStateCreateInfoNV(VULKAN_HPP_NAMESPACE::Bool32 shadingRateImageEnable_ = {}, uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes_ = {}) VULKAN_HPP_NOEXCEPT + : shadingRateImageEnable( shadingRateImageEnable_ ), viewportCount( viewportCount_ ), pShadingRatePalettes( pShadingRatePalettes_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineViewportShadingRateImageStateCreateInfoNV( PipelineViewportShadingRateImageStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineViewportShadingRateImageStateCreateInfoNV( VkPipelineViewportShadingRateImageStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportShadingRateImageStateCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 shadingRateImageEnable_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & shadingRatePalettes_ ) + : shadingRateImageEnable( shadingRateImageEnable_ ), viewportCount( static_cast( shadingRatePalettes_.size() ) ), pShadingRatePalettes( shadingRatePalettes_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineViewportShadingRateImageStateCreateInfoNV & operator=( VkPipelineViewportShadingRateImageStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV & operator=( PipelineViewportShadingRateImageStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineViewportShadingRateImageStateCreateInfoNV ) ); + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV & setShadingRateImageEnable( VULKAN_HPP_NAMESPACE::Bool32 shadingRateImageEnable_ ) VULKAN_HPP_NOEXCEPT + { + shadingRateImageEnable = shadingRateImageEnable_; + return *this; + } + + PipelineViewportShadingRateImageStateCreateInfoNV & setViewportCount( uint32_t viewportCount_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = viewportCount_; + return *this; + } + + 
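+    // Illustrative sketch (assumes the default "vk" namespace and that enhanced
+    // mode is not disabled): the ArrayProxy overload below fills viewportCount and
+    // pShadingRatePalettes together from one container, keeping the count and the
+    // pointer consistent, e.g.
+    //   std::array<vk::ShadingRatePaletteNV, 1> palettes = { /* ... */ };
+    //   vk::PipelineViewportShadingRateImageStateCreateInfoNV srInfo;
+    //   srInfo.setShadingRateImageEnable( VK_TRUE ).setShadingRatePalettes( palettes );
+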
PipelineViewportShadingRateImageStateCreateInfoNV & setPShadingRatePalettes( const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes_ ) VULKAN_HPP_NOEXCEPT + { + pShadingRatePalettes = pShadingRatePalettes_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportShadingRateImageStateCreateInfoNV & setShadingRatePalettes( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & shadingRatePalettes_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = static_cast( shadingRatePalettes_.size() ); + pShadingRatePalettes = shadingRatePalettes_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineViewportShadingRateImageStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineViewportShadingRateImageStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineViewportShadingRateImageStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineViewportShadingRateImageStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( shadingRateImageEnable == rhs.shadingRateImageEnable ) + && ( viewportCount == rhs.viewportCount ) + && ( pShadingRatePalettes == rhs.pShadingRatePalettes ); + } + + bool operator!=( PipelineViewportShadingRateImageStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineViewportShadingRateImageStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 shadingRateImageEnable = {}; + uint32_t viewportCount = {}; + const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* pShadingRatePalettes = {}; + + }; + static_assert( sizeof( PipelineViewportShadingRateImageStateCreateInfoNV ) == sizeof( VkPipelineViewportShadingRateImageStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineViewportShadingRateImageStateCreateInfoNV; + }; + + struct ViewportSwizzleNV + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ViewportSwizzleNV(VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV x_ = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX, VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV y_ = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX, VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV z_ = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX, VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV w_ = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX) VULKAN_HPP_NOEXCEPT + : x( x_ ), y( y_ ), z( z_ ), w( w_ ) + {} + + VULKAN_HPP_CONSTEXPR ViewportSwizzleNV( ViewportSwizzleNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ViewportSwizzleNV( VkViewportSwizzleNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ViewportSwizzleNV & operator=( VkViewportSwizzleNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ViewportSwizzleNV & operator=( ViewportSwizzleNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ViewportSwizzleNV ) ); + return *this; + } + + ViewportSwizzleNV & setX( VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV x_ ) VULKAN_HPP_NOEXCEPT + { + x = x_; + return *this; + } + + ViewportSwizzleNV & setY( VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV y_ ) VULKAN_HPP_NOEXCEPT + { + y = y_; + return *this; + } + + ViewportSwizzleNV & setZ( VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV z_ ) VULKAN_HPP_NOEXCEPT + { + z = z_; + return *this; + } + + ViewportSwizzleNV & setW( VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV w_ ) VULKAN_HPP_NOEXCEPT + { + w = w_; + return *this; + } + + + operator VkViewportSwizzleNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkViewportSwizzleNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ViewportSwizzleNV const& ) const = default; +#else + bool operator==( ViewportSwizzleNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( x == rhs.x ) + && ( y == rhs.y ) + && ( z == rhs.z ) + && ( w == rhs.w ); + } + + bool operator!=( ViewportSwizzleNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV x = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX; + VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV y = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX; + VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV z = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX; + VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV w = VULKAN_HPP_NAMESPACE::ViewportCoordinateSwizzleNV::ePositiveX; + + }; + static_assert( sizeof( ViewportSwizzleNV ) == sizeof( VkViewportSwizzleNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct PipelineViewportSwizzleStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineViewportSwizzleStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineViewportSwizzleStateCreateInfoNV(VULKAN_HPP_NAMESPACE::PipelineViewportSwizzleStateCreateFlagsNV flags_ = {}, uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::ViewportSwizzleNV* pViewportSwizzles_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), viewportCount( viewportCount_ ), pViewportSwizzles( pViewportSwizzles_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineViewportSwizzleStateCreateInfoNV( PipelineViewportSwizzleStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineViewportSwizzleStateCreateInfoNV( VkPipelineViewportSwizzleStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportSwizzleStateCreateInfoNV( VULKAN_HPP_NAMESPACE::PipelineViewportSwizzleStateCreateFlagsNV flags_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewportSwizzles_ ) + : flags( flags_ ), viewportCount( static_cast( viewportSwizzles_.size() ) ), pViewportSwizzles( viewportSwizzles_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineViewportSwizzleStateCreateInfoNV & operator=( VkPipelineViewportSwizzleStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV & operator=( PipelineViewportSwizzleStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineViewportSwizzleStateCreateInfoNV ) ); + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV & setFlags( VULKAN_HPP_NAMESPACE::PipelineViewportSwizzleStateCreateFlagsNV flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV & setViewportCount( uint32_t viewportCount_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportSwizzleStateCreateInfoNV & setPViewportSwizzles( const VULKAN_HPP_NAMESPACE::ViewportSwizzleNV* pViewportSwizzles_ ) VULKAN_HPP_NOEXCEPT + { + pViewportSwizzles = pViewportSwizzles_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportSwizzleStateCreateInfoNV & setViewportSwizzles( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewportSwizzles_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = static_cast( viewportSwizzles_.size() ); + pViewportSwizzles = viewportSwizzles_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineViewportSwizzleStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineViewportSwizzleStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineViewportSwizzleStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineViewportSwizzleStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == 
rhs.pNext ) + && ( flags == rhs.flags ) + && ( viewportCount == rhs.viewportCount ) + && ( pViewportSwizzles == rhs.pViewportSwizzles ); + } + + bool operator!=( PipelineViewportSwizzleStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineViewportSwizzleStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::PipelineViewportSwizzleStateCreateFlagsNV flags = {}; + uint32_t viewportCount = {}; + const VULKAN_HPP_NAMESPACE::ViewportSwizzleNV* pViewportSwizzles = {}; + + }; + static_assert( sizeof( PipelineViewportSwizzleStateCreateInfoNV ) == sizeof( VkPipelineViewportSwizzleStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = PipelineViewportSwizzleStateCreateInfoNV; + }; + + struct PipelineViewportWScalingStateCreateInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePipelineViewportWScalingStateCreateInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PipelineViewportWScalingStateCreateInfoNV(VULKAN_HPP_NAMESPACE::Bool32 viewportWScalingEnable_ = {}, uint32_t viewportCount_ = {}, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings_ = {}) VULKAN_HPP_NOEXCEPT + : viewportWScalingEnable( viewportWScalingEnable_ ), viewportCount( viewportCount_ ), pViewportWScalings( pViewportWScalings_ ) + {} + + VULKAN_HPP_CONSTEXPR PipelineViewportWScalingStateCreateInfoNV( PipelineViewportWScalingStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PipelineViewportWScalingStateCreateInfoNV( VkPipelineViewportWScalingStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportWScalingStateCreateInfoNV( VULKAN_HPP_NAMESPACE::Bool32 viewportWScalingEnable_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewportWScalings_ ) + : viewportWScalingEnable( viewportWScalingEnable_ ), viewportCount( static_cast( viewportWScalings_.size() ) ), pViewportWScalings( viewportWScalings_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PipelineViewportWScalingStateCreateInfoNV & operator=( VkPipelineViewportWScalingStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV & operator=( PipelineViewportWScalingStateCreateInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PipelineViewportWScalingStateCreateInfoNV ) ); + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV & setViewportWScalingEnable( VULKAN_HPP_NAMESPACE::Bool32 viewportWScalingEnable_ ) VULKAN_HPP_NOEXCEPT + { + viewportWScalingEnable = viewportWScalingEnable_; + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV & setViewportCount( uint32_t viewportCount_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = viewportCount_; + return *this; + } + + PipelineViewportWScalingStateCreateInfoNV & setPViewportWScalings( const 
VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings_ ) VULKAN_HPP_NOEXCEPT + { + pViewportWScalings = pViewportWScalings_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PipelineViewportWScalingStateCreateInfoNV & setViewportWScalings( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewportWScalings_ ) VULKAN_HPP_NOEXCEPT + { + viewportCount = static_cast( viewportWScalings_.size() ); + pViewportWScalings = viewportWScalings_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPipelineViewportWScalingStateCreateInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPipelineViewportWScalingStateCreateInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PipelineViewportWScalingStateCreateInfoNV const& ) const = default; +#else + bool operator==( PipelineViewportWScalingStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( viewportWScalingEnable == rhs.viewportWScalingEnable ) + && ( viewportCount == rhs.viewportCount ) + && ( pViewportWScalings == rhs.pViewportWScalings ); + } + + bool operator!=( PipelineViewportWScalingStateCreateInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePipelineViewportWScalingStateCreateInfoNV; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 viewportWScalingEnable = {}; + uint32_t viewportCount = {}; + const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings = {}; + + }; + static_assert( sizeof( PipelineViewportWScalingStateCreateInfoNV ) == sizeof( VkPipelineViewportWScalingStateCreateInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PipelineViewportWScalingStateCreateInfoNV; + }; + +#ifdef VK_USE_PLATFORM_GGP + struct PresentFrameTokenGGP + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePresentFrameTokenGGP; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PresentFrameTokenGGP(GgpFrameToken frameToken_ = {}) VULKAN_HPP_NOEXCEPT + : frameToken( frameToken_ ) + {} + + VULKAN_HPP_CONSTEXPR PresentFrameTokenGGP( PresentFrameTokenGGP const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PresentFrameTokenGGP( VkPresentFrameTokenGGP const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PresentFrameTokenGGP & operator=( VkPresentFrameTokenGGP const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PresentFrameTokenGGP & operator=( PresentFrameTokenGGP const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PresentFrameTokenGGP ) ); + return *this; + } + + PresentFrameTokenGGP & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PresentFrameTokenGGP & setFrameToken( GgpFrameToken frameToken_ ) VULKAN_HPP_NOEXCEPT + { + frameToken = frameToken_; + return *this; + } + + + operator VkPresentFrameTokenGGP const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPresentFrameTokenGGP &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PresentFrameTokenGGP const& ) const = default; +#else + bool operator==( PresentFrameTokenGGP const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( memcmp( &frameToken, &rhs.frameToken, sizeof( GgpFrameToken ) ) == 0 ); + } + + bool operator!=( PresentFrameTokenGGP const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePresentFrameTokenGGP; + const void* pNext = {}; + GgpFrameToken frameToken = {}; + + }; + static_assert( sizeof( PresentFrameTokenGGP ) == sizeof( VkPresentFrameTokenGGP ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = PresentFrameTokenGGP; + }; +#endif /*VK_USE_PLATFORM_GGP*/ + + struct RectLayerKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RectLayerKHR(VULKAN_HPP_NAMESPACE::Offset2D offset_ = {}, VULKAN_HPP_NAMESPACE::Extent2D extent_ = {}, uint32_t layer_ = {}) VULKAN_HPP_NOEXCEPT + : offset( offset_ ), extent( extent_ ), layer( layer_ ) + {} + + VULKAN_HPP_CONSTEXPR RectLayerKHR( RectLayerKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RectLayerKHR( VkRectLayerKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + + explicit RectLayerKHR( Rect2D const& rect2D, uint32_t layer_ = {} ) + : offset( rect2D.offset ) + , extent( rect2D.extent ) + , layer( layer_ ) + {} +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RectLayerKHR & operator=( VkRectLayerKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RectLayerKHR & operator=( RectLayerKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RectLayerKHR ) ); + return *this; + } + + RectLayerKHR & setOffset( VULKAN_HPP_NAMESPACE::Offset2D const & offset_ ) VULKAN_HPP_NOEXCEPT + { + offset = offset_; + return *this; + } + + RectLayerKHR & setExtent( VULKAN_HPP_NAMESPACE::Extent2D const & extent_ ) VULKAN_HPP_NOEXCEPT + { + extent = extent_; + return *this; + } + + RectLayerKHR & setLayer( uint32_t layer_ ) VULKAN_HPP_NOEXCEPT + { + layer = layer_; + return *this; + } + + + operator VkRectLayerKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRectLayerKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RectLayerKHR const& ) const = default; +#else + bool operator==( RectLayerKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( offset == rhs.offset ) + && ( extent == rhs.extent ) + && ( layer == rhs.layer ); + } + + bool operator!=( RectLayerKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + VULKAN_HPP_NAMESPACE::Offset2D offset = {}; + VULKAN_HPP_NAMESPACE::Extent2D extent = {}; + uint32_t layer = {}; + + }; + static_assert( sizeof( RectLayerKHR ) == sizeof( VkRectLayerKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct PresentRegionKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PresentRegionKHR(uint32_t rectangleCount_ = {}, const VULKAN_HPP_NAMESPACE::RectLayerKHR* pRectangles_ = {}) VULKAN_HPP_NOEXCEPT + : rectangleCount( rectangleCount_ ), pRectangles( pRectangles_ ) + {} + + VULKAN_HPP_CONSTEXPR PresentRegionKHR( PresentRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PresentRegionKHR( VkPresentRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentRegionKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & rectangles_ ) + : rectangleCount( static_cast( rectangles_.size() ) ), pRectangles( rectangles_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PresentRegionKHR & operator=( VkPresentRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PresentRegionKHR & operator=( PresentRegionKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PresentRegionKHR ) ); + return *this; + } + + PresentRegionKHR & setRectangleCount( uint32_t rectangleCount_ ) VULKAN_HPP_NOEXCEPT + { + rectangleCount = rectangleCount_; + return *this; + } + + PresentRegionKHR & setPRectangles( const VULKAN_HPP_NAMESPACE::RectLayerKHR* pRectangles_ ) VULKAN_HPP_NOEXCEPT + { + pRectangles = pRectangles_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentRegionKHR & setRectangles( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & rectangles_ ) VULKAN_HPP_NOEXCEPT + { + rectangleCount = static_cast( rectangles_.size() ); + pRectangles = rectangles_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPresentRegionKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPresentRegionKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PresentRegionKHR const& ) const = default; +#else + bool operator==( PresentRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( rectangleCount == rhs.rectangleCount ) + && ( pRectangles == rhs.pRectangles ); + } + + bool operator!=( PresentRegionKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t rectangleCount = {}; + const VULKAN_HPP_NAMESPACE::RectLayerKHR* pRectangles = {}; + + }; + static_assert( sizeof( PresentRegionKHR ) == sizeof( VkPresentRegionKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct PresentRegionsKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePresentRegionsKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PresentRegionsKHR(uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::PresentRegionKHR* pRegions_ = {}) VULKAN_HPP_NOEXCEPT + : swapchainCount( swapchainCount_ ), pRegions( pRegions_ ) + {} + + VULKAN_HPP_CONSTEXPR PresentRegionsKHR( PresentRegionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PresentRegionsKHR( VkPresentRegionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentRegionsKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) + : swapchainCount( static_cast( regions_.size() ) ), pRegions( regions_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PresentRegionsKHR & operator=( VkPresentRegionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PresentRegionsKHR & operator=( PresentRegionsKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PresentRegionsKHR ) ); + return *this; + } + + PresentRegionsKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PresentRegionsKHR & setSwapchainCount( uint32_t swapchainCount_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = swapchainCount_; + return *this; + } + + PresentRegionsKHR & setPRegions( const VULKAN_HPP_NAMESPACE::PresentRegionKHR* pRegions_ ) VULKAN_HPP_NOEXCEPT + { + pRegions = pRegions_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentRegionsKHR & setRegions( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & regions_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = static_cast( regions_.size() ); + pRegions = regions_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPresentRegionsKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPresentRegionsKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PresentRegionsKHR const& ) const = default; +#else + bool operator==( PresentRegionsKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pRegions == rhs.pRegions ); + } + + bool operator!=( PresentRegionsKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePresentRegionsKHR; + const void* pNext = {}; + uint32_t swapchainCount = {}; + const VULKAN_HPP_NAMESPACE::PresentRegionKHR* pRegions = {}; + + }; + static_assert( sizeof( PresentRegionsKHR ) == sizeof( VkPresentRegionsKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePresentRegionsKHR>
+  {
+    using Type = PresentRegionsKHR;
+  };
+
+  struct PresentTimeGOOGLE
+  {
+
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR PresentTimeGOOGLE(uint32_t presentID_ = {}, uint64_t desiredPresentTime_ = {}) VULKAN_HPP_NOEXCEPT
+    : presentID( presentID_ ), desiredPresentTime( desiredPresentTime_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR PresentTimeGOOGLE( PresentTimeGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    PresentTimeGOOGLE( VkPresentTimeGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    PresentTimeGOOGLE & operator=( VkPresentTimeGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::PresentTimeGOOGLE const *>( &rhs );
+      return *this;
+    }
+
+    PresentTimeGOOGLE & operator=( PresentTimeGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( PresentTimeGOOGLE ) );
+      return *this;
+    }
+
+    PresentTimeGOOGLE & setPresentID( uint32_t presentID_ ) VULKAN_HPP_NOEXCEPT
+    {
+      presentID = presentID_;
+      return *this;
+    }
+
+    PresentTimeGOOGLE & setDesiredPresentTime( uint64_t desiredPresentTime_ ) VULKAN_HPP_NOEXCEPT
+    {
+      desiredPresentTime = desiredPresentTime_;
+      return *this;
+    }
+
+
+    operator VkPresentTimeGOOGLE const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkPresentTimeGOOGLE*>( this );
+    }
+
+    operator VkPresentTimeGOOGLE &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkPresentTimeGOOGLE*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( PresentTimeGOOGLE const& ) const = default;
+#else
+    bool operator==( PresentTimeGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( presentID == rhs.presentID )
+          && ( desiredPresentTime == rhs.desiredPresentTime );
+    }
+
+    bool operator!=( PresentTimeGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    uint32_t presentID = {};
+    uint64_t desiredPresentTime = {};
+
+  };
+  static_assert( sizeof( PresentTimeGOOGLE ) == sizeof( VkPresentTimeGOOGLE ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<PresentTimeGOOGLE>::value, "struct wrapper is not a standard layout!"
); + + struct PresentTimesInfoGOOGLE + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::ePresentTimesInfoGOOGLE; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PresentTimesInfoGOOGLE(uint32_t swapchainCount_ = {}, const VULKAN_HPP_NAMESPACE::PresentTimeGOOGLE* pTimes_ = {}) VULKAN_HPP_NOEXCEPT + : swapchainCount( swapchainCount_ ), pTimes( pTimes_ ) + {} + + VULKAN_HPP_CONSTEXPR PresentTimesInfoGOOGLE( PresentTimesInfoGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + PresentTimesInfoGOOGLE( VkPresentTimesInfoGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentTimesInfoGOOGLE( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & times_ ) + : swapchainCount( static_cast( times_.size() ) ), pTimes( times_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + PresentTimesInfoGOOGLE & operator=( VkPresentTimesInfoGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + PresentTimesInfoGOOGLE & operator=( PresentTimesInfoGOOGLE const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( PresentTimesInfoGOOGLE ) ); + return *this; + } + + PresentTimesInfoGOOGLE & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + PresentTimesInfoGOOGLE & setSwapchainCount( uint32_t swapchainCount_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = swapchainCount_; + return *this; + } + + PresentTimesInfoGOOGLE & setPTimes( const VULKAN_HPP_NAMESPACE::PresentTimeGOOGLE* pTimes_ ) VULKAN_HPP_NOEXCEPT + { + pTimes = pTimes_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + PresentTimesInfoGOOGLE & setTimes( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & times_ ) VULKAN_HPP_NOEXCEPT + { + swapchainCount = static_cast( times_.size() ); + pTimes = times_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkPresentTimesInfoGOOGLE const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkPresentTimesInfoGOOGLE &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( PresentTimesInfoGOOGLE const& ) const = default; +#else + bool operator==( PresentTimesInfoGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( swapchainCount == rhs.swapchainCount ) + && ( pTimes == rhs.pTimes ); + } + + bool operator!=( PresentTimesInfoGOOGLE const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::ePresentTimesInfoGOOGLE; + const void* pNext = {}; + uint32_t swapchainCount = {}; + const VULKAN_HPP_NAMESPACE::PresentTimeGOOGLE* pTimes = {}; + + }; + static_assert( sizeof( PresentTimesInfoGOOGLE ) == sizeof( VkPresentTimesInfoGOOGLE ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::ePresentTimesInfoGOOGLE>
+  {
+    using Type = PresentTimesInfoGOOGLE;
+  };
+
+  struct ProtectedSubmitInfo
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eProtectedSubmitInfo;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR ProtectedSubmitInfo(VULKAN_HPP_NAMESPACE::Bool32 protectedSubmit_ = {}) VULKAN_HPP_NOEXCEPT
+    : protectedSubmit( protectedSubmit_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR ProtectedSubmitInfo( ProtectedSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ProtectedSubmitInfo( VkProtectedSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ProtectedSubmitInfo & operator=( VkProtectedSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ProtectedSubmitInfo const *>( &rhs );
+      return *this;
+    }
+
+    ProtectedSubmitInfo & operator=( ProtectedSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ProtectedSubmitInfo ) );
+      return *this;
+    }
+
+    ProtectedSubmitInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    ProtectedSubmitInfo & setProtectedSubmit( VULKAN_HPP_NAMESPACE::Bool32 protectedSubmit_ ) VULKAN_HPP_NOEXCEPT
+    {
+      protectedSubmit = protectedSubmit_;
+      return *this;
+    }
+
+
+    operator VkProtectedSubmitInfo const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkProtectedSubmitInfo*>( this );
+    }
+
+    operator VkProtectedSubmitInfo &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkProtectedSubmitInfo*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( ProtectedSubmitInfo const& ) const = default;
+#else
+    bool operator==( ProtectedSubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( protectedSubmit == rhs.protectedSubmit );
+    }
+
+    bool operator!=( ProtectedSubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eProtectedSubmitInfo;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::Bool32 protectedSubmit = {};
+
+  };
+  static_assert( sizeof( ProtectedSubmitInfo ) == sizeof( VkProtectedSubmitInfo ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ProtectedSubmitInfo>::value, "struct wrapper is not a standard layout!"
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eProtectedSubmitInfo>
+  {
+    using Type = ProtectedSubmitInfo;
+  };
+
+  struct QueryPoolPerformanceQueryCreateInfoINTEL
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueryPoolPerformanceQueryCreateInfoINTEL;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR QueryPoolPerformanceQueryCreateInfoINTEL(VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL performanceCountersSampling_ = VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL::eManual) VULKAN_HPP_NOEXCEPT
+    : performanceCountersSampling( performanceCountersSampling_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR QueryPoolPerformanceQueryCreateInfoINTEL( QueryPoolPerformanceQueryCreateInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    QueryPoolPerformanceQueryCreateInfoINTEL( VkQueryPoolPerformanceQueryCreateInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    QueryPoolPerformanceQueryCreateInfoINTEL & operator=( VkQueryPoolPerformanceQueryCreateInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::QueryPoolPerformanceQueryCreateInfoINTEL const *>( &rhs );
+      return *this;
+    }
+
+    QueryPoolPerformanceQueryCreateInfoINTEL & operator=( QueryPoolPerformanceQueryCreateInfoINTEL const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( QueryPoolPerformanceQueryCreateInfoINTEL ) );
+      return *this;
+    }
+
+    QueryPoolPerformanceQueryCreateInfoINTEL & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    QueryPoolPerformanceQueryCreateInfoINTEL & setPerformanceCountersSampling( VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL performanceCountersSampling_ ) VULKAN_HPP_NOEXCEPT
+    {
+      performanceCountersSampling = performanceCountersSampling_;
+      return *this;
+    }
+
+
+    operator VkQueryPoolPerformanceQueryCreateInfoINTEL const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkQueryPoolPerformanceQueryCreateInfoINTEL*>( this );
+    }
+
+    operator VkQueryPoolPerformanceQueryCreateInfoINTEL &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkQueryPoolPerformanceQueryCreateInfoINTEL*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( QueryPoolPerformanceQueryCreateInfoINTEL const& ) const = default;
+#else
+    bool operator==( QueryPoolPerformanceQueryCreateInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( performanceCountersSampling == rhs.performanceCountersSampling );
+    }
+
+    bool operator!=( QueryPoolPerformanceQueryCreateInfoINTEL const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueryPoolPerformanceQueryCreateInfoINTEL;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL performanceCountersSampling = VULKAN_HPP_NAMESPACE::QueryPoolSamplingModeINTEL::eManual;
+
+  };
+  static_assert( sizeof( QueryPoolPerformanceQueryCreateInfoINTEL ) == sizeof( VkQueryPoolPerformanceQueryCreateInfoINTEL ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<QueryPoolPerformanceQueryCreateInfoINTEL>::value, "struct wrapper is not a standard layout!"
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eQueryPoolPerformanceQueryCreateInfoINTEL>
+  {
+    using Type = QueryPoolPerformanceQueryCreateInfoINTEL;
+  };
+  using QueryPoolCreateInfoINTEL = QueryPoolPerformanceQueryCreateInfoINTEL;
+
+  struct QueueFamilyCheckpointPropertiesNV
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eQueueFamilyCheckpointPropertiesNV;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR QueueFamilyCheckpointPropertiesNV(VULKAN_HPP_NAMESPACE::PipelineStageFlags checkpointExecutionStageMask_ = {}) VULKAN_HPP_NOEXCEPT
+    : checkpointExecutionStageMask( checkpointExecutionStageMask_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR QueueFamilyCheckpointPropertiesNV( QueueFamilyCheckpointPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    QueueFamilyCheckpointPropertiesNV( VkQueueFamilyCheckpointPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    QueueFamilyCheckpointPropertiesNV & operator=( VkQueueFamilyCheckpointPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::QueueFamilyCheckpointPropertiesNV const *>( &rhs );
+      return *this;
+    }
+
+    QueueFamilyCheckpointPropertiesNV & operator=( QueueFamilyCheckpointPropertiesNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( QueueFamilyCheckpointPropertiesNV ) );
+      return *this;
+    }
+
+
+    operator VkQueueFamilyCheckpointPropertiesNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>( this );
+    }
+
+    operator VkQueueFamilyCheckpointPropertiesNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkQueueFamilyCheckpointPropertiesNV*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( QueueFamilyCheckpointPropertiesNV const& ) const = default;
+#else
+    bool operator==( QueueFamilyCheckpointPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( checkpointExecutionStageMask == rhs.checkpointExecutionStageMask );
+    }
+
+    bool operator!=( QueueFamilyCheckpointPropertiesNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eQueueFamilyCheckpointPropertiesNV;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::PipelineStageFlags checkpointExecutionStageMask = {};
+
+  };
+  static_assert( sizeof( QueueFamilyCheckpointPropertiesNV ) == sizeof( VkQueueFamilyCheckpointPropertiesNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<QueueFamilyCheckpointPropertiesNV>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = QueueFamilyCheckpointPropertiesNV; + }; + + struct RenderPassAttachmentBeginInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassAttachmentBeginInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassAttachmentBeginInfo(uint32_t attachmentCount_ = {}, const VULKAN_HPP_NAMESPACE::ImageView* pAttachments_ = {}) VULKAN_HPP_NOEXCEPT + : attachmentCount( attachmentCount_ ), pAttachments( pAttachments_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassAttachmentBeginInfo( RenderPassAttachmentBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassAttachmentBeginInfo( VkRenderPassAttachmentBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassAttachmentBeginInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_ ) + : attachmentCount( static_cast( attachments_.size() ) ), pAttachments( attachments_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassAttachmentBeginInfo & operator=( VkRenderPassAttachmentBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassAttachmentBeginInfo & operator=( RenderPassAttachmentBeginInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassAttachmentBeginInfo ) ); + return *this; + } + + RenderPassAttachmentBeginInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassAttachmentBeginInfo & setAttachmentCount( uint32_t attachmentCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = attachmentCount_; + return *this; + } + + RenderPassAttachmentBeginInfo & setPAttachments( const VULKAN_HPP_NAMESPACE::ImageView* pAttachments_ ) VULKAN_HPP_NOEXCEPT + { + pAttachments = pAttachments_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassAttachmentBeginInfo & setAttachments( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachments_ ) VULKAN_HPP_NOEXCEPT + { + attachmentCount = static_cast( attachments_.size() ); + pAttachments = attachments_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassAttachmentBeginInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassAttachmentBeginInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassAttachmentBeginInfo const& ) const = default; +#else + bool operator==( RenderPassAttachmentBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( attachmentCount == rhs.attachmentCount ) + && ( pAttachments == rhs.pAttachments ); + } + + bool operator!=( RenderPassAttachmentBeginInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassAttachmentBeginInfo; + const void* pNext = {}; + uint32_t attachmentCount = {}; + const VULKAN_HPP_NAMESPACE::ImageView* pAttachments = {}; + + }; + static_assert( sizeof( RenderPassAttachmentBeginInfo ) == sizeof( VkRenderPassAttachmentBeginInfo ), "struct and 
wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RenderPassAttachmentBeginInfo; + }; + using RenderPassAttachmentBeginInfoKHR = RenderPassAttachmentBeginInfo; + + struct RenderPassFragmentDensityMapCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassFragmentDensityMapCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassFragmentDensityMapCreateInfoEXT(VULKAN_HPP_NAMESPACE::AttachmentReference fragmentDensityMapAttachment_ = {}) VULKAN_HPP_NOEXCEPT + : fragmentDensityMapAttachment( fragmentDensityMapAttachment_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassFragmentDensityMapCreateInfoEXT( RenderPassFragmentDensityMapCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassFragmentDensityMapCreateInfoEXT( VkRenderPassFragmentDensityMapCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassFragmentDensityMapCreateInfoEXT & operator=( VkRenderPassFragmentDensityMapCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassFragmentDensityMapCreateInfoEXT & operator=( RenderPassFragmentDensityMapCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassFragmentDensityMapCreateInfoEXT ) ); + return *this; + } + + RenderPassFragmentDensityMapCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassFragmentDensityMapCreateInfoEXT & setFragmentDensityMapAttachment( VULKAN_HPP_NAMESPACE::AttachmentReference const & fragmentDensityMapAttachment_ ) VULKAN_HPP_NOEXCEPT + { + fragmentDensityMapAttachment = fragmentDensityMapAttachment_; + return *this; + } + + + operator VkRenderPassFragmentDensityMapCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassFragmentDensityMapCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassFragmentDensityMapCreateInfoEXT const& ) const = default; +#else + bool operator==( RenderPassFragmentDensityMapCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fragmentDensityMapAttachment == rhs.fragmentDensityMapAttachment ); + } + + bool operator!=( RenderPassFragmentDensityMapCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassFragmentDensityMapCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::AttachmentReference fragmentDensityMapAttachment = {}; + + }; + static_assert( sizeof( RenderPassFragmentDensityMapCreateInfoEXT ) == sizeof( VkRenderPassFragmentDensityMapCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = RenderPassFragmentDensityMapCreateInfoEXT; + }; + + struct RenderPassInputAttachmentAspectCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassInputAttachmentAspectCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassInputAttachmentAspectCreateInfo(uint32_t aspectReferenceCount_ = {}, const VULKAN_HPP_NAMESPACE::InputAttachmentAspectReference* pAspectReferences_ = {}) VULKAN_HPP_NOEXCEPT + : aspectReferenceCount( aspectReferenceCount_ ), pAspectReferences( pAspectReferences_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassInputAttachmentAspectCreateInfo( RenderPassInputAttachmentAspectCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassInputAttachmentAspectCreateInfo( VkRenderPassInputAttachmentAspectCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassInputAttachmentAspectCreateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & aspectReferences_ ) + : aspectReferenceCount( static_cast( aspectReferences_.size() ) ), pAspectReferences( aspectReferences_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassInputAttachmentAspectCreateInfo & operator=( VkRenderPassInputAttachmentAspectCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassInputAttachmentAspectCreateInfo & operator=( RenderPassInputAttachmentAspectCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassInputAttachmentAspectCreateInfo ) ); + return *this; + } + + RenderPassInputAttachmentAspectCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassInputAttachmentAspectCreateInfo & setAspectReferenceCount( uint32_t aspectReferenceCount_ ) VULKAN_HPP_NOEXCEPT + { + aspectReferenceCount = aspectReferenceCount_; + return *this; + } + + RenderPassInputAttachmentAspectCreateInfo & setPAspectReferences( const VULKAN_HPP_NAMESPACE::InputAttachmentAspectReference* pAspectReferences_ ) VULKAN_HPP_NOEXCEPT + { + pAspectReferences = pAspectReferences_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassInputAttachmentAspectCreateInfo & setAspectReferences( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & aspectReferences_ ) VULKAN_HPP_NOEXCEPT + { + aspectReferenceCount = static_cast( aspectReferences_.size() ); + pAspectReferences = aspectReferences_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassInputAttachmentAspectCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassInputAttachmentAspectCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassInputAttachmentAspectCreateInfo const& ) const = default; +#else + bool operator==( RenderPassInputAttachmentAspectCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( aspectReferenceCount == rhs.aspectReferenceCount ) + && ( pAspectReferences == rhs.pAspectReferences ); + } + + bool operator!=( 
RenderPassInputAttachmentAspectCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassInputAttachmentAspectCreateInfo; + const void* pNext = {}; + uint32_t aspectReferenceCount = {}; + const VULKAN_HPP_NAMESPACE::InputAttachmentAspectReference* pAspectReferences = {}; + + }; + static_assert( sizeof( RenderPassInputAttachmentAspectCreateInfo ) == sizeof( VkRenderPassInputAttachmentAspectCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = RenderPassInputAttachmentAspectCreateInfo; + }; + using RenderPassInputAttachmentAspectCreateInfoKHR = RenderPassInputAttachmentAspectCreateInfo; + + struct RenderPassMultiviewCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassMultiviewCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassMultiviewCreateInfo(uint32_t subpassCount_ = {}, const uint32_t* pViewMasks_ = {}, uint32_t dependencyCount_ = {}, const int32_t* pViewOffsets_ = {}, uint32_t correlationMaskCount_ = {}, const uint32_t* pCorrelationMasks_ = {}) VULKAN_HPP_NOEXCEPT + : subpassCount( subpassCount_ ), pViewMasks( pViewMasks_ ), dependencyCount( dependencyCount_ ), pViewOffsets( pViewOffsets_ ), correlationMaskCount( correlationMaskCount_ ), pCorrelationMasks( pCorrelationMasks_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassMultiviewCreateInfo( RenderPassMultiviewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassMultiviewCreateInfo( VkRenderPassMultiviewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassMultiviewCreateInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewMasks_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewOffsets_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & correlationMasks_ = {} ) + : subpassCount( static_cast( viewMasks_.size() ) ), pViewMasks( viewMasks_.data() ), dependencyCount( static_cast( viewOffsets_.size() ) ), pViewOffsets( viewOffsets_.data() ), correlationMaskCount( static_cast( correlationMasks_.size() ) ), pCorrelationMasks( correlationMasks_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassMultiviewCreateInfo & operator=( VkRenderPassMultiviewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassMultiviewCreateInfo & operator=( RenderPassMultiviewCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassMultiviewCreateInfo ) ); + return *this; + } + + RenderPassMultiviewCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassMultiviewCreateInfo & setSubpassCount( uint32_t subpassCount_ ) VULKAN_HPP_NOEXCEPT + { + subpassCount = subpassCount_; + return *this; + } + + RenderPassMultiviewCreateInfo & setPViewMasks( const uint32_t* pViewMasks_ ) VULKAN_HPP_NOEXCEPT + { + pViewMasks = pViewMasks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassMultiviewCreateInfo & setViewMasks( 
VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewMasks_ ) VULKAN_HPP_NOEXCEPT + { + subpassCount = static_cast( viewMasks_.size() ); + pViewMasks = viewMasks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassMultiviewCreateInfo & setDependencyCount( uint32_t dependencyCount_ ) VULKAN_HPP_NOEXCEPT + { + dependencyCount = dependencyCount_; + return *this; + } + + RenderPassMultiviewCreateInfo & setPViewOffsets( const int32_t* pViewOffsets_ ) VULKAN_HPP_NOEXCEPT + { + pViewOffsets = pViewOffsets_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassMultiviewCreateInfo & setViewOffsets( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & viewOffsets_ ) VULKAN_HPP_NOEXCEPT + { + dependencyCount = static_cast( viewOffsets_.size() ); + pViewOffsets = viewOffsets_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassMultiviewCreateInfo & setCorrelationMaskCount( uint32_t correlationMaskCount_ ) VULKAN_HPP_NOEXCEPT + { + correlationMaskCount = correlationMaskCount_; + return *this; + } + + RenderPassMultiviewCreateInfo & setPCorrelationMasks( const uint32_t* pCorrelationMasks_ ) VULKAN_HPP_NOEXCEPT + { + pCorrelationMasks = pCorrelationMasks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassMultiviewCreateInfo & setCorrelationMasks( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & correlationMasks_ ) VULKAN_HPP_NOEXCEPT + { + correlationMaskCount = static_cast( correlationMasks_.size() ); + pCorrelationMasks = correlationMasks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassMultiviewCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassMultiviewCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassMultiviewCreateInfo const& ) const = default; +#else + bool operator==( RenderPassMultiviewCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( subpassCount == rhs.subpassCount ) + && ( pViewMasks == rhs.pViewMasks ) + && ( dependencyCount == rhs.dependencyCount ) + && ( pViewOffsets == rhs.pViewOffsets ) + && ( correlationMaskCount == rhs.correlationMaskCount ) + && ( pCorrelationMasks == rhs.pCorrelationMasks ); + } + + bool operator!=( RenderPassMultiviewCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassMultiviewCreateInfo; + const void* pNext = {}; + uint32_t subpassCount = {}; + const uint32_t* pViewMasks = {}; + uint32_t dependencyCount = {}; + const int32_t* pViewOffsets = {}; + uint32_t correlationMaskCount = {}; + const uint32_t* pCorrelationMasks = {}; + + }; + static_assert( sizeof( RenderPassMultiviewCreateInfo ) == sizeof( VkRenderPassMultiviewCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = RenderPassMultiviewCreateInfo; + }; + using RenderPassMultiviewCreateInfoKHR = RenderPassMultiviewCreateInfo; + + struct SubpassSampleLocationsEXT + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubpassSampleLocationsEXT(uint32_t subpassIndex_ = {}, VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo_ = {}) VULKAN_HPP_NOEXCEPT + : subpassIndex( subpassIndex_ ), sampleLocationsInfo( sampleLocationsInfo_ ) + {} + + VULKAN_HPP_CONSTEXPR SubpassSampleLocationsEXT( SubpassSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubpassSampleLocationsEXT( VkSubpassSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubpassSampleLocationsEXT & operator=( VkSubpassSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SubpassSampleLocationsEXT & operator=( SubpassSampleLocationsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SubpassSampleLocationsEXT ) ); + return *this; + } + + SubpassSampleLocationsEXT & setSubpassIndex( uint32_t subpassIndex_ ) VULKAN_HPP_NOEXCEPT + { + subpassIndex = subpassIndex_; + return *this; + } + + SubpassSampleLocationsEXT & setSampleLocationsInfo( VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT const & sampleLocationsInfo_ ) VULKAN_HPP_NOEXCEPT + { + sampleLocationsInfo = sampleLocationsInfo_; + return *this; + } + + + operator VkSubpassSampleLocationsEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSubpassSampleLocationsEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubpassSampleLocationsEXT const& ) const = default; +#else + bool operator==( SubpassSampleLocationsEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( subpassIndex == rhs.subpassIndex ) + && ( sampleLocationsInfo == rhs.sampleLocationsInfo ); + } + + bool operator!=( SubpassSampleLocationsEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t subpassIndex = {}; + VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT sampleLocationsInfo = {}; + + }; + static_assert( sizeof( SubpassSampleLocationsEXT ) == sizeof( VkSubpassSampleLocationsEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + struct RenderPassSampleLocationsBeginInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassSampleLocationsBeginInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR RenderPassSampleLocationsBeginInfoEXT(uint32_t attachmentInitialSampleLocationsCount_ = {}, const VULKAN_HPP_NAMESPACE::AttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations_ = {}, uint32_t postSubpassSampleLocationsCount_ = {}, const VULKAN_HPP_NAMESPACE::SubpassSampleLocationsEXT* pPostSubpassSampleLocations_ = {}) VULKAN_HPP_NOEXCEPT + : attachmentInitialSampleLocationsCount( attachmentInitialSampleLocationsCount_ ), pAttachmentInitialSampleLocations( pAttachmentInitialSampleLocations_ ), postSubpassSampleLocationsCount( postSubpassSampleLocationsCount_ ), pPostSubpassSampleLocations( pPostSubpassSampleLocations_ ) + {} + + VULKAN_HPP_CONSTEXPR RenderPassSampleLocationsBeginInfoEXT( RenderPassSampleLocationsBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + RenderPassSampleLocationsBeginInfoEXT( VkRenderPassSampleLocationsBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassSampleLocationsBeginInfoEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachmentInitialSampleLocations_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & postSubpassSampleLocations_ = {} ) + : attachmentInitialSampleLocationsCount( static_cast( attachmentInitialSampleLocations_.size() ) ), pAttachmentInitialSampleLocations( attachmentInitialSampleLocations_.data() ), postSubpassSampleLocationsCount( static_cast( postSubpassSampleLocations_.size() ) ), pPostSubpassSampleLocations( postSubpassSampleLocations_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + RenderPassSampleLocationsBeginInfoEXT & operator=( VkRenderPassSampleLocationsBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT & operator=( RenderPassSampleLocationsBeginInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( RenderPassSampleLocationsBeginInfoEXT ) ); + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT & setAttachmentInitialSampleLocationsCount( uint32_t attachmentInitialSampleLocationsCount_ ) VULKAN_HPP_NOEXCEPT + { + attachmentInitialSampleLocationsCount = attachmentInitialSampleLocationsCount_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT & setPAttachmentInitialSampleLocations( const VULKAN_HPP_NAMESPACE::AttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + pAttachmentInitialSampleLocations = pAttachmentInitialSampleLocations_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassSampleLocationsBeginInfoEXT & setAttachmentInitialSampleLocations( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & attachmentInitialSampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + attachmentInitialSampleLocationsCount = static_cast( attachmentInitialSampleLocations_.size() ); + pAttachmentInitialSampleLocations = attachmentInitialSampleLocations_.data(); + return *this; + } +#endif // 
!defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + RenderPassSampleLocationsBeginInfoEXT & setPostSubpassSampleLocationsCount( uint32_t postSubpassSampleLocationsCount_ ) VULKAN_HPP_NOEXCEPT + { + postSubpassSampleLocationsCount = postSubpassSampleLocationsCount_; + return *this; + } + + RenderPassSampleLocationsBeginInfoEXT & setPPostSubpassSampleLocations( const VULKAN_HPP_NAMESPACE::SubpassSampleLocationsEXT* pPostSubpassSampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + pPostSubpassSampleLocations = pPostSubpassSampleLocations_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + RenderPassSampleLocationsBeginInfoEXT & setPostSubpassSampleLocations( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & postSubpassSampleLocations_ ) VULKAN_HPP_NOEXCEPT + { + postSubpassSampleLocationsCount = static_cast( postSubpassSampleLocations_.size() ); + pPostSubpassSampleLocations = postSubpassSampleLocations_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkRenderPassSampleLocationsBeginInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkRenderPassSampleLocationsBeginInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( RenderPassSampleLocationsBeginInfoEXT const& ) const = default; +#else + bool operator==( RenderPassSampleLocationsBeginInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( attachmentInitialSampleLocationsCount == rhs.attachmentInitialSampleLocationsCount ) + && ( pAttachmentInitialSampleLocations == rhs.pAttachmentInitialSampleLocations ) + && ( postSubpassSampleLocationsCount == rhs.postSubpassSampleLocationsCount ) + && ( pPostSubpassSampleLocations == rhs.pPostSubpassSampleLocations ); + } + + bool operator!=( RenderPassSampleLocationsBeginInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassSampleLocationsBeginInfoEXT; + const void* pNext = {}; + uint32_t attachmentInitialSampleLocationsCount = {}; + const VULKAN_HPP_NAMESPACE::AttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations = {}; + uint32_t postSubpassSampleLocationsCount = {}; + const VULKAN_HPP_NAMESPACE::SubpassSampleLocationsEXT* pPostSubpassSampleLocations = {}; + + }; + static_assert( sizeof( RenderPassSampleLocationsBeginInfoEXT ) == sizeof( VkRenderPassSampleLocationsBeginInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eRenderPassSampleLocationsBeginInfoEXT>
+  {
+    using Type = RenderPassSampleLocationsBeginInfoEXT;
+  };
+
+  struct RenderPassTransformBeginInfoQCOM
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eRenderPassTransformBeginInfoQCOM;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR RenderPassTransformBeginInfoQCOM(VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity) VULKAN_HPP_NOEXCEPT
+    : transform( transform_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR RenderPassTransformBeginInfoQCOM( RenderPassTransformBeginInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    RenderPassTransformBeginInfoQCOM( VkRenderPassTransformBeginInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    RenderPassTransformBeginInfoQCOM & operator=( VkRenderPassTransformBeginInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::RenderPassTransformBeginInfoQCOM const *>( &rhs );
+      return *this;
+    }
+
+    RenderPassTransformBeginInfoQCOM & operator=( RenderPassTransformBeginInfoQCOM const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( RenderPassTransformBeginInfoQCOM ) );
+      return *this;
+    }
+
+    RenderPassTransformBeginInfoQCOM & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    RenderPassTransformBeginInfoQCOM & setTransform( VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform_ ) VULKAN_HPP_NOEXCEPT
+    {
+      transform = transform_;
+      return *this;
+    }
+
+
+    operator VkRenderPassTransformBeginInfoQCOM const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkRenderPassTransformBeginInfoQCOM*>( this );
+    }
+
+    operator VkRenderPassTransformBeginInfoQCOM &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkRenderPassTransformBeginInfoQCOM*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( RenderPassTransformBeginInfoQCOM const& ) const = default;
+#else
+    bool operator==( RenderPassTransformBeginInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( transform == rhs.transform );
+    }
+
+    bool operator!=( RenderPassTransformBeginInfoQCOM const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eRenderPassTransformBeginInfoQCOM;
+    void* pNext = {};
+    VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR transform = VULKAN_HPP_NAMESPACE::SurfaceTransformFlagBitsKHR::eIdentity;
+
+  };
+  static_assert( sizeof( RenderPassTransformBeginInfoQCOM ) == sizeof( VkRenderPassTransformBeginInfoQCOM ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<RenderPassTransformBeginInfoQCOM>::value, "struct wrapper is not a standard layout!"
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eRenderPassTransformBeginInfoQCOM>
+  {
+    using Type = RenderPassTransformBeginInfoQCOM;
+  };
+
+  struct SamplerCustomBorderColorCreateInfoEXT
+  {
+    static const bool allowDuplicate = false;
+    static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSamplerCustomBorderColorCreateInfoEXT;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    SamplerCustomBorderColorCreateInfoEXT(VULKAN_HPP_NAMESPACE::ClearColorValue customBorderColor_ = {}, VULKAN_HPP_NAMESPACE::Format format_ = VULKAN_HPP_NAMESPACE::Format::eUndefined) VULKAN_HPP_NOEXCEPT
+    : customBorderColor( customBorderColor_ ), format( format_ )
+    {}
+
+    SamplerCustomBorderColorCreateInfoEXT( SamplerCustomBorderColorCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SamplerCustomBorderColorCreateInfoEXT( VkSamplerCustomBorderColorCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SamplerCustomBorderColorCreateInfoEXT & operator=( VkSamplerCustomBorderColorCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SamplerCustomBorderColorCreateInfoEXT const *>( &rhs );
+      return *this;
+    }
+
+    SamplerCustomBorderColorCreateInfoEXT & operator=( SamplerCustomBorderColorCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( SamplerCustomBorderColorCreateInfoEXT ) );
+      return *this;
+    }
+
+    SamplerCustomBorderColorCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pNext = pNext_;
+      return *this;
+    }
+
+    SamplerCustomBorderColorCreateInfoEXT & setCustomBorderColor( VULKAN_HPP_NAMESPACE::ClearColorValue const & customBorderColor_ ) VULKAN_HPP_NOEXCEPT
+    {
+      customBorderColor = customBorderColor_;
+      return *this;
+    }
+
+    SamplerCustomBorderColorCreateInfoEXT & setFormat( VULKAN_HPP_NAMESPACE::Format format_ ) VULKAN_HPP_NOEXCEPT
+    {
+      format = format_;
+      return *this;
+    }
+
+
+    operator VkSamplerCustomBorderColorCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSamplerCustomBorderColorCreateInfoEXT*>( this );
+    }
+
+    operator VkSamplerCustomBorderColorCreateInfoEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSamplerCustomBorderColorCreateInfoEXT*>( this );
+    }
+
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSamplerCustomBorderColorCreateInfoEXT;
+    const void* pNext = {};
+    VULKAN_HPP_NAMESPACE::ClearColorValue customBorderColor = {};
+    VULKAN_HPP_NAMESPACE::Format format = VULKAN_HPP_NAMESPACE::Format::eUndefined;
+
+  };
+  static_assert( sizeof( SamplerCustomBorderColorCreateInfoEXT ) == sizeof( VkSamplerCustomBorderColorCreateInfoEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<SamplerCustomBorderColorCreateInfoEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = SamplerCustomBorderColorCreateInfoEXT; + }; + + struct SamplerReductionModeCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSamplerReductionModeCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SamplerReductionModeCreateInfo(VULKAN_HPP_NAMESPACE::SamplerReductionMode reductionMode_ = VULKAN_HPP_NAMESPACE::SamplerReductionMode::eWeightedAverage) VULKAN_HPP_NOEXCEPT + : reductionMode( reductionMode_ ) + {} + + VULKAN_HPP_CONSTEXPR SamplerReductionModeCreateInfo( SamplerReductionModeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SamplerReductionModeCreateInfo( VkSamplerReductionModeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SamplerReductionModeCreateInfo & operator=( VkSamplerReductionModeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SamplerReductionModeCreateInfo & operator=( SamplerReductionModeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SamplerReductionModeCreateInfo ) ); + return *this; + } + + SamplerReductionModeCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SamplerReductionModeCreateInfo & setReductionMode( VULKAN_HPP_NAMESPACE::SamplerReductionMode reductionMode_ ) VULKAN_HPP_NOEXCEPT + { + reductionMode = reductionMode_; + return *this; + } + + + operator VkSamplerReductionModeCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSamplerReductionModeCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SamplerReductionModeCreateInfo const& ) const = default; +#else + bool operator==( SamplerReductionModeCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( reductionMode == rhs.reductionMode ); + } + + bool operator!=( SamplerReductionModeCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSamplerReductionModeCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SamplerReductionMode reductionMode = VULKAN_HPP_NAMESPACE::SamplerReductionMode::eWeightedAverage; + + }; + static_assert( sizeof( SamplerReductionModeCreateInfo ) == sizeof( VkSamplerReductionModeCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SamplerReductionModeCreateInfo; + }; + using SamplerReductionModeCreateInfoEXT = SamplerReductionModeCreateInfo; + + struct SamplerYcbcrConversionImageFormatProperties + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSamplerYcbcrConversionImageFormatProperties; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionImageFormatProperties(uint32_t combinedImageSamplerDescriptorCount_ = {}) VULKAN_HPP_NOEXCEPT + : combinedImageSamplerDescriptorCount( combinedImageSamplerDescriptorCount_ ) + {} + + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionImageFormatProperties( SamplerYcbcrConversionImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SamplerYcbcrConversionImageFormatProperties( VkSamplerYcbcrConversionImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SamplerYcbcrConversionImageFormatProperties & operator=( VkSamplerYcbcrConversionImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SamplerYcbcrConversionImageFormatProperties & operator=( SamplerYcbcrConversionImageFormatProperties const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SamplerYcbcrConversionImageFormatProperties ) ); + return *this; + } + + + operator VkSamplerYcbcrConversionImageFormatProperties const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSamplerYcbcrConversionImageFormatProperties &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SamplerYcbcrConversionImageFormatProperties const& ) const = default; +#else + bool operator==( SamplerYcbcrConversionImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( combinedImageSamplerDescriptorCount == rhs.combinedImageSamplerDescriptorCount ); + } + + bool operator!=( SamplerYcbcrConversionImageFormatProperties const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSamplerYcbcrConversionImageFormatProperties; + void* pNext = {}; + uint32_t combinedImageSamplerDescriptorCount = {}; + + }; + static_assert( sizeof( SamplerYcbcrConversionImageFormatProperties ) == sizeof( VkSamplerYcbcrConversionImageFormatProperties ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SamplerYcbcrConversionImageFormatProperties; + }; + using SamplerYcbcrConversionImageFormatPropertiesKHR = SamplerYcbcrConversionImageFormatProperties; + + struct SamplerYcbcrConversionInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSamplerYcbcrConversionInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionInfo(VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion conversion_ = {}) VULKAN_HPP_NOEXCEPT + : conversion( conversion_ ) + {} + + VULKAN_HPP_CONSTEXPR SamplerYcbcrConversionInfo( SamplerYcbcrConversionInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SamplerYcbcrConversionInfo( VkSamplerYcbcrConversionInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SamplerYcbcrConversionInfo & operator=( VkSamplerYcbcrConversionInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SamplerYcbcrConversionInfo & operator=( SamplerYcbcrConversionInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SamplerYcbcrConversionInfo ) ); + return *this; + } + + SamplerYcbcrConversionInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SamplerYcbcrConversionInfo & setConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion conversion_ ) VULKAN_HPP_NOEXCEPT + { + conversion = conversion_; + return *this; + } + + + operator VkSamplerYcbcrConversionInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSamplerYcbcrConversionInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SamplerYcbcrConversionInfo const& ) const = default; +#else + bool operator==( SamplerYcbcrConversionInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( conversion == rhs.conversion ); + } + + bool operator!=( SamplerYcbcrConversionInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSamplerYcbcrConversionInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion conversion = {}; + + }; + static_assert( sizeof( SamplerYcbcrConversionInfo ) == sizeof( VkSamplerYcbcrConversionInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SamplerYcbcrConversionInfo; + }; + using SamplerYcbcrConversionInfoKHR = SamplerYcbcrConversionInfo; + + struct SemaphoreTypeCreateInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSemaphoreTypeCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SemaphoreTypeCreateInfo(VULKAN_HPP_NAMESPACE::SemaphoreType semaphoreType_ = VULKAN_HPP_NAMESPACE::SemaphoreType::eBinary, uint64_t initialValue_ = {}) VULKAN_HPP_NOEXCEPT + : semaphoreType( semaphoreType_ ), initialValue( initialValue_ ) + {} + + VULKAN_HPP_CONSTEXPR SemaphoreTypeCreateInfo( SemaphoreTypeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SemaphoreTypeCreateInfo( VkSemaphoreTypeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SemaphoreTypeCreateInfo & operator=( VkSemaphoreTypeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SemaphoreTypeCreateInfo & operator=( SemaphoreTypeCreateInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SemaphoreTypeCreateInfo ) ); + return *this; + } + + SemaphoreTypeCreateInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SemaphoreTypeCreateInfo & setSemaphoreType( VULKAN_HPP_NAMESPACE::SemaphoreType semaphoreType_ ) VULKAN_HPP_NOEXCEPT + { + semaphoreType = semaphoreType_; + return *this; + } + + SemaphoreTypeCreateInfo & setInitialValue( uint64_t initialValue_ ) VULKAN_HPP_NOEXCEPT + { + initialValue = initialValue_; + return *this; + } + + + operator VkSemaphoreTypeCreateInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSemaphoreTypeCreateInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SemaphoreTypeCreateInfo const& ) const = default; +#else + bool operator==( SemaphoreTypeCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( semaphoreType == rhs.semaphoreType ) + && ( initialValue == rhs.initialValue ); + } + + bool operator!=( SemaphoreTypeCreateInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSemaphoreTypeCreateInfo; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SemaphoreType semaphoreType = VULKAN_HPP_NAMESPACE::SemaphoreType::eBinary; + uint64_t initialValue = {}; + + }; + static_assert( sizeof( SemaphoreTypeCreateInfo ) == sizeof( VkSemaphoreTypeCreateInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  template <>
+  struct CppType<StructureType, StructureType::eSemaphoreTypeCreateInfo>
+  {
+    using Type = SemaphoreTypeCreateInfo;
+  };
+  using SemaphoreTypeCreateInfoKHR = SemaphoreTypeCreateInfo;
+
+  struct SetStateFlagsIndirectCommandNV
+  {
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR SetStateFlagsIndirectCommandNV(uint32_t data_ = {}) VULKAN_HPP_NOEXCEPT
+      : data( data_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR SetStateFlagsIndirectCommandNV( SetStateFlagsIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    SetStateFlagsIndirectCommandNV( VkSetStateFlagsIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    SetStateFlagsIndirectCommandNV & operator=( VkSetStateFlagsIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::SetStateFlagsIndirectCommandNV const *>( &rhs );
+      return *this;
+    }
+
+    SetStateFlagsIndirectCommandNV & operator=( SetStateFlagsIndirectCommandNV const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( SetStateFlagsIndirectCommandNV ) );
+      return *this;
+    }
+
+    SetStateFlagsIndirectCommandNV & setData( uint32_t data_ ) VULKAN_HPP_NOEXCEPT
+    {
+      data = data_;
+      return *this;
+    }
+
+    operator VkSetStateFlagsIndirectCommandNV const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkSetStateFlagsIndirectCommandNV*>( this );
+    }
+
+    operator VkSetStateFlagsIndirectCommandNV &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkSetStateFlagsIndirectCommandNV*>( this );
+    }
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( SetStateFlagsIndirectCommandNV const& ) const = default;
+#else
+    bool operator==( SetStateFlagsIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( data == rhs.data );
+    }
+
+    bool operator!=( SetStateFlagsIndirectCommandNV const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    uint32_t data = {};
+  };
+  static_assert( sizeof( SetStateFlagsIndirectCommandNV ) == sizeof( VkSetStateFlagsIndirectCommandNV ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<SetStateFlagsIndirectCommandNV>::value, "struct wrapper is not a standard layout!"
); + + struct ShaderModuleValidationCacheCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eShaderModuleValidationCacheCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ShaderModuleValidationCacheCreateInfoEXT(VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache_ = {}) VULKAN_HPP_NOEXCEPT + : validationCache( validationCache_ ) + {} + + VULKAN_HPP_CONSTEXPR ShaderModuleValidationCacheCreateInfoEXT( ShaderModuleValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ShaderModuleValidationCacheCreateInfoEXT( VkShaderModuleValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ShaderModuleValidationCacheCreateInfoEXT & operator=( VkShaderModuleValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ShaderModuleValidationCacheCreateInfoEXT & operator=( ShaderModuleValidationCacheCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ShaderModuleValidationCacheCreateInfoEXT ) ); + return *this; + } + + ShaderModuleValidationCacheCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ShaderModuleValidationCacheCreateInfoEXT & setValidationCache( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache_ ) VULKAN_HPP_NOEXCEPT + { + validationCache = validationCache_; + return *this; + } + + + operator VkShaderModuleValidationCacheCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkShaderModuleValidationCacheCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ShaderModuleValidationCacheCreateInfoEXT const& ) const = default; +#else + bool operator==( ShaderModuleValidationCacheCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( validationCache == rhs.validationCache ); + } + + bool operator!=( ShaderModuleValidationCacheCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eShaderModuleValidationCacheCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache = {}; + + }; + static_assert( sizeof( ShaderModuleValidationCacheCreateInfoEXT ) == sizeof( VkShaderModuleValidationCacheCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ShaderModuleValidationCacheCreateInfoEXT; + }; + + struct ShaderResourceUsageAMD + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ShaderResourceUsageAMD(uint32_t numUsedVgprs_ = {}, uint32_t numUsedSgprs_ = {}, uint32_t ldsSizePerLocalWorkGroup_ = {}, size_t ldsUsageSizeInBytes_ = {}, size_t scratchMemUsageInBytes_ = {}) VULKAN_HPP_NOEXCEPT + : numUsedVgprs( numUsedVgprs_ ), numUsedSgprs( numUsedSgprs_ ), ldsSizePerLocalWorkGroup( ldsSizePerLocalWorkGroup_ ), ldsUsageSizeInBytes( ldsUsageSizeInBytes_ ), scratchMemUsageInBytes( scratchMemUsageInBytes_ ) + {} + + VULKAN_HPP_CONSTEXPR ShaderResourceUsageAMD( ShaderResourceUsageAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ShaderResourceUsageAMD( VkShaderResourceUsageAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ShaderResourceUsageAMD & operator=( VkShaderResourceUsageAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ShaderResourceUsageAMD & operator=( ShaderResourceUsageAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ShaderResourceUsageAMD ) ); + return *this; + } + + + operator VkShaderResourceUsageAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkShaderResourceUsageAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ShaderResourceUsageAMD const& ) const = default; +#else + bool operator==( ShaderResourceUsageAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( numUsedVgprs == rhs.numUsedVgprs ) + && ( numUsedSgprs == rhs.numUsedSgprs ) + && ( ldsSizePerLocalWorkGroup == rhs.ldsSizePerLocalWorkGroup ) + && ( ldsUsageSizeInBytes == rhs.ldsUsageSizeInBytes ) + && ( scratchMemUsageInBytes == rhs.scratchMemUsageInBytes ); + } + + bool operator!=( ShaderResourceUsageAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t numUsedVgprs = {}; + uint32_t numUsedSgprs = {}; + uint32_t ldsSizePerLocalWorkGroup = {}; + size_t ldsUsageSizeInBytes = {}; + size_t scratchMemUsageInBytes = {}; + + }; + static_assert( sizeof( ShaderResourceUsageAMD ) == sizeof( VkShaderResourceUsageAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
);
+
+  struct ShaderStatisticsInfoAMD
+  {
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR_14 ShaderStatisticsInfoAMD(VULKAN_HPP_NAMESPACE::ShaderStageFlags shaderStageMask_ = {}, VULKAN_HPP_NAMESPACE::ShaderResourceUsageAMD resourceUsage_ = {}, uint32_t numPhysicalVgprs_ = {}, uint32_t numPhysicalSgprs_ = {}, uint32_t numAvailableVgprs_ = {}, uint32_t numAvailableSgprs_ = {}, std::array<uint32_t, 3> const& computeWorkGroupSize_ = {}) VULKAN_HPP_NOEXCEPT
+      : shaderStageMask( shaderStageMask_ ), resourceUsage( resourceUsage_ ), numPhysicalVgprs( numPhysicalVgprs_ ), numPhysicalSgprs( numPhysicalSgprs_ ), numAvailableVgprs( numAvailableVgprs_ ), numAvailableSgprs( numAvailableSgprs_ ), computeWorkGroupSize( computeWorkGroupSize_ )
+    {}
+
+    VULKAN_HPP_CONSTEXPR_14 ShaderStatisticsInfoAMD( ShaderStatisticsInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default;
+
+    ShaderStatisticsInfoAMD( VkShaderStatisticsInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = rhs;
+    }
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+
+    ShaderStatisticsInfoAMD & operator=( VkShaderStatisticsInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      *this = *reinterpret_cast<VULKAN_HPP_NAMESPACE::ShaderStatisticsInfoAMD const *>( &rhs );
+      return *this;
+    }
+
+    ShaderStatisticsInfoAMD & operator=( ShaderStatisticsInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT
+    {
+      memcpy( static_cast<void *>( this ), &rhs, sizeof( ShaderStatisticsInfoAMD ) );
+      return *this;
+    }
+
+    operator VkShaderStatisticsInfoAMD const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkShaderStatisticsInfoAMD*>( this );
+    }
+
+    operator VkShaderStatisticsInfoAMD &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkShaderStatisticsInfoAMD*>( this );
+    }
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( ShaderStatisticsInfoAMD const& ) const = default;
+#else
+    bool operator==( ShaderStatisticsInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( shaderStageMask == rhs.shaderStageMask )
+          && ( resourceUsage == rhs.resourceUsage )
+          && ( numPhysicalVgprs == rhs.numPhysicalVgprs )
+          && ( numPhysicalSgprs == rhs.numPhysicalSgprs )
+          && ( numAvailableVgprs == rhs.numAvailableVgprs )
+          && ( numAvailableSgprs == rhs.numAvailableSgprs )
+          && ( computeWorkGroupSize == rhs.computeWorkGroupSize );
+    }
+
+    bool operator!=( ShaderStatisticsInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+  public:
+    VULKAN_HPP_NAMESPACE::ShaderStageFlags shaderStageMask = {};
+    VULKAN_HPP_NAMESPACE::ShaderResourceUsageAMD resourceUsage = {};
+    uint32_t numPhysicalVgprs = {};
+    uint32_t numPhysicalSgprs = {};
+    uint32_t numAvailableVgprs = {};
+    uint32_t numAvailableSgprs = {};
+    VULKAN_HPP_NAMESPACE::ArrayWrapper1D<uint32_t, 3> computeWorkGroupSize = {};
+  };
+  static_assert( sizeof( ShaderStatisticsInfoAMD ) == sizeof( VkShaderStatisticsInfoAMD ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<ShaderStatisticsInfoAMD>::value, "struct wrapper is not a standard layout!"
); + + struct SharedPresentSurfaceCapabilitiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSharedPresentSurfaceCapabilitiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SharedPresentSurfaceCapabilitiesKHR(VULKAN_HPP_NAMESPACE::ImageUsageFlags sharedPresentSupportedUsageFlags_ = {}) VULKAN_HPP_NOEXCEPT + : sharedPresentSupportedUsageFlags( sharedPresentSupportedUsageFlags_ ) + {} + + VULKAN_HPP_CONSTEXPR SharedPresentSurfaceCapabilitiesKHR( SharedPresentSurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SharedPresentSurfaceCapabilitiesKHR( VkSharedPresentSurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SharedPresentSurfaceCapabilitiesKHR & operator=( VkSharedPresentSurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SharedPresentSurfaceCapabilitiesKHR & operator=( SharedPresentSurfaceCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SharedPresentSurfaceCapabilitiesKHR ) ); + return *this; + } + + + operator VkSharedPresentSurfaceCapabilitiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSharedPresentSurfaceCapabilitiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SharedPresentSurfaceCapabilitiesKHR const& ) const = default; +#else + bool operator==( SharedPresentSurfaceCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( sharedPresentSupportedUsageFlags == rhs.sharedPresentSupportedUsageFlags ); + } + + bool operator!=( SharedPresentSurfaceCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSharedPresentSurfaceCapabilitiesKHR; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::ImageUsageFlags sharedPresentSupportedUsageFlags = {}; + + }; + static_assert( sizeof( SharedPresentSurfaceCapabilitiesKHR ) == sizeof( VkSharedPresentSurfaceCapabilitiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SharedPresentSurfaceCapabilitiesKHR; + }; + +#ifdef VK_USE_PLATFORM_GGP + struct StreamDescriptorSurfaceCreateInfoGGP + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eStreamDescriptorSurfaceCreateInfoGGP; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR StreamDescriptorSurfaceCreateInfoGGP(VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateFlagsGGP flags_ = {}, GgpStreamDescriptor streamDescriptor_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), streamDescriptor( streamDescriptor_ ) + {} + + VULKAN_HPP_CONSTEXPR StreamDescriptorSurfaceCreateInfoGGP( StreamDescriptorSurfaceCreateInfoGGP const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + StreamDescriptorSurfaceCreateInfoGGP( VkStreamDescriptorSurfaceCreateInfoGGP const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + StreamDescriptorSurfaceCreateInfoGGP & operator=( VkStreamDescriptorSurfaceCreateInfoGGP const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + StreamDescriptorSurfaceCreateInfoGGP & operator=( StreamDescriptorSurfaceCreateInfoGGP const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( StreamDescriptorSurfaceCreateInfoGGP ) ); + return *this; + } + + StreamDescriptorSurfaceCreateInfoGGP & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + StreamDescriptorSurfaceCreateInfoGGP & setFlags( VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateFlagsGGP flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + StreamDescriptorSurfaceCreateInfoGGP & setStreamDescriptor( GgpStreamDescriptor streamDescriptor_ ) VULKAN_HPP_NOEXCEPT + { + streamDescriptor = streamDescriptor_; + return *this; + } + + + operator VkStreamDescriptorSurfaceCreateInfoGGP const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkStreamDescriptorSurfaceCreateInfoGGP &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( StreamDescriptorSurfaceCreateInfoGGP const& ) const = default; +#else + bool operator==( StreamDescriptorSurfaceCreateInfoGGP const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( memcmp( &streamDescriptor, &rhs.streamDescriptor, sizeof( GgpStreamDescriptor ) ) == 0 ); + } + + bool operator!=( StreamDescriptorSurfaceCreateInfoGGP const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eStreamDescriptorSurfaceCreateInfoGGP; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateFlagsGGP flags = {}; + GgpStreamDescriptor streamDescriptor = {}; + + }; + static_assert( sizeof( StreamDescriptorSurfaceCreateInfoGGP ) == sizeof( VkStreamDescriptorSurfaceCreateInfoGGP ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = StreamDescriptorSurfaceCreateInfoGGP; + }; +#endif /*VK_USE_PLATFORM_GGP*/ + + struct SubpassDescriptionDepthStencilResolve + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSubpassDescriptionDepthStencilResolve; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SubpassDescriptionDepthStencilResolve(VULKAN_HPP_NAMESPACE::ResolveModeFlagBits depthResolveMode_ = VULKAN_HPP_NAMESPACE::ResolveModeFlagBits::eNone, VULKAN_HPP_NAMESPACE::ResolveModeFlagBits stencilResolveMode_ = VULKAN_HPP_NAMESPACE::ResolveModeFlagBits::eNone, const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilResolveAttachment_ = {}) VULKAN_HPP_NOEXCEPT + : depthResolveMode( depthResolveMode_ ), stencilResolveMode( stencilResolveMode_ ), pDepthStencilResolveAttachment( pDepthStencilResolveAttachment_ ) + {} + + VULKAN_HPP_CONSTEXPR SubpassDescriptionDepthStencilResolve( SubpassDescriptionDepthStencilResolve const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SubpassDescriptionDepthStencilResolve( VkSubpassDescriptionDepthStencilResolve const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SubpassDescriptionDepthStencilResolve & operator=( VkSubpassDescriptionDepthStencilResolve const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SubpassDescriptionDepthStencilResolve & operator=( SubpassDescriptionDepthStencilResolve const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SubpassDescriptionDepthStencilResolve ) ); + return *this; + } + + SubpassDescriptionDepthStencilResolve & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SubpassDescriptionDepthStencilResolve & setDepthResolveMode( VULKAN_HPP_NAMESPACE::ResolveModeFlagBits depthResolveMode_ ) VULKAN_HPP_NOEXCEPT + { + depthResolveMode = depthResolveMode_; + return *this; + } + + SubpassDescriptionDepthStencilResolve & setStencilResolveMode( VULKAN_HPP_NAMESPACE::ResolveModeFlagBits stencilResolveMode_ ) VULKAN_HPP_NOEXCEPT + { + stencilResolveMode = stencilResolveMode_; + return *this; + } + + SubpassDescriptionDepthStencilResolve & setPDepthStencilResolveAttachment( const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilResolveAttachment_ ) VULKAN_HPP_NOEXCEPT + { + pDepthStencilResolveAttachment = pDepthStencilResolveAttachment_; + return *this; + } + + + operator VkSubpassDescriptionDepthStencilResolve const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSubpassDescriptionDepthStencilResolve &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SubpassDescriptionDepthStencilResolve const& ) const = default; +#else + bool operator==( SubpassDescriptionDepthStencilResolve const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( depthResolveMode == rhs.depthResolveMode ) + && ( stencilResolveMode == rhs.stencilResolveMode ) + && ( pDepthStencilResolveAttachment == rhs.pDepthStencilResolveAttachment ); + } + + bool operator!=( SubpassDescriptionDepthStencilResolve const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = 
StructureType::eSubpassDescriptionDepthStencilResolve; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ResolveModeFlagBits depthResolveMode = VULKAN_HPP_NAMESPACE::ResolveModeFlagBits::eNone; + VULKAN_HPP_NAMESPACE::ResolveModeFlagBits stencilResolveMode = VULKAN_HPP_NAMESPACE::ResolveModeFlagBits::eNone; + const VULKAN_HPP_NAMESPACE::AttachmentReference2* pDepthStencilResolveAttachment = {}; + + }; + static_assert( sizeof( SubpassDescriptionDepthStencilResolve ) == sizeof( VkSubpassDescriptionDepthStencilResolve ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SubpassDescriptionDepthStencilResolve; + }; + using SubpassDescriptionDepthStencilResolveKHR = SubpassDescriptionDepthStencilResolve; + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct SurfaceCapabilitiesFullScreenExclusiveEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceCapabilitiesFullScreenExclusiveEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceCapabilitiesFullScreenExclusiveEXT(VULKAN_HPP_NAMESPACE::Bool32 fullScreenExclusiveSupported_ = {}) VULKAN_HPP_NOEXCEPT + : fullScreenExclusiveSupported( fullScreenExclusiveSupported_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceCapabilitiesFullScreenExclusiveEXT( SurfaceCapabilitiesFullScreenExclusiveEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceCapabilitiesFullScreenExclusiveEXT( VkSurfaceCapabilitiesFullScreenExclusiveEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceCapabilitiesFullScreenExclusiveEXT & operator=( VkSurfaceCapabilitiesFullScreenExclusiveEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceCapabilitiesFullScreenExclusiveEXT & operator=( SurfaceCapabilitiesFullScreenExclusiveEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceCapabilitiesFullScreenExclusiveEXT ) ); + return *this; + } + + SurfaceCapabilitiesFullScreenExclusiveEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SurfaceCapabilitiesFullScreenExclusiveEXT & setFullScreenExclusiveSupported( VULKAN_HPP_NAMESPACE::Bool32 fullScreenExclusiveSupported_ ) VULKAN_HPP_NOEXCEPT + { + fullScreenExclusiveSupported = fullScreenExclusiveSupported_; + return *this; + } + + + operator VkSurfaceCapabilitiesFullScreenExclusiveEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceCapabilitiesFullScreenExclusiveEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceCapabilitiesFullScreenExclusiveEXT const& ) const = default; +#else + bool operator==( SurfaceCapabilitiesFullScreenExclusiveEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fullScreenExclusiveSupported == rhs.fullScreenExclusiveSupported ); + } + + bool operator!=( SurfaceCapabilitiesFullScreenExclusiveEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceCapabilitiesFullScreenExclusiveEXT; + void* pNext = {}; + 
VULKAN_HPP_NAMESPACE::Bool32 fullScreenExclusiveSupported = {}; + + }; + static_assert( sizeof( SurfaceCapabilitiesFullScreenExclusiveEXT ) == sizeof( VkSurfaceCapabilitiesFullScreenExclusiveEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = SurfaceCapabilitiesFullScreenExclusiveEXT; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct SurfaceFullScreenExclusiveInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceFullScreenExclusiveInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceFullScreenExclusiveInfoEXT(VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT fullScreenExclusive_ = VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT::eDefault) VULKAN_HPP_NOEXCEPT + : fullScreenExclusive( fullScreenExclusive_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceFullScreenExclusiveInfoEXT( SurfaceFullScreenExclusiveInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceFullScreenExclusiveInfoEXT( VkSurfaceFullScreenExclusiveInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceFullScreenExclusiveInfoEXT & operator=( VkSurfaceFullScreenExclusiveInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceFullScreenExclusiveInfoEXT & operator=( SurfaceFullScreenExclusiveInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceFullScreenExclusiveInfoEXT ) ); + return *this; + } + + SurfaceFullScreenExclusiveInfoEXT & setPNext( void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SurfaceFullScreenExclusiveInfoEXT & setFullScreenExclusive( VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT fullScreenExclusive_ ) VULKAN_HPP_NOEXCEPT + { + fullScreenExclusive = fullScreenExclusive_; + return *this; + } + + + operator VkSurfaceFullScreenExclusiveInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceFullScreenExclusiveInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceFullScreenExclusiveInfoEXT const& ) const = default; +#else + bool operator==( SurfaceFullScreenExclusiveInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( fullScreenExclusive == rhs.fullScreenExclusive ); + } + + bool operator!=( SurfaceFullScreenExclusiveInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceFullScreenExclusiveInfoEXT; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT fullScreenExclusive = VULKAN_HPP_NAMESPACE::FullScreenExclusiveEXT::eDefault; + + }; + static_assert( sizeof( SurfaceFullScreenExclusiveInfoEXT ) == sizeof( VkSurfaceFullScreenExclusiveInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SurfaceFullScreenExclusiveInfoEXT; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct SurfaceFullScreenExclusiveWin32InfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceFullScreenExclusiveWin32InfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceFullScreenExclusiveWin32InfoEXT(HMONITOR hmonitor_ = {}) VULKAN_HPP_NOEXCEPT + : hmonitor( hmonitor_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceFullScreenExclusiveWin32InfoEXT( SurfaceFullScreenExclusiveWin32InfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceFullScreenExclusiveWin32InfoEXT( VkSurfaceFullScreenExclusiveWin32InfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceFullScreenExclusiveWin32InfoEXT & operator=( VkSurfaceFullScreenExclusiveWin32InfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceFullScreenExclusiveWin32InfoEXT & operator=( SurfaceFullScreenExclusiveWin32InfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceFullScreenExclusiveWin32InfoEXT ) ); + return *this; + } + + SurfaceFullScreenExclusiveWin32InfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SurfaceFullScreenExclusiveWin32InfoEXT & setHmonitor( HMONITOR hmonitor_ ) VULKAN_HPP_NOEXCEPT + { + hmonitor = hmonitor_; + return *this; + } + + + operator VkSurfaceFullScreenExclusiveWin32InfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceFullScreenExclusiveWin32InfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceFullScreenExclusiveWin32InfoEXT const& ) const = default; +#else + bool operator==( SurfaceFullScreenExclusiveWin32InfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( hmonitor == rhs.hmonitor ); + } + + bool operator!=( SurfaceFullScreenExclusiveWin32InfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceFullScreenExclusiveWin32InfoEXT; + const void* pNext = {}; + HMONITOR hmonitor = {}; + + }; + static_assert( sizeof( SurfaceFullScreenExclusiveWin32InfoEXT ) == sizeof( VkSurfaceFullScreenExclusiveWin32InfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SurfaceFullScreenExclusiveWin32InfoEXT; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct SurfaceProtectedCapabilitiesKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSurfaceProtectedCapabilitiesKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SurfaceProtectedCapabilitiesKHR(VULKAN_HPP_NAMESPACE::Bool32 supportsProtected_ = {}) VULKAN_HPP_NOEXCEPT + : supportsProtected( supportsProtected_ ) + {} + + VULKAN_HPP_CONSTEXPR SurfaceProtectedCapabilitiesKHR( SurfaceProtectedCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SurfaceProtectedCapabilitiesKHR( VkSurfaceProtectedCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SurfaceProtectedCapabilitiesKHR & operator=( VkSurfaceProtectedCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SurfaceProtectedCapabilitiesKHR & operator=( SurfaceProtectedCapabilitiesKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SurfaceProtectedCapabilitiesKHR ) ); + return *this; + } + + SurfaceProtectedCapabilitiesKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SurfaceProtectedCapabilitiesKHR & setSupportsProtected( VULKAN_HPP_NAMESPACE::Bool32 supportsProtected_ ) VULKAN_HPP_NOEXCEPT + { + supportsProtected = supportsProtected_; + return *this; + } + + + operator VkSurfaceProtectedCapabilitiesKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSurfaceProtectedCapabilitiesKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SurfaceProtectedCapabilitiesKHR const& ) const = default; +#else + bool operator==( SurfaceProtectedCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( supportsProtected == rhs.supportsProtected ); + } + + bool operator!=( SurfaceProtectedCapabilitiesKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSurfaceProtectedCapabilitiesKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 supportsProtected = {}; + + }; + static_assert( sizeof( SurfaceProtectedCapabilitiesKHR ) == sizeof( VkSurfaceProtectedCapabilitiesKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SurfaceProtectedCapabilitiesKHR; + }; + + struct SwapchainCounterCreateInfoEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSwapchainCounterCreateInfoEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SwapchainCounterCreateInfoEXT(VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT surfaceCounters_ = {}) VULKAN_HPP_NOEXCEPT + : surfaceCounters( surfaceCounters_ ) + {} + + VULKAN_HPP_CONSTEXPR SwapchainCounterCreateInfoEXT( SwapchainCounterCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SwapchainCounterCreateInfoEXT( VkSwapchainCounterCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SwapchainCounterCreateInfoEXT & operator=( VkSwapchainCounterCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SwapchainCounterCreateInfoEXT & operator=( SwapchainCounterCreateInfoEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SwapchainCounterCreateInfoEXT ) ); + return *this; + } + + SwapchainCounterCreateInfoEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SwapchainCounterCreateInfoEXT & setSurfaceCounters( VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT surfaceCounters_ ) VULKAN_HPP_NOEXCEPT + { + surfaceCounters = surfaceCounters_; + return *this; + } + + + operator VkSwapchainCounterCreateInfoEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSwapchainCounterCreateInfoEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SwapchainCounterCreateInfoEXT const& ) const = default; +#else + bool operator==( SwapchainCounterCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( surfaceCounters == rhs.surfaceCounters ); + } + + bool operator!=( SwapchainCounterCreateInfoEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSwapchainCounterCreateInfoEXT; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::SurfaceCounterFlagsEXT surfaceCounters = {}; + + }; + static_assert( sizeof( SwapchainCounterCreateInfoEXT ) == sizeof( VkSwapchainCounterCreateInfoEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SwapchainCounterCreateInfoEXT; + }; + + struct SwapchainDisplayNativeHdrCreateInfoAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eSwapchainDisplayNativeHdrCreateInfoAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR SwapchainDisplayNativeHdrCreateInfoAMD(VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable_ = {}) VULKAN_HPP_NOEXCEPT + : localDimmingEnable( localDimmingEnable_ ) + {} + + VULKAN_HPP_CONSTEXPR SwapchainDisplayNativeHdrCreateInfoAMD( SwapchainDisplayNativeHdrCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + SwapchainDisplayNativeHdrCreateInfoAMD( VkSwapchainDisplayNativeHdrCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + SwapchainDisplayNativeHdrCreateInfoAMD & operator=( VkSwapchainDisplayNativeHdrCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + SwapchainDisplayNativeHdrCreateInfoAMD & operator=( SwapchainDisplayNativeHdrCreateInfoAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( SwapchainDisplayNativeHdrCreateInfoAMD ) ); + return *this; + } + + SwapchainDisplayNativeHdrCreateInfoAMD & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + SwapchainDisplayNativeHdrCreateInfoAMD & setLocalDimmingEnable( VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable_ ) VULKAN_HPP_NOEXCEPT + { + localDimmingEnable = localDimmingEnable_; + return *this; + } + + + operator VkSwapchainDisplayNativeHdrCreateInfoAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkSwapchainDisplayNativeHdrCreateInfoAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( SwapchainDisplayNativeHdrCreateInfoAMD const& ) const = default; +#else + bool operator==( SwapchainDisplayNativeHdrCreateInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( localDimmingEnable == rhs.localDimmingEnable ); + } + + bool operator!=( SwapchainDisplayNativeHdrCreateInfoAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eSwapchainDisplayNativeHdrCreateInfoAMD; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable = {}; + + }; + static_assert( sizeof( SwapchainDisplayNativeHdrCreateInfoAMD ) == sizeof( VkSwapchainDisplayNativeHdrCreateInfoAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = SwapchainDisplayNativeHdrCreateInfoAMD; + }; + + struct TextureLODGatherFormatPropertiesAMD + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eTextureLodGatherFormatPropertiesAMD; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR TextureLODGatherFormatPropertiesAMD(VULKAN_HPP_NAMESPACE::Bool32 supportsTextureGatherLODBiasAMD_ = {}) VULKAN_HPP_NOEXCEPT + : supportsTextureGatherLODBiasAMD( supportsTextureGatherLODBiasAMD_ ) + {} + + VULKAN_HPP_CONSTEXPR TextureLODGatherFormatPropertiesAMD( TextureLODGatherFormatPropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + TextureLODGatherFormatPropertiesAMD( VkTextureLODGatherFormatPropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + TextureLODGatherFormatPropertiesAMD & operator=( VkTextureLODGatherFormatPropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + TextureLODGatherFormatPropertiesAMD & operator=( TextureLODGatherFormatPropertiesAMD const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( TextureLODGatherFormatPropertiesAMD ) ); + return *this; + } + + + operator VkTextureLODGatherFormatPropertiesAMD const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkTextureLODGatherFormatPropertiesAMD &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( TextureLODGatherFormatPropertiesAMD const& ) const = default; +#else + bool operator==( TextureLODGatherFormatPropertiesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( supportsTextureGatherLODBiasAMD == rhs.supportsTextureGatherLODBiasAMD ); + } + + bool operator!=( TextureLODGatherFormatPropertiesAMD const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eTextureLodGatherFormatPropertiesAMD; + void* pNext = {}; + VULKAN_HPP_NAMESPACE::Bool32 supportsTextureGatherLODBiasAMD = {}; + + }; + static_assert( sizeof( TextureLODGatherFormatPropertiesAMD ) == sizeof( VkTextureLODGatherFormatPropertiesAMD ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = TextureLODGatherFormatPropertiesAMD; + }; + + struct TimelineSemaphoreSubmitInfo + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eTimelineSemaphoreSubmitInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR TimelineSemaphoreSubmitInfo(uint32_t waitSemaphoreValueCount_ = {}, const uint64_t* pWaitSemaphoreValues_ = {}, uint32_t signalSemaphoreValueCount_ = {}, const uint64_t* pSignalSemaphoreValues_ = {}) VULKAN_HPP_NOEXCEPT + : waitSemaphoreValueCount( waitSemaphoreValueCount_ ), pWaitSemaphoreValues( pWaitSemaphoreValues_ ), signalSemaphoreValueCount( signalSemaphoreValueCount_ ), pSignalSemaphoreValues( pSignalSemaphoreValues_ ) + {} + + VULKAN_HPP_CONSTEXPR TimelineSemaphoreSubmitInfo( TimelineSemaphoreSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + TimelineSemaphoreSubmitInfo( VkTimelineSemaphoreSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + TimelineSemaphoreSubmitInfo( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphoreValues_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & signalSemaphoreValues_ = {} ) + : waitSemaphoreValueCount( static_cast( waitSemaphoreValues_.size() ) ), pWaitSemaphoreValues( waitSemaphoreValues_.data() ), signalSemaphoreValueCount( static_cast( signalSemaphoreValues_.size() ) ), pSignalSemaphoreValues( signalSemaphoreValues_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + TimelineSemaphoreSubmitInfo & operator=( VkTimelineSemaphoreSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + TimelineSemaphoreSubmitInfo & operator=( TimelineSemaphoreSubmitInfo const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( TimelineSemaphoreSubmitInfo ) ); + return *this; + } + + TimelineSemaphoreSubmitInfo & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + TimelineSemaphoreSubmitInfo & setWaitSemaphoreValueCount( uint32_t waitSemaphoreValueCount_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreValueCount = waitSemaphoreValueCount_; + return *this; + } + + TimelineSemaphoreSubmitInfo & setPWaitSemaphoreValues( const uint64_t* pWaitSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + pWaitSemaphoreValues = pWaitSemaphoreValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + TimelineSemaphoreSubmitInfo & setWaitSemaphoreValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & waitSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + waitSemaphoreValueCount = static_cast( waitSemaphoreValues_.size() ); + pWaitSemaphoreValues = waitSemaphoreValues_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + TimelineSemaphoreSubmitInfo & setSignalSemaphoreValueCount( uint32_t signalSemaphoreValueCount_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreValueCount = signalSemaphoreValueCount_; + return *this; + } + + TimelineSemaphoreSubmitInfo & setPSignalSemaphoreValues( const uint64_t* pSignalSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + pSignalSemaphoreValues = pSignalSemaphoreValues_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + TimelineSemaphoreSubmitInfo & setSignalSemaphoreValues( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & 
signalSemaphoreValues_ ) VULKAN_HPP_NOEXCEPT + { + signalSemaphoreValueCount = static_cast( signalSemaphoreValues_.size() ); + pSignalSemaphoreValues = signalSemaphoreValues_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkTimelineSemaphoreSubmitInfo const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkTimelineSemaphoreSubmitInfo &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( TimelineSemaphoreSubmitInfo const& ) const = default; +#else + bool operator==( TimelineSemaphoreSubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( waitSemaphoreValueCount == rhs.waitSemaphoreValueCount ) + && ( pWaitSemaphoreValues == rhs.pWaitSemaphoreValues ) + && ( signalSemaphoreValueCount == rhs.signalSemaphoreValueCount ) + && ( pSignalSemaphoreValues == rhs.pSignalSemaphoreValues ); + } + + bool operator!=( TimelineSemaphoreSubmitInfo const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eTimelineSemaphoreSubmitInfo; + const void* pNext = {}; + uint32_t waitSemaphoreValueCount = {}; + const uint64_t* pWaitSemaphoreValues = {}; + uint32_t signalSemaphoreValueCount = {}; + const uint64_t* pSignalSemaphoreValues = {}; + + }; + static_assert( sizeof( TimelineSemaphoreSubmitInfo ) == sizeof( VkTimelineSemaphoreSubmitInfo ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = TimelineSemaphoreSubmitInfo; + }; + using TimelineSemaphoreSubmitInfoKHR = TimelineSemaphoreSubmitInfo; + + struct TraceRaysIndirectCommandKHR + { + + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR TraceRaysIndirectCommandKHR(uint32_t width_ = {}, uint32_t height_ = {}, uint32_t depth_ = {}) VULKAN_HPP_NOEXCEPT + : width( width_ ), height( height_ ), depth( depth_ ) + {} + + VULKAN_HPP_CONSTEXPR TraceRaysIndirectCommandKHR( TraceRaysIndirectCommandKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + TraceRaysIndirectCommandKHR( VkTraceRaysIndirectCommandKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + + explicit TraceRaysIndirectCommandKHR( Extent2D const& extent2D, uint32_t depth_ = {} ) + : width( extent2D.width ) + , height( extent2D.height ) + , depth( depth_ ) + {} +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + TraceRaysIndirectCommandKHR & operator=( VkTraceRaysIndirectCommandKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + TraceRaysIndirectCommandKHR & operator=( TraceRaysIndirectCommandKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( TraceRaysIndirectCommandKHR ) ); + return *this; + } + + TraceRaysIndirectCommandKHR & setWidth( uint32_t width_ ) VULKAN_HPP_NOEXCEPT + { + width = width_; + return *this; + } + + TraceRaysIndirectCommandKHR & setHeight( uint32_t height_ ) VULKAN_HPP_NOEXCEPT + { + height = height_; + return *this; + } + + TraceRaysIndirectCommandKHR & setDepth( uint32_t depth_ ) VULKAN_HPP_NOEXCEPT + { + depth = depth_; + return *this; + } + + + operator VkTraceRaysIndirectCommandKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator 
VkTraceRaysIndirectCommandKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( TraceRaysIndirectCommandKHR const& ) const = default; +#else + bool operator==( TraceRaysIndirectCommandKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( width == rhs.width ) + && ( height == rhs.height ) + && ( depth == rhs.depth ); + } + + bool operator!=( TraceRaysIndirectCommandKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + uint32_t width = {}; + uint32_t height = {}; + uint32_t depth = {}; + + }; + static_assert( sizeof( TraceRaysIndirectCommandKHR ) == sizeof( VkTraceRaysIndirectCommandKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + struct ValidationFeaturesEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eValidationFeaturesEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ValidationFeaturesEXT(uint32_t enabledValidationFeatureCount_ = {}, const VULKAN_HPP_NAMESPACE::ValidationFeatureEnableEXT* pEnabledValidationFeatures_ = {}, uint32_t disabledValidationFeatureCount_ = {}, const VULKAN_HPP_NAMESPACE::ValidationFeatureDisableEXT* pDisabledValidationFeatures_ = {}) VULKAN_HPP_NOEXCEPT + : enabledValidationFeatureCount( enabledValidationFeatureCount_ ), pEnabledValidationFeatures( pEnabledValidationFeatures_ ), disabledValidationFeatureCount( disabledValidationFeatureCount_ ), pDisabledValidationFeatures( pDisabledValidationFeatures_ ) + {} + + VULKAN_HPP_CONSTEXPR ValidationFeaturesEXT( ValidationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ValidationFeaturesEXT( VkValidationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ValidationFeaturesEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & enabledValidationFeatures_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & disabledValidationFeatures_ = {} ) + : enabledValidationFeatureCount( static_cast( enabledValidationFeatures_.size() ) ), pEnabledValidationFeatures( enabledValidationFeatures_.data() ), disabledValidationFeatureCount( static_cast( disabledValidationFeatures_.size() ) ), pDisabledValidationFeatures( disabledValidationFeatures_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ValidationFeaturesEXT & operator=( VkValidationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ValidationFeaturesEXT & operator=( ValidationFeaturesEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ValidationFeaturesEXT ) ); + return *this; + } + + ValidationFeaturesEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ValidationFeaturesEXT & setEnabledValidationFeatureCount( uint32_t enabledValidationFeatureCount_ ) VULKAN_HPP_NOEXCEPT + { + enabledValidationFeatureCount = enabledValidationFeatureCount_; + return *this; + } + + ValidationFeaturesEXT & setPEnabledValidationFeatures( const VULKAN_HPP_NAMESPACE::ValidationFeatureEnableEXT* pEnabledValidationFeatures_ ) VULKAN_HPP_NOEXCEPT + { + pEnabledValidationFeatures = pEnabledValidationFeatures_; + return *this; + } + +#if 
!defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ValidationFeaturesEXT & setEnabledValidationFeatures( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & enabledValidationFeatures_ ) VULKAN_HPP_NOEXCEPT + { + enabledValidationFeatureCount = static_cast( enabledValidationFeatures_.size() ); + pEnabledValidationFeatures = enabledValidationFeatures_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + ValidationFeaturesEXT & setDisabledValidationFeatureCount( uint32_t disabledValidationFeatureCount_ ) VULKAN_HPP_NOEXCEPT + { + disabledValidationFeatureCount = disabledValidationFeatureCount_; + return *this; + } + + ValidationFeaturesEXT & setPDisabledValidationFeatures( const VULKAN_HPP_NAMESPACE::ValidationFeatureDisableEXT* pDisabledValidationFeatures_ ) VULKAN_HPP_NOEXCEPT + { + pDisabledValidationFeatures = pDisabledValidationFeatures_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ValidationFeaturesEXT & setDisabledValidationFeatures( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & disabledValidationFeatures_ ) VULKAN_HPP_NOEXCEPT + { + disabledValidationFeatureCount = static_cast( disabledValidationFeatures_.size() ); + pDisabledValidationFeatures = disabledValidationFeatures_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkValidationFeaturesEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkValidationFeaturesEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ValidationFeaturesEXT const& ) const = default; +#else + bool operator==( ValidationFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( enabledValidationFeatureCount == rhs.enabledValidationFeatureCount ) + && ( pEnabledValidationFeatures == rhs.pEnabledValidationFeatures ) + && ( disabledValidationFeatureCount == rhs.disabledValidationFeatureCount ) + && ( pDisabledValidationFeatures == rhs.pDisabledValidationFeatures ); + } + + bool operator!=( ValidationFeaturesEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eValidationFeaturesEXT; + const void* pNext = {}; + uint32_t enabledValidationFeatureCount = {}; + const VULKAN_HPP_NAMESPACE::ValidationFeatureEnableEXT* pEnabledValidationFeatures = {}; + uint32_t disabledValidationFeatureCount = {}; + const VULKAN_HPP_NAMESPACE::ValidationFeatureDisableEXT* pDisabledValidationFeatures = {}; + + }; + static_assert( sizeof( ValidationFeaturesEXT ) == sizeof( VkValidationFeaturesEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ValidationFeaturesEXT; + }; + + struct ValidationFlagsEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eValidationFlagsEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ValidationFlagsEXT(uint32_t disabledValidationCheckCount_ = {}, const VULKAN_HPP_NAMESPACE::ValidationCheckEXT* pDisabledValidationChecks_ = {}) VULKAN_HPP_NOEXCEPT + : disabledValidationCheckCount( disabledValidationCheckCount_ ), pDisabledValidationChecks( pDisabledValidationChecks_ ) + {} + + VULKAN_HPP_CONSTEXPR ValidationFlagsEXT( ValidationFlagsEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ValidationFlagsEXT( VkValidationFlagsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ValidationFlagsEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & disabledValidationChecks_ ) + : disabledValidationCheckCount( static_cast( disabledValidationChecks_.size() ) ), pDisabledValidationChecks( disabledValidationChecks_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ValidationFlagsEXT & operator=( VkValidationFlagsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ValidationFlagsEXT & operator=( ValidationFlagsEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ValidationFlagsEXT ) ); + return *this; + } + + ValidationFlagsEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ValidationFlagsEXT & setDisabledValidationCheckCount( uint32_t disabledValidationCheckCount_ ) VULKAN_HPP_NOEXCEPT + { + disabledValidationCheckCount = disabledValidationCheckCount_; + return *this; + } + + ValidationFlagsEXT & setPDisabledValidationChecks( const VULKAN_HPP_NAMESPACE::ValidationCheckEXT* pDisabledValidationChecks_ ) VULKAN_HPP_NOEXCEPT + { + pDisabledValidationChecks = pDisabledValidationChecks_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + ValidationFlagsEXT & setDisabledValidationChecks( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & disabledValidationChecks_ ) VULKAN_HPP_NOEXCEPT + { + disabledValidationCheckCount = static_cast( disabledValidationChecks_.size() ); + pDisabledValidationChecks = disabledValidationChecks_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkValidationFlagsEXT const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkValidationFlagsEXT &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ValidationFlagsEXT const& ) const = default; +#else + bool operator==( ValidationFlagsEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( disabledValidationCheckCount == rhs.disabledValidationCheckCount ) + && ( pDisabledValidationChecks == rhs.pDisabledValidationChecks ); + } + + bool operator!=( ValidationFlagsEXT const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eValidationFlagsEXT; + const void* pNext = {}; + uint32_t disabledValidationCheckCount = {}; + const 
VULKAN_HPP_NAMESPACE::ValidationCheckEXT* pDisabledValidationChecks = {}; + + }; + static_assert( sizeof( ValidationFlagsEXT ) == sizeof( VkValidationFlagsEXT ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = ValidationFlagsEXT; + }; + +#ifdef VK_USE_PLATFORM_VI_NN + struct ViSurfaceCreateInfoNN + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eViSurfaceCreateInfoNN; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR ViSurfaceCreateInfoNN(VULKAN_HPP_NAMESPACE::ViSurfaceCreateFlagsNN flags_ = {}, void* window_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), window( window_ ) + {} + + VULKAN_HPP_CONSTEXPR ViSurfaceCreateInfoNN( ViSurfaceCreateInfoNN const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + ViSurfaceCreateInfoNN( VkViSurfaceCreateInfoNN const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + ViSurfaceCreateInfoNN & operator=( VkViSurfaceCreateInfoNN const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + ViSurfaceCreateInfoNN & operator=( ViSurfaceCreateInfoNN const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( ViSurfaceCreateInfoNN ) ); + return *this; + } + + ViSurfaceCreateInfoNN & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + ViSurfaceCreateInfoNN & setFlags( VULKAN_HPP_NAMESPACE::ViSurfaceCreateFlagsNN flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + ViSurfaceCreateInfoNN & setWindow( void* window_ ) VULKAN_HPP_NOEXCEPT + { + window = window_; + return *this; + } + + + operator VkViSurfaceCreateInfoNN const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkViSurfaceCreateInfoNN &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( ViSurfaceCreateInfoNN const& ) const = default; +#else + bool operator==( ViSurfaceCreateInfoNN const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( window == rhs.window ); + } + + bool operator!=( ViSurfaceCreateInfoNN const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eViSurfaceCreateInfoNN; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::ViSurfaceCreateFlagsNN flags = {}; + void* window = {}; + + }; + static_assert( sizeof( ViSurfaceCreateInfoNN ) == sizeof( VkViSurfaceCreateInfoNN ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = ViSurfaceCreateInfoNN; + }; +#endif /*VK_USE_PLATFORM_VI_NN*/ + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + struct WaylandSurfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWaylandSurfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR WaylandSurfaceCreateInfoKHR(VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateFlagsKHR flags_ = {}, struct wl_display* display_ = {}, struct wl_surface* surface_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), display( display_ ), surface( surface_ ) + {} + + VULKAN_HPP_CONSTEXPR WaylandSurfaceCreateInfoKHR( WaylandSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + WaylandSurfaceCreateInfoKHR( VkWaylandSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + WaylandSurfaceCreateInfoKHR & operator=( VkWaylandSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + WaylandSurfaceCreateInfoKHR & operator=( WaylandSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( WaylandSurfaceCreateInfoKHR ) ); + return *this; + } + + WaylandSurfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + WaylandSurfaceCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + WaylandSurfaceCreateInfoKHR & setDisplay( struct wl_display* display_ ) VULKAN_HPP_NOEXCEPT + { + display = display_; + return *this; + } + + WaylandSurfaceCreateInfoKHR & setSurface( struct wl_surface* surface_ ) VULKAN_HPP_NOEXCEPT + { + surface = surface_; + return *this; + } + + + operator VkWaylandSurfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWaylandSurfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( WaylandSurfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( WaylandSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( display == rhs.display ) + && ( surface == rhs.surface ); + } + + bool operator!=( WaylandSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWaylandSurfaceCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateFlagsKHR flags = {}; + struct wl_display* display = {}; + struct wl_surface* surface = {}; + + }; + static_assert( sizeof( WaylandSurfaceCreateInfoKHR ) == sizeof( VkWaylandSurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = WaylandSurfaceCreateInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct Win32KeyedMutexAcquireReleaseInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWin32KeyedMutexAcquireReleaseInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Win32KeyedMutexAcquireReleaseInfoKHR(uint32_t acquireCount_ = {}, const VULKAN_HPP_NAMESPACE::DeviceMemory* pAcquireSyncs_ = {}, const uint64_t* pAcquireKeys_ = {}, const uint32_t* pAcquireTimeouts_ = {}, uint32_t releaseCount_ = {}, const VULKAN_HPP_NAMESPACE::DeviceMemory* pReleaseSyncs_ = {}, const uint64_t* pReleaseKeys_ = {}) VULKAN_HPP_NOEXCEPT + : acquireCount( acquireCount_ ), pAcquireSyncs( pAcquireSyncs_ ), pAcquireKeys( pAcquireKeys_ ), pAcquireTimeouts( pAcquireTimeouts_ ), releaseCount( releaseCount_ ), pReleaseSyncs( pReleaseSyncs_ ), pReleaseKeys( pReleaseKeys_ ) + {} + + VULKAN_HPP_CONSTEXPR Win32KeyedMutexAcquireReleaseInfoKHR( Win32KeyedMutexAcquireReleaseInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Win32KeyedMutexAcquireReleaseInfoKHR( VkWin32KeyedMutexAcquireReleaseInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireSyncs_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireKeys_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireTimeouts_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseSyncs_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseKeys_ = {} ) + : acquireCount( static_cast( acquireSyncs_.size() ) ), pAcquireSyncs( acquireSyncs_.data() ), pAcquireKeys( acquireKeys_.data() ), pAcquireTimeouts( acquireTimeouts_.data() ), releaseCount( static_cast( releaseSyncs_.size() ) ), pReleaseSyncs( releaseSyncs_.data() ), pReleaseKeys( releaseKeys_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( acquireSyncs_.size() == acquireKeys_.size() ); + VULKAN_HPP_ASSERT( acquireSyncs_.size() == acquireTimeouts_.size() ); + VULKAN_HPP_ASSERT( acquireKeys_.size() == acquireTimeouts_.size() ); +#else + if ( acquireSyncs_.size() != acquireKeys_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoKHR::Win32KeyedMutexAcquireReleaseInfoKHR: acquireSyncs_.size() != acquireKeys_.size()" ); + } + if ( acquireSyncs_.size() != acquireTimeouts_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoKHR::Win32KeyedMutexAcquireReleaseInfoKHR: acquireSyncs_.size() != acquireTimeouts_.size()" ); + } + if ( acquireKeys_.size() != acquireTimeouts_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoKHR::Win32KeyedMutexAcquireReleaseInfoKHR: acquireKeys_.size() != acquireTimeouts_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( releaseSyncs_.size() == releaseKeys_.size() ); +#else + if ( releaseSyncs_.size() != releaseKeys_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoKHR::Win32KeyedMutexAcquireReleaseInfoKHR: releaseSyncs_.size() != releaseKeys_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) 
+#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Win32KeyedMutexAcquireReleaseInfoKHR & operator=( VkWin32KeyedMutexAcquireReleaseInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR & operator=( Win32KeyedMutexAcquireReleaseInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( Win32KeyedMutexAcquireReleaseInfoKHR ) ); + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR & setAcquireCount( uint32_t acquireCount_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = acquireCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR & setPAcquireSyncs( const VULKAN_HPP_NAMESPACE::DeviceMemory* pAcquireSyncs_ ) VULKAN_HPP_NOEXCEPT + { + pAcquireSyncs = pAcquireSyncs_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoKHR & setAcquireSyncs( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireSyncs_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = static_cast( acquireSyncs_.size() ); + pAcquireSyncs = acquireSyncs_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoKHR & setPAcquireKeys( const uint64_t* pAcquireKeys_ ) VULKAN_HPP_NOEXCEPT + { + pAcquireKeys = pAcquireKeys_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoKHR & setAcquireKeys( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireKeys_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = static_cast( acquireKeys_.size() ); + pAcquireKeys = acquireKeys_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoKHR & setPAcquireTimeouts( const uint32_t* pAcquireTimeouts_ ) VULKAN_HPP_NOEXCEPT + { + pAcquireTimeouts = pAcquireTimeouts_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoKHR & setAcquireTimeouts( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireTimeouts_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = static_cast( acquireTimeouts_.size() ); + pAcquireTimeouts = acquireTimeouts_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoKHR & setReleaseCount( uint32_t releaseCount_ ) VULKAN_HPP_NOEXCEPT + { + releaseCount = releaseCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoKHR & setPReleaseSyncs( const VULKAN_HPP_NAMESPACE::DeviceMemory* pReleaseSyncs_ ) VULKAN_HPP_NOEXCEPT + { + pReleaseSyncs = pReleaseSyncs_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoKHR & setReleaseSyncs( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseSyncs_ ) VULKAN_HPP_NOEXCEPT + { + releaseCount = static_cast( releaseSyncs_.size() ); + pReleaseSyncs = releaseSyncs_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoKHR & setPReleaseKeys( const uint64_t* pReleaseKeys_ ) VULKAN_HPP_NOEXCEPT + { + pReleaseKeys = pReleaseKeys_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoKHR & setReleaseKeys( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseKeys_ ) VULKAN_HPP_NOEXCEPT + { + 
releaseCount = static_cast( releaseKeys_.size() ); + pReleaseKeys = releaseKeys_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkWin32KeyedMutexAcquireReleaseInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWin32KeyedMutexAcquireReleaseInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Win32KeyedMutexAcquireReleaseInfoKHR const& ) const = default; +#else + bool operator==( Win32KeyedMutexAcquireReleaseInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( acquireCount == rhs.acquireCount ) + && ( pAcquireSyncs == rhs.pAcquireSyncs ) + && ( pAcquireKeys == rhs.pAcquireKeys ) + && ( pAcquireTimeouts == rhs.pAcquireTimeouts ) + && ( releaseCount == rhs.releaseCount ) + && ( pReleaseSyncs == rhs.pReleaseSyncs ) + && ( pReleaseKeys == rhs.pReleaseKeys ); + } + + bool operator!=( Win32KeyedMutexAcquireReleaseInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWin32KeyedMutexAcquireReleaseInfoKHR; + const void* pNext = {}; + uint32_t acquireCount = {}; + const VULKAN_HPP_NAMESPACE::DeviceMemory* pAcquireSyncs = {}; + const uint64_t* pAcquireKeys = {}; + const uint32_t* pAcquireTimeouts = {}; + uint32_t releaseCount = {}; + const VULKAN_HPP_NAMESPACE::DeviceMemory* pReleaseSyncs = {}; + const uint64_t* pReleaseKeys = {}; + + }; + static_assert( sizeof( Win32KeyedMutexAcquireReleaseInfoKHR ) == sizeof( VkWin32KeyedMutexAcquireReleaseInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = Win32KeyedMutexAcquireReleaseInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct Win32KeyedMutexAcquireReleaseInfoNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWin32KeyedMutexAcquireReleaseInfoNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Win32KeyedMutexAcquireReleaseInfoNV(uint32_t acquireCount_ = {}, const VULKAN_HPP_NAMESPACE::DeviceMemory* pAcquireSyncs_ = {}, const uint64_t* pAcquireKeys_ = {}, const uint32_t* pAcquireTimeoutMilliseconds_ = {}, uint32_t releaseCount_ = {}, const VULKAN_HPP_NAMESPACE::DeviceMemory* pReleaseSyncs_ = {}, const uint64_t* pReleaseKeys_ = {}) VULKAN_HPP_NOEXCEPT + : acquireCount( acquireCount_ ), pAcquireSyncs( pAcquireSyncs_ ), pAcquireKeys( pAcquireKeys_ ), pAcquireTimeoutMilliseconds( pAcquireTimeoutMilliseconds_ ), releaseCount( releaseCount_ ), pReleaseSyncs( pReleaseSyncs_ ), pReleaseKeys( pReleaseKeys_ ) + {} + + VULKAN_HPP_CONSTEXPR Win32KeyedMutexAcquireReleaseInfoNV( Win32KeyedMutexAcquireReleaseInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Win32KeyedMutexAcquireReleaseInfoNV( VkWin32KeyedMutexAcquireReleaseInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireSyncs_, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireKeys_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireTimeoutMilliseconds_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseSyncs_ = {}, VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseKeys_ = {} ) + : acquireCount( static_cast( acquireSyncs_.size() ) ), pAcquireSyncs( acquireSyncs_.data() ), pAcquireKeys( acquireKeys_.data() ), pAcquireTimeoutMilliseconds( acquireTimeoutMilliseconds_.data() ), releaseCount( static_cast( releaseSyncs_.size() ) ), pReleaseSyncs( releaseSyncs_.data() ), pReleaseKeys( releaseKeys_.data() ) + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( acquireSyncs_.size() == acquireKeys_.size() ); + VULKAN_HPP_ASSERT( acquireSyncs_.size() == acquireTimeoutMilliseconds_.size() ); + VULKAN_HPP_ASSERT( acquireKeys_.size() == acquireTimeoutMilliseconds_.size() ); +#else + if ( acquireSyncs_.size() != acquireKeys_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoNV::Win32KeyedMutexAcquireReleaseInfoNV: acquireSyncs_.size() != acquireKeys_.size()" ); + } + if ( acquireSyncs_.size() != acquireTimeoutMilliseconds_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoNV::Win32KeyedMutexAcquireReleaseInfoNV: acquireSyncs_.size() != acquireTimeoutMilliseconds_.size()" ); + } + if ( acquireKeys_.size() != acquireTimeoutMilliseconds_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoNV::Win32KeyedMutexAcquireReleaseInfoNV: acquireKeys_.size() != acquireTimeoutMilliseconds_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( releaseSyncs_.size() == releaseKeys_.size() ); +#else + if ( releaseSyncs_.size() != releaseKeys_.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Win32KeyedMutexAcquireReleaseInfoNV::Win32KeyedMutexAcquireReleaseInfoNV: releaseSyncs_.size() != 
releaseKeys_.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Win32KeyedMutexAcquireReleaseInfoNV & operator=( VkWin32KeyedMutexAcquireReleaseInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV & operator=( Win32KeyedMutexAcquireReleaseInfoNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( Win32KeyedMutexAcquireReleaseInfoNV ) ); + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV & setAcquireCount( uint32_t acquireCount_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = acquireCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV & setPAcquireSyncs( const VULKAN_HPP_NAMESPACE::DeviceMemory* pAcquireSyncs_ ) VULKAN_HPP_NOEXCEPT + { + pAcquireSyncs = pAcquireSyncs_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoNV & setAcquireSyncs( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireSyncs_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = static_cast( acquireSyncs_.size() ); + pAcquireSyncs = acquireSyncs_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoNV & setPAcquireKeys( const uint64_t* pAcquireKeys_ ) VULKAN_HPP_NOEXCEPT + { + pAcquireKeys = pAcquireKeys_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoNV & setAcquireKeys( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireKeys_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = static_cast( acquireKeys_.size() ); + pAcquireKeys = acquireKeys_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoNV & setPAcquireTimeoutMilliseconds( const uint32_t* pAcquireTimeoutMilliseconds_ ) VULKAN_HPP_NOEXCEPT + { + pAcquireTimeoutMilliseconds = pAcquireTimeoutMilliseconds_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoNV & setAcquireTimeoutMilliseconds( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & acquireTimeoutMilliseconds_ ) VULKAN_HPP_NOEXCEPT + { + acquireCount = static_cast( acquireTimeoutMilliseconds_.size() ); + pAcquireTimeoutMilliseconds = acquireTimeoutMilliseconds_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoNV & setReleaseCount( uint32_t releaseCount_ ) VULKAN_HPP_NOEXCEPT + { + releaseCount = releaseCount_; + return *this; + } + + Win32KeyedMutexAcquireReleaseInfoNV & setPReleaseSyncs( const VULKAN_HPP_NAMESPACE::DeviceMemory* pReleaseSyncs_ ) VULKAN_HPP_NOEXCEPT + { + pReleaseSyncs = pReleaseSyncs_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoNV & setReleaseSyncs( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseSyncs_ ) VULKAN_HPP_NOEXCEPT + { + releaseCount = static_cast( releaseSyncs_.size() ); + pReleaseSyncs = releaseSyncs_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + Win32KeyedMutexAcquireReleaseInfoNV & setPReleaseKeys( const uint64_t* pReleaseKeys_ ) VULKAN_HPP_NOEXCEPT + { + pReleaseKeys = pReleaseKeys_; + return *this; + } + 
+#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + Win32KeyedMutexAcquireReleaseInfoNV & setReleaseKeys( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & releaseKeys_ ) VULKAN_HPP_NOEXCEPT + { + releaseCount = static_cast( releaseKeys_.size() ); + pReleaseKeys = releaseKeys_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkWin32KeyedMutexAcquireReleaseInfoNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWin32KeyedMutexAcquireReleaseInfoNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Win32KeyedMutexAcquireReleaseInfoNV const& ) const = default; +#else + bool operator==( Win32KeyedMutexAcquireReleaseInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( acquireCount == rhs.acquireCount ) + && ( pAcquireSyncs == rhs.pAcquireSyncs ) + && ( pAcquireKeys == rhs.pAcquireKeys ) + && ( pAcquireTimeoutMilliseconds == rhs.pAcquireTimeoutMilliseconds ) + && ( releaseCount == rhs.releaseCount ) + && ( pReleaseSyncs == rhs.pReleaseSyncs ) + && ( pReleaseKeys == rhs.pReleaseKeys ); + } + + bool operator!=( Win32KeyedMutexAcquireReleaseInfoNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWin32KeyedMutexAcquireReleaseInfoNV; + const void* pNext = {}; + uint32_t acquireCount = {}; + const VULKAN_HPP_NAMESPACE::DeviceMemory* pAcquireSyncs = {}; + const uint64_t* pAcquireKeys = {}; + const uint32_t* pAcquireTimeoutMilliseconds = {}; + uint32_t releaseCount = {}; + const VULKAN_HPP_NAMESPACE::DeviceMemory* pReleaseSyncs = {}; + const uint64_t* pReleaseKeys = {}; + + }; + static_assert( sizeof( Win32KeyedMutexAcquireReleaseInfoNV ) == sizeof( VkWin32KeyedMutexAcquireReleaseInfoNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = Win32KeyedMutexAcquireReleaseInfoNV; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + +#ifdef VK_USE_PLATFORM_WIN32_KHR + struct Win32SurfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWin32SurfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Win32SurfaceCreateInfoKHR(VULKAN_HPP_NAMESPACE::Win32SurfaceCreateFlagsKHR flags_ = {}, HINSTANCE hinstance_ = {}, HWND hwnd_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), hinstance( hinstance_ ), hwnd( hwnd_ ) + {} + + VULKAN_HPP_CONSTEXPR Win32SurfaceCreateInfoKHR( Win32SurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + Win32SurfaceCreateInfoKHR( VkWin32SurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + Win32SurfaceCreateInfoKHR & operator=( VkWin32SurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + Win32SurfaceCreateInfoKHR & operator=( Win32SurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( Win32SurfaceCreateInfoKHR ) ); + return *this; + } + + Win32SurfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + Win32SurfaceCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::Win32SurfaceCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + Win32SurfaceCreateInfoKHR & setHinstance( HINSTANCE hinstance_ ) VULKAN_HPP_NOEXCEPT + { + hinstance = hinstance_; + return *this; + } + + Win32SurfaceCreateInfoKHR & setHwnd( HWND hwnd_ ) VULKAN_HPP_NOEXCEPT + { + hwnd = hwnd_; + return *this; + } + + + operator VkWin32SurfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWin32SurfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Win32SurfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( Win32SurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( hinstance == rhs.hinstance ) + && ( hwnd == rhs.hwnd ); + } + + bool operator!=( Win32SurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWin32SurfaceCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::Win32SurfaceCreateFlagsKHR flags = {}; + HINSTANCE hinstance = {}; + HWND hwnd = {}; + + }; + static_assert( sizeof( Win32SurfaceCreateInfoKHR ) == sizeof( VkWin32SurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = Win32SurfaceCreateInfoKHR; + }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + struct WriteDescriptorSetAccelerationStructureKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWriteDescriptorSetAccelerationStructureKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureKHR(uint32_t accelerationStructureCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructureCount( accelerationStructureCount_ ), pAccelerationStructures( pAccelerationStructures_ ) + {} + + VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureKHR( WriteDescriptorSetAccelerationStructureKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + WriteDescriptorSetAccelerationStructureKHR( VkWriteDescriptorSetAccelerationStructureKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSetAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & accelerationStructures_ ) + : accelerationStructureCount( static_cast( accelerationStructures_.size() ) ), pAccelerationStructures( accelerationStructures_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + WriteDescriptorSetAccelerationStructureKHR & operator=( VkWriteDescriptorSetAccelerationStructureKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + WriteDescriptorSetAccelerationStructureKHR & operator=( WriteDescriptorSetAccelerationStructureKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( WriteDescriptorSetAccelerationStructureKHR ) ); + return *this; + } + + WriteDescriptorSetAccelerationStructureKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSetAccelerationStructureKHR & setAccelerationStructureCount( uint32_t accelerationStructureCount_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCount = accelerationStructureCount_; + return *this; + } + + WriteDescriptorSetAccelerationStructureKHR & setPAccelerationStructures( const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures_ ) VULKAN_HPP_NOEXCEPT + { + pAccelerationStructures = pAccelerationStructures_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSetAccelerationStructureKHR & setAccelerationStructures( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & accelerationStructures_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCount = static_cast( accelerationStructures_.size() ); + pAccelerationStructures = accelerationStructures_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkWriteDescriptorSetAccelerationStructureKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWriteDescriptorSetAccelerationStructureKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( WriteDescriptorSetAccelerationStructureKHR const& ) const = default; +#else + bool operator==( WriteDescriptorSetAccelerationStructureKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( 
pNext == rhs.pNext ) + && ( accelerationStructureCount == rhs.accelerationStructureCount ) + && ( pAccelerationStructures == rhs.pAccelerationStructures ); + } + + bool operator!=( WriteDescriptorSetAccelerationStructureKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWriteDescriptorSetAccelerationStructureKHR; + const void* pNext = {}; + uint32_t accelerationStructureCount = {}; + const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures = {}; + + }; + static_assert( sizeof( WriteDescriptorSetAccelerationStructureKHR ) == sizeof( VkWriteDescriptorSetAccelerationStructureKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = WriteDescriptorSetAccelerationStructureKHR; + }; + + struct WriteDescriptorSetAccelerationStructureNV + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWriteDescriptorSetAccelerationStructureNV; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureNV(uint32_t accelerationStructureCount_ = {}, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures_ = {}) VULKAN_HPP_NOEXCEPT + : accelerationStructureCount( accelerationStructureCount_ ), pAccelerationStructures( pAccelerationStructures_ ) + {} + + VULKAN_HPP_CONSTEXPR WriteDescriptorSetAccelerationStructureNV( WriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + WriteDescriptorSetAccelerationStructureNV( VkWriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSetAccelerationStructureNV( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & accelerationStructures_ ) + : accelerationStructureCount( static_cast( accelerationStructures_.size() ) ), pAccelerationStructures( accelerationStructures_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + WriteDescriptorSetAccelerationStructureNV & operator=( VkWriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & operator=( WriteDescriptorSetAccelerationStructureNV const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( WriteDescriptorSetAccelerationStructureNV ) ); + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & setAccelerationStructureCount( uint32_t accelerationStructureCount_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCount = accelerationStructureCount_; + return *this; + } + + WriteDescriptorSetAccelerationStructureNV & setPAccelerationStructures( const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures_ ) VULKAN_HPP_NOEXCEPT + { + pAccelerationStructures = pAccelerationStructures_; + return *this; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + WriteDescriptorSetAccelerationStructureNV & setAccelerationStructures( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries 
const & accelerationStructures_ ) VULKAN_HPP_NOEXCEPT + { + accelerationStructureCount = static_cast( accelerationStructures_.size() ); + pAccelerationStructures = accelerationStructures_.data(); + return *this; + } +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + + + operator VkWriteDescriptorSetAccelerationStructureNV const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkWriteDescriptorSetAccelerationStructureNV &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( WriteDescriptorSetAccelerationStructureNV const& ) const = default; +#else + bool operator==( WriteDescriptorSetAccelerationStructureNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( accelerationStructureCount == rhs.accelerationStructureCount ) + && ( pAccelerationStructures == rhs.pAccelerationStructures ); + } + + bool operator!=( WriteDescriptorSetAccelerationStructureNV const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWriteDescriptorSetAccelerationStructureNV; + const void* pNext = {}; + uint32_t accelerationStructureCount = {}; + const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures = {}; + + }; + static_assert( sizeof( WriteDescriptorSetAccelerationStructureNV ) == sizeof( VkWriteDescriptorSetAccelerationStructureNV ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" ); + + template <> + struct CppType + { + using Type = WriteDescriptorSetAccelerationStructureNV; + }; + + struct WriteDescriptorSetInlineUniformBlockEXT + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eWriteDescriptorSetInlineUniformBlockEXT; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR WriteDescriptorSetInlineUniformBlockEXT(uint32_t dataSize_ = {}, const void* pData_ = {}) VULKAN_HPP_NOEXCEPT + : dataSize( dataSize_ ), pData( pData_ ) + {} + + VULKAN_HPP_CONSTEXPR WriteDescriptorSetInlineUniformBlockEXT( WriteDescriptorSetInlineUniformBlockEXT const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + WriteDescriptorSetInlineUniformBlockEXT( VkWriteDescriptorSetInlineUniformBlockEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } + +#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) + template + WriteDescriptorSetInlineUniformBlockEXT( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries const & data_ ) + : dataSize( static_cast( data_.size() * sizeof(T) ) ), pData( data_.data() ) + {} +#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE) +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + WriteDescriptorSetInlineUniformBlockEXT & operator=( VkWriteDescriptorSetInlineUniformBlockEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + WriteDescriptorSetInlineUniformBlockEXT & operator=( WriteDescriptorSetInlineUniformBlockEXT const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( WriteDescriptorSetInlineUniformBlockEXT ) ); + return *this; + } + + WriteDescriptorSetInlineUniformBlockEXT & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + WriteDescriptorSetInlineUniformBlockEXT & setDataSize( uint32_t 
dataSize_ ) VULKAN_HPP_NOEXCEPT
+    {
+      dataSize = dataSize_;
+      return *this;
+    }
+
+    WriteDescriptorSetInlineUniformBlockEXT & setPData( const void* pData_ ) VULKAN_HPP_NOEXCEPT
+    {
+      pData = pData_;
+      return *this;
+    }
+
+#if !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE)
+    template <typename T>
+    WriteDescriptorSetInlineUniformBlockEXT & setData( VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries<T> const & data_ ) VULKAN_HPP_NOEXCEPT
+    {
+      dataSize = static_cast<uint32_t>( data_.size() * sizeof(T) );
+      pData = data_.data();
+      return *this;
+    }
+#endif // !defined(VULKAN_HPP_DISABLE_ENHANCED_MODE)
+
+
+    operator VkWriteDescriptorSetInlineUniformBlockEXT const&() const VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<const VkWriteDescriptorSetInlineUniformBlockEXT*>( this );
+    }
+
+    operator VkWriteDescriptorSetInlineUniformBlockEXT &() VULKAN_HPP_NOEXCEPT
+    {
+      return *reinterpret_cast<VkWriteDescriptorSetInlineUniformBlockEXT*>( this );
+    }
+
+
+#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR)
+    auto operator<=>( WriteDescriptorSetInlineUniformBlockEXT const& ) const = default;
+#else
+    bool operator==( WriteDescriptorSetInlineUniformBlockEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return ( sType == rhs.sType )
+          && ( pNext == rhs.pNext )
+          && ( dataSize == rhs.dataSize )
+          && ( pData == rhs.pData );
+    }
+
+    bool operator!=( WriteDescriptorSetInlineUniformBlockEXT const& rhs ) const VULKAN_HPP_NOEXCEPT
+    {
+      return !operator==( rhs );
+    }
+#endif
+
+
+
+  public:
+    const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eWriteDescriptorSetInlineUniformBlockEXT;
+    const void* pNext = {};
+    uint32_t dataSize = {};
+    const void* pData = {};
+
+  };
+  static_assert( sizeof( WriteDescriptorSetInlineUniformBlockEXT ) == sizeof( VkWriteDescriptorSetInlineUniformBlockEXT ), "struct and wrapper have different size!" );
+  static_assert( std::is_standard_layout<WriteDescriptorSetInlineUniformBlockEXT>::value, "struct wrapper is not a standard layout!"
); + + template <> + struct CppType + { + using Type = WriteDescriptorSetInlineUniformBlockEXT; + }; + +#ifdef VK_USE_PLATFORM_XCB_KHR + struct XcbSurfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eXcbSurfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR XcbSurfaceCreateInfoKHR(VULKAN_HPP_NAMESPACE::XcbSurfaceCreateFlagsKHR flags_ = {}, xcb_connection_t* connection_ = {}, xcb_window_t window_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), connection( connection_ ), window( window_ ) + {} + + VULKAN_HPP_CONSTEXPR XcbSurfaceCreateInfoKHR( XcbSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + XcbSurfaceCreateInfoKHR( VkXcbSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + XcbSurfaceCreateInfoKHR & operator=( VkXcbSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + XcbSurfaceCreateInfoKHR & operator=( XcbSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( XcbSurfaceCreateInfoKHR ) ); + return *this; + } + + XcbSurfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + XcbSurfaceCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::XcbSurfaceCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + XcbSurfaceCreateInfoKHR & setConnection( xcb_connection_t* connection_ ) VULKAN_HPP_NOEXCEPT + { + connection = connection_; + return *this; + } + + XcbSurfaceCreateInfoKHR & setWindow( xcb_window_t window_ ) VULKAN_HPP_NOEXCEPT + { + window = window_; + return *this; + } + + + operator VkXcbSurfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkXcbSurfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( XcbSurfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( XcbSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( connection == rhs.connection ) + && ( memcmp( &window, &rhs.window, sizeof( xcb_window_t ) ) == 0 ); + } + + bool operator!=( XcbSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eXcbSurfaceCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::XcbSurfaceCreateFlagsKHR flags = {}; + xcb_connection_t* connection = {}; + xcb_window_t window = {}; + + }; + static_assert( sizeof( XcbSurfaceCreateInfoKHR ) == sizeof( VkXcbSurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = XcbSurfaceCreateInfoKHR; + }; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + +#ifdef VK_USE_PLATFORM_XLIB_KHR + struct XlibSurfaceCreateInfoKHR + { + static const bool allowDuplicate = false; + static VULKAN_HPP_CONST_OR_CONSTEXPR StructureType structureType = StructureType::eXlibSurfaceCreateInfoKHR; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR XlibSurfaceCreateInfoKHR(VULKAN_HPP_NAMESPACE::XlibSurfaceCreateFlagsKHR flags_ = {}, Display* dpy_ = {}, Window window_ = {}) VULKAN_HPP_NOEXCEPT + : flags( flags_ ), dpy( dpy_ ), window( window_ ) + {} + + VULKAN_HPP_CONSTEXPR XlibSurfaceCreateInfoKHR( XlibSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT = default; + + XlibSurfaceCreateInfoKHR( VkXlibSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = rhs; + } +#endif // !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + + XlibSurfaceCreateInfoKHR & operator=( VkXlibSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + *this = *reinterpret_cast( &rhs ); + return *this; + } + + XlibSurfaceCreateInfoKHR & operator=( XlibSurfaceCreateInfoKHR const & rhs ) VULKAN_HPP_NOEXCEPT + { + memcpy( static_cast( this ), &rhs, sizeof( XlibSurfaceCreateInfoKHR ) ); + return *this; + } + + XlibSurfaceCreateInfoKHR & setPNext( const void* pNext_ ) VULKAN_HPP_NOEXCEPT + { + pNext = pNext_; + return *this; + } + + XlibSurfaceCreateInfoKHR & setFlags( VULKAN_HPP_NAMESPACE::XlibSurfaceCreateFlagsKHR flags_ ) VULKAN_HPP_NOEXCEPT + { + flags = flags_; + return *this; + } + + XlibSurfaceCreateInfoKHR & setDpy( Display* dpy_ ) VULKAN_HPP_NOEXCEPT + { + dpy = dpy_; + return *this; + } + + XlibSurfaceCreateInfoKHR & setWindow( Window window_ ) VULKAN_HPP_NOEXCEPT + { + window = window_; + return *this; + } + + + operator VkXlibSurfaceCreateInfoKHR const&() const VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + operator VkXlibSurfaceCreateInfoKHR &() VULKAN_HPP_NOEXCEPT + { + return *reinterpret_cast( this ); + } + + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( XlibSurfaceCreateInfoKHR const& ) const = default; +#else + bool operator==( XlibSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return ( sType == rhs.sType ) + && ( pNext == rhs.pNext ) + && ( flags == rhs.flags ) + && ( dpy == rhs.dpy ) + && ( memcmp( &window, &rhs.window, sizeof( Window ) ) == 0 ); + } + + bool operator!=( XlibSurfaceCreateInfoKHR const& rhs ) const VULKAN_HPP_NOEXCEPT + { + return !operator==( rhs ); + } +#endif + + + + public: + const VULKAN_HPP_NAMESPACE::StructureType sType = StructureType::eXlibSurfaceCreateInfoKHR; + const void* pNext = {}; + VULKAN_HPP_NAMESPACE::XlibSurfaceCreateFlagsKHR flags = {}; + Display* dpy = {}; + Window window = {}; + + }; + static_assert( sizeof( XlibSurfaceCreateInfoKHR ) == sizeof( VkXlibSurfaceCreateInfoKHR ), "struct and wrapper have different size!" ); + static_assert( std::is_standard_layout::value, "struct wrapper is not a standard layout!" 
); + + template <> + struct CppType + { + using Type = XlibSurfaceCreateInfoKHR; + }; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + class DebugReportCallbackEXT + { + public: + using CType = VkDebugReportCallbackEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDebugReportCallbackEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eDebugReportCallbackEXT; + + public: + VULKAN_HPP_CONSTEXPR DebugReportCallbackEXT() VULKAN_HPP_NOEXCEPT + : m_debugReportCallbackEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DebugReportCallbackEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_debugReportCallbackEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DebugReportCallbackEXT( VkDebugReportCallbackEXT debugReportCallbackEXT ) VULKAN_HPP_NOEXCEPT + : m_debugReportCallbackEXT( debugReportCallbackEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DebugReportCallbackEXT & operator=(VkDebugReportCallbackEXT debugReportCallbackEXT) VULKAN_HPP_NOEXCEPT + { + m_debugReportCallbackEXT = debugReportCallbackEXT; + return *this; + } +#endif + + DebugReportCallbackEXT & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_debugReportCallbackEXT = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugReportCallbackEXT const& ) const = default; +#else + bool operator==( DebugReportCallbackEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT == rhs.m_debugReportCallbackEXT; + } + + bool operator!=(DebugReportCallbackEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT != rhs.m_debugReportCallbackEXT; + } + + bool operator<(DebugReportCallbackEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT < rhs.m_debugReportCallbackEXT; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugReportCallbackEXT() const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_debugReportCallbackEXT == VK_NULL_HANDLE; + } + + private: + VkDebugReportCallbackEXT m_debugReportCallbackEXT; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT ) == sizeof( VkDebugReportCallbackEXT ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + + class DebugUtilsMessengerEXT + { + public: + using CType = VkDebugUtilsMessengerEXT; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eDebugUtilsMessengerEXT; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eUnknown; + + public: + VULKAN_HPP_CONSTEXPR DebugUtilsMessengerEXT() VULKAN_HPP_NOEXCEPT + : m_debugUtilsMessengerEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR DebugUtilsMessengerEXT( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_debugUtilsMessengerEXT(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT DebugUtilsMessengerEXT( VkDebugUtilsMessengerEXT debugUtilsMessengerEXT ) VULKAN_HPP_NOEXCEPT + : m_debugUtilsMessengerEXT( debugUtilsMessengerEXT ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + DebugUtilsMessengerEXT & operator=(VkDebugUtilsMessengerEXT debugUtilsMessengerEXT) VULKAN_HPP_NOEXCEPT + { + m_debugUtilsMessengerEXT = debugUtilsMessengerEXT; + return *this; + } +#endif + + DebugUtilsMessengerEXT & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_debugUtilsMessengerEXT = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( DebugUtilsMessengerEXT const& ) const = default; +#else + bool operator==( DebugUtilsMessengerEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT == rhs.m_debugUtilsMessengerEXT; + } + + bool operator!=(DebugUtilsMessengerEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT != rhs.m_debugUtilsMessengerEXT; + } + + bool operator<(DebugUtilsMessengerEXT const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT < rhs.m_debugUtilsMessengerEXT; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkDebugUtilsMessengerEXT() const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_debugUtilsMessengerEXT == VK_NULL_HANDLE; + } + + private: + VkDebugUtilsMessengerEXT m_debugUtilsMessengerEXT; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT ) == sizeof( VkDebugUtilsMessengerEXT ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT; + }; + + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + class Instance; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDebugReportCallbackEXT = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueDebugUtilsMessengerEXT = UniqueHandle; + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueSurfaceKHR = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + class Instance + { + public: + using CType = VkInstance; + + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::ObjectType objectType = VULKAN_HPP_NAMESPACE::ObjectType::eInstance; + static VULKAN_HPP_CONST_OR_CONSTEXPR VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT debugReportObjectType = VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT::eInstance; + + public: + VULKAN_HPP_CONSTEXPR Instance() VULKAN_HPP_NOEXCEPT + : m_instance(VK_NULL_HANDLE) + {} + + VULKAN_HPP_CONSTEXPR Instance( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + : m_instance(VK_NULL_HANDLE) + {} + + VULKAN_HPP_TYPESAFE_EXPLICIT Instance( VkInstance instance ) VULKAN_HPP_NOEXCEPT + : m_instance( instance ) + {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Instance & operator=(VkInstance instance) VULKAN_HPP_NOEXCEPT + { + m_instance = instance; + return *this; + } +#endif + + Instance & operator=( std::nullptr_t ) VULKAN_HPP_NOEXCEPT + { + m_instance = VK_NULL_HANDLE; + return *this; + } + +#if defined(VULKAN_HPP_HAS_SPACESHIP_OPERATOR) + auto operator<=>( Instance const& ) const = default; +#else + bool operator==( Instance const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_instance == rhs.m_instance; + } + + bool operator!=(Instance const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_instance != rhs.m_instance; + } + + bool operator<(Instance const & rhs ) const VULKAN_HPP_NOEXCEPT + { + return m_instance < rhs.m_instance; + } +#endif + + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD Result createAndroidSurfaceKHR( const VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + template + VULKAN_HPP_NODISCARD Result createDebugReportCallbackEXT( const 
VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT* pCallback, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createDebugUtilsMessengerEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT* pMessenger, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + typename ResultValueType::type createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + template + VULKAN_HPP_NODISCARD Result createDirectFBSurfaceEXT( const VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDirectFBSurfaceEXT( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDirectFBSurfaceEXTUnique( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + + template + VULKAN_HPP_NODISCARD Result createDisplayPlaneSurfaceKHR( const VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d 
VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result createHeadlessSurfaceEXT( const VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createHeadlessSurfaceEXT( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createHeadlessSurfaceEXTUnique( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_IOS_MVK + template + VULKAN_HPP_NODISCARD Result createIOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + + +#ifdef VK_USE_PLATFORM_FUCHSIA + template + VULKAN_HPP_NODISCARD Result createImagePipeSurfaceFUCHSIA( const VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template 
+ VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createImagePipeSurfaceFUCHSIA( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createImagePipeSurfaceFUCHSIAUnique( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + +#ifdef VK_USE_PLATFORM_MACOS_MVK + template + VULKAN_HPP_NODISCARD Result createMacOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + +#ifdef VK_USE_PLATFORM_METAL_EXT + template + VULKAN_HPP_NODISCARD Result createMetalSurfaceEXT( const VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createMetalSurfaceEXT( const MetalSurfaceCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createMetalSurfaceEXTUnique( const MetalSurfaceCreateInfoEXT & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + +#ifdef VK_USE_PLATFORM_GGP + template + VULKAN_HPP_NODISCARD Result createStreamDescriptorSurfaceGGP( const VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createStreamDescriptorSurfaceGGP( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createStreamDescriptorSurfaceGGPUnique( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_GGP*/ + + +#ifdef VK_USE_PLATFORM_VI_NN + template + VULKAN_HPP_NODISCARD Result createViSurfaceNN( const VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_VI_NN*/ + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + VULKAN_HPP_NODISCARD Result createWaylandSurfaceKHR( const VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD Result createWin32SurfaceKHR( const VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + VULKAN_HPP_NODISCARD Result createXcbSurfaceKHR( const VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + VULKAN_HPP_NODISCARD Result createXlibSurfaceKHR( const VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + + template + void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, 
VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface 
VULKAN_HPP_DEFAULT_ARGUMENT_ASSIGNMENT, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumeratePhysicalDeviceGroups( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceGroupPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumeratePhysicalDeviceGroups( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD Result enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumeratePhysicalDeviceGroupsKHR( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceGroupPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumeratePhysicalDeviceGroupsKHR( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumeratePhysicalDevices( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = PhysicalDeviceAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumeratePhysicalDevices( 
PhysicalDeviceAllocator & physicalDeviceAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + PFN_vkVoidFunction getProcAddr( const char* pName, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + PFN_vkVoidFunction getProcAddr( const std::string & name, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + void submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + void submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) const VULKAN_HPP_NOEXCEPT; +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VkInstance() const VULKAN_HPP_NOEXCEPT + { + return m_instance; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT + { + return m_instance != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT + { + return m_instance == VK_NULL_HANDLE; + } + + private: + VkInstance m_instance; + }; + static_assert( sizeof( VULKAN_HPP_NAMESPACE::Instance ) == sizeof( VkInstance ), "handle and wrapper have different size!" ); + + template <> + struct VULKAN_HPP_DEPRECATED("vk::cpp_type is deprecated. 
Use vk::CppType instead.") cpp_type + { + using type = VULKAN_HPP_NAMESPACE::Instance; + }; + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Instance; + }; + + + template <> + struct CppType + { + using Type = VULKAN_HPP_NAMESPACE::Instance; + }; + + + template <> + struct isVulkanHandleType + { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool value = true; + }; + +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template class UniqueHandleTraits { public: using deleter = ObjectDestroy; }; + using UniqueInstance = UniqueHandle; +#endif /*VULKAN_HPP_NO_SMART_HANDLE*/ + + + template + VULKAN_HPP_NODISCARD Result createInstance( const VULKAN_HPP_NAMESPACE::InstanceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Instance* pInstance, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename ResultValueType::type createInstance( const InstanceCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional allocator VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateInstanceExtensionProperties( Optional layerName VULKAN_HPP_DEFAULT_ARGUMENT_NULLPTR_ASSIGNMENT, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = ExtensionPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateInstanceExtensionProperties( Optional layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT; +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateInstanceLayerProperties( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); + template , typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE, typename B = LayerPropertiesAllocator, typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD typename ResultValueType>::type enumerateInstanceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ); +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+  VULKAN_HPP_NODISCARD Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT ) VULKAN_HPP_NOEXCEPT;
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch = VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>
+  typename ResultValueType<uint32_t>::type enumerateInstanceVersion( Dispatch const & d VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT );
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result createInstance( const VULKAN_HPP_NAMESPACE::InstanceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Instance* pInstance, Dispatch const & d ) VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkInstance *>( pInstance ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::Instance>::type createInstance( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d )
+  {
+    VULKAN_HPP_NAMESPACE::Instance instance;
+    Result result = static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkInstance *>( &instance ) ) );
+    return createResultValue( result, instance, VULKAN_HPP_NAMESPACE_STRING "::createInstance" );
+  }
+
+# ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<VULKAN_HPP_NAMESPACE::Instance, Dispatch>>::type createInstanceUnique( const InstanceCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d )
+  {
+    VULKAN_HPP_NAMESPACE::Instance instance;
+    Result result = static_cast<Result>( d.vkCreateInstance( reinterpret_cast<const VkInstanceCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkInstance *>( &instance ) ) );
+    ObjectDestroy<NoParent, Dispatch> deleter( allocator, d );
+    return createResultValue<VULKAN_HPP_NAMESPACE::Instance, Dispatch>( result, instance, VULKAN_HPP_NAMESPACE_STRING "::createInstanceUnique", deleter );
+  }
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d ) VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( pLayerName, pPropertyCount, reinterpret_cast< VkExtensionProperties *>( pProperties ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename ExtensionPropertiesAllocator, typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<ExtensionProperties, ExtensionPropertiesAllocator>>::type enumerateInstanceExtensionProperties( Optional<const std::string> layerName, Dispatch const & d )
+  {
+    std::vector<ExtensionProperties, ExtensionPropertiesAllocator> properties;
+    uint32_t propertyCount;
+    Result result;
+    do
+    {
+      result = static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) );
+      if ( ( result == Result::eSuccess ) && propertyCount )
+      {
+        properties.resize( propertyCount );
+        result = static_cast<Result>( d.vkEnumerateInstanceExtensionProperties( layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceExtensionProperties" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type enumerateInstanceExtensionProperties( Optional layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d ) + { + std::vector properties( extensionPropertiesAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateInstanceExtensionProperties( layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceExtensionProperties" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d ) VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumerateInstanceLayerProperties( pPropertyCount, reinterpret_cast< VkLayerProperties *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type enumerateInstanceLayerProperties( Dispatch const & d ) + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateInstanceLayerProperties( &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceLayerProperties" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type enumerateInstanceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d ) + { + std::vector properties( layerPropertiesAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateInstanceLayerProperties( &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateInstanceLayerProperties( &propertyCount, reinterpret_cast( properties.data() ) ) ); + 
VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::enumerateInstanceLayerProperties" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result enumerateInstanceVersion( uint32_t* pApiVersion, Dispatch const & d ) VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumerateInstanceVersion( pApiVersion ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type enumerateInstanceVersion( Dispatch const & d ) + { + uint32_t apiVersion; + Result result = static_cast( d.vkEnumerateInstanceVersion( &apiVersion ) ); + return createResultValue( result, apiVersion, VULKAN_HPP_NAMESPACE_STRING "::enumerateInstanceVersion" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::begin( const VULKAN_HPP_NAMESPACE::CommandBufferBeginInfo* pBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast( pBeginInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type CommandBuffer::begin( const CommandBufferBeginInfo & beginInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkBeginCommandBuffer( m_commandBuffer, reinterpret_cast( &beginInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::begin" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const VULKAN_HPP_NAMESPACE::ConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast( pConditionalRenderingBegin ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginConditionalRenderingEXT( const ConditionalRenderingBeginInfoEXT & conditionalRenderingBegin, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBeginConditionalRenderingEXT( m_commandBuffer, reinterpret_cast( &conditionalRenderingBegin ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( pLabelInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBeginDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( &labelInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::beginQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBeginQuery( m_commandBuffer, static_cast( queryPool ), query, static_cast( flags ) ); + } + + + template + VULKAN_HPP_INLINE 
void CommandBuffer::beginQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, VULKAN_HPP_NAMESPACE::QueryControlFlags flags, uint32_t index, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginQueryIndexedEXT( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), query, static_cast<VkQueryControlFlags>( flags ), index );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( pRenderPassBegin ), static_cast<VkSubpassContents>( contents ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass( const RenderPassBeginInfo & renderPassBegin, VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginRenderPass( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( &renderPassBegin ), static_cast<VkSubpassContents>( contents ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginRenderPass2( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( pRenderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( pSubpassBeginInfo ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginRenderPass2( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( &renderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( &subpassBeginInfo ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassBeginInfo* pRenderPassBegin, const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( pRenderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( pSubpassBeginInfo ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginRenderPass2KHR( const RenderPassBeginInfo & renderPassBegin, const SubpassBeginInfo & subpassBeginInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginRenderPass2KHR( m_commandBuffer, reinterpret_cast<const VkRenderPassBeginInfo *>( &renderPassBegin ), reinterpret_cast<const VkSubpassBeginInfo *>( &subpassBeginInfo ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkCmdBeginTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBufferCount, reinterpret_cast<const VkBuffer *>( pCounterBuffers ), reinterpret_cast<const VkDeviceSize *>( pCounterBufferOffsets ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void CommandBuffer::beginTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy<const VULKAN_HPP_NAMESPACE::Buffer> const & counterBuffers, ArrayProxy<const VULKAN_HPP_NAMESPACE::DeviceSize> const & counterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS
+  {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+    VULKAN_HPP_ASSERT( 
counterBufferOffsets.empty() || counterBuffers.size() == counterBufferOffsets.size() ); +#else + if ( !counterBufferOffsets.empty() && counterBuffers.size() != counterBufferOffsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::beginTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBeginTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBuffers.size(), reinterpret_cast( counterBuffers.data() ), reinterpret_cast( counterBufferOffsets.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), firstSet, descriptorSetCount, reinterpret_cast( pDescriptorSets ), dynamicOffsetCount, pDynamicOffsets ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindDescriptorSets( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t firstSet, ArrayProxy const & descriptorSets, ArrayProxy const & dynamicOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindDescriptorSets( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), firstSet, descriptorSets.size(), reinterpret_cast( descriptorSets.data() ), dynamicOffsets.size(), dynamicOffsets.data() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindIndexBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::IndexType indexType, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindIndexBuffer( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( indexType ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindPipeline( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindPipeline( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( pipeline ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindPipelineShaderGroupNV( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t groupIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindPipelineShaderGroupNV( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( pipeline ), groupIndex ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindShadingRateImageNV( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindShadingRateImageNV( m_commandBuffer, static_cast( imageView ), static_cast( imageLayout ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* 
pSizes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindTransformFeedbackBuffersEXT( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast( pBuffers ), reinterpret_cast( pOffsets ), reinterpret_cast( pSizes ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindTransformFeedbackBuffersEXT( uint32_t firstBinding, ArrayProxy const & buffers, ArrayProxy const & offsets, ArrayProxy const & sizes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); + VULKAN_HPP_ASSERT( sizes.empty() || buffers.size() == sizes.size() ); +#else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != offsets.size()" ); + } + if ( !sizes.empty() && buffers.size() != sizes.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindTransformFeedbackBuffersEXT: buffers.size() != sizes.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBindTransformFeedbackBuffersEXT( m_commandBuffer, firstBinding, buffers.size(), reinterpret_cast( buffers.data() ), reinterpret_cast( offsets.data() ), reinterpret_cast( sizes.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast( pBuffers ), reinterpret_cast( pOffsets ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers( uint32_t firstBinding, ArrayProxy const & buffers, ArrayProxy const & offsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); +#else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers: buffers.size() != offsets.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBindVertexBuffers( m_commandBuffer, firstBinding, buffers.size(), reinterpret_cast( buffers.data() ), reinterpret_cast( offsets.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( uint32_t firstBinding, uint32_t bindingCount, const VULKAN_HPP_NAMESPACE::Buffer* pBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pOffsets, const VULKAN_HPP_NAMESPACE::DeviceSize* pSizes, const VULKAN_HPP_NAMESPACE::DeviceSize* pStrides, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBindVertexBuffers2EXT( m_commandBuffer, firstBinding, bindingCount, reinterpret_cast( pBuffers ), reinterpret_cast( pOffsets ), reinterpret_cast( pSizes ), reinterpret_cast( pStrides ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::bindVertexBuffers2EXT( uint32_t firstBinding, ArrayProxy const & buffers, ArrayProxy const & offsets, ArrayProxy const & sizes, ArrayProxy const & strides, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( buffers.size() == offsets.size() ); + 
VULKAN_HPP_ASSERT( sizes.empty() || buffers.size() == sizes.size() ); + VULKAN_HPP_ASSERT( strides.empty() || buffers.size() == strides.size() ); +#else + if ( buffers.size() != offsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != offsets.size()" ); + } + if ( !sizes.empty() && buffers.size() != sizes.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != sizes.size()" ); + } + if ( !strides.empty() && buffers.size() != strides.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::bindVertexBuffers2EXT: buffers.size() != strides.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBindVertexBuffers2EXT( m_commandBuffer, firstBinding, buffers.size(), reinterpret_cast( buffers.data() ), reinterpret_cast( offsets.data() ), reinterpret_cast( sizes.data() ), reinterpret_cast( strides.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageBlit* pRegions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBlitImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ), static_cast( filter ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::blitImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, VULKAN_HPP_NAMESPACE::Filter filter, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBlitImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size(), reinterpret_cast( regions.data() ), static_cast( filter ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::blitImage2KHR( const VULKAN_HPP_NAMESPACE::BlitImageInfo2KHR* pBlitImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBlitImage2KHR( m_commandBuffer, reinterpret_cast( pBlitImageInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::blitImage2KHR( const BlitImageInfo2KHR & blitImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBlitImage2KHR( m_commandBuffer, reinterpret_cast( &blitImageInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureInfoNV* pInfo, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBuildAccelerationStructureNV( m_commandBuffer, reinterpret_cast( pInfo ), static_cast( instanceData ), 
static_cast( instanceOffset ), static_cast( update ), static_cast( dst ), static_cast( src ), static_cast( scratch ), static_cast( scratchOffset ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructureNV( const AccelerationStructureInfoNV & info, VULKAN_HPP_NAMESPACE::Buffer instanceData, VULKAN_HPP_NAMESPACE::DeviceSize instanceOffset, VULKAN_HPP_NAMESPACE::Bool32 update, VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::Buffer scratch, VULKAN_HPP_NAMESPACE::DeviceSize scratchOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBuildAccelerationStructureNV( m_commandBuffer, reinterpret_cast( &info ), static_cast( instanceData ), static_cast( instanceOffset ), static_cast( update ), static_cast( dst ), static_cast( src ), static_cast( scratch ), static_cast( scratchOffset ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresIndirectKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::DeviceAddress* pIndirectDeviceAddresses, const uint32_t* pIndirectStrides, const uint32_t* const * ppMaxPrimitiveCounts, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBuildAccelerationStructuresIndirectKHR( m_commandBuffer, infoCount, reinterpret_cast( pInfos ), reinterpret_cast( pIndirectDeviceAddresses ), pIndirectStrides, ppMaxPrimitiveCounts ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresIndirectKHR( ArrayProxy const & infos, ArrayProxy const & indirectDeviceAddresses, ArrayProxy const & indirectStrides, ArrayProxy const & pMaxPrimitiveCounts, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( infos.size() == indirectDeviceAddresses.size() ); + VULKAN_HPP_ASSERT( infos.size() == indirectStrides.size() ); + VULKAN_HPP_ASSERT( infos.size() == pMaxPrimitiveCounts.size() ); +#else + if ( infos.size() != indirectDeviceAddresses.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != indirectDeviceAddresses.size()" ); + } + if ( infos.size() != indirectStrides.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != indirectStrides.size()" ); + } + if ( infos.size() != pMaxPrimitiveCounts.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresIndirectKHR: infos.size() != pMaxPrimitiveCounts.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBuildAccelerationStructuresIndirectKHR( m_commandBuffer, infos.size(), reinterpret_cast( infos.data() ), reinterpret_cast( indirectDeviceAddresses.data() ), indirectStrides.data(), pMaxPrimitiveCounts.data() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresKHR( uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdBuildAccelerationStructuresKHR( m_commandBuffer, infoCount, reinterpret_cast( 
pInfos ), reinterpret_cast( ppBuildRangeInfos ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::buildAccelerationStructuresKHR( ArrayProxy const & infos, ArrayProxy const & pBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( infos.size() == pBuildRangeInfos.size() ); +#else + if ( infos.size() != pBuildRangeInfos.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::buildAccelerationStructuresKHR: infos.size() != pBuildRangeInfos.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdBuildAccelerationStructuresKHR( m_commandBuffer, infos.size(), reinterpret_cast( infos.data() ), reinterpret_cast( pBuildRangeInfos.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( uint32_t attachmentCount, const VULKAN_HPP_NAMESPACE::ClearAttachment* pAttachments, uint32_t rectCount, const VULKAN_HPP_NAMESPACE::ClearRect* pRects, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdClearAttachments( m_commandBuffer, attachmentCount, reinterpret_cast( pAttachments ), rectCount, reinterpret_cast( pRects ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::clearAttachments( ArrayProxy const & attachments, ArrayProxy const & rects, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdClearAttachments( m_commandBuffer, attachments.size(), reinterpret_cast( attachments.data() ), rects.size(), reinterpret_cast( rects.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearColorValue* pColor, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdClearColorImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( pColor ), rangeCount, reinterpret_cast( pRanges ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::clearColorImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearColorValue & color, ArrayProxy const & ranges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdClearColorImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( &color ), ranges.size(), reinterpret_cast( ranges.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const VULKAN_HPP_NAMESPACE::ClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VULKAN_HPP_NAMESPACE::ImageSubresourceRange* pRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( pDepthStencil ), rangeCount, reinterpret_cast( pRanges ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::clearDepthStencilImage( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageLayout imageLayout, const ClearDepthStencilValue & depthStencil, ArrayProxy const & ranges, 
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdClearDepthStencilImage( m_commandBuffer, static_cast( image ), static_cast( imageLayout ), reinterpret_cast( &depthStencil ), ranges.size(), reinterpret_cast( ranges.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyAccelerationStructureKHR( m_commandBuffer, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureKHR( const CopyAccelerationStructureInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyAccelerationStructureKHR( m_commandBuffer, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV dst, VULKAN_HPP_NAMESPACE::AccelerationStructureNV src, VULKAN_HPP_NAMESPACE::CopyAccelerationStructureModeKHR mode, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyAccelerationStructureNV( m_commandBuffer, static_cast( dst ), static_cast( src ), static_cast( mode ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyAccelerationStructureToMemoryKHR( m_commandBuffer, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyAccelerationStructureToMemoryKHR( const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyAccelerationStructureToMemoryKHR( m_commandBuffer, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBuffer( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstBuffer ), regionCount, reinterpret_cast( pRegions ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBuffer( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstBuffer ), regions.size(), reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferInfo2KHR* pCopyBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBuffer2KHR( m_commandBuffer, reinterpret_cast( pCopyBufferInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyBuffer2KHR( const CopyBufferInfo2KHR & copyBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBuffer2KHR( m_commandBuffer, reinterpret_cast( ©BufferInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + 
VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage( VULKAN_HPP_NAMESPACE::Buffer srcBuffer, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBufferToImage( m_commandBuffer, static_cast( srcBuffer ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size(), reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( const VULKAN_HPP_NAMESPACE::CopyBufferToImageInfo2KHR* pCopyBufferToImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBufferToImage2KHR( m_commandBuffer, reinterpret_cast( pCopyBufferToImageInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyBufferToImage2KHR( const CopyBufferToImageInfo2KHR & copyBufferToImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyBufferToImage2KHR( m_commandBuffer, reinterpret_cast( ©BufferToImageInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regionCount, reinterpret_cast( pRegions ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImage( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstImage ), static_cast( dstImageLayout ), regions.size(), reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyImage2KHR( const VULKAN_HPP_NAMESPACE::CopyImageInfo2KHR* pCopyImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImage2KHR( m_commandBuffer, reinterpret_cast( pCopyImageInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyImage2KHR( const CopyImageInfo2KHR & copyImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImage2KHR( m_commandBuffer, reinterpret_cast( ©ImageInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( 
VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::BufferImageCopy* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstBuffer ), regionCount, reinterpret_cast( pRegions ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, ArrayProxy const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImageToBuffer( m_commandBuffer, static_cast( srcImage ), static_cast( srcImageLayout ), static_cast( dstBuffer ), regions.size(), reinterpret_cast( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( const VULKAN_HPP_NAMESPACE::CopyImageToBufferInfo2KHR* pCopyImageToBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImageToBuffer2KHR( m_commandBuffer, reinterpret_cast( pCopyImageToBufferInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyImageToBuffer2KHR( const CopyImageToBufferInfo2KHR & copyImageToBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyImageToBuffer2KHR( m_commandBuffer, reinterpret_cast( ©ImageToBufferInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyMemoryToAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyMemoryToAccelerationStructureKHR( m_commandBuffer, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::copyMemoryToAccelerationStructureKHR( const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyMemoryToAccelerationStructureKHR( m_commandBuffer, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::copyQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdCopyQueryPoolResults( m_commandBuffer, static_cast( queryPool ), firstQuery, queryCount, static_cast( dstBuffer ), static_cast( dstOffset ), static_cast( stride ), static_cast( flags ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast( pMarkerInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerBeginEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDebugMarkerBeginEXT( m_commandBuffer, reinterpret_cast( &markerInfo ) ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerEndEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDebugMarkerEndEXT( m_commandBuffer ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerMarkerInfoEXT* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast( pMarkerInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::debugMarkerInsertEXT( const DebugMarkerMarkerInfoEXT & markerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDebugMarkerInsertEXT( m_commandBuffer, reinterpret_cast( &markerInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::dispatch( uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDispatch( m_commandBuffer, groupCountX, groupCountY, groupCountZ ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchBase( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDispatchBase( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchBaseKHR( uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDispatchBaseKHR( m_commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::dispatchIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDispatchIndirect( m_commandBuffer, static_cast( buffer ), static_cast( offset ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::draw( uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDraw( m_commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexed( uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndexed( m_commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndexedIndirect( m_commandBuffer, static_cast( buffer ), static_cast( offset ), drawCount, stride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndexedIndirectCount( m_commandBuffer, 
static_cast( buffer ), static_cast( offset ), static_cast( countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndexedIndirectCountAMD( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndexedIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndexedIndirectCountKHR( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirect( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndirect( m_commandBuffer, static_cast( buffer ), static_cast( offset ), drawCount, stride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectByteCountEXT( uint32_t instanceCount, uint32_t firstInstance, VULKAN_HPP_NAMESPACE::Buffer counterBuffer, VULKAN_HPP_NAMESPACE::DeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndirectByteCountEXT( m_commandBuffer, instanceCount, firstInstance, static_cast( counterBuffer ), static_cast( counterBufferOffset ), counterOffset, vertexStride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCount( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndirectCount( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountAMD( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndirectCountAMD( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::drawIndirectCountKHR( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawIndirectCountKHR( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( 
countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectCountNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::Buffer countBuffer, VULKAN_HPP_NAMESPACE::DeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawMeshTasksIndirectCountNV( m_commandBuffer, static_cast( buffer ), static_cast( offset ), static_cast( countBuffer ), static_cast( countBufferOffset ), maxDrawCount, stride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksIndirectNV( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceSize offset, uint32_t drawCount, uint32_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawMeshTasksIndirectNV( m_commandBuffer, static_cast( buffer ), static_cast( offset ), drawCount, stride ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::drawMeshTasksNV( uint32_t taskCount, uint32_t firstTask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdDrawMeshTasksNV( m_commandBuffer, taskCount, firstTask ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::endConditionalRenderingEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndConditionalRenderingEXT( m_commandBuffer ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::endDebugUtilsLabelEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndDebugUtilsLabelEXT( m_commandBuffer ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::endQuery( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndQuery( m_commandBuffer, static_cast( queryPool ), query ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::endQueryIndexedEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, uint32_t index, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndQueryIndexedEXT( m_commandBuffer, static_cast( queryPool ), query, index ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndRenderPass( m_commandBuffer ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndRenderPass2( m_commandBuffer, reinterpret_cast( pSubpassEndInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2( const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndRenderPass2( m_commandBuffer, reinterpret_cast( &subpassEndInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast( pSubpassEndInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endRenderPass2KHR( const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndRenderPass2KHR( m_commandBuffer, reinterpret_cast( &subpassEndInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE 
void CommandBuffer::endTransformFeedbackEXT( uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VULKAN_HPP_NAMESPACE::Buffer* pCounterBuffers, const VULKAN_HPP_NAMESPACE::DeviceSize* pCounterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdEndTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBufferCount, reinterpret_cast( pCounterBuffers ), reinterpret_cast( pCounterBufferOffsets ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::endTransformFeedbackEXT( uint32_t firstCounterBuffer, ArrayProxy const & counterBuffers, ArrayProxy const & counterBufferOffsets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( counterBufferOffsets.empty() || counterBuffers.size() == counterBufferOffsets.size() ); +#else + if ( !counterBufferOffsets.empty() && counterBuffers.size() != counterBufferOffsets.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::endTransformFeedbackEXT: counterBuffers.size() != counterBufferOffsets.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + d.vkCmdEndTransformFeedbackEXT( m_commandBuffer, firstCounterBuffer, counterBuffers.size(), reinterpret_cast( counterBuffers.data() ), reinterpret_cast( counterBufferOffsets.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdExecuteCommands( m_commandBuffer, commandBufferCount, reinterpret_cast( pCommandBuffers ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::executeCommands( ArrayProxy const & commandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdExecuteCommands( m_commandBuffer, commandBuffers.size(), reinterpret_cast( commandBuffers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdExecuteGeneratedCommandsNV( m_commandBuffer, static_cast( isPreprocessed ), reinterpret_cast( pGeneratedCommandsInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::executeGeneratedCommandsNV( VULKAN_HPP_NAMESPACE::Bool32 isPreprocessed, const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdExecuteGeneratedCommandsNV( m_commandBuffer, static_cast( isPreprocessed ), reinterpret_cast( &generatedCommandsInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::fillBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize size, uint32_t data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdFillBuffer( m_commandBuffer, static_cast( dstBuffer ), static_cast( dstOffset ), static_cast( size ), data ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdInsertDebugUtilsLabelEXT( 
m_commandBuffer, reinterpret_cast( pLabelInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdInsertDebugUtilsLabelEXT( m_commandBuffer, reinterpret_cast( &labelInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass( VULKAN_HPP_NAMESPACE::SubpassContents contents, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdNextSubpass( m_commandBuffer, static_cast( contents ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdNextSubpass2( m_commandBuffer, reinterpret_cast( pSubpassBeginInfo ), reinterpret_cast( pSubpassEndInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdNextSubpass2( m_commandBuffer, reinterpret_cast( &subpassBeginInfo ), reinterpret_cast( &subpassEndInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const VULKAN_HPP_NAMESPACE::SubpassBeginInfo* pSubpassBeginInfo, const VULKAN_HPP_NAMESPACE::SubpassEndInfo* pSubpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast( pSubpassBeginInfo ), reinterpret_cast( pSubpassEndInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::nextSubpass2KHR( const SubpassBeginInfo & subpassBeginInfo, const SubpassEndInfo & subpassEndInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdNextSubpass2KHR( m_commandBuffer, reinterpret_cast( &subpassBeginInfo ), reinterpret_cast( &subpassEndInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPipelineBarrier( m_commandBuffer, static_cast( srcStageMask ), static_cast( dstStageMask ), static_cast( dependencyFlags ), memoryBarrierCount, reinterpret_cast( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast( pImageMemoryBarriers ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pipelineBarrier( VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, VULKAN_HPP_NAMESPACE::DependencyFlags dependencyFlags, ArrayProxy const & memoryBarriers, ArrayProxy const & bufferMemoryBarriers, ArrayProxy const & imageMemoryBarriers, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT + { + d.vkCmdPipelineBarrier( m_commandBuffer, static_cast( srcStageMask ), static_cast( dstStageMask ), static_cast( dependencyFlags ), memoryBarriers.size(), reinterpret_cast( memoryBarriers.data() ), bufferMemoryBarriers.size(), reinterpret_cast( bufferMemoryBarriers.data() ), imageMemoryBarriers.size(), reinterpret_cast( imageMemoryBarriers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsInfoNV* pGeneratedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPreprocessGeneratedCommandsNV( m_commandBuffer, reinterpret_cast( pGeneratedCommandsInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::preprocessGeneratedCommandsNV( const GeneratedCommandsInfoNV & generatedCommandsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPreprocessGeneratedCommandsNV( m_commandBuffer, reinterpret_cast( &generatedCommandsInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPushConstants( m_commandBuffer, static_cast( layout ), static_cast( stageFlags ), offset, size, pValues ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushConstants( VULKAN_HPP_NAMESPACE::PipelineLayout layout, VULKAN_HPP_NAMESPACE::ShaderStageFlags stageFlags, uint32_t offset, ArrayProxy const & values, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPushConstants( m_commandBuffer, static_cast( layout ), static_cast( stageFlags ), offset, values.size() * sizeof( T ), reinterpret_cast( values.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), set, descriptorWriteCount, reinterpret_cast( pDescriptorWrites ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetKHR( VULKAN_HPP_NAMESPACE::PipelineBindPoint pipelineBindPoint, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, ArrayProxy const & descriptorWrites, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPushDescriptorSetKHR( m_commandBuffer, static_cast( pipelineBindPoint ), static_cast( layout ), set, descriptorWrites.size(), reinterpret_cast( descriptorWrites.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::pushDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE::PipelineLayout layout, uint32_t set, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdPushDescriptorSetWithTemplateKHR( m_commandBuffer, static_cast( descriptorUpdateTemplate ), static_cast( layout ), set, pData ); + } + + + 
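+  // ---------------------------------------------------------------------------
+  // Illustrative usage sketch (not part of the upstream vulkan.hpp): the commands
+  // above are usually recorded through the enhanced ArrayProxy overloads, which
+  // derive the count arguments from the proxies.  The handles below (cmd,
+  // pipeline, layout, vertexBuffer, descriptorSet) are assumed to have been
+  // created by the surrounding application code.
+#if 0
+  inline void recordSimpleDraw( CommandBuffer cmd, Pipeline pipeline, PipelineLayout layout,
+                                Buffer vertexBuffer, DescriptorSet descriptorSet,
+                                float const ( &pushData )[4] )
+  {
+    cmd.bindPipeline( PipelineBindPoint::eGraphics, pipeline );
+    // One vertex buffer bound at binding 0, starting at offset 0.
+    cmd.bindVertexBuffers( 0, { vertexBuffer }, { DeviceSize( 0 ) } );
+    // A single descriptor set without dynamic offsets.
+    cmd.bindDescriptorSets( PipelineBindPoint::eGraphics, layout, 0, { descriptorSet }, nullptr );
+    // pushConstants<T> computes the byte size as values.size() * sizeof( T ).
+    cmd.pushConstants<float>( layout, ShaderStageFlagBits::eVertex, 0, ArrayProxy<const float>( 4, pushData ) );
+    cmd.draw( 3, 1, 0, 0 );
+  }
+#endif
+  // ---------------------------------------------------------------------------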
template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resetEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdResetEvent( m_commandBuffer, static_cast<VkEvent>( event ), static_cast<VkPipelineStageFlags>( stageMask ) ); + } + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdResetQueryPool( m_commandBuffer, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount ); + } + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, uint32_t regionCount, const VULKAN_HPP_NAMESPACE::ImageResolve* pRegions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regionCount, reinterpret_cast<const VkImageResolve*>( pRegions ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resolveImage( VULKAN_HPP_NAMESPACE::Image srcImage, VULKAN_HPP_NAMESPACE::ImageLayout srcImageLayout, VULKAN_HPP_NAMESPACE::Image dstImage, VULKAN_HPP_NAMESPACE::ImageLayout dstImageLayout, ArrayProxy<const VULKAN_HPP_NAMESPACE::ImageResolve> const & regions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdResolveImage( m_commandBuffer, static_cast<VkImage>( srcImage ), static_cast<VkImageLayout>( srcImageLayout ), static_cast<VkImage>( dstImage ), static_cast<VkImageLayout>( dstImageLayout ), regions.size(), reinterpret_cast<const VkImageResolve*>( regions.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resolveImage2KHR( const VULKAN_HPP_NAMESPACE::ResolveImageInfo2KHR* pResolveImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdResolveImage2KHR( m_commandBuffer, reinterpret_cast<const VkResolveImageInfo2KHR*>( pResolveImageInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::resolveImage2KHR( const ResolveImageInfo2KHR & resolveImageInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdResolveImage2KHR( m_commandBuffer, reinterpret_cast<const VkResolveImageInfo2KHR*>( &resolveImageInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setBlendConstants( const float blendConstants[4], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetBlendConstants( m_commandBuffer, blendConstants ); + } + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCheckpointNV( const void* pCheckpointMarker, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetCheckpointNV( m_commandBuffer, pCheckpointMarker ); + } + + + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV* pCustomSampleOrders, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast<VkCoarseSampleOrderTypeNV>( sampleOrderType ), customSampleOrderCount, reinterpret_cast<const VkCoarseSampleOrderCustomNV*>( pCustomSampleOrders ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template <typename Dispatch> + VULKAN_HPP_INLINE void CommandBuffer::setCoarseSampleOrderNV( VULKAN_HPP_NAMESPACE::CoarseSampleOrderTypeNV sampleOrderType, ArrayProxy<const VULKAN_HPP_NAMESPACE::CoarseSampleOrderCustomNV> const & customSampleOrders, Dispatch const & d ) 
const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetCoarseSampleOrderNV( m_commandBuffer, static_cast( sampleOrderType ), customSampleOrders.size(), reinterpret_cast( customSampleOrders.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setCullModeEXT( VULKAN_HPP_NAMESPACE::CullModeFlags cullMode, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetCullModeEXT( m_commandBuffer, static_cast( cullMode ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDepthBias( float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDepthBias( m_commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDepthBounds( float minDepthBounds, float maxDepthBounds, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDepthBounds( m_commandBuffer, minDepthBounds, maxDepthBounds ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDepthBoundsTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthBoundsTestEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDepthBoundsTestEnableEXT( m_commandBuffer, static_cast( depthBoundsTestEnable ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDepthCompareOpEXT( VULKAN_HPP_NAMESPACE::CompareOp depthCompareOp, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDepthCompareOpEXT( m_commandBuffer, static_cast( depthCompareOp ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDepthTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthTestEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDepthTestEnableEXT( m_commandBuffer, static_cast( depthTestEnable ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDepthWriteEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 depthWriteEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDepthWriteEnableEXT( m_commandBuffer, static_cast( depthWriteEnable ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMask( uint32_t deviceMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDeviceMask( m_commandBuffer, deviceMask ); + } + + template + VULKAN_HPP_INLINE void CommandBuffer::setDeviceMaskKHR( uint32_t deviceMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDeviceMaskKHR( m_commandBuffer, deviceMask ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VULKAN_HPP_NAMESPACE::Rect2D* pDiscardRectangles, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangleCount, reinterpret_cast( pDiscardRectangles ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setDiscardRectangleEXT( uint32_t firstDiscardRectangle, ArrayProxy const & discardRectangles, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetDiscardRectangleEXT( m_commandBuffer, firstDiscardRectangle, discardRectangles.size(), reinterpret_cast( discardRectangles.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setEvent( VULKAN_HPP_NAMESPACE::Event event, VULKAN_HPP_NAMESPACE::PipelineStageFlags stageMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + 
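+    // As throughout this section, the call is forwarded through the dispatcher
+    // 'd', with the C++ handle and flag types cast back to their Vk* equivalents
+    // before the underlying C entry point is invoked.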
d.vkCmdSetEvent( m_commandBuffer, static_cast( event ), static_cast( stageMask ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pExclusiveScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissorCount, reinterpret_cast( pExclusiveScissors ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setExclusiveScissorNV( uint32_t firstExclusiveScissor, ArrayProxy const & exclusiveScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetExclusiveScissorNV( m_commandBuffer, firstExclusiveScissor, exclusiveScissors.size(), reinterpret_cast( exclusiveScissors.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateEnumNV( VULKAN_HPP_NAMESPACE::FragmentShadingRateNV shadingRate, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetFragmentShadingRateEnumNV( m_commandBuffer, static_cast( shadingRate ), reinterpret_cast( combinerOps ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateKHR( const VULKAN_HPP_NAMESPACE::Extent2D* pFragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetFragmentShadingRateKHR( m_commandBuffer, reinterpret_cast( pFragmentSize ), reinterpret_cast( combinerOps ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setFragmentShadingRateKHR( const Extent2D & fragmentSize, const VULKAN_HPP_NAMESPACE::FragmentShadingRateCombinerOpKHR combinerOps[2], Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetFragmentShadingRateKHR( m_commandBuffer, reinterpret_cast( &fragmentSize ), reinterpret_cast( combinerOps ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setFrontFaceEXT( VULKAN_HPP_NAMESPACE::FrontFace frontFace, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetFrontFaceEXT( m_commandBuffer, static_cast( frontFace ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setLineStippleEXT( uint32_t lineStippleFactor, uint16_t lineStipplePattern, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetLineStippleEXT( m_commandBuffer, lineStippleFactor, lineStipplePattern ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setLineWidth( float lineWidth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetLineWidth( m_commandBuffer, lineWidth ); + } + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceMarkerInfoINTEL* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCmdSetPerformanceMarkerINTEL( m_commandBuffer, reinterpret_cast( pMarkerInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type CommandBuffer::setPerformanceMarkerINTEL( const PerformanceMarkerInfoINTEL & markerInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkCmdSetPerformanceMarkerINTEL( m_commandBuffer, 
reinterpret_cast( &markerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceMarkerINTEL" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceOverrideINTEL( const VULKAN_HPP_NAMESPACE::PerformanceOverrideInfoINTEL* pOverrideInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCmdSetPerformanceOverrideINTEL( m_commandBuffer, reinterpret_cast( pOverrideInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type CommandBuffer::setPerformanceOverrideINTEL( const PerformanceOverrideInfoINTEL & overrideInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkCmdSetPerformanceOverrideINTEL( m_commandBuffer, reinterpret_cast( &overrideInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceOverrideINTEL" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::setPerformanceStreamMarkerINTEL( const VULKAN_HPP_NAMESPACE::PerformanceStreamMarkerInfoINTEL* pMarkerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCmdSetPerformanceStreamMarkerINTEL( m_commandBuffer, reinterpret_cast( pMarkerInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type CommandBuffer::setPerformanceStreamMarkerINTEL( const PerformanceStreamMarkerInfoINTEL & markerInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkCmdSetPerformanceStreamMarkerINTEL( m_commandBuffer, reinterpret_cast( &markerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::setPerformanceStreamMarkerINTEL" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setPrimitiveTopologyEXT( VULKAN_HPP_NAMESPACE::PrimitiveTopology primitiveTopology, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetPrimitiveTopologyEXT( m_commandBuffer, static_cast( primitiveTopology ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setRayTracingPipelineStackSizeKHR( uint32_t pipelineStackSize, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetRayTracingPipelineStackSizeKHR( m_commandBuffer, pipelineStackSize ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const VULKAN_HPP_NAMESPACE::SampleLocationsInfoEXT* pSampleLocationsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast( pSampleLocationsInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setSampleLocationsEXT( const SampleLocationsInfoEXT & sampleLocationsInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetSampleLocationsEXT( m_commandBuffer, reinterpret_cast( &sampleLocationsInfo ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissorCount, reinterpret_cast( pScissors ) ); + } + 
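+  // ---------------------------------------------------------------------------
+  // Illustrative usage sketch (not part of the upstream vulkan.hpp): the classic
+  // dynamic-state setters take a first index plus an array, while the
+  // *WithCountEXT variants defined further below (VK_EXT_extended_dynamic_state)
+  // make the viewport/scissor count dynamic as well.  'cmd' and 'extent' are
+  // assumed to come from the application.
+#if 0
+  inline void setFullscreenViewport( CommandBuffer cmd, Extent2D extent )
+  {
+    Viewport viewport( 0.0f, 0.0f, static_cast<float>( extent.width ),
+                       static_cast<float>( extent.height ), 0.0f, 1.0f );
+    Rect2D   scissor( Offset2D( 0, 0 ), extent );
+    cmd.setViewport( 0, viewport );        // VK_DYNAMIC_STATE_VIEWPORT
+    cmd.setScissor( 0, scissor );          // VK_DYNAMIC_STATE_SCISSOR
+    // With VK_EXT_extended_dynamic_state enabled, the count is dynamic too:
+    cmd.setViewportWithCountEXT( viewport );
+    cmd.setScissorWithCountEXT( scissor );
+  }
+#endif
+  // ---------------------------------------------------------------------------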
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setScissor( uint32_t firstScissor, ArrayProxy const & scissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetScissor( m_commandBuffer, firstScissor, scissors.size(), reinterpret_cast( scissors.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( uint32_t scissorCount, const VULKAN_HPP_NAMESPACE::Rect2D* pScissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetScissorWithCountEXT( m_commandBuffer, scissorCount, reinterpret_cast( pScissors ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setScissorWithCountEXT( ArrayProxy const & scissors, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetScissorWithCountEXT( m_commandBuffer, scissors.size(), reinterpret_cast( scissors.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setStencilCompareMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t compareMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetStencilCompareMask( m_commandBuffer, static_cast( faceMask ), compareMask ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setStencilOpEXT( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, VULKAN_HPP_NAMESPACE::StencilOp failOp, VULKAN_HPP_NAMESPACE::StencilOp passOp, VULKAN_HPP_NAMESPACE::StencilOp depthFailOp, VULKAN_HPP_NAMESPACE::CompareOp compareOp, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetStencilOpEXT( m_commandBuffer, static_cast( faceMask ), static_cast( failOp ), static_cast( passOp ), static_cast( depthFailOp ), static_cast( compareOp ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setStencilReference( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t reference, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetStencilReference( m_commandBuffer, static_cast( faceMask ), reference ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setStencilTestEnableEXT( VULKAN_HPP_NAMESPACE::Bool32 stencilTestEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetStencilTestEnableEXT( m_commandBuffer, static_cast( stencilTestEnable ) ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setStencilWriteMask( VULKAN_HPP_NAMESPACE::StencilFaceFlags faceMask, uint32_t writeMask, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetStencilWriteMask( m_commandBuffer, static_cast( faceMask ), writeMask ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast( pViewports ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setViewport( uint32_t firstViewport, ArrayProxy const & viewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewport( m_commandBuffer, firstViewport, viewports.size(), reinterpret_cast( viewports.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ShadingRatePaletteNV* 
pShadingRatePalettes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast( pShadingRatePalettes ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportShadingRatePaletteNV( uint32_t firstViewport, ArrayProxy const & shadingRatePalettes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewportShadingRatePaletteNV( m_commandBuffer, firstViewport, shadingRatePalettes.size(), reinterpret_cast( shadingRatePalettes.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::ViewportWScalingNV* pViewportWScalings, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportCount, reinterpret_cast( pViewportWScalings ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportWScalingNV( uint32_t firstViewport, ArrayProxy const & viewportWScalings, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewportWScalingNV( m_commandBuffer, firstViewport, viewportWScalings.size(), reinterpret_cast( viewportWScalings.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( uint32_t viewportCount, const VULKAN_HPP_NAMESPACE::Viewport* pViewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewportWithCountEXT( m_commandBuffer, viewportCount, reinterpret_cast( pViewports ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::setViewportWithCountEXT( ArrayProxy const & viewports, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdSetViewportWithCountEXT( m_commandBuffer, viewports.size(), reinterpret_cast( viewports.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdTraceRaysIndirectKHR( m_commandBuffer, reinterpret_cast( pRaygenShaderBindingTable ), reinterpret_cast( pMissShaderBindingTable ), reinterpret_cast( pHitShaderBindingTable ), reinterpret_cast( pCallableShaderBindingTable ), static_cast( indirectDeviceAddress ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysIndirectKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, VULKAN_HPP_NAMESPACE::DeviceAddress indirectDeviceAddress, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdTraceRaysIndirectKHR( m_commandBuffer, reinterpret_cast( &raygenShaderBindingTable ), reinterpret_cast( &missShaderBindingTable ), reinterpret_cast( 
&hitShaderBindingTable ), reinterpret_cast( &callableShaderBindingTable ), static_cast( indirectDeviceAddress ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysKHR( const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VULKAN_HPP_NAMESPACE::StridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdTraceRaysKHR( m_commandBuffer, reinterpret_cast( pRaygenShaderBindingTable ), reinterpret_cast( pMissShaderBindingTable ), reinterpret_cast( pHitShaderBindingTable ), reinterpret_cast( pCallableShaderBindingTable ), width, height, depth ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysKHR( const StridedDeviceAddressRegionKHR & raygenShaderBindingTable, const StridedDeviceAddressRegionKHR & missShaderBindingTable, const StridedDeviceAddressRegionKHR & hitShaderBindingTable, const StridedDeviceAddressRegionKHR & callableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdTraceRaysKHR( m_commandBuffer, reinterpret_cast( &raygenShaderBindingTable ), reinterpret_cast( &missShaderBindingTable ), reinterpret_cast( &hitShaderBindingTable ), reinterpret_cast( &callableShaderBindingTable ), width, height, depth ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::traceRaysNV( VULKAN_HPP_NAMESPACE::Buffer raygenShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize raygenShaderBindingOffset, VULKAN_HPP_NAMESPACE::Buffer missShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize missShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer hitShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize hitShaderBindingStride, VULKAN_HPP_NAMESPACE::Buffer callableShaderBindingTableBuffer, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingOffset, VULKAN_HPP_NAMESPACE::DeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdTraceRaysNV( m_commandBuffer, static_cast( raygenShaderBindingTableBuffer ), static_cast( raygenShaderBindingOffset ), static_cast( missShaderBindingTableBuffer ), static_cast( missShaderBindingOffset ), static_cast( missShaderBindingStride ), static_cast( hitShaderBindingTableBuffer ), static_cast( hitShaderBindingOffset ), static_cast( hitShaderBindingStride ), static_cast( callableShaderBindingTableBuffer ), static_cast( callableShaderBindingOffset ), static_cast( callableShaderBindingStride ), width, height, depth ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, VULKAN_HPP_NAMESPACE::DeviceSize dataSize, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdUpdateBuffer( m_commandBuffer, static_cast( dstBuffer ), static_cast( dstOffset ), static_cast( dataSize ), pData ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::updateBuffer( 
VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, ArrayProxy const & data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdUpdateBuffer( m_commandBuffer, static_cast( dstBuffer ), static_cast( dstOffset ), data.size() * sizeof( T ), reinterpret_cast( data.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( uint32_t eventCount, const VULKAN_HPP_NAMESPACE::Event* pEvents, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VULKAN_HPP_NAMESPACE::MemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::BufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VULKAN_HPP_NAMESPACE::ImageMemoryBarrier* pImageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWaitEvents( m_commandBuffer, eventCount, reinterpret_cast( pEvents ), static_cast( srcStageMask ), static_cast( dstStageMask ), memoryBarrierCount, reinterpret_cast( pMemoryBarriers ), bufferMemoryBarrierCount, reinterpret_cast( pBufferMemoryBarriers ), imageMemoryBarrierCount, reinterpret_cast( pImageMemoryBarriers ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::waitEvents( ArrayProxy const & events, VULKAN_HPP_NAMESPACE::PipelineStageFlags srcStageMask, VULKAN_HPP_NAMESPACE::PipelineStageFlags dstStageMask, ArrayProxy const & memoryBarriers, ArrayProxy const & bufferMemoryBarriers, ArrayProxy const & imageMemoryBarriers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWaitEvents( m_commandBuffer, events.size(), reinterpret_cast( events.data() ), static_cast( srcStageMask ), static_cast( dstStageMask ), memoryBarriers.size(), reinterpret_cast( memoryBarriers.data() ), bufferMemoryBarriers.size(), reinterpret_cast( bufferMemoryBarriers.data() ), imageMemoryBarriers.size(), reinterpret_cast( imageMemoryBarriers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWriteAccelerationStructuresPropertiesKHR( m_commandBuffer, accelerationStructureCount, reinterpret_cast( pAccelerationStructures ), static_cast( queryType ), static_cast( queryPool ), firstQuery ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesKHR( ArrayProxy const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWriteAccelerationStructuresPropertiesKHR( m_commandBuffer, accelerationStructures.size(), reinterpret_cast( accelerationStructures.data() ), static_cast( queryType ), static_cast( queryPool ), firstQuery ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType 
queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWriteAccelerationStructuresPropertiesNV( m_commandBuffer, accelerationStructureCount, reinterpret_cast( pAccelerationStructures ), static_cast( queryType ), static_cast( queryPool ), firstQuery ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void CommandBuffer::writeAccelerationStructuresPropertiesNV( ArrayProxy const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWriteAccelerationStructuresPropertiesNV( m_commandBuffer, accelerationStructures.size(), reinterpret_cast( accelerationStructures.data() ), static_cast( queryType ), static_cast( queryPool ), firstQuery ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void CommandBuffer::writeBufferMarkerAMD( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::Buffer dstBuffer, VULKAN_HPP_NAMESPACE::DeviceSize dstOffset, uint32_t marker, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWriteBufferMarkerAMD( m_commandBuffer, static_cast( pipelineStage ), static_cast( dstBuffer ), static_cast( dstOffset ), marker ); + } + + + template + VULKAN_HPP_INLINE void CommandBuffer::writeTimestamp( VULKAN_HPP_NAMESPACE::PipelineStageFlagBits pipelineStage, VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t query, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkCmdWriteTimestamp( m_commandBuffer, static_cast( pipelineStage ), static_cast( queryPool ), query ); + } + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::end( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEndCommandBuffer( m_commandBuffer ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type CommandBuffer::end( Dispatch const & d ) const + { + Result result = static_cast( d.vkEndCommandBuffer( m_commandBuffer ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::end" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkResetCommandBuffer( m_commandBuffer, static_cast( flags ) ) ); + } +#else + template + VULKAN_HPP_INLINE typename ResultValueType::type CommandBuffer::reset( VULKAN_HPP_NAMESPACE::CommandBufferResetFlags flags, Dispatch const & d ) const + { + Result result = static_cast( d.vkResetCommandBuffer( m_commandBuffer, static_cast( flags ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::CommandBuffer::reset" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAcquireFullScreenExclusiveModeEXT( m_device, static_cast( swapchain ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType::type Device::acquireFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const + { + Result result = static_cast( d.vkAcquireFullScreenExclusiveModeEXT( m_device, static_cast( swapchain ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireFullScreenExclusiveModeEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireNextImage2KHR( const VULKAN_HPP_NAMESPACE::AcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast( pAcquireInfo ), pImageIndex ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::acquireNextImage2KHR( const AcquireNextImageInfoKHR & acquireInfo, Dispatch const & d ) const + { + uint32_t imageIndex; + Result result = static_cast( d.vkAcquireNextImage2KHR( m_device, reinterpret_cast( &acquireInfo ), &imageIndex ) ); + return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImage2KHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eNotReady, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, uint32_t* pImageIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAcquireNextImageKHR( m_device, static_cast( swapchain ), timeout, static_cast( semaphore ), static_cast( fence ), pImageIndex ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::acquireNextImageKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint64_t timeout, VULKAN_HPP_NAMESPACE::Semaphore semaphore, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const + { + uint32_t imageIndex; + Result result = static_cast( d.vkAcquireNextImageKHR( m_device, static_cast( swapchain ), timeout, static_cast( semaphore ), static_cast( fence ), &imageIndex ) ); + return createResultValue( result, imageIndex, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireNextImageKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout, VULKAN_HPP_NAMESPACE::Result::eNotReady, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquirePerformanceConfigurationINTEL( const VULKAN_HPP_NAMESPACE::PerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL* pConfiguration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast( pAcquireInfo ), reinterpret_cast< VkPerformanceConfigurationINTEL *>( pConfiguration ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::acquirePerformanceConfigurationINTEL( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch 
const & d ) const + { + VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration; + Result result = static_cast( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast( &acquireInfo ), reinterpret_cast( &configuration ) ) ); + return createResultValue( result, configuration, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTEL" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::acquirePerformanceConfigurationINTELUnique( const PerformanceConfigurationAcquireInfoINTEL & acquireInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration; + Result result = static_cast( d.vkAcquirePerformanceConfigurationINTEL( m_device, reinterpret_cast( &acquireInfo ), reinterpret_cast( &configuration ) ) ); + ObjectRelease deleter( *this, d ); + return createResultValue( result, configuration, VULKAN_HPP_NAMESPACE_STRING "::Device::acquirePerformanceConfigurationINTELUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::acquireProfilingLockKHR( const VULKAN_HPP_NAMESPACE::AcquireProfilingLockInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::acquireProfilingLockKHR( const AcquireProfilingLockInfoKHR & info, Dispatch const & d ) const + { + Result result = static_cast( d.vkAcquireProfilingLockKHR( m_device, reinterpret_cast( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::acquireProfilingLockKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateCommandBuffers( const VULKAN_HPP_NAMESPACE::CommandBufferAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( pAllocateInfo ), reinterpret_cast< VkCommandBuffer *>( pCommandBuffers ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d ) const + { + std::vector commandBuffers( allocateInfo.commandBufferCount ); + Result result = static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( commandBuffers.data() ) ) ); + return createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateCommandBuffers( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d ) const + { + std::vector commandBuffers( allocateInfo.commandBufferCount, commandBufferAllocator ); + Result result = static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( commandBuffers.data() ) ) ); + return 
createResultValue( result, commandBuffers, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffers" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, CommandBufferAllocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, Dispatch const & d ) const + { + std::vector, CommandBufferAllocator> uniqueCommandBuffers; + std::vector commandBuffers( allocateInfo.commandBufferCount ); + Result result = static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( commandBuffers.data() ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + uniqueCommandBuffers.reserve( allocateInfo.commandBufferCount ); + PoolFree deleter( *this, allocateInfo.commandPool, d ); + for ( size_t i=0; i < allocateInfo.commandBufferCount; i++ ) + { + uniqueCommandBuffers.push_back( UniqueHandle( commandBuffers[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniqueCommandBuffers ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, CommandBufferAllocator>>::type Device::allocateCommandBuffersUnique( const CommandBufferAllocateInfo & allocateInfo, CommandBufferAllocator & commandBufferAllocator, Dispatch const & d ) const + { + std::vector, CommandBufferAllocator> uniqueCommandBuffers( commandBufferAllocator ); + std::vector commandBuffers( allocateInfo.commandBufferCount ); + Result result = static_cast( d.vkAllocateCommandBuffers( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( commandBuffers.data() ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + uniqueCommandBuffers.reserve( allocateInfo.commandBufferCount ); + PoolFree deleter( *this, allocateInfo.commandPool, d ); + for ( size_t i=0; i < allocateInfo.commandBufferCount; i++ ) + { + uniqueCommandBuffers.push_back( UniqueHandle( commandBuffers[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniqueCommandBuffers ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateCommandBuffersUnique" ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateDescriptorSets( const VULKAN_HPP_NAMESPACE::DescriptorSetAllocateInfo* pAllocateInfo, VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( pAllocateInfo ), reinterpret_cast< VkDescriptorSet *>( pDescriptorSets ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateDescriptorSets( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d ) const + { + std::vector descriptorSets( allocateInfo.descriptorSetCount ); + Result result = static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateDescriptorSets( const 
DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d ) const + { + std::vector descriptorSets( allocateInfo.descriptorSetCount, descriptorSetAllocator ); + Result result = static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, descriptorSets, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSets" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, DescriptorSetAllocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, Dispatch const & d ) const + { + std::vector, DescriptorSetAllocator> uniqueDescriptorSets; + std::vector descriptorSets( allocateInfo.descriptorSetCount ); + Result result = static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( descriptorSets.data() ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + uniqueDescriptorSets.reserve( allocateInfo.descriptorSetCount ); + PoolFree deleter( *this, allocateInfo.descriptorPool, d ); + for ( size_t i=0; i < allocateInfo.descriptorSetCount; i++ ) + { + uniqueDescriptorSets.push_back( UniqueHandle( descriptorSets[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniqueDescriptorSets ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, DescriptorSetAllocator>>::type Device::allocateDescriptorSetsUnique( const DescriptorSetAllocateInfo & allocateInfo, DescriptorSetAllocator & descriptorSetAllocator, Dispatch const & d ) const + { + std::vector, DescriptorSetAllocator> uniqueDescriptorSets( descriptorSetAllocator ); + std::vector descriptorSets( allocateInfo.descriptorSetCount ); + Result result = static_cast( d.vkAllocateDescriptorSets( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( descriptorSets.data() ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + uniqueDescriptorSets.reserve( allocateInfo.descriptorSetCount ); + PoolFree deleter( *this, allocateInfo.descriptorPool, d ); + for ( size_t i=0; i < allocateInfo.descriptorSetCount; i++ ) + { + uniqueDescriptorSets.push_back( UniqueHandle( descriptorSets[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniqueDescriptorSets ), VULKAN_HPP_NAMESPACE_STRING "::Device::allocateDescriptorSetsUnique" ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::allocateMemory( const VULKAN_HPP_NAMESPACE::MemoryAllocateInfo* pAllocateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeviceMemory* pMemory, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAllocateMemory( m_device, reinterpret_cast( pAllocateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDeviceMemory *>( pMemory ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::allocateMemory( const MemoryAllocateInfo & allocateInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeviceMemory memory; + Result result = 
static_cast( d.vkAllocateMemory( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &memory ) ) ); + return createResultValue( result, memory, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemory" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::allocateMemoryUnique( const MemoryAllocateInfo & allocateInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeviceMemory memory; + Result result = static_cast( d.vkAllocateMemory( m_device, reinterpret_cast( &allocateInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &memory ) ) ); + ObjectFree deleter( *this, allocator, d ); + return createResultValue( result, memory, VULKAN_HPP_NAMESPACE_STRING "::Device::allocateMemoryUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindAccelerationStructureMemoryNV( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindAccelerationStructureMemoryInfoNV* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindAccelerationStructureMemoryNV( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindAccelerationStructureMemoryNV( ArrayProxy const & bindInfos, Dispatch const & d ) const + { + Result result = static_cast( d.vkBindAccelerationStructureMemoryNV( m_device, bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindAccelerationStructureMemoryNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindBufferMemory( m_device, static_cast( buffer ), static_cast( memory ), static_cast( memoryOffset ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindBufferMemory( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const + { + Result result = static_cast( d.vkBindBufferMemory( m_device, static_cast( buffer ), static_cast( memory ), static_cast( memoryOffset ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindBufferMemory2( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindBufferMemory2( ArrayProxy const & bindInfos, Dispatch const & d ) const + { + Result result = 
static_cast( d.vkBindBufferMemory2( m_device, bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindBufferMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindBufferMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindBufferMemory2KHR( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindBufferMemory2KHR( ArrayProxy const & bindInfos, Dispatch const & d ) const + { + Result result = static_cast( d.vkBindBufferMemory2KHR( m_device, bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindBufferMemory2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindImageMemory( m_device, static_cast( image ), static_cast( memory ), static_cast( memoryOffset ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindImageMemory( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize memoryOffset, Dispatch const & d ) const + { + Result result = static_cast( d.vkBindImageMemory( m_device, static_cast( image ), static_cast( memory ), static_cast( memoryOffset ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory2( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindImageMemory2( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindImageMemory2( ArrayProxy const & bindInfos, Dispatch const & d ) const + { + Result result = static_cast( d.vkBindImageMemory2( m_device, bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::bindImageMemory2KHR( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindImageMemoryInfo* pBindInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBindImageMemory2KHR( m_device, bindInfoCount, reinterpret_cast( pBindInfos ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::bindImageMemory2KHR( ArrayProxy const & bindInfos, Dispatch const & d ) const + { + Result result = 
static_cast( d.vkBindImageMemory2KHR( m_device, bindInfos.size(), reinterpret_cast( bindInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::bindImageMemory2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, uint32_t infoCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pInfos, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildRangeInfoKHR* const * ppBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkBuildAccelerationStructuresKHR( m_device, static_cast( deferredOperation ), infoCount, reinterpret_cast( pInfos ), reinterpret_cast( ppBuildRangeInfos ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::buildAccelerationStructuresKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, ArrayProxy const & infos, ArrayProxy const & pBuildRangeInfos, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( infos.size() == pBuildRangeInfos.size() ); +#else + if ( infos.size() != pBuildRangeInfos.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR: infos.size() != pBuildRangeInfos.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + + Result result = static_cast( d.vkBuildAccelerationStructuresKHR( m_device, static_cast( deferredOperation ), infos.size(), reinterpret_cast( infos.data() ), reinterpret_cast( pBuildRangeInfos.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::buildAccelerationStructuresKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCompileDeferredNV( m_device, static_cast( pipeline ), shader ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::compileDeferredNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t shader, Dispatch const & d ) const + { + Result result = static_cast( d.vkCompileDeferredNV( m_device, static_cast( pipeline ), shader ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::compileDeferredNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCopyAccelerationStructureKHR( m_device, static_cast( deferredOperation ), reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureInfoKHR & info, Dispatch const & d ) const + { + Result result = 
static_cast( d.vkCopyAccelerationStructureKHR( m_device, static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyAccelerationStructureToMemoryInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCopyAccelerationStructureToMemoryKHR( m_device, static_cast( deferredOperation ), reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyAccelerationStructureToMemoryKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyAccelerationStructureToMemoryInfoKHR & info, Dispatch const & d ) const + { + Result result = static_cast( d.vkCopyAccelerationStructureToMemoryKHR( m_device, static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyAccelerationStructureToMemoryKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const VULKAN_HPP_NAMESPACE::CopyMemoryToAccelerationStructureInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCopyMemoryToAccelerationStructureKHR( m_device, static_cast( deferredOperation ), reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::copyMemoryToAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, const CopyMemoryToAccelerationStructureInfoKHR & info, Dispatch const & d ) const + { + Result result = static_cast( d.vkCopyMemoryToAccelerationStructureKHR( m_device, static_cast( deferredOperation ), reinterpret_cast( &info ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::copyMemoryToAccelerationStructureKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createAccelerationStructureKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructure, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkAccelerationStructureKHR *>( pAccelerationStructure ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename 
ResultValueType::type Device::createAccelerationStructureKHR( const AccelerationStructureCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure; + Result result = static_cast( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &accelerationStructure ) ) ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createAccelerationStructureKHRUnique( const AccelerationStructureCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure; + Result result = static_cast( d.vkCreateAccelerationStructureKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &accelerationStructure ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createAccelerationStructureNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::AccelerationStructureNV* pAccelerationStructure, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkAccelerationStructureNV *>( pAccelerationStructure ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::createAccelerationStructureNV( const AccelerationStructureCreateInfoNV & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure; + Result result = static_cast( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &accelerationStructure ) ) ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNV" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createAccelerationStructureNVUnique( const AccelerationStructureCreateInfoNV & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure; + Result result = static_cast( d.vkCreateAccelerationStructureNV( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &accelerationStructure ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, accelerationStructure, VULKAN_HPP_NAMESPACE_STRING "::Device::createAccelerationStructureNVUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::createBuffer( const VULKAN_HPP_NAMESPACE::BufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Buffer* pBuffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateBuffer( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkBuffer *>( pBuffer ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createBuffer( const BufferCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Buffer buffer; + Result result = static_cast( d.vkCreateBuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &buffer ) ) ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createBuffer" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createBufferUnique( const BufferCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Buffer buffer; + Result result = static_cast( d.vkCreateBuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &buffer ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createBufferView( const VULKAN_HPP_NAMESPACE::BufferViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::BufferView* pView, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateBufferView( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkBufferView *>( pView ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createBufferView( const BufferViewCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::BufferView view; + Result result = static_cast( d.vkCreateBufferView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferView" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createBufferViewUnique( const BufferViewCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::BufferView view; + Result result = static_cast( d.vkCreateBufferView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createBufferViewUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::createCommandPool( const VULKAN_HPP_NAMESPACE::CommandPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::CommandPool* pCommandPool, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateCommandPool( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkCommandPool *>( pCommandPool ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createCommandPool( const CommandPoolCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::CommandPool commandPool; + Result result = static_cast( d.vkCreateCommandPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &commandPool ) ) ); + return createResultValue( result, commandPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPool" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createCommandPoolUnique( const CommandPoolCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::CommandPool commandPool; + Result result = static_cast( d.vkCreateCommandPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &commandPool ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, commandPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createCommandPoolUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::ComputePipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createComputePipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size(), 
pipelineAllocator ); + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::createComputePipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipeline", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines; + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createComputePipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( 
uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createComputePipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const ComputePipelineCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateComputePipelines( m_device, static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createComputePipelineUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDeferredOperationKHR( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DeferredOperationKHR* pDeferredOperation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast( pAllocator ), reinterpret_cast< VkDeferredOperationKHR *>( pDeferredOperation ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::createDeferredOperationKHR( Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation; + Result result = static_cast( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &deferredOperation ) ) ); + return createResultValue( result, deferredOperation, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDeferredOperationKHRUnique( Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation; + Result result = static_cast( d.vkCreateDeferredOperationKHR( m_device, reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &deferredOperation ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, deferredOperation, VULKAN_HPP_NAMESPACE_STRING "::Device::createDeferredOperationKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorPool( const VULKAN_HPP_NAMESPACE::DescriptorPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorPool* pDescriptorPool, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDescriptorPool( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDescriptorPool *>( pDescriptorPool ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createDescriptorPool( const DescriptorPoolCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + 
+    VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool;
+    Result result = static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkDescriptorPool *>( &descriptorPool ) ) );
+    return createResultValue( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPool" );
+  }
+
+# ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DescriptorPool, Dispatch>>::type Device::createDescriptorPoolUnique( const DescriptorPoolCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const
+  {
+    VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool;
+    Result result = static_cast<Result>( d.vkCreateDescriptorPool( m_device, reinterpret_cast<const VkDescriptorPoolCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkDescriptorPool *>( &descriptorPool ) ) );
+    ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d );
+    return createResultValue<DescriptorPool, Dispatch>( result, descriptorPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorPoolUnique", deleter );
+  }
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorSetLayout( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorSetLayout* pSetLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( pCreateInfo ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ), reinterpret_cast< VkDescriptorSetLayout *>( pSetLayout ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DescriptorSetLayout>::type Device::createDescriptorSetLayout( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const
+  {
+    VULKAN_HPP_NAMESPACE::DescriptorSetLayout setLayout;
+    Result result = static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout *>( &setLayout ) ) );
+    return createResultValue( result, setLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayout" );
+  }
+
+# ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DescriptorSetLayout, Dispatch>>::type Device::createDescriptorSetLayoutUnique( const DescriptorSetLayoutCreateInfo & createInfo, Optional<const AllocationCallbacks> allocator, Dispatch const & d ) const
+  {
+    VULKAN_HPP_NAMESPACE::DescriptorSetLayout setLayout;
+    Result result = static_cast<Result>( d.vkCreateDescriptorSetLayout( m_device, reinterpret_cast<const VkDescriptorSetLayoutCreateInfo *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkDescriptorSetLayout *>( &setLayout ) ) );
+    ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d );
+    return createResultValue<DescriptorSetLayout, Dispatch>( result, setLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorSetLayoutUnique", deleter );
+  }
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplate( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkCreateDescriptorUpdateTemplate( m_device,
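  /* Illustrative usage sketch for the descriptor-set-layout and descriptor-pool wrappers
     above; not part of the vendored header. Assumes a valid vk::Device `device`. With
     exceptions enabled, the enhanced overloads return the created handle directly and
     throw vk::SystemError on failure. */
  vk::DescriptorSetLayoutBinding binding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex );
  vk::UniqueDescriptorSetLayout set_layout =
      device.createDescriptorSetLayoutUnique( vk::DescriptorSetLayoutCreateInfo( {}, 1, &binding ) );

  vk::DescriptorPoolSize pool_size( vk::DescriptorType::eUniformBuffer, 16 );
  vk::UniqueDescriptorPool pool = device.createDescriptorPoolUnique(
      vk::DescriptorPoolCreateInfo( vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, 16, 1, &pool_size ) );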
reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDescriptorUpdateTemplate *>( pDescriptorUpdateTemplate ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createDescriptorUpdateTemplate( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplate" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDescriptorUpdateTemplateUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplate( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createDescriptorUpdateTemplateKHR( const VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplateCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate* pDescriptorUpdateTemplate, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDescriptorUpdateTemplate *>( pDescriptorUpdateTemplate ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createDescriptorUpdateTemplateKHR( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createDescriptorUpdateTemplateKHRUnique( const DescriptorUpdateTemplateCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate; + Result result = static_cast( d.vkCreateDescriptorUpdateTemplateKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( 
static_cast( allocator ) ), reinterpret_cast( &descriptorUpdateTemplate ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, descriptorUpdateTemplate, VULKAN_HPP_NAMESPACE_STRING "::Device::createDescriptorUpdateTemplateKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createEvent( const VULKAN_HPP_NAMESPACE::EventCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Event* pEvent, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateEvent( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkEvent *>( pEvent ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createEvent( const EventCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Event event; + Result result = static_cast( d.vkCreateEvent( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &event ) ) ); + return createResultValue( result, event, VULKAN_HPP_NAMESPACE_STRING "::Device::createEvent" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createEventUnique( const EventCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Event event; + Result result = static_cast( d.vkCreateEvent( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &event ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, event, VULKAN_HPP_NAMESPACE_STRING "::Device::createEventUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createFence( const VULKAN_HPP_NAMESPACE::FenceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateFence( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkFence *>( pFence ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createFence( const FenceCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Fence fence; + Result result = static_cast( d.vkCreateFence( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::createFence" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createFenceUnique( const FenceCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Fence fence; + Result result = static_cast( d.vkCreateFence( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), 
reinterpret_cast( &fence ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::createFenceUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createFramebuffer( const VULKAN_HPP_NAMESPACE::FramebufferCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Framebuffer* pFramebuffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateFramebuffer( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkFramebuffer *>( pFramebuffer ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createFramebuffer( const FramebufferCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Framebuffer framebuffer; + Result result = static_cast( d.vkCreateFramebuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &framebuffer ) ) ); + return createResultValue( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebuffer" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createFramebufferUnique( const FramebufferCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Framebuffer framebuffer; + Result result = static_cast( d.vkCreateFramebuffer( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &framebuffer ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, framebuffer, VULKAN_HPP_NAMESPACE_STRING "::Device::createFramebufferUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::GraphicsPipelineCreateInfo* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, 
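  /* Illustrative usage sketch for the fence wrappers above; not part of the vendored
     header. Assumes a valid vk::Device `device` and work submitted on a queue that
     signals the fence. */
  vk::UniqueFence fence = device.createFenceUnique( vk::FenceCreateInfo( vk::FenceCreateFlagBits::eSignaled ) );
  device.resetFences( fence.get() );                        // single-fence ArrayProxy overload
  // ... submit work that signals the fence, then block until it completes:
  vk::Result wait_result = device.waitForFences( fence.get(), VK_TRUE, UINT64_MAX );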
VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createGraphicsPipelines( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelines", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::createGraphicsPipeline( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipeline", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines; + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createGraphicsPipelinesUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( 
( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelinesUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createGraphicsPipelineUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const GraphicsPipelineCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateGraphicsPipelines( m_device, static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createGraphicsPipelineUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createImage( const VULKAN_HPP_NAMESPACE::ImageCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Image* pImage, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateImage( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkImage *>( pImage ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createImage( const ImageCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Image image; + Result result = static_cast( d.vkCreateImage( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &image ) ) ); + return createResultValue( result, image, VULKAN_HPP_NAMESPACE_STRING "::Device::createImage" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createImageUnique( const ImageCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Image image; + Result result = static_cast( d.vkCreateImage( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &image ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, image, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createImageView( const VULKAN_HPP_NAMESPACE::ImageViewCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ImageView* pView, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateImageView( m_device, 
reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkImageView *>( pView ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createImageView( const ImageViewCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageView view; + Result result = static_cast( d.vkCreateImageView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageView" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createImageViewUnique( const ImageViewCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageView view; + Result result = static_cast( d.vkCreateImageView( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &view ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, view, VULKAN_HPP_NAMESPACE_STRING "::Device::createImageViewUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createIndirectCommandsLayoutNV( const VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV* pIndirectCommandsLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkIndirectCommandsLayoutNV *>( pIndirectCommandsLayout ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createIndirectCommandsLayoutNV( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout; + Result result = static_cast( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &indirectCommandsLayout ) ) ); + return createResultValue( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNV" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createIndirectCommandsLayoutNVUnique( const IndirectCommandsLayoutCreateInfoNV & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout; + Result result = static_cast( d.vkCreateIndirectCommandsLayoutNV( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &indirectCommandsLayout ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, indirectCommandsLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createIndirectCommandsLayoutNVUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif 
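  /* Illustrative usage sketch for the image-view wrappers above; not part of the vendored
     header. Assumes a valid vk::Device `device` and a vk::Image `image` created with
     format eB8G8R8A8Unorm. */
  vk::ImageViewCreateInfo view_info(
      {}, image, vk::ImageViewType::e2D, vk::Format::eB8G8R8A8Unorm,
      vk::ComponentMapping(),                                                    // identity swizzle
      vk::ImageSubresourceRange( vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 ) );
  vk::UniqueImageView view = device.createImageViewUnique( view_info );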
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineCache( const VULKAN_HPP_NAMESPACE::PipelineCacheCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineCache* pPipelineCache, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreatePipelineCache( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPipelineCache *>( pPipelineCache ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createPipelineCache( const PipelineCacheCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache; + Result result = static_cast( d.vkCreatePipelineCache( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineCache ) ) ); + return createResultValue( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCache" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createPipelineCacheUnique( const PipelineCacheCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache; + Result result = static_cast( d.vkCreatePipelineCache( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineCache ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipelineCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineCacheUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPipelineLayout( const VULKAN_HPP_NAMESPACE::PipelineLayoutCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PipelineLayout* pPipelineLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreatePipelineLayout( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPipelineLayout *>( pPipelineLayout ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createPipelineLayout( const PipelineLayoutCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout; + Result result = static_cast( d.vkCreatePipelineLayout( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipelineLayout ) ) ); + return createResultValue( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayout" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createPipelineLayoutUnique( const PipelineLayoutCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout; + Result result = static_cast( d.vkCreatePipelineLayout( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( 
static_cast( allocator ) ), reinterpret_cast( &pipelineLayout ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipelineLayout, VULKAN_HPP_NAMESPACE_STRING "::Device::createPipelineLayoutUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createPrivateDataSlotEXT( const VULKAN_HPP_NAMESPACE::PrivateDataSlotCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT* pPrivateDataSlot, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPrivateDataSlotEXT *>( pPrivateDataSlot ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::createPrivateDataSlotEXT( const PrivateDataSlotCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot; + Result result = static_cast( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &privateDataSlot ) ) ); + return createResultValue( result, privateDataSlot, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createPrivateDataSlotEXTUnique( const PrivateDataSlotCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot; + Result result = static_cast( d.vkCreatePrivateDataSlotEXT( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &privateDataSlot ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, privateDataSlot, VULKAN_HPP_NAMESPACE_STRING "::Device::createPrivateDataSlotEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createQueryPool( const VULKAN_HPP_NAMESPACE::QueryPoolCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::QueryPool* pQueryPool, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateQueryPool( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkQueryPool *>( pQueryPool ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createQueryPool( const QueryPoolCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::QueryPool queryPool; + Result result = static_cast( d.vkCreateQueryPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &queryPool ) ) ); + return createResultValue( result, queryPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPool" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createQueryPoolUnique( const QueryPoolCreateInfo & createInfo, 
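  /* Illustrative usage sketch for the pipeline-layout wrappers above; not part of the
     vendored header. Assumes a valid vk::Device `device` and an existing
     vk::DescriptorSetLayout `set_layout`. */
  vk::DescriptorSetLayout set_layouts[] = { set_layout };
  vk::PushConstantRange push_range( vk::ShaderStageFlagBits::eVertex, 0, 64 );   // 64 bytes, e.g. one mat4
  vk::UniquePipelineLayout pipeline_layout = device.createPipelineLayoutUnique(
      vk::PipelineLayoutCreateInfo( {}, 1, set_layouts, 1, &push_range ) );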
Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::QueryPool queryPool; + Result result = static_cast( d.vkCreateQueryPool( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &queryPool ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, queryPool, VULKAN_HPP_NAMESPACE_STRING "::Device::createQueryPoolUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createRayTracingPipelinesKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::createRayTracingPipelineKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const 
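  /* Illustrative usage sketch for the query-pool wrappers above; not part of the vendored
     header. Assumes a valid vk::Device `device`. */
  vk::UniqueQueryPool timestamps =
      device.createQueryPoolUnique( vk::QueryPoolCreateInfo( {}, vk::QueryType::eTimestamp, 2 ) );
  // The two queries can then be written with vkCmdWriteTimestamp and read back with
  // vkGetQueryPoolResults to measure GPU time between them.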
RayTracingPipelineCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines; + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createRayTracingPipelinesKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + 
uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesKHRUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createRayTracingPipelineKHRUnique( VULKAN_HPP_NAMESPACE::DeferredOperationKHR deferredOperation, VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateRayTracingPipelinesKHR( m_device, static_cast( deferredOperation ), static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineKHRUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eOperationDeferredKHR, VULKAN_HPP_NAMESPACE::Result::eOperationNotDeferredKHR, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, uint32_t createInfoCount, const VULKAN_HPP_NAMESPACE::RayTracingPipelineCreateInfoNV* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Pipeline* pPipelines, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), createInfoCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast< VkPipeline *>( pPipelines ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createRayTracingPipelinesNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector pipelines( createInfos.size(), pipelineAllocator ); + Result result = static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), 
reinterpret_cast( pipelines.data() ) ) ); + return createResultValue( result, pipelines, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNV", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::createRayTracingPipelineNV( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNV", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines; + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue, PipelineAllocator>> Device::createRayTracingPipelinesNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, ArrayProxy const & createInfos, Optional allocator, PipelineAllocator & pipelineAllocator, Dispatch const & d ) const + { + std::vector, PipelineAllocator> uniquePipelines( pipelineAllocator ); + std::vector pipelines( createInfos.size() ); + Result result = static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( pipelines.data() ) ) ); + if ( ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess )|| ( result == VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT ) ) + { + uniquePipelines.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniquePipelines.push_back( UniqueHandle( pipelines[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniquePipelines ), VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelinesNVUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, 
VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::createRayTracingPipelineNVUnique( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const RayTracingPipelineCreateInfoNV & createInfo, Optional allocator, Dispatch const & d ) const + { + Pipeline pipeline; + Result result = static_cast( d.vkCreateRayTracingPipelinesNV( m_device, static_cast( pipelineCache ), 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &pipeline ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, pipeline, VULKAN_HPP_NAMESPACE_STRING "::Device::createRayTracingPipelineNVUnique", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::ePipelineCompileRequiredEXT }, deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateRenderPass( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkRenderPass *>( pRenderPass ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createRenderPass( const RenderPassCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createRenderPassUnique( const RenderPassCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPassUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass2( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateRenderPass2( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkRenderPass *>( pRenderPass ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createRenderPass2( const RenderPassCreateInfo2 & createInfo, Optional allocator, Dispatch const & d ) const + { + 
VULKAN_HPP_NAMESPACE::RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass2( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createRenderPass2Unique( const RenderPassCreateInfo2 & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass2( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2Unique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createRenderPass2KHR( const VULKAN_HPP_NAMESPACE::RenderPassCreateInfo2* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::RenderPass* pRenderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkRenderPass *>( pRenderPass ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createRenderPass2KHR( const RenderPassCreateInfo2 & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createRenderPass2KHRUnique( const RenderPassCreateInfo2 & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::RenderPass renderPass; + Result result = static_cast( d.vkCreateRenderPass2KHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &renderPass ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, renderPass, VULKAN_HPP_NAMESPACE_STRING "::Device::createRenderPass2KHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSampler( const VULKAN_HPP_NAMESPACE::SamplerCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Sampler* pSampler, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateSampler( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSampler *>( pSampler ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE 
typename ResultValueType::type Device::createSampler( const SamplerCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Sampler sampler; + Result result = static_cast( d.vkCreateSampler( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &sampler ) ) ); + return createResultValue( result, sampler, VULKAN_HPP_NAMESPACE_STRING "::Device::createSampler" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSamplerUnique( const SamplerCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Sampler sampler; + Result result = static_cast( d.vkCreateSampler( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &sampler ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, sampler, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversion( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSamplerYcbcrConversion *>( pYcbcrConversion ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createSamplerYcbcrConversion( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversion" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSamplerYcbcrConversionUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversion( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSamplerYcbcrConversionKHR( const VULKAN_HPP_NAMESPACE::SamplerYcbcrConversionCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion* pYcbcrConversion, 
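  /* Illustrative usage sketch for the sampler wrappers above; not part of the vendored
     header. Assumes a valid vk::Device `device`; the create info is filled field by field
     since vk::SamplerCreateInfo has many parameters. */
  vk::SamplerCreateInfo sampler_info;
  sampler_info.magFilter    = vk::Filter::eLinear;
  sampler_info.minFilter    = vk::Filter::eLinear;
  sampler_info.mipmapMode   = vk::SamplerMipmapMode::eLinear;
  sampler_info.addressModeU = vk::SamplerAddressMode::eRepeat;
  sampler_info.addressModeV = vk::SamplerAddressMode::eRepeat;
  sampler_info.addressModeW = vk::SamplerAddressMode::eRepeat;
  sampler_info.maxLod       = VK_LOD_CLAMP_NONE;
  vk::UniqueSampler sampler = device.createSamplerUnique( sampler_info );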
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSamplerYcbcrConversion *>( pYcbcrConversion ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createSamplerYcbcrConversionKHR( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSamplerYcbcrConversionKHRUnique( const SamplerYcbcrConversionCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion; + Result result = static_cast( d.vkCreateSamplerYcbcrConversionKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &ycbcrConversion ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, ycbcrConversion, VULKAN_HPP_NAMESPACE_STRING "::Device::createSamplerYcbcrConversionKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Semaphore* pSemaphore, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateSemaphore( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSemaphore *>( pSemaphore ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createSemaphore( const SemaphoreCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Semaphore semaphore; + Result result = static_cast( d.vkCreateSemaphore( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &semaphore ) ) ); + return createResultValue( result, semaphore, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphore" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSemaphoreUnique( const SemaphoreCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Semaphore semaphore; + Result result = static_cast( d.vkCreateSemaphore( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &semaphore ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, semaphore, VULKAN_HPP_NAMESPACE_STRING "::Device::createSemaphoreUnique", deleter ); + } +# endif 
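  /* Illustrative usage sketch for the semaphore wrappers above; not part of the vendored
     header. Assumes a valid vk::Device `device`. */
  vk::UniqueSemaphore image_available = device.createSemaphoreUnique( vk::SemaphoreCreateInfo() );
  // The raw handle (image_available.get()) is what goes into vk::SubmitInfo or
  // vk::PresentInfoKHR as a wait or signal semaphore.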
/*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createShaderModule( const VULKAN_HPP_NAMESPACE::ShaderModuleCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ShaderModule* pShaderModule, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateShaderModule( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkShaderModule *>( pShaderModule ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createShaderModule( const ShaderModuleCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ShaderModule shaderModule; + Result result = static_cast( d.vkCreateShaderModule( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shaderModule ) ) ); + return createResultValue( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModule" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createShaderModuleUnique( const ShaderModuleCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ShaderModule shaderModule; + Result result = static_cast( d.vkCreateShaderModule( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &shaderModule ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, shaderModule, VULKAN_HPP_NAMESPACE_STRING "::Device::createShaderModuleUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSharedSwapchainsKHR( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfos, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateSharedSwapchainsKHR( m_device, swapchainCount, reinterpret_cast( pCreateInfos ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSwapchainKHR *>( pSwapchains ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSharedSwapchainsKHR( ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector swapchains( createInfos.size() ); + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( swapchains.data() ) ) ); + return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSharedSwapchainsKHR( ArrayProxy const & createInfos, Optional allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d ) const + { + std::vector swapchains( createInfos.size(), swapchainKHRAllocator ); + Result result = 
static_cast( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( swapchains.data() ) ) ); + return createResultValue( result, swapchains, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHR" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createSharedSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, SwapchainKHRAllocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy const & createInfos, Optional allocator, Dispatch const & d ) const + { + std::vector, SwapchainKHRAllocator> uniqueSwapchains; + std::vector swapchains( createInfos.size() ); + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( swapchains.data() ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + uniqueSwapchains.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniqueSwapchains.push_back( UniqueHandle( swapchains[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniqueSwapchains ), VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" ); + } + + template >::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, SwapchainKHRAllocator>>::type Device::createSharedSwapchainsKHRUnique( ArrayProxy const & createInfos, Optional allocator, SwapchainKHRAllocator & swapchainKHRAllocator, Dispatch const & d ) const + { + std::vector, SwapchainKHRAllocator> uniqueSwapchains( swapchainKHRAllocator ); + std::vector swapchains( createInfos.size() ); + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, createInfos.size(), reinterpret_cast( createInfos.data() ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( swapchains.data() ) ) ); + if ( result == VULKAN_HPP_NAMESPACE::Result::eSuccess ) + { + uniqueSwapchains.reserve( createInfos.size() ); + ObjectDestroy deleter( *this, allocator, d ); + for ( size_t i=0; i < createInfos.size(); i++ ) + { + uniqueSwapchains.push_back( UniqueHandle( swapchains[i], deleter ) ); + } + } + return createResultValue( result, std::move( uniqueSwapchains ), VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainsKHRUnique" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSharedSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSharedSwapchainsKHR( m_device, 1, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + ObjectDestroy deleter( *this, 
allocator, d ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSharedSwapchainKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createSwapchainKHR( const VULKAN_HPP_NAMESPACE::SwapchainCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateSwapchainKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSwapchainKHR *>( pSwapchain ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::createSwapchainKHR( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSwapchainKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::createSwapchainKHRUnique( const SwapchainCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain; + Result result = static_cast( d.vkCreateSwapchainKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &swapchain ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, swapchain, VULKAN_HPP_NAMESPACE_STRING "::Device::createSwapchainKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::createValidationCacheEXT( const VULKAN_HPP_NAMESPACE::ValidationCacheCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pValidationCache, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkValidationCacheEXT *>( pValidationCache ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::createValidationCacheEXT( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache; + Result result = static_cast( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &validationCache ) ) ); + return createResultValue( result, validationCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::createValidationCacheEXTUnique( const ValidationCacheCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ValidationCacheEXT 
validationCache;
+    Result result = static_cast<Result>( d.vkCreateValidationCacheEXT( m_device, reinterpret_cast<const VkValidationCacheCreateInfoEXT *>( &createInfo ), reinterpret_cast<const VkAllocationCallbacks *>( static_cast<const AllocationCallbacks *>( allocator ) ), reinterpret_cast<VkValidationCacheEXT *>( &validationCache ) ) );
+    ObjectDestroy<Device, Dispatch> deleter( *this, allocator, d );
+    return createResultValue<ValidationCacheEXT, Dispatch>( result, validationCache, VULKAN_HPP_NAMESPACE_STRING "::Device::createValidationCacheEXTUnique", deleter );
+  }
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectNameInfoEXT* pNameInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT *>( pNameInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::debugMarkerSetObjectNameEXT( const DebugMarkerObjectNameInfoEXT & nameInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkDebugMarkerSetObjectNameEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectNameInfoEXT *>( &nameInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectNameEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::debugMarkerSetObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugMarkerObjectTagInfoEXT* pTagInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT *>( pTagInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::debugMarkerSetObjectTagEXT( const DebugMarkerObjectTagInfoEXT & tagInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkDebugMarkerSetObjectTagEXT( m_device, reinterpret_cast<const VkDebugMarkerObjectTagInfoEXT *>( &tagInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::debugMarkerSetObjectTagEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkDeferredOperationJoinKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ) );
+  }
+#else
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::deferredOperationJoinKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkDeferredOperationJoinKHR( m_device, static_cast<VkDeferredOperationKHR>( operation ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::deferredOperationJoinKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eThreadDoneKHR, VULKAN_HPP_NAMESPACE::Result::eThreadIdleKHR } );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkDestroyAccelerationStructureKHR( m_device, static_cast<VkAccelerationStructureKHR>( accelerationStructure ), reinterpret_cast<const VkAllocationCallbacks *>( pAllocator ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void
Device::destroyAccelerationStructureKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureKHR( m_device, static_cast( accelerationStructure ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureKHR( m_device, static_cast( accelerationStructure ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureKHR accelerationStructure, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureKHR( m_device, static_cast( accelerationStructure ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast( accelerationStructure ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyAccelerationStructureNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast( accelerationStructure ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast( accelerationStructure ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyAccelerationStructureNV( m_device, static_cast( accelerationStructure ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyBuffer( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Buffer buffer, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBuffer( m_device, static_cast( buffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyBufferView( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::BufferView bufferView, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyBufferView( m_device, static_cast( bufferView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::CommandPool commandPool, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyCommandPool( m_device, static_cast( commandPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, 
Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDeferredOperationKHR( m_device, static_cast( operation ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDeferredOperationKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDeferredOperationKHR( m_device, static_cast( operation ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDeferredOperationKHR( m_device, static_cast( operation ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDeferredOperationKHR( m_device, static_cast( operation ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorPool( m_device, static_cast( descriptorPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorSetLayout( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + 
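+  // Illustrative usage sketch (not part of the generated header): with enhanced mode
+  // enabled and exceptions on, the create/destroy wrappers in this file are typically
+  // used as below; `device` is assumed to be a valid vk::Device and `info` a filled-in
+  // create-info structure (both names are placeholders for this sketch).
+  //
+  //   vk::SamplerCreateInfo info{};                             // fill members as needed
+  //   vk::Sampler sampler = device.createSampler( info );       // throws a vk::SystemError subclass on failure
+  //   device.destroy( sampler );                                 // generic destroy overload defined below
+  //
+  //   auto uniqueSampler = device.createSamplerUnique( info );  // destroyed automatically when it goes out of scope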
+ + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorSetLayout descriptorSetLayout, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorSetLayout( m_device, static_cast( descriptorSetLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplate( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyDescriptorUpdateTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorUpdateTemplateKHR( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDescriptorUpdateTemplate( m_device, static_cast( descriptorUpdateTemplate ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDevice( m_device, reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
VULKAN_HPP_INLINE void Device::destroy( Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDevice( m_device, reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyEvent( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyEvent( VULKAN_HPP_NAMESPACE::Event event, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Event event, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Event event, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyEvent( m_device, static_cast( event ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyFence( VULKAN_HPP_NAMESPACE::Fence fence, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Fence fence, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Fence fence, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFence( m_device, static_cast( fence ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyFramebuffer( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Framebuffer framebuffer, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyFramebuffer( m_device, static_cast( framebuffer ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyImage( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyImage( VULKAN_HPP_NAMESPACE::Image image, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Image image, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImage( m_device, static_cast( image ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyImageView( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ImageView imageView, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyImageView( m_device, static_cast( imageView ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV 
indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyIndirectCommandsLayoutNV( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV indirectCommandsLayout, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyIndirectCommandsLayoutNV( m_device, static_cast( indirectCommandsLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipeline( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Pipeline pipeline, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipeline( m_device, static_cast( pipeline ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipelineCache( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineCache( m_device, static_cast( pipelineCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPipelineLayout( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PipelineLayout pipelineLayout, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPipelineLayout( m_device, static_cast( pipelineLayout ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast( privateDataSlot ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyPrivateDataSlotEXT( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast( privateDataSlot ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyPrivateDataSlotEXT( m_device, static_cast( privateDataSlot ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + 
d.vkDestroyPrivateDataSlotEXT( m_device, static_cast( privateDataSlot ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::QueryPool queryPool, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyQueryPool( m_device, static_cast( queryPool ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyRenderPass( m_device, static_cast( renderPass ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyRenderPass( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyRenderPass( m_device, static_cast( renderPass ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyRenderPass( m_device, static_cast( renderPass ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyRenderPass( m_device, static_cast( renderPass ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySampler( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( static_cast( 
allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Sampler sampler, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySampler( m_device, static_cast( sampler ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversion( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast( ycbcrConversion ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySamplerYcbcrConversionKHR( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySamplerYcbcrConversionKHR( m_device, static_cast( ycbcrConversion ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion ycbcrConversion, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySamplerYcbcrConversion( m_device, static_cast( ycbcrConversion ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySemaphore( VULKAN_HPP_NAMESPACE::Semaphore 
semaphore, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySemaphore( m_device, static_cast( semaphore ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyShaderModule( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ShaderModule shaderModule, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyShaderModule( m_device, static_cast( shaderModule ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroySwapchainKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Optional allocator, Dispatch const & 
d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySwapchainKHR( m_device, static_cast( swapchain ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroyValidationCacheEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::destroy( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyValidationCacheEXT( m_device, static_cast( validationCache ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitIdle( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkDeviceWaitIdle( m_device ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::waitIdle( Dispatch const & d ) const + { + Result result = static_cast( d.vkDeviceWaitIdle( m_device ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitIdle" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayPowerInfoEXT* pDisplayPowerInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkDisplayPowerControlEXT( m_device, static_cast( display ), reinterpret_cast( pDisplayPowerInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::displayPowerControlEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayPowerInfoEXT & displayPowerInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkDisplayPowerControlEXT( m_device, static_cast( display ), reinterpret_cast( &displayPowerInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::displayPowerControlEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::flushMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkFlushMappedMemoryRanges( m_device, memoryRangeCount, 
reinterpret_cast( pMemoryRanges ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::flushMappedMemoryRanges( ArrayProxy const & memoryRanges, Dispatch const & d ) const + { + Result result = static_cast( d.vkFlushMappedMemoryRanges( m_device, memoryRanges.size(), reinterpret_cast( memoryRanges.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::flushMappedMemoryRanges" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBufferCount, reinterpret_cast( pCommandBuffers ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::freeCommandBuffers( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy const & commandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBuffers.size(), reinterpret_cast( commandBuffers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, uint32_t commandBufferCount, const VULKAN_HPP_NAMESPACE::CommandBuffer* pCommandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBufferCount, reinterpret_cast( pCommandBuffers ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::CommandPool commandPool, ArrayProxy const & commandBuffers, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeCommandBuffers( m_device, static_cast( commandPool ), commandBuffers.size(), reinterpret_cast( commandBuffers.data() ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE Result Device::freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSetCount, reinterpret_cast( pDescriptorSets ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::freeDescriptorSets( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy const & descriptorSets, Dispatch const & d ) const + { + Result result = static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSets.size(), reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::freeDescriptorSets" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE Result Device::free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, uint32_t descriptorSetCount, const VULKAN_HPP_NAMESPACE::DescriptorSet* pDescriptorSets, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSetCount, reinterpret_cast( pDescriptorSets ) ) ); + } + +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::free( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, ArrayProxy const & descriptorSets, Dispatch const & d ) const + { + Result result = static_cast( d.vkFreeDescriptorSets( m_device, static_cast( descriptorPool ), descriptorSets.size(), reinterpret_cast( descriptorSets.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::free" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::freeMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Device::free( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkFreeMemory( m_device, static_cast( memory ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const VULKAN_HPP_NAMESPACE::AccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR* pSizeInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetAccelerationStructureBuildSizesKHR( m_device, static_cast( buildType ), reinterpret_cast( pBuildInfo ), pMaxPrimitiveCounts, reinterpret_cast< VkAccelerationStructureBuildSizesInfoKHR *>( pSizeInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR Device::getAccelerationStructureBuildSizesKHR( VULKAN_HPP_NAMESPACE::AccelerationStructureBuildTypeKHR buildType, const AccelerationStructureBuildGeometryInfoKHR & buildInfo, ArrayProxy const & maxPrimitiveCounts, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS + { +#ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( maxPrimitiveCounts.size() == buildInfo.geometryCount ); +#else + if ( maxPrimitiveCounts.size() != buildInfo.geometryCount ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureBuildSizesKHR: maxPrimitiveCounts.size() != buildInfo.geometryCount" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + VULKAN_HPP_NAMESPACE::AccelerationStructureBuildSizesInfoKHR sizeInfo; + d.vkGetAccelerationStructureBuildSizesKHR( m_device, static_cast( buildType ), reinterpret_cast( &buildInfo ), maxPrimitiveCounts.data(), reinterpret_cast( &sizeInfo ) ); + return sizeInfo; + } 
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE DeviceAddress Device::getAccelerationStructureAddressKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureDeviceAddressInfoKHR* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetAccelerationStructureDeviceAddressKHR( m_device, reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DeviceAddress Device::getAccelerationStructureAddressKHR( const AccelerationStructureDeviceAddressInfoKHR & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetAccelerationStructureDeviceAddressKHR( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetAccelerationStructureHandleNV( m_device, static_cast( accelerationStructure ), dataSize, pData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, ArrayProxy const &data, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetAccelerationStructureHandleNV( m_device, static_cast( accelerationStructure ), data.size() * sizeof( T ) , reinterpret_cast( data.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getAccelerationStructureHandleNV" ); + + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, size_t dataSize, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( d.vkGetAccelerationStructureHandleNV( m_device, static_cast( accelerationStructure ), data.size() * sizeof( T ), reinterpret_cast( data.data() ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getAccelerationStructureHandleNV( VULKAN_HPP_NAMESPACE::AccelerationStructureNV accelerationStructure, Dispatch const & d ) const + { + T data; + Result result = static_cast( d.vkGetAccelerationStructureHandleNV( m_device, static_cast( accelerationStructure ), sizeof( T ), reinterpret_cast( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getAccelerationStructureHandleNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getAccelerationStructureMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::AccelerationStructureMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast( pInfo ), reinterpret_cast< VkMemoryRequirements2KHR *>( pMemoryRequirements 
) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR Device::getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR memoryRequirements; + d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getAccelerationStructureMemoryRequirementsNV( const AccelerationStructureMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2KHR & memoryRequirements = structureChain.template get(); + d.vkGetAccelerationStructureMemoryRequirementsNV( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer* buffer, VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, buffer, reinterpret_cast< VkAndroidHardwareBufferPropertiesANDROID *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID properties; + Result result = static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, &buffer, reinterpret_cast( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING "::Device::getAndroidHardwareBufferPropertiesANDROID" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::getAndroidHardwareBufferPropertiesANDROID( const struct AHardwareBuffer & buffer, Dispatch const & d ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::AndroidHardwareBufferPropertiesANDROID & properties = structureChain.template get(); + Result result = static_cast( d.vkGetAndroidHardwareBufferPropertiesANDROID( m_device, &buffer, reinterpret_cast( &properties ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::Device::getAndroidHardwareBufferPropertiesANDROID" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + template + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetBufferDeviceAddress( m_device, reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddress( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferDeviceAddress( m_device, reinterpret_cast( &info ) ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressEXT( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetBufferDeviceAddressEXT( m_device, reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressEXT( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferDeviceAddressEXT( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetBufferDeviceAddressKHR( m_device, reinterpret_cast( pInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE DeviceAddress Device::getBufferAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferDeviceAddressKHR( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetBufferMemoryRequirements( m_device, static_cast( buffer ), reinterpret_cast< VkMemoryRequirements *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Device::getBufferMemoryRequirements( VULKAN_HPP_NAMESPACE::Buffer buffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements; + d.vkGetBufferMemoryRequirements( m_device, static_cast( buffer ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getBufferMemoryRequirements2( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get(); + d.vkGetBufferMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getBufferMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::BufferMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getBufferMemoryRequirements2KHR( const BufferMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get(); + d.vkGetBufferMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferOpaqueCaptureAddress( m_device, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddress( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferOpaqueCaptureAddress( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::BufferDeviceAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferOpaqueCaptureAddressKHR( m_device, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE uint64_t Device::getBufferOpaqueCaptureAddressKHR( const BufferDeviceAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetBufferOpaqueCaptureAddressKHR( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getCalibratedTimestampsEXT( uint32_t timestampCount, const VULKAN_HPP_NAMESPACE::CalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetCalibratedTimestampsEXT( m_device, timestampCount, reinterpret_cast( pTimestampInfos ), pTimestamps, pMaxDeviation ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. 
Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getCalibratedTimestampsEXT( ArrayProxy const ×tampInfos, ArrayProxy const ×tamps, Dispatch const &d ) const + { + #ifdef VULKAN_HPP_NO_EXCEPTIONS + VULKAN_HPP_ASSERT( timestampInfos.size() == timestamps.size() ); +#else + if ( timestampInfos.size() != timestamps.size() ) + { + throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::VkDevice::getCalibratedTimestampsEXT: timestampInfos.size() != timestamps.size()" ); + } +#endif /*VULKAN_HPP_NO_EXCEPTIONS*/ + uint64_t maxDeviation; + Result result = static_cast( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size() , reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); + return createResultValue( result, maxDeviation, VULKAN_HPP_NAMESPACE_STRING"::Device::getCalibratedTimestampsEXT" ); + + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, uint64_t>>::type Device::getCalibratedTimestampsEXT( ArrayProxy const & timestampInfos, Dispatch const & d ) const + { + std::pair,uint64_t> data( std::piecewise_construct, std::forward_as_tuple( timestampInfos.size() ), std::forward_as_tuple( 0 ) ); + std::vector & timestamps = data.first; + uint64_t & maxDeviation = data.second; + Result result = static_cast( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size(), reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType, uint64_t>>::type Device::getCalibratedTimestampsEXT( ArrayProxy const & timestampInfos, Uint64_tAllocator & uint64_tAllocator, Dispatch const & d ) const + { + std::pair,uint64_t> data( std::piecewise_construct, std::forward_as_tuple( timestampInfos.size(), uint64_tAllocator ), std::forward_as_tuple( 0 ) ); + std::vector & timestamps = data.first; + uint64_t & maxDeviation = data.second; + Result result = static_cast( d.vkGetCalibratedTimestampsEXT( m_device, timestampInfos.size(), reinterpret_cast( timestampInfos.data() ), timestamps.data(), &maxDeviation ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getCalibratedTimestampsEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE uint32_t Device::getDeferredOperationMaxConcurrencyKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeferredOperationMaxConcurrencyKHR( m_device, static_cast( operation ) ); + } + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDeferredOperationResultKHR( m_device, static_cast( operation ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getDeferredOperationResultKHR( VULKAN_HPP_NAMESPACE::DeferredOperationKHR operation, Dispatch const & d ) const + { + Result result = static_cast( d.vkGetDeferredOperationResultKHR( m_device, static_cast( operation ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getDeferredOperationResultKHR", { 
VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupport( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast< VkDescriptorSetLayoutSupport *>( pSupport ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport support; + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return support; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getDescriptorSetLayoutSupport( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport & support = structureChain.template get(); + d.vkGetDescriptorSetLayoutSupport( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getDescriptorSetLayoutSupportKHR( const VULKAN_HPP_NAMESPACE::DescriptorSetLayoutCreateInfo* pCreateInfo, VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport* pSupport, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast( pCreateInfo ), reinterpret_cast< VkDescriptorSetLayoutSupport *>( pSupport ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport support; + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return support; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getDescriptorSetLayoutSupportKHR( const DescriptorSetLayoutCreateInfo & createInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::DescriptorSetLayoutSupport & support = structureChain.template get(); + d.vkGetDescriptorSetLayoutSupportKHR( m_device, reinterpret_cast( &createInfo ), reinterpret_cast( &support ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getAccelerationStructureCompatibilityKHR( const VULKAN_HPP_NAMESPACE::AccelerationStructureVersionInfoKHR* pVersionInfo, VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR* pCompatibility, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDeviceAccelerationStructureCompatibilityKHR( m_device, reinterpret_cast( pVersionInfo ), reinterpret_cast< VkAccelerationStructureCompatibilityKHR *>( pCompatibility ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE 
VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR Device::getAccelerationStructureCompatibilityKHR( const AccelerationStructureVersionInfoKHR & versionInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::AccelerationStructureCompatibilityKHR compatibility; + d.vkGetDeviceAccelerationStructureCompatibilityKHR( m_device, reinterpret_cast( &versionInfo ), reinterpret_cast( &compatibility ) ); + return compatibility; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast< VkPeerMemoryFeatureFlags *>( pPeerMemoryFeatures ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeatures( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags peerMemoryFeatures; + d.vkGetDeviceGroupPeerMemoryFeatures( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast( &peerMemoryFeatures ) ); + return peerMemoryFeatures; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags* pPeerMemoryFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast< VkPeerMemoryFeatureFlags *>( pPeerMemoryFeatures ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags Device::getGroupPeerMemoryFeaturesKHR( uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PeerMemoryFeatureFlags peerMemoryFeatures; + d.vkGetDeviceGroupPeerMemoryFeaturesKHR( m_device, heapIndex, localDeviceIndex, remoteDeviceIndex, reinterpret_cast( &peerMemoryFeatures ) ); + return peerMemoryFeatures; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupPresentCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast< VkDeviceGroupPresentCapabilitiesKHR *>( pDeviceGroupPresentCapabilities ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getGroupPresentCapabilitiesKHR( Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeviceGroupPresentCapabilitiesKHR deviceGroupPresentCapabilities; + Result result = static_cast( d.vkGetDeviceGroupPresentCapabilitiesKHR( m_device, reinterpret_cast( &deviceGroupPresentCapabilities ) ) ); + return createResultValue( result, deviceGroupPresentCapabilities, VULKAN_HPP_NAMESPACE_STRING 
"::Device::getGroupPresentCapabilitiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast( pSurfaceInfo ), reinterpret_cast< VkDeviceGroupPresentModeFlagsKHR *>( pModes ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getGroupSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; + Result result = static_cast( d.vkGetDeviceGroupSurfacePresentModes2EXT( m_device, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &modes ) ) ); + return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModes2EXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR* pModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast( surface ), reinterpret_cast< VkDeviceGroupPresentModeFlagsKHR *>( pModes ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getGroupSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DeviceGroupPresentModeFlagsKHR modes; + Result result = static_cast( d.vkGetDeviceGroupSurfacePresentModesKHR( m_device, static_cast( surface ), reinterpret_cast( &modes ) ) ); + return createResultValue( result, modes, VULKAN_HPP_NAMESPACE_STRING "::Device::getGroupSurfacePresentModesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize* pCommittedMemoryInBytes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDeviceMemoryCommitment( m_device, static_cast( memory ), reinterpret_cast< VkDeviceSize *>( pCommittedMemoryInBytes ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::DeviceSize Device::getMemoryCommitment( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::DeviceSize committedMemoryInBytes; + d.vkGetDeviceMemoryCommitment( m_device, static_cast( memory ), reinterpret_cast( &committedMemoryInBytes ) ); + return committedMemoryInBytes; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddress( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeviceMemoryOpaqueCaptureAddress( m_device, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + 
template + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddress( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeviceMemoryOpaqueCaptureAddress( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddressKHR( const VULKAN_HPP_NAMESPACE::DeviceMemoryOpaqueCaptureAddressInfo* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeviceMemoryOpaqueCaptureAddressKHR( m_device, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE uint64_t Device::getMemoryOpaqueCaptureAddressKHR( const DeviceMemoryOpaqueCaptureAddressInfo & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeviceMemoryOpaqueCaptureAddressKHR( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const char* pName, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeviceProcAddr( m_device, pName ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Device::getProcAddr( const std::string & name, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetDeviceProcAddr( m_device, name.c_str() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast< VkQueue *>( pQueue ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Queue Device::getQueue( uint32_t queueFamilyIndex, uint32_t queueIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::Queue queue; + d.vkGetDeviceQueue( m_device, queueFamilyIndex, queueIndex, reinterpret_cast( &queue ) ); + return queue; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getQueue2( const VULKAN_HPP_NAMESPACE::DeviceQueueInfo2* pQueueInfo, VULKAN_HPP_NAMESPACE::Queue* pQueue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetDeviceQueue2( m_device, reinterpret_cast( pQueueInfo ), reinterpret_cast< VkQueue *>( pQueue ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Queue Device::getQueue2( const DeviceQueueInfo2 & queueInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::Queue queue; + d.vkGetDeviceQueue2( m_device, reinterpret_cast( &queueInfo ), reinterpret_cast( &queue ) ); + return queue; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetEventStatus( m_device, static_cast( event ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getEventStatus( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const + { + Result result = static_cast( d.vkGetEventStatus( m_device, static_cast( event ) ) ); + return createResultValue( result, 
VULKAN_HPP_NAMESPACE_STRING "::Device::getEventStatus", { VULKAN_HPP_NAMESPACE::Result::eEventSet, VULKAN_HPP_NAMESPACE::Result::eEventReset } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceFdKHR( const VULKAN_HPP_NAMESPACE::FenceGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetFenceFdKHR( m_device, reinterpret_cast( pGetFdInfo ), pFd ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getFenceFdKHR( const FenceGetFdInfoKHR & getFdInfo, Dispatch const & d ) const + { + int fd; + Result result = static_cast( d.vkGetFenceFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetFenceStatus( m_device, static_cast( fence ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceStatus( VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const + { + Result result = static_cast( d.vkGetFenceStatus( m_device, static_cast( fence ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceStatus", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::FenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast( pGetWin32HandleInfo ), pHandle ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getFenceWin32HandleKHR( const FenceGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetFenceWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getFenceWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_INLINE void Device::getGeneratedCommandsMemoryRequirementsNV( const VULKAN_HPP_NAMESPACE::GeneratedCommandsMemoryRequirementsInfoNV* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; 
+ d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getGeneratedCommandsMemoryRequirementsNV( const GeneratedCommandsMemoryRequirementsInfoNV & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get(); + d.vkGetGeneratedCommandsMemoryRequirementsNV( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetImageDrmFormatModifierPropertiesEXT( m_device, static_cast( image ), reinterpret_cast< VkImageDrmFormatModifierPropertiesEXT *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::getImageDrmFormatModifierPropertiesEXT( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageDrmFormatModifierPropertiesEXT properties; + Result result = static_cast( d.vkGetImageDrmFormatModifierPropertiesEXT( m_device, static_cast( image ), reinterpret_cast( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageDrmFormatModifierPropertiesEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, VULKAN_HPP_NAMESPACE::MemoryRequirements* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageMemoryRequirements( m_device, static_cast( image ), reinterpret_cast< VkMemoryRequirements *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements Device::getImageMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements memoryRequirements; + d.vkGetImageMemoryRequirements( m_device, static_cast( image ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + 
VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getImageMemoryRequirements2( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get(); + d.vkGetImageMemoryRequirements2( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageMemoryRequirementsInfo2* pInfo, VULKAN_HPP_NAMESPACE::MemoryRequirements2* pMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast( pInfo ), reinterpret_cast< VkMemoryRequirements2 *>( pMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryRequirements2 Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MemoryRequirements2 memoryRequirements; + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return memoryRequirements; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain Device::getImageMemoryRequirements2KHR( const ImageMemoryRequirementsInfo2 & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::MemoryRequirements2 & memoryRequirements = structureChain.template get(); + d.vkGetImageMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), reinterpret_cast( &memoryRequirements ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements* pSparseMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), pSparseMemoryRequirementCount, reinterpret_cast< VkSparseImageMemoryRequirements *>( pSparseMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, Dispatch const & d ) const + { + std::vector sparseMemoryRequirements; + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements( VULKAN_HPP_NAMESPACE::Image image, SparseImageMemoryRequirementsAllocator & sparseImageMemoryRequirementsAllocator, Dispatch const & d ) const + { + std::vector sparseMemoryRequirements( sparseImageMemoryRequirementsAllocator ); + uint32_t 
sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements( m_device, static_cast( image ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast< VkSparseImageMemoryRequirements2 *>( pSparseMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d ) const + { + std::vector sparseMemoryRequirements; + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements2( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d ) const + { + std::vector sparseMemoryRequirements( sparseImageMemoryRequirements2Allocator ); + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements2( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void Device::getImageSparseMemoryRequirements2KHR( const VULKAN_HPP_NAMESPACE::ImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VULKAN_HPP_NAMESPACE::SparseImageMemoryRequirements2* pSparseMemoryRequirements, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( pInfo ), pSparseMemoryRequirementCount, reinterpret_cast< VkSparseImageMemoryRequirements2 *>( pSparseMemoryRequirements ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, Dispatch const & d ) 
const + { + std::vector sparseMemoryRequirements; + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector Device::getImageSparseMemoryRequirements2KHR( const ImageSparseMemoryRequirementsInfo2 & info, SparseImageMemoryRequirements2Allocator & sparseImageMemoryRequirements2Allocator, Dispatch const & d ) const + { + std::vector sparseMemoryRequirements( sparseImageMemoryRequirements2Allocator ); + uint32_t sparseMemoryRequirementCount; + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, nullptr ); + sparseMemoryRequirements.resize( sparseMemoryRequirementCount ); + d.vkGetImageSparseMemoryRequirements2KHR( m_device, reinterpret_cast( &info ), &sparseMemoryRequirementCount, reinterpret_cast( sparseMemoryRequirements.data() ) ); + VULKAN_HPP_ASSERT( sparseMemoryRequirementCount <= sparseMemoryRequirements.size() ); + return sparseMemoryRequirements; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const VULKAN_HPP_NAMESPACE::ImageSubresource* pSubresource, VULKAN_HPP_NAMESPACE::SubresourceLayout* pLayout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetImageSubresourceLayout( m_device, static_cast( image ), reinterpret_cast( pSubresource ), reinterpret_cast< VkSubresourceLayout *>( pLayout ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::SubresourceLayout Device::getImageSubresourceLayout( VULKAN_HPP_NAMESPACE::Image image, const ImageSubresource & subresource, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::SubresourceLayout layout; + d.vkGetImageSubresourceLayout( m_device, static_cast( image ), reinterpret_cast( &subresource ), reinterpret_cast( &layout ) ); + return layout; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetImageViewAddressNVX( m_device, static_cast( imageView ), reinterpret_cast< VkImageViewAddressPropertiesNVX *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getImageViewAddressNVX( VULKAN_HPP_NAMESPACE::ImageView imageView, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageViewAddressPropertiesNVX properties; + Result result = static_cast( d.vkGetImageViewAddressNVX( m_device, static_cast( imageView ), reinterpret_cast( &properties ) ) ); + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING "::Device::getImageViewAddressNVX" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + 
VULKAN_HPP_INLINE uint32_t Device::getImageViewHandleNVX( const VULKAN_HPP_NAMESPACE::ImageViewHandleInfoNVX* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetImageViewHandleNVX( m_device, reinterpret_cast( pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE uint32_t Device::getImageViewHandleNVX( const ImageViewHandleInfoNVX & info, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetImageViewHandleNVX( m_device, reinterpret_cast( &info ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryAndroidHardwareBufferANDROID( const VULKAN_HPP_NAMESPACE::MemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast( pInfo ), pBuffer ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryAndroidHardwareBufferANDROID( const MemoryGetAndroidHardwareBufferInfoANDROID & info, Dispatch const & d ) const + { + struct AHardwareBuffer* buffer; + Result result = static_cast( d.vkGetMemoryAndroidHardwareBufferANDROID( m_device, reinterpret_cast( &info ), &buffer ) ); + return createResultValue( result, buffer, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryAndroidHardwareBufferANDROID" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryFdKHR( const VULKAN_HPP_NAMESPACE::MemoryGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryFdKHR( m_device, reinterpret_cast( pGetFdInfo ), pFd ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryFdKHR( const MemoryGetFdInfoKHR & getFdInfo, Dispatch const & d ) const + { + int fd; + Result result = static_cast( d.vkGetMemoryFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR* pMemoryFdProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast( handleType ), fd, reinterpret_cast< VkMemoryFdPropertiesKHR *>( pMemoryFdProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryFdPropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, int fd, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::MemoryFdPropertiesKHR memoryFdProperties; + Result result = static_cast( d.vkGetMemoryFdPropertiesKHR( m_device, static_cast( handleType ), fd, reinterpret_cast( &memoryFdProperties ) ) ); + return createResultValue( result, memoryFdProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryFdPropertiesKHR" ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast( handleType ), pHostPointer, reinterpret_cast< VkMemoryHostPointerPropertiesEXT *>( pMemoryHostPointerProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryHostPointerPropertiesEXT( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::MemoryHostPointerPropertiesEXT memoryHostPointerProperties; + Result result = static_cast( d.vkGetMemoryHostPointerPropertiesEXT( m_device, static_cast( handleType ), pHostPointer, reinterpret_cast( &memoryHostPointerProperties ) ) ); + return createResultValue( result, memoryHostPointerProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryHostPointerPropertiesEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleKHR( const VULKAN_HPP_NAMESPACE::MemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast( pGetWin32HandleInfo ), pHandle ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryWin32HandleKHR( const MemoryGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetMemoryWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryWin32HandleNV( m_device, static_cast( memory ), static_cast( handleType ), pHandle ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryWin32HandleNV( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV handleType, Dispatch const & d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetMemoryWin32HandleNV( m_device, static_cast( memory ), static_cast( handleType ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandleNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast( handleType ), handle, reinterpret_cast< VkMemoryWin32HandlePropertiesKHR *>( pMemoryWin32HandleProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getMemoryWin32HandlePropertiesKHR( VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::MemoryWin32HandlePropertiesKHR memoryWin32HandleProperties; + Result result = static_cast( d.vkGetMemoryWin32HandlePropertiesKHR( m_device, static_cast( handleType ), handle, reinterpret_cast( &memoryWin32HandleProperties ) ) ); + return createResultValue( result, memoryWin32HandleProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getMemoryWin32HandlePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VULKAN_HPP_NAMESPACE::PastPresentationTimingGOOGLE* pPresentationTimings, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), pPresentationTimingCount, reinterpret_cast< VkPastPresentationTimingGOOGLE *>( pPresentationTimings ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const + { + std::vector presentationTimings; + uint32_t presentationTimingCount; + Result result; + do + { + result = static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), &presentationTimingCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentationTimingCount ) + { + presentationTimings.resize( presentationTimingCount ); + result = static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), &presentationTimingCount, reinterpret_cast( presentationTimings.data() ) ) ); + VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( presentationTimingCount < presentationTimings.size() ) ) + { + presentationTimings.resize( presentationTimingCount ); + } + return createResultValue( result, presentationTimings, VULKAN_HPP_NAMESPACE_STRING"::Device::getPastPresentationTimingGOOGLE" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPastPresentationTimingGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, PastPresentationTimingGOOGLEAllocator & pastPresentationTimingGOOGLEAllocator, Dispatch const & d ) const + { + std::vector presentationTimings( pastPresentationTimingGOOGLEAllocator ); + uint32_t presentationTimingCount; + Result result; + do + { + result = static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), &presentationTimingCount, nullptr ) ); + if 
( ( result == Result::eSuccess ) && presentationTimingCount ) + { + presentationTimings.resize( presentationTimingCount ); + result = static_cast( d.vkGetPastPresentationTimingGOOGLE( m_device, static_cast( swapchain ), &presentationTimingCount, reinterpret_cast( presentationTimings.data() ) ) ); + VULKAN_HPP_ASSERT( presentationTimingCount <= presentationTimings.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( presentationTimingCount < presentationTimings.size() ) ) + { + presentationTimings.resize( presentationTimingCount ); + } + return createResultValue( result, presentationTimings, VULKAN_HPP_NAMESPACE_STRING"::Device::getPastPresentationTimingGOOGLE" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, VULKAN_HPP_NAMESPACE::PerformanceValueINTEL* pValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPerformanceParameterINTEL( m_device, static_cast( parameter ), reinterpret_cast< VkPerformanceValueINTEL *>( pValue ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getPerformanceParameterINTEL( VULKAN_HPP_NAMESPACE::PerformanceParameterTypeINTEL parameter, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::PerformanceValueINTEL value; + Result result = static_cast( d.vkGetPerformanceParameterINTEL( m_device, static_cast( parameter ), reinterpret_cast( &value ) ) ); + return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING "::Device::getPerformanceParameterINTEL" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, size_t* pDataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), pDataSize, pData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Dispatch const & d ) const + { + std::vector data; + size_t dataSize; + Result result; + do + { + result = static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), &dataSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), &dataSize, reinterpret_cast( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) + { + data.resize( dataSize ); + } + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineCacheData" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineCacheData( VULKAN_HPP_NAMESPACE::PipelineCache pipelineCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d ) const + { + std::vector data( uint8_tAllocator ); + size_t dataSize; + Result result; + do + { + result = static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), 
&dataSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( d.vkGetPipelineCacheData( m_device, static_cast( pipelineCache ), &dataSize, reinterpret_cast( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) + { + data.resize( dataSize ); + } + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineCacheData" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutableInternalRepresentationsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VULKAN_HPP_NAMESPACE::PipelineExecutableInternalRepresentationKHR* pInternalRepresentations, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast( pExecutableInfo ), pInternalRepresentationCount, reinterpret_cast< VkPipelineExecutableInternalRepresentationKHR *>( pInternalRepresentations ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d ) const + { + std::vector internalRepresentations; + uint32_t internalRepresentationCount; + Result result; + do + { + result = static_cast( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast( &executableInfo ), &internalRepresentationCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && internalRepresentationCount ) + { + internalRepresentations.resize( internalRepresentationCount ); + result = static_cast( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast( &executableInfo ), &internalRepresentationCount, reinterpret_cast( internalRepresentations.data() ) ) ); + VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( internalRepresentationCount < internalRepresentations.size() ) ) + { + internalRepresentations.resize( internalRepresentationCount ); + } + return createResultValue( result, internalRepresentations, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableInternalRepresentationsKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineExecutableInternalRepresentationsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableInternalRepresentationKHRAllocator & pipelineExecutableInternalRepresentationKHRAllocator, Dispatch const & d ) const + { + std::vector internalRepresentations( pipelineExecutableInternalRepresentationKHRAllocator ); + uint32_t internalRepresentationCount; + Result result; + do + { + result = static_cast( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast( &executableInfo ), &internalRepresentationCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && internalRepresentationCount ) + { + internalRepresentations.resize( internalRepresentationCount ); + result = static_cast( d.vkGetPipelineExecutableInternalRepresentationsKHR( m_device, reinterpret_cast( &executableInfo ), 
&internalRepresentationCount, reinterpret_cast( internalRepresentations.data() ) ) ); + VULKAN_HPP_ASSERT( internalRepresentationCount <= internalRepresentations.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( internalRepresentationCount < internalRepresentations.size() ) ) + { + internalRepresentations.resize( internalRepresentationCount ); + } + return createResultValue( result, internalRepresentations, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableInternalRepresentationsKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getPipelineExecutablePropertiesKHR( const VULKAN_HPP_NAMESPACE::PipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VULKAN_HPP_NAMESPACE::PipelineExecutablePropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast( pPipelineInfo ), pExecutableCount, reinterpret_cast< VkPipelineExecutablePropertiesKHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, Dispatch const & d ) const + { + std::vector properties; + uint32_t executableCount; + Result result; + do + { + result = static_cast( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast( &pipelineInfo ), &executableCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && executableCount ) + { + properties.resize( executableCount ); + result = static_cast( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast( &pipelineInfo ), &executableCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( executableCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( executableCount < properties.size() ) ) + { + properties.resize( executableCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutablePropertiesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineExecutablePropertiesKHR( const PipelineInfoKHR & pipelineInfo, PipelineExecutablePropertiesKHRAllocator & pipelineExecutablePropertiesKHRAllocator, Dispatch const & d ) const + { + std::vector properties( pipelineExecutablePropertiesKHRAllocator ); + uint32_t executableCount; + Result result; + do + { + result = static_cast( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast( &pipelineInfo ), &executableCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && executableCount ) + { + properties.resize( executableCount ); + result = static_cast( d.vkGetPipelineExecutablePropertiesKHR( m_device, reinterpret_cast( &pipelineInfo ), &executableCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( executableCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( executableCount < properties.size() ) ) + { + properties.resize( executableCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutablePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::getPipelineExecutableStatisticsKHR( const VULKAN_HPP_NAMESPACE::PipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VULKAN_HPP_NAMESPACE::PipelineExecutableStatisticKHR* pStatistics, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast( pExecutableInfo ), pStatisticCount, reinterpret_cast< VkPipelineExecutableStatisticKHR *>( pStatistics ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, Dispatch const & d ) const + { + std::vector statistics; + uint32_t statisticCount; + Result result; + do + { + result = static_cast( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast( &executableInfo ), &statisticCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && statisticCount ) + { + statistics.resize( statisticCount ); + result = static_cast( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast( &executableInfo ), &statisticCount, reinterpret_cast( statistics.data() ) ) ); + VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( statisticCount < statistics.size() ) ) + { + statistics.resize( statisticCount ); + } + return createResultValue( result, statistics, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableStatisticsKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getPipelineExecutableStatisticsKHR( const PipelineExecutableInfoKHR & executableInfo, PipelineExecutableStatisticKHRAllocator & pipelineExecutableStatisticKHRAllocator, Dispatch const & d ) const + { + std::vector statistics( pipelineExecutableStatisticKHRAllocator ); + uint32_t statisticCount; + Result result; + do + { + result = static_cast( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast( &executableInfo ), &statisticCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && statisticCount ) + { + statistics.resize( statisticCount ); + result = static_cast( d.vkGetPipelineExecutableStatisticsKHR( m_device, reinterpret_cast( &executableInfo ), &statisticCount, reinterpret_cast( statistics.data() ) ) ); + VULKAN_HPP_ASSERT( statisticCount <= statistics.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( statisticCount < statistics.size() ) ) + { + statistics.resize( statisticCount ); + } + return createResultValue( result, statistics, VULKAN_HPP_NAMESPACE_STRING"::Device::getPipelineExecutableStatisticsKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPrivateDataEXT( m_device, static_cast( objectType ), objectHandle, static_cast( privateDataSlot ), pData ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE uint64_t Device::getPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + uint64_t data; + 
d.vkGetPrivateDataEXT( m_device, static_cast( objectType ), objectHandle, static_cast( privateDataSlot ), &data ); + return data; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetQueryPoolResults( m_device, static_cast( queryPool ), firstQuery, queryCount, dataSize, pData, static_cast( stride ), static_cast( flags ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, ArrayProxy const &data, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetQueryPoolResults( m_device, static_cast( queryPool ), firstQuery, queryCount, data.size() * sizeof( T ) , reinterpret_cast( data.data() ), static_cast( stride ), static_cast( flags ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getQueryPoolResults", { Result::eSuccess, Result::eNotReady } ); + + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue> Device::getQueryPoolResults( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( d.vkGetQueryPoolResults( m_device, static_cast( queryPool ), firstQuery, queryCount, data.size() * sizeof( T ), reinterpret_cast( data.data() ), static_cast( stride ), static_cast( flags ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResults", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE ResultValue Device::getQueryPoolResult( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VULKAN_HPP_NAMESPACE::DeviceSize stride, VULKAN_HPP_NAMESPACE::QueryResultFlags flags, Dispatch const & d ) const + { + T data; + Result result = static_cast( d.vkGetQueryPoolResults( m_device, static_cast( queryPool ), firstQuery, queryCount, sizeof( T ), reinterpret_cast( &data ), static_cast( stride ), static_cast( flags ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getQueryPoolResult", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eNotReady } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + } + 
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy const &data, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ) , reinterpret_cast( data.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); + + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::getRayTracingCaptureReplayShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ), reinterpret_cast( data.data() ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandlesKHR" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRayTracingCaptureReplayShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d ) const + { + T data; + Result result = static_cast( d.vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, sizeof( T ), reinterpret_cast( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingCaptureReplayShaderGroupHandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. 
Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy const &data, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ) , reinterpret_cast( data.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getRayTracingShaderGroupHandlesKHR" ); + + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::getRayTracingShaderGroupHandlesKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ), reinterpret_cast( data.data() ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesKHR" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRayTracingShaderGroupHandleKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d ) const + { + T data; + Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesKHR( m_device, static_cast( pipeline ), firstGroup, groupCount, sizeof( T ), reinterpret_cast( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast( pipeline ), firstGroup, groupCount, dataSize, pData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. 
Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, ArrayProxy const &data, Dispatch const &d ) const + { + Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ) , reinterpret_cast( data.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::getRayTracingShaderGroupHandlesNV" ); + + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::getRayTracingShaderGroupHandlesNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast( pipeline ), firstGroup, groupCount, data.size() * sizeof( T ), reinterpret_cast( data.data() ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandlesNV" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRayTracingShaderGroupHandleNV( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t firstGroup, uint32_t groupCount, Dispatch const & d ) const + { + T data; + Result result = static_cast( d.vkGetRayTracingShaderGroupHandlesNV( m_device, static_cast( pipeline ), firstGroup, groupCount, sizeof( T ), reinterpret_cast( &data ) ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::getRayTracingShaderGroupHandleNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE DeviceSize Device::getRayTracingShaderGroupStackSizeKHR( VULKAN_HPP_NAMESPACE::Pipeline pipeline, uint32_t group, VULKAN_HPP_NAMESPACE::ShaderGroupShaderKHR groupShader, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetRayTracingShaderGroupStackSizeKHR( m_device, static_cast( pipeline ), group, static_cast( groupShader ) ) ); + } + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE* pDisplayTimingProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast( swapchain ), reinterpret_cast< VkRefreshCycleDurationGOOGLE *>( pDisplayTimingProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getRefreshCycleDurationGOOGLE( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::RefreshCycleDurationGOOGLE displayTimingProperties; + Result result = static_cast( d.vkGetRefreshCycleDurationGOOGLE( m_device, static_cast( swapchain ), reinterpret_cast( &displayTimingProperties ) ) ); + return createResultValue( result, displayTimingProperties, VULKAN_HPP_NAMESPACE_STRING "::Device::getRefreshCycleDurationGOOGLE" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Device::getRenderAreaGranularity( 
VULKAN_HPP_NAMESPACE::RenderPass renderPass, VULKAN_HPP_NAMESPACE::Extent2D* pGranularity, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetRenderAreaGranularity( m_device, static_cast( renderPass ), reinterpret_cast< VkExtent2D *>( pGranularity ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Extent2D Device::getRenderAreaGranularity( VULKAN_HPP_NAMESPACE::RenderPass renderPass, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::Extent2D granularity; + d.vkGetRenderAreaGranularity( m_device, static_cast( renderPass ), reinterpret_cast( &granularity ) ); + return granularity; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSemaphoreCounterValue( m_device, static_cast( semaphore ), pValue ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getSemaphoreCounterValue( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d ) const + { + uint64_t value; + Result result = static_cast( d.vkGetSemaphoreCounterValue( m_device, static_cast( semaphore ), &value ) ); + return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValue" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, uint64_t* pValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSemaphoreCounterValueKHR( m_device, static_cast( semaphore ), pValue ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getSemaphoreCounterValueKHR( VULKAN_HPP_NAMESPACE::Semaphore semaphore, Dispatch const & d ) const + { + uint64_t value; + Result result = static_cast( d.vkGetSemaphoreCounterValueKHR( m_device, static_cast( semaphore ), &value ) ); + return createResultValue( result, value, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreCounterValueKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast( pGetFdInfo ), pFd ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getSemaphoreFdKHR( const SemaphoreGetFdInfoKHR & getFdInfo, Dispatch const & d ) const + { + int fd; + Result result = static_cast( d.vkGetSemaphoreFdKHR( m_device, reinterpret_cast( &getFdInfo ), &fd ) ); + return createResultValue( result, fd, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::SemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast( pGetWin32HandleInfo ), pHandle ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getSemaphoreWin32HandleKHR( const SemaphoreGetWin32HandleInfoKHR & getWin32HandleInfo, Dispatch const & d ) const + { + HANDLE handle; + Result result = static_cast( d.vkGetSemaphoreWin32HandleKHR( m_device, reinterpret_cast( &getWin32HandleInfo ), &handle ) ); + return createResultValue( result, handle, VULKAN_HPP_NAMESPACE_STRING "::Device::getSemaphoreWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), pInfoSize, pInfo ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Dispatch const & d ) const + { + std::vector info; + size_t infoSize; + Result result; + do + { + result = static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), &infoSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && infoSize ) + { + info.resize( infoSize ); + result = static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), &infoSize, reinterpret_cast( info.data() ) ) ); + VULKAN_HPP_ASSERT( infoSize <= info.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( infoSize < info.size() ) ) + { + info.resize( infoSize ); + } + return createResultValue( result, info, VULKAN_HPP_NAMESPACE_STRING"::Device::getShaderInfoAMD" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getShaderInfoAMD( VULKAN_HPP_NAMESPACE::Pipeline pipeline, VULKAN_HPP_NAMESPACE::ShaderStageFlagBits shaderStage, VULKAN_HPP_NAMESPACE::ShaderInfoTypeAMD infoType, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d ) const + { + std::vector info( uint8_tAllocator ); + size_t infoSize; + Result result; + do + { + result = static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), &infoSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && infoSize ) + { + info.resize( infoSize ); + result = static_cast( d.vkGetShaderInfoAMD( m_device, static_cast( pipeline ), static_cast( shaderStage ), static_cast( infoType ), &infoSize, reinterpret_cast( info.data() ) ) ); + VULKAN_HPP_ASSERT( infoSize <= info.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( infoSize < info.size() ) ) + { + info.resize( infoSize ); + } + return createResultValue( result, info, VULKAN_HPP_NAMESPACE_STRING"::Device::getShaderInfoAMD" ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSwapchainCounterEXT( m_device, static_cast( swapchain ), static_cast( counter ), pCounterValue ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::getSwapchainCounterEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, VULKAN_HPP_NAMESPACE::SurfaceCounterFlagBitsEXT counter, Dispatch const & d ) const + { + uint64_t counterValue; + Result result = static_cast( d.vkGetSwapchainCounterEXT( m_device, static_cast( swapchain ), static_cast( counter ), &counterValue ) ); + return createResultValue( result, counterValue, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainCounterEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VULKAN_HPP_NAMESPACE::Image* pSwapchainImages, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), pSwapchainImageCount, reinterpret_cast< VkImage *>( pSwapchainImages ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const + { + std::vector swapchainImages; + uint32_t swapchainImageCount; + Result result; + do + { + result = static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), &swapchainImageCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && swapchainImageCount ) + { + swapchainImages.resize( swapchainImageCount ); + result = static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), &swapchainImageCount, reinterpret_cast( swapchainImages.data() ) ) ); + VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( swapchainImageCount < swapchainImages.size() ) ) + { + swapchainImages.resize( swapchainImageCount ); + } + return createResultValue( result, swapchainImages, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainImagesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getSwapchainImagesKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, ImageAllocator & imageAllocator, Dispatch const & d ) const + { + std::vector swapchainImages( imageAllocator ); + uint32_t swapchainImageCount; + Result result; + do + { + result = static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), &swapchainImageCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && swapchainImageCount ) + { + swapchainImages.resize( swapchainImageCount ); + result = static_cast( d.vkGetSwapchainImagesKHR( m_device, static_cast( swapchain ), &swapchainImageCount, reinterpret_cast( swapchainImages.data() ) ) ); + VULKAN_HPP_ASSERT( swapchainImageCount <= swapchainImages.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( 
swapchainImageCount < swapchainImages.size() ) ) + { + swapchainImages.resize( swapchainImageCount ); + } + return createResultValue( result, swapchainImages, VULKAN_HPP_NAMESPACE_STRING"::Device::getSwapchainImagesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetSwapchainStatusKHR( m_device, static_cast( swapchain ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getSwapchainStatusKHR( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const + { + Result result = static_cast( d.vkGetSwapchainStatusKHR( m_device, static_cast( swapchain ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::getSwapchainStatusKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, size_t* pDataSize, void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), pDataSize, pData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Dispatch const & d ) const + { + std::vector data; + size_t dataSize; + Result result; + do + { + result = static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), &dataSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), &dataSize, reinterpret_cast( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) + { + data.resize( dataSize ); + } + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getValidationCacheDataEXT" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Device::getValidationCacheDataEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT validationCache, Uint8_tAllocator & uint8_tAllocator, Dispatch const & d ) const + { + std::vector data( uint8_tAllocator ); + size_t dataSize; + Result result; + do + { + result = static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), &dataSize, nullptr ) ); + if ( ( result == Result::eSuccess ) && dataSize ) + { + data.resize( dataSize ); + result = static_cast( d.vkGetValidationCacheDataEXT( m_device, static_cast( validationCache ), &dataSize, reinterpret_cast( data.data() ) ) ); + VULKAN_HPP_ASSERT( dataSize <= data.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( dataSize < data.size() ) ) + { + data.resize( dataSize ); + } + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::Device::getValidationCacheDataEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD 
VULKAN_HPP_INLINE Result Device::importFenceFdKHR( const VULKAN_HPP_NAMESPACE::ImportFenceFdInfoKHR* pImportFenceFdInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkImportFenceFdKHR( m_device, reinterpret_cast( pImportFenceFdInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::importFenceFdKHR( const ImportFenceFdInfoKHR & importFenceFdInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkImportFenceFdKHR( m_device, reinterpret_cast( &importFenceFdInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importFenceWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast( pImportFenceWin32HandleInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::importFenceWin32HandleKHR( const ImportFenceWin32HandleInfoKHR & importFenceWin32HandleInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkImportFenceWin32HandleKHR( m_device, reinterpret_cast( &importFenceWin32HandleInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importFenceWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importSemaphoreFdKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast( pImportSemaphoreFdInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::importSemaphoreFdKHR( const ImportSemaphoreFdInfoKHR & importSemaphoreFdInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkImportSemaphoreFdKHR( m_device, reinterpret_cast( &importSemaphoreFdInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::importSemaphoreFdKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::importSemaphoreWin32HandleKHR( const VULKAN_HPP_NAMESPACE::ImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast( pImportSemaphoreWin32HandleInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::importSemaphoreWin32HandleKHR( const ImportSemaphoreWin32HandleInfoKHR & importSemaphoreWin32HandleInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkImportSemaphoreWin32HandleKHR( m_device, reinterpret_cast( &importSemaphoreWin32HandleInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING 
"::Device::importSemaphoreWin32HandleKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::initializePerformanceApiINTEL( const VULKAN_HPP_NAMESPACE::InitializePerformanceApiInfoINTEL* pInitializeInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkInitializePerformanceApiINTEL( m_device, reinterpret_cast( pInitializeInfo ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::initializePerformanceApiINTEL( const InitializePerformanceApiInfoINTEL & initializeInfo, Dispatch const & d ) const + { + Result result = static_cast( d.vkInitializePerformanceApiINTEL( m_device, reinterpret_cast( &initializeInfo ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::initializePerformanceApiINTEL" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::invalidateMappedMemoryRanges( uint32_t memoryRangeCount, const VULKAN_HPP_NAMESPACE::MappedMemoryRange* pMemoryRanges, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkInvalidateMappedMemoryRanges( m_device, memoryRangeCount, reinterpret_cast( pMemoryRanges ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::invalidateMappedMemoryRanges( ArrayProxy const & memoryRanges, Dispatch const & d ) const + { + Result result = static_cast( d.vkInvalidateMappedMemoryRanges( m_device, memoryRanges.size(), reinterpret_cast( memoryRanges.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::invalidateMappedMemoryRanges" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, void** ppData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkMapMemory( m_device, static_cast( memory ), static_cast( offset ), static_cast( size ), static_cast( flags ), ppData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::mapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, VULKAN_HPP_NAMESPACE::DeviceSize offset, VULKAN_HPP_NAMESPACE::DeviceSize size, VULKAN_HPP_NAMESPACE::MemoryMapFlags flags, Dispatch const & d ) const + { + void* pData; + Result result = static_cast( d.vkMapMemory( m_device, static_cast( memory ), static_cast( offset ), static_cast( size ), static_cast( flags ), &pData ) ); + return createResultValue( result, pData, VULKAN_HPP_NAMESPACE_STRING "::Device::mapMemory" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::PipelineCache* pSrcCaches, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkMergePipelineCaches( m_device, static_cast( dstCache ), srcCacheCount, reinterpret_cast( pSrcCaches ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + 
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::mergePipelineCaches( VULKAN_HPP_NAMESPACE::PipelineCache dstCache, ArrayProxy const & srcCaches, Dispatch const & d ) const + { + Result result = static_cast( d.vkMergePipelineCaches( m_device, static_cast( dstCache ), srcCaches.size(), reinterpret_cast( srcCaches.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergePipelineCaches" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, uint32_t srcCacheCount, const VULKAN_HPP_NAMESPACE::ValidationCacheEXT* pSrcCaches, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkMergeValidationCachesEXT( m_device, static_cast( dstCache ), srcCacheCount, reinterpret_cast( pSrcCaches ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::mergeValidationCachesEXT( VULKAN_HPP_NAMESPACE::ValidationCacheEXT dstCache, ArrayProxy const & srcCaches, Dispatch const & d ) const + { + Result result = static_cast( d.vkMergeValidationCachesEXT( m_device, static_cast( dstCache ), srcCaches.size(), reinterpret_cast( srcCaches.data() ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::mergeValidationCachesEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::registerEventEXT( const VULKAN_HPP_NAMESPACE::DeviceEventInfoEXT* pDeviceEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast( pDeviceEventInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkFence *>( pFence ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::registerEventEXT( const DeviceEventInfoEXT & deviceEventInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Fence fence; + Result result = static_cast( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast( &deviceEventInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::registerEventEXTUnique( const DeviceEventInfoEXT & deviceEventInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Fence fence; + Result result = static_cast( d.vkRegisterDeviceEventEXT( m_device, reinterpret_cast( &deviceEventInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerEventEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayEventInfoEXT* pDisplayEventInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* 
pAllocator, VULKAN_HPP_NAMESPACE::Fence* pFence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkRegisterDisplayEventEXT( m_device, static_cast( display ), reinterpret_cast( pDisplayEventInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkFence *>( pFence ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::registerDisplayEventEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Fence fence; + Result result = static_cast( d.vkRegisterDisplayEventEXT( m_device, static_cast( display ), reinterpret_cast( &displayEventInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Device::registerDisplayEventEXTUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayEventInfoEXT & displayEventInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Fence fence; + Result result = static_cast( d.vkRegisterDisplayEventEXT( m_device, static_cast( display ), reinterpret_cast( &displayEventInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &fence ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, fence, VULKAN_HPP_NAMESPACE_STRING "::Device::registerDisplayEventEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkReleaseFullScreenExclusiveModeEXT( m_device, static_cast( swapchain ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::releaseFullScreenExclusiveModeEXT( VULKAN_HPP_NAMESPACE::SwapchainKHR swapchain, Dispatch const & d ) const + { + Result result = static_cast( d.vkReleaseFullScreenExclusiveModeEXT( m_device, static_cast( swapchain ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releaseFullScreenExclusiveModeEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast( configuration ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::releasePerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const + { + Result result = static_cast( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast( configuration ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::releasePerformanceConfigurationINTEL" ); + } +#endif 
/*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast( configuration ) ) ); + } +#else + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::release( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const + { + Result result = static_cast( d.vkReleasePerformanceConfigurationINTEL( m_device, static_cast( configuration ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::release" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_INLINE void Device::releaseProfilingLockKHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkReleaseProfilingLockKHR( m_device ); + } + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkResetCommandPool( m_device, static_cast( commandPool ), static_cast( flags ) ) ); + } +#else + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::resetCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolResetFlags flags, Dispatch const & d ) const + { + Result result = static_cast( d.vkResetCommandPool( m_device, static_cast( commandPool ), static_cast( flags ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetCommandPool" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Result Device::resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkResetDescriptorPool( m_device, static_cast( descriptorPool ), static_cast( flags ) ) ); + } +#else + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::resetDescriptorPool( VULKAN_HPP_NAMESPACE::DescriptorPool descriptorPool, VULKAN_HPP_NAMESPACE::DescriptorPoolResetFlags flags, Dispatch const & d ) const + { + Result result = static_cast( d.vkResetDescriptorPool( m_device, static_cast( descriptorPool ), static_cast( flags ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetDescriptorPool" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + +#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkResetEvent( m_device, static_cast( event ) ) ); + } +#else + template + VULKAN_HPP_INLINE typename ResultValueType::type Device::resetEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const + { + Result result = static_cast( d.vkResetEvent( m_device, static_cast( event ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetEvent" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result 
Device::resetFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkResetFences( m_device, fenceCount, reinterpret_cast<const VkFence *>( pFences ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::resetFences( ArrayProxy<const VULKAN_HPP_NAMESPACE::Fence> const & fences, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkResetFences( m_device, fences.size(), reinterpret_cast<const VkFence *>( fences.data() ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::resetFences" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::resetQueryPool( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkResetQueryPool( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount );
+  }
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::resetQueryPoolEXT( VULKAN_HPP_NAMESPACE::QueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkResetQueryPoolEXT( m_device, static_cast<VkQueryPool>( queryPool ), firstQuery, queryCount );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectNameEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectNameInfoEXT* pNameInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>( pNameInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setDebugUtilsObjectNameEXT( const DebugUtilsObjectNameInfoEXT & nameInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkSetDebugUtilsObjectNameEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectNameInfoEXT *>( &nameInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectNameEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setDebugUtilsObjectTagEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsObjectTagInfoEXT* pTagInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>( pTagInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setDebugUtilsObjectTagEXT( const DebugUtilsObjectTagInfoEXT & tagInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkSetDebugUtilsObjectTagEXT( m_device, reinterpret_cast<const VkDebugUtilsObjectTagInfoEXT *>( &tagInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setDebugUtilsObjectTagEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkSetEvent( m_device, static_cast<VkEvent>( event ) ) );
+  }
+#else
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setEvent( VULKAN_HPP_NAMESPACE::Event event, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkSetEvent( m_device, static_cast<VkEvent>( event ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setEvent" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( uint32_t swapchainCount, const VULKAN_HPP_NAMESPACE::SwapchainKHR* pSwapchains, const VULKAN_HPP_NAMESPACE::HdrMetadataEXT* pMetadata, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkSetHdrMetadataEXT( m_device, swapchainCount, reinterpret_cast<const VkSwapchainKHR *>( pSwapchains ), reinterpret_cast<const VkHdrMetadataEXT *>( pMetadata ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::setHdrMetadataEXT( ArrayProxy<const VULKAN_HPP_NAMESPACE::SwapchainKHR> const & swapchains, ArrayProxy<const VULKAN_HPP_NAMESPACE::HdrMetadataEXT> const & metadata, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT_WHEN_NO_EXCEPTIONS
+  {
+#ifdef VULKAN_HPP_NO_EXCEPTIONS
+    VULKAN_HPP_ASSERT( swapchains.size() == metadata.size() );
+#else
+    if ( swapchains.size() != metadata.size() )
+    {
+      throw LogicError( VULKAN_HPP_NAMESPACE_STRING "::Device::setHdrMetadataEXT: swapchains.size() != metadata.size()" );
+    }
+#endif /*VULKAN_HPP_NO_EXCEPTIONS*/
+
+    d.vkSetHdrMetadataEXT( m_device, swapchains.size(), reinterpret_cast<const VkSwapchainKHR *>( swapchains.data() ), reinterpret_cast<const VkHdrMetadataEXT *>( metadata.data() ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::setLocalDimmingAMD( VULKAN_HPP_NAMESPACE::SwapchainKHR swapChain, VULKAN_HPP_NAMESPACE::Bool32 localDimmingEnable, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkSetLocalDimmingAMD( m_device, static_cast<VkSwapchainKHR>( swapChain ), static_cast<VkBool32>( localDimmingEnable ) );
+  }
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkSetPrivateDataEXT( m_device, static_cast<VkObjectType>( objectType ), objectHandle, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), data ) );
+  }
+#else
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::setPrivateDataEXT( VULKAN_HPP_NAMESPACE::ObjectType objectType, uint64_t objectHandle, VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT privateDataSlot, uint64_t data, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkSetPrivateDataEXT( m_device, static_cast<VkObjectType>( objectType ), objectHandle, static_cast<VkPrivateDataSlotEXT>( privateDataSlot ), data ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::setPrivateDataEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::signalSemaphore( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkSignalSemaphore( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( pSignalInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::signalSemaphore( const SemaphoreSignalInfo & signalInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkSignalSemaphore( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( &signalInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphore" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::signalSemaphoreKHR( const VULKAN_HPP_NAMESPACE::SemaphoreSignalInfo* pSignalInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkSignalSemaphoreKHR(
m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( pSignalInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Device::signalSemaphoreKHR( const SemaphoreSignalInfo & signalInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkSignalSemaphoreKHR( m_device, reinterpret_cast<const VkSemaphoreSignalInfo *>( &signalInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::signalSemaphoreKHR" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::trimCommandPool( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkTrimCommandPool( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlags>( flags ) );
+  }
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::trimCommandPoolKHR( VULKAN_HPP_NAMESPACE::CommandPool commandPool, VULKAN_HPP_NAMESPACE::CommandPoolTrimFlags flags, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkTrimCommandPoolKHR( m_device, static_cast<VkCommandPool>( commandPool ), static_cast<VkCommandPoolTrimFlags>( flags ) );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::uninitializePerformanceApiINTEL( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkUninitializePerformanceApiINTEL( m_device );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::unmapMemory( VULKAN_HPP_NAMESPACE::DeviceMemory memory, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkUnmapMemory( m_device, static_cast<VkDeviceMemory>( memory ) );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplate( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkUpdateDescriptorSetWithTemplate( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData );
+  }
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::updateDescriptorSetWithTemplateKHR( VULKAN_HPP_NAMESPACE::DescriptorSet descriptorSet, VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkUpdateDescriptorSetWithTemplateKHR( m_device, static_cast<VkDescriptorSet>( descriptorSet ), static_cast<VkDescriptorUpdateTemplate>( descriptorUpdateTemplate ), pData );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::updateDescriptorSets( uint32_t descriptorWriteCount, const VULKAN_HPP_NAMESPACE::WriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VULKAN_HPP_NAMESPACE::CopyDescriptorSet* pDescriptorCopies, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkUpdateDescriptorSets( m_device, descriptorWriteCount, reinterpret_cast<const VkWriteDescriptorSet *>( pDescriptorWrites ), descriptorCopyCount, reinterpret_cast<const VkCopyDescriptorSet *>( pDescriptorCopies ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Device::updateDescriptorSets( ArrayProxy<const VULKAN_HPP_NAMESPACE::WriteDescriptorSet> const & descriptorWrites, ArrayProxy<const VULKAN_HPP_NAMESPACE::CopyDescriptorSet> const & descriptorCopies, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkUpdateDescriptorSets( m_device, descriptorWrites.size(), reinterpret_cast<const VkWriteDescriptorSet *>( descriptorWrites.data() ), descriptorCopies.size(), reinterpret_cast<const VkCopyDescriptorSet *>( descriptorCopies.data() ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitForFences( uint32_t fenceCount, const VULKAN_HPP_NAMESPACE::Fence* pFences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d ) const
VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkWaitForFences( m_device, fenceCount, reinterpret_cast( pFences ), static_cast( waitAll ), timeout ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitForFences( ArrayProxy const & fences, VULKAN_HPP_NAMESPACE::Bool32 waitAll, uint64_t timeout, Dispatch const & d ) const + { + Result result = static_cast( d.vkWaitForFences( m_device, fences.size(), reinterpret_cast( fences.data() ), static_cast( waitAll ), timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitForFences", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphores( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkWaitSemaphores( m_device, reinterpret_cast( pWaitInfo ), timeout ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphores( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d ) const + { + Result result = static_cast( d.vkWaitSemaphores( m_device, reinterpret_cast( &waitInfo ), timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphores", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphoresKHR( const VULKAN_HPP_NAMESPACE::SemaphoreWaitInfo* pWaitInfo, uint64_t timeout, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkWaitSemaphoresKHR( m_device, reinterpret_cast( pWaitInfo ), timeout ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::waitSemaphoresKHR( const SemaphoreWaitInfo & waitInfo, uint64_t timeout, Dispatch const & d ) const + { + Result result = static_cast( d.vkWaitSemaphoresKHR( m_device, reinterpret_cast( &waitInfo ), timeout ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Device::waitSemaphoresKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eTimeout } ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Device::writeAccelerationStructuresPropertiesKHR( uint32_t accelerationStructureCount, const VULKAN_HPP_NAMESPACE::AccelerationStructureKHR* pAccelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, void* pData, size_t stride, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructureCount, reinterpret_cast( pAccelerationStructures ), static_cast( queryType ), dataSize, pData, stride ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. 
Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::writeAccelerationStructuresPropertiesKHR( ArrayProxy const &accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, ArrayProxy const &data, size_t stride, Dispatch const &d ) const + { + Result result = static_cast( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size() , reinterpret_cast( accelerationStructures.data() ), static_cast( queryType ), data.size() * sizeof( T ) , reinterpret_cast( data.data() ), stride ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING"::Device::writeAccelerationStructuresPropertiesKHR" ); + + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Device::writeAccelerationStructuresPropertiesKHR( ArrayProxy const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t dataSize, size_t stride, Dispatch const & d ) const + { + VULKAN_HPP_ASSERT( dataSize % sizeof( T ) == 0 ); + std::vector data( dataSize / sizeof( T ) ); + Result result = static_cast( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size(), reinterpret_cast( accelerationStructures.data() ), static_cast( queryType ), data.size() * sizeof( T ), reinterpret_cast( data.data() ), stride ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertiesKHR" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Device::writeAccelerationStructuresPropertyKHR( ArrayProxy const & accelerationStructures, VULKAN_HPP_NAMESPACE::QueryType queryType, size_t stride, Dispatch const & d ) const + { + T data; + Result result = static_cast( d.vkWriteAccelerationStructuresPropertiesKHR( m_device, accelerationStructures.size(), reinterpret_cast( accelerationStructures.data() ), static_cast( queryType ), sizeof( T ), reinterpret_cast( &data ), stride ) ); + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING "::Device::writeAccelerationStructuresPropertyKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createAndroidSurfaceKHR( const VULKAN_HPP_NAMESPACE::AndroidSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createAndroidSurfaceKHR( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type 
Instance::createAndroidSurfaceKHRUnique( const AndroidSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateAndroidSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createAndroidSurfaceKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDebugReportCallbackEXT( const VULKAN_HPP_NAMESPACE::DebugReportCallbackCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT* pCallback, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDebugReportCallbackEXT *>( pCallback ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Instance::createDebugReportCallbackEXT( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback; + Result result = static_cast( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &callback ) ) ); + return createResultValue( result, callback, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDebugReportCallbackEXTUnique( const DebugReportCallbackCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback; + Result result = static_cast( d.vkCreateDebugReportCallbackEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &callback ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, callback, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugReportCallbackEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDebugUtilsMessengerEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT* pMessenger, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDebugUtilsMessengerEXT *>( pMessenger ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE typename ResultValueType::type Instance::createDebugUtilsMessengerEXT( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger; + Result result = static_cast( d.vkCreateDebugUtilsMessengerEXT( m_instance, 
reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &messenger ) ) ); + return createResultValue( result, messenger, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDebugUtilsMessengerEXTUnique( const DebugUtilsMessengerCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger; + Result result = static_cast( d.vkCreateDebugUtilsMessengerEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &messenger ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, messenger, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDebugUtilsMessengerEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDirectFBSurfaceEXT( const VULKAN_HPP_NAMESPACE::DirectFBSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createDirectFBSurfaceEXT( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDirectFBSurfaceEXTUnique( const DirectFBSurfaceCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateDirectFBSurfaceEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDirectFBSurfaceEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createDisplayPlaneSurfaceKHR( const VULKAN_HPP_NAMESPACE::DisplaySurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef 
VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createDisplayPlaneSurfaceKHR( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createDisplayPlaneSurfaceKHRUnique( const DisplaySurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateDisplayPlaneSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createDisplayPlaneSurfaceKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createHeadlessSurfaceEXT( const VULKAN_HPP_NAMESPACE::HeadlessSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createHeadlessSurfaceEXT( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createHeadlessSurfaceEXTUnique( const HeadlessSurfaceCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateHeadlessSurfaceEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createHeadlessSurfaceEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_IOS_MVK + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createIOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::IOSSurfaceCreateInfoMVK* pCreateInfo, const 
VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createIOSSurfaceMVK( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVK" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createIOSSurfaceMVKUnique( const IOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateIOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createIOSSurfaceMVKUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + + +#ifdef VK_USE_PLATFORM_FUCHSIA + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createImagePipeSurfaceFUCHSIA( const VULKAN_HPP_NAMESPACE::ImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createImagePipeSurfaceFUCHSIA( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIA" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createImagePipeSurfaceFUCHSIAUnique( const ImagePipeSurfaceCreateInfoFUCHSIA & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateImagePipeSurfaceFUCHSIA( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( 
result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createImagePipeSurfaceFUCHSIAUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + + +#ifdef VK_USE_PLATFORM_MACOS_MVK + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createMacOSSurfaceMVK( const VULKAN_HPP_NAMESPACE::MacOSSurfaceCreateInfoMVK* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createMacOSSurfaceMVK( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVK" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createMacOSSurfaceMVKUnique( const MacOSSurfaceCreateInfoMVK & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateMacOSSurfaceMVK( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMacOSSurfaceMVKUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ + + +#ifdef VK_USE_PLATFORM_METAL_EXT + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createMetalSurfaceEXT( const VULKAN_HPP_NAMESPACE::MetalSurfaceCreateInfoEXT* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createMetalSurfaceEXT( const MetalSurfaceCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXT" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createMetalSurfaceEXTUnique( const 
MetalSurfaceCreateInfoEXT & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateMetalSurfaceEXT( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createMetalSurfaceEXTUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + + +#ifdef VK_USE_PLATFORM_GGP + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createStreamDescriptorSurfaceGGP( const VULKAN_HPP_NAMESPACE::StreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createStreamDescriptorSurfaceGGP( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGP" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createStreamDescriptorSurfaceGGPUnique( const StreamDescriptorSurfaceCreateInfoGGP & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateStreamDescriptorSurfaceGGP( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createStreamDescriptorSurfaceGGPUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_GGP*/ + + +#ifdef VK_USE_PLATFORM_VI_NN + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createViSurfaceNN( const VULKAN_HPP_NAMESPACE::ViSurfaceCreateInfoNN* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createViSurfaceNN( const ViSurfaceCreateInfoNN & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( 
d.vkCreateViSurfaceNN( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNN" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createViSurfaceNNUnique( const ViSurfaceCreateInfoNN & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateViSurfaceNN( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createViSurfaceNNUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_VI_NN*/ + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createWaylandSurfaceKHR( const VULKAN_HPP_NAMESPACE::WaylandSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createWaylandSurfaceKHR( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createWaylandSurfaceKHRUnique( const WaylandSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateWaylandSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWaylandSurfaceKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createWin32SurfaceKHR( const VULKAN_HPP_NAMESPACE::Win32SurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( 
pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createWin32SurfaceKHR( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createWin32SurfaceKHRUnique( const Win32SurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateWin32SurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createWin32SurfaceKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + +#ifdef VK_USE_PLATFORM_XCB_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createXcbSurfaceKHR( const VULKAN_HPP_NAMESPACE::XcbSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createXcbSurfaceKHR( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createXcbSurfaceKHRUnique( const XcbSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateXcbSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXcbSurfaceKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XCB_KHR*/ + + +#ifdef VK_USE_PLATFORM_XLIB_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::createXlibSurfaceKHR( const 
VULKAN_HPP_NAMESPACE::XlibSurfaceCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::SurfaceKHR* pSurface, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkSurfaceKHR *>( pSurface ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type Instance::createXlibSurfaceKHR( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type Instance::createXlibSurfaceKHRUnique( const XlibSurfaceCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceKHR surface; + Result result = static_cast( d.vkCreateXlibSurfaceKHR( m_instance, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &surface ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, surface, VULKAN_HPP_NAMESPACE_STRING "::Instance::createXlibSurfaceKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + + + template + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDebugReportMessageEXT( m_instance, static_cast( flags ), static_cast( objectType ), object, location, messageCode, pLayerPrefix, pMessage ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::debugReportMessageEXT( VULKAN_HPP_NAMESPACE::DebugReportFlagsEXT flags, VULKAN_HPP_NAMESPACE::DebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const std::string & layerPrefix, const std::string & message, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDebugReportMessageEXT( m_instance, static_cast( flags ), static_cast( objectType ), object, location, messageCode, layerPrefix.c_str(), message.c_str() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroyDebugReportCallbackEXT( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugReportCallbackEXT( m_instance, 
static_cast( callback ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT callback, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugReportCallbackEXT( m_instance, static_cast( callback ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroyDebugUtilsMessengerEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT messenger, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyDebugUtilsMessengerEXT( m_instance, static_cast( messenger ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroy( const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyInstance( m_instance, reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroyInstance( m_instance, reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroySurfaceKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( 
static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( pAllocator ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::destroy( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Optional allocator, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkDestroySurfaceKHR( m_instance, static_cast( surface ), reinterpret_cast( static_cast( allocator ) ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroups( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast< VkPhysicalDeviceGroupProperties *>( pPhysicalDeviceGroupProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDeviceGroups( Dispatch const & d ) const + { + std::vector physicalDeviceGroupProperties; + uint32_t physicalDeviceGroupCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + } + return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroups" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDeviceGroups( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d ) const + { + std::vector physicalDeviceGroupProperties( physicalDeviceGroupPropertiesAllocator ); + uint32_t physicalDeviceGroupCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceGroups( m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) + { + physicalDeviceGroupProperties.resize( 
physicalDeviceGroupCount ); + } + return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroups" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDeviceGroupsKHR( uint32_t* pPhysicalDeviceGroupCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, pPhysicalDeviceGroupCount, reinterpret_cast< VkPhysicalDeviceGroupProperties *>( pPhysicalDeviceGroupProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDeviceGroupsKHR( Dispatch const & d ) const + { + std::vector physicalDeviceGroupProperties; + uint32_t physicalDeviceGroupCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + } + return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroupsKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDeviceGroupsKHR( PhysicalDeviceGroupPropertiesAllocator & physicalDeviceGroupPropertiesAllocator, Dispatch const & d ) const + { + std::vector physicalDeviceGroupProperties( physicalDeviceGroupPropertiesAllocator ); + uint32_t physicalDeviceGroupCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceGroupCount ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceGroupsKHR( m_instance, &physicalDeviceGroupCount, reinterpret_cast( physicalDeviceGroupProperties.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceGroupCount <= physicalDeviceGroupProperties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( physicalDeviceGroupCount < physicalDeviceGroupProperties.size() ) ) + { + physicalDeviceGroupProperties.resize( physicalDeviceGroupCount ); + } + return createResultValue( result, physicalDeviceGroupProperties, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDeviceGroupsKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Instance::enumeratePhysicalDevices( uint32_t* pPhysicalDeviceCount, VULKAN_HPP_NAMESPACE::PhysicalDevice* pPhysicalDevices, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumeratePhysicalDevices( m_instance, 
pPhysicalDeviceCount, reinterpret_cast< VkPhysicalDevice *>( pPhysicalDevices ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDevices( Dispatch const & d ) const + { + std::vector physicalDevices; + uint32_t physicalDeviceCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceCount ) + { + physicalDevices.resize( physicalDeviceCount ); + result = static_cast( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast( physicalDevices.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( physicalDeviceCount < physicalDevices.size() ) ) + { + physicalDevices.resize( physicalDeviceCount ); + } + return createResultValue( result, physicalDevices, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDevices" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type Instance::enumeratePhysicalDevices( PhysicalDeviceAllocator & physicalDeviceAllocator, Dispatch const & d ) const + { + std::vector physicalDevices( physicalDeviceAllocator ); + uint32_t physicalDeviceCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && physicalDeviceCount ) + { + physicalDevices.resize( physicalDeviceCount ); + result = static_cast( d.vkEnumeratePhysicalDevices( m_instance, &physicalDeviceCount, reinterpret_cast( physicalDevices.data() ) ) ); + VULKAN_HPP_ASSERT( physicalDeviceCount <= physicalDevices.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( physicalDeviceCount < physicalDevices.size() ) ) + { + physicalDevices.resize( physicalDeviceCount ); + } + return createResultValue( result, physicalDevices, VULKAN_HPP_NAMESPACE_STRING"::Instance::enumeratePhysicalDevices" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const char* pName, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetInstanceProcAddr( m_instance, pName ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE PFN_vkVoidFunction Instance::getProcAddr( const std::string & name, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetInstanceProcAddr( m_instance, name.c_str() ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const VULKAN_HPP_NAMESPACE::DebugUtilsMessengerCallbackDataEXT* pCallbackData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast( messageSeverity ), static_cast( messageTypes ), reinterpret_cast( pCallbackData ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE void Instance::submitDebugUtilsMessageEXT( VULKAN_HPP_NAMESPACE::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VULKAN_HPP_NAMESPACE::DebugUtilsMessageTypeFlagsEXT messageTypes, const 
DebugUtilsMessengerCallbackDataEXT & callbackData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkSubmitDebugUtilsMessageEXT( m_instance, static_cast( messageSeverity ), static_cast( messageTypes ), reinterpret_cast( &callbackData ) ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::acquireXlibDisplayEXT( Display* dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkAcquireXlibDisplayEXT( m_physicalDevice, dpy, static_cast( display ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::acquireXlibDisplayEXT( Display & dpy, VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const + { + Result result = static_cast( d.vkAcquireXlibDisplayEXT( m_physicalDevice, &dpy, static_cast( display ) ) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::acquireXlibDisplayEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::createDevice( const VULKAN_HPP_NAMESPACE::DeviceCreateInfo* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::Device* pDevice, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDevice( m_physicalDevice, reinterpret_cast( pCreateInfo ), reinterpret_cast( pAllocator ), reinterpret_cast< VkDevice *>( pDevice ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::createDevice( const DeviceCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Device device; + Result result = static_cast( d.vkCreateDevice( m_physicalDevice, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &device ) ) ); + return createResultValue( result, device, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDevice" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::createDeviceUnique( const DeviceCreateInfo & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Device device; + Result result = static_cast( d.vkCreateDevice( m_physicalDevice, reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &device ) ) ); + ObjectDestroy deleter( allocator, d ); + return createResultValue( result, device, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDeviceUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const VULKAN_HPP_NAMESPACE::DisplayModeCreateInfoKHR* pCreateInfo, const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocator, VULKAN_HPP_NAMESPACE::DisplayModeKHR* pMode, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast( display ), reinterpret_cast( pCreateInfo ), reinterpret_cast( 
pAllocator ), reinterpret_cast< VkDisplayModeKHR *>( pMode ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::createDisplayModeKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DisplayModeKHR mode; + Result result = static_cast( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast( display ), reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &mode ) ) ); + return createResultValue( result, mode, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHR" ); + } + +# ifndef VULKAN_HPP_NO_SMART_HANDLE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::createDisplayModeKHRUnique( VULKAN_HPP_NAMESPACE::DisplayKHR display, const DisplayModeCreateInfoKHR & createInfo, Optional allocator, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DisplayModeKHR mode; + Result result = static_cast( d.vkCreateDisplayModeKHR( m_physicalDevice, static_cast( display ), reinterpret_cast( &createInfo ), reinterpret_cast( static_cast( allocator ) ), reinterpret_cast( &mode ) ) ); + ObjectDestroy deleter( *this, allocator, d ); + return createResultValue( result, mode, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::createDisplayModeKHRUnique", deleter ); + } +# endif /*VULKAN_HPP_NO_SMART_HANDLE*/ +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceExtensionProperties( const char* pLayerName, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::ExtensionProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, pLayerName, pPropertyCount, reinterpret_cast< VkExtensionProperties *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional layerName, Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? 
layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceExtensionProperties" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::enumerateDeviceExtensionProperties( Optional layerName, ExtensionPropertiesAllocator & extensionPropertiesAllocator, Dispatch const & d ) const + { + std::vector properties( extensionPropertiesAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateDeviceExtensionProperties( m_physicalDevice, layerName ? layerName->c_str() : nullptr, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceExtensionProperties" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateDeviceLayerProperties( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::LayerProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, pPropertyCount, reinterpret_cast< VkLayerProperties *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::enumerateDeviceLayerProperties( Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceLayerProperties" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::enumerateDeviceLayerProperties( LayerPropertiesAllocator & layerPropertiesAllocator, Dispatch const & d ) const + { + std::vector properties( layerPropertiesAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result 
== Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkEnumerateDeviceLayerProperties( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateDeviceLayerProperties" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, uint32_t* pCounterCount, VULKAN_HPP_NAMESPACE::PerformanceCounterKHR* pCounters, VULKAN_HPP_NAMESPACE::PerformanceCounterDescriptionKHR* pCounterDescriptions, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, pCounterCount, reinterpret_cast< VkPerformanceCounterKHR *>( pCounters ), reinterpret_cast< VkPerformanceCounterDescriptionKHR *>( pCounterDescriptions ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_DEPRECATED( "This function is deprecated. Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy const &counters, Dispatch const &d ) const + { + std::vector counterDescriptions; + uint32_t counterCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, counters.size() , reinterpret_cast( counters.data() ), nullptr ) ); + if ( ( result == Result::eSuccess ) && counterCount ) + { + counterDescriptions.resize( counterCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, counters.size() , reinterpret_cast( counters.data() ), reinterpret_cast( counterDescriptions.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + if ( result == Result::eSuccess ) + { + VULKAN_HPP_ASSERT( counterCount <= counterDescriptions.size() ); + counterDescriptions.resize( counterCount ); + } + return createResultValue( result, counterDescriptions, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + + } + + template ::value, int>::type> + VULKAN_HPP_DEPRECATED( "This function is deprecated. 
Use one of the other flavours of it.") + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, ArrayProxy const &counters, Allocator const& vectorAllocator, Dispatch const &d ) const + { + std::vector counterDescriptions( vectorAllocator ); + uint32_t counterCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, counters.size() , reinterpret_cast( counters.data() ), nullptr ) ); + if ( ( result == Result::eSuccess ) && counterCount ) + { + counterDescriptions.resize( counterCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, counters.size() , reinterpret_cast( counters.data() ), reinterpret_cast( counterDescriptions.data() ) ) ); + } + } while ( result == Result::eIncomplete ); + if ( result == Result::eSuccess ) + { + VULKAN_HPP_ASSERT( counterCount <= counterDescriptions.size() ); + counterDescriptions.resize( counterCount ); + } + return createResultValue( result, counterDescriptions, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType, std::vector>>::type PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, Dispatch const & d ) const + { + std::pair, std::vector> data; + std::vector & counters = data.first; + std::vector & counterDescriptions = data.second; + uint32_t counterCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, nullptr, nullptr ) ); + if ( ( result == Result::eSuccess ) && counterCount ) + { + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, reinterpret_cast( counters.data() ), reinterpret_cast( counterDescriptions.data() ) ) ); + VULKAN_HPP_ASSERT( counterCount <= counters.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( counterCount < counters.size() ) ) + { + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + } + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + } + + template ::value && std::is_same::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType, std::vector>>::type PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR( uint32_t queueFamilyIndex, PerformanceCounterKHRAllocator & performanceCounterKHRAllocator, PerformanceCounterDescriptionKHRAllocator & performanceCounterDescriptionKHRAllocator, Dispatch const & d ) const + { + std::pair, std::vector> data( std::piecewise_construct, std::forward_as_tuple( performanceCounterKHRAllocator ), std::forward_as_tuple( performanceCounterDescriptionKHRAllocator ) ); + std::vector & counters = data.first; + std::vector & counterDescriptions = data.second; + uint32_t counterCount; + Result result; + do + { + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, nullptr, 
nullptr ) ); + if ( ( result == Result::eSuccess ) && counterCount ) + { + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + result = static_cast( d.vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( m_physicalDevice, queueFamilyIndex, &counterCount, reinterpret_cast( counters.data() ), reinterpret_cast( counterDescriptions.data() ) ) ); + VULKAN_HPP_ASSERT( counterCount <= counters.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( counterCount < counters.size() ) ) + { + counters.resize( counterCount ); + counterDescriptions.resize( counterCount ); + } + return createResultValue( result, data, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::enumerateQueueFamilyPerformanceQueryCountersKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModeProperties2KHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), pPropertyCount, reinterpret_cast< VkDisplayModeProperties2KHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModeProperties2KHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayModeProperties2KHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModeProperties2KHRAllocator & displayModeProperties2KHRAllocator, Dispatch const & d ) const + { + std::vector properties( displayModeProperties2KHRAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetDisplayModeProperties2KHR( m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, 
VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModeProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayModePropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), pPropertyCount, reinterpret_cast< VkDisplayModePropertiesKHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModePropertiesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayModePropertiesKHR( VULKAN_HPP_NAMESPACE::DisplayKHR display, DisplayModePropertiesKHRAllocator & displayModePropertiesKHRAllocator, Dispatch const & d ) const + { + std::vector properties( displayModePropertiesKHRAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetDisplayModePropertiesKHR( m_physicalDevice, static_cast( display ), &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayModePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilities2KHR( const VULKAN_HPP_NAMESPACE::DisplayPlaneInfo2KHR* pDisplayPlaneInfo, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR* pCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast( pDisplayPlaneInfo ), reinterpret_cast< VkDisplayPlaneCapabilities2KHR *>( pCapabilities ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type 
PhysicalDevice::getDisplayPlaneCapabilities2KHR( const DisplayPlaneInfo2KHR & displayPlaneInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilities2KHR capabilities; + Result result = static_cast( d.vkGetDisplayPlaneCapabilities2KHR( m_physicalDevice, reinterpret_cast( &displayPlaneInfo ), reinterpret_cast( &capabilities ) ) ); + return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilities2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR* pCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast( mode ), planeIndex, reinterpret_cast< VkDisplayPlaneCapabilitiesKHR *>( pCapabilities ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getDisplayPlaneCapabilitiesKHR( VULKAN_HPP_NAMESPACE::DisplayModeKHR mode, uint32_t planeIndex, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::DisplayPlaneCapabilitiesKHR capabilities; + Result result = static_cast( d.vkGetDisplayPlaneCapabilitiesKHR( m_physicalDevice, static_cast( mode ), planeIndex, reinterpret_cast( &capabilities ) ) ); + return createResultValue( result, capabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getDisplayPlaneCapabilitiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, uint32_t* pDisplayCount, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplays, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, pDisplayCount, reinterpret_cast< VkDisplayKHR *>( pDisplays ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, Dispatch const & d ) const + { + std::vector displays; + uint32_t displayCount; + Result result; + do + { + result = static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && displayCount ) + { + displays.resize( displayCount ); + result = static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast( displays.data() ) ) ); + VULKAN_HPP_ASSERT( displayCount <= displays.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( displayCount < displays.size() ) ) + { + displays.resize( displayCount ); + } + return createResultValue( result, displays, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR( uint32_t planeIndex, DisplayKHRAllocator & displayKHRAllocator, Dispatch const & d ) const + { + std::vector displays( displayKHRAllocator ); + uint32_t displayCount; + Result result; + do + { + result = static_cast( 
d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && displayCount ) + { + displays.resize( displayCount ); + result = static_cast( d.vkGetDisplayPlaneSupportedDisplaysKHR( m_physicalDevice, planeIndex, &displayCount, reinterpret_cast( displays.data() ) ) ); + VULKAN_HPP_ASSERT( displayCount <= displays.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( displayCount < displays.size() ) ) + { + displays.resize( displayCount ); + } + return createResultValue( result, displays, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneSupportedDisplaysKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getCalibrateableTimeDomainsEXT( uint32_t* pTimeDomainCount, VULKAN_HPP_NAMESPACE::TimeDomainEXT* pTimeDomains, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, pTimeDomainCount, reinterpret_cast< VkTimeDomainEXT *>( pTimeDomains ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getCalibrateableTimeDomainsEXT( Dispatch const & d ) const + { + std::vector timeDomains; + uint32_t timeDomainCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && timeDomainCount ) + { + timeDomains.resize( timeDomainCount ); + result = static_cast( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast( timeDomains.data() ) ) ); + VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( timeDomainCount < timeDomains.size() ) ) + { + timeDomains.resize( timeDomainCount ); + } + return createResultValue( result, timeDomains, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getCalibrateableTimeDomainsEXT( TimeDomainEXTAllocator & timeDomainEXTAllocator, Dispatch const & d ) const + { + std::vector timeDomains( timeDomainEXTAllocator ); + uint32_t timeDomainCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && timeDomainCount ) + { + timeDomains.resize( timeDomainCount ); + result = static_cast( d.vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( m_physicalDevice, &timeDomainCount, reinterpret_cast( timeDomains.data() ) ) ); + VULKAN_HPP_ASSERT( timeDomainCount <= timeDomains.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( timeDomainCount < timeDomains.size() ) ) + { + timeDomains.resize( timeDomainCount ); + } + return createResultValue( result, timeDomains, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCalibrateableTimeDomainsEXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getCooperativeMatrixPropertiesNV( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::CooperativeMatrixPropertiesNV* 
pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, pPropertyCount, reinterpret_cast< VkCooperativeMatrixPropertiesNV *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getCooperativeMatrixPropertiesNV( Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCooperativeMatrixPropertiesNV" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getCooperativeMatrixPropertiesNV( CooperativeMatrixPropertiesNVAllocator & cooperativeMatrixPropertiesNVAllocator, Dispatch const & d ) const + { + std::vector properties( cooperativeMatrixPropertiesNVAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getCooperativeMatrixPropertiesNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB* dfb, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceDirectFBPresentationSupportEXT( m_physicalDevice, queueFamilyIndex, dfb ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_INLINE Bool32 PhysicalDevice::getDirectFBPresentationSupportEXT( uint32_t queueFamilyIndex, IDirectFB & dfb, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return d.vkGetPhysicalDeviceDirectFBPresentationSupportEXT( m_physicalDevice, queueFamilyIndex, &dfb ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlaneProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlaneProperties2KHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( 
m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayPlaneProperties2KHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlaneProperties2KHR( Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneProperties2KHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlaneProperties2KHR( DisplayPlaneProperties2KHRAllocator & displayPlaneProperties2KHRAllocator, Dispatch const & d ) const + { + std::vector properties( displayPlaneProperties2KHRAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPlaneProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlaneProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPlanePropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPlanePropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayPlanePropertiesKHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlanePropertiesKHR( Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { 
+ properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlanePropertiesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPlanePropertiesKHR( DisplayPlanePropertiesKHRAllocator & displayPlanePropertiesKHRAllocator, Dispatch const & d ) const + { + std::vector properties( displayPlanePropertiesKHRAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPlanePropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPlanePropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayProperties2KHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayProperties2KHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayProperties2KHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayProperties2KHR( Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayProperties2KHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayProperties2KHR( DisplayProperties2KHRAllocator & displayProperties2KHRAllocator, Dispatch const & d ) const + { + std::vector properties( displayProperties2KHRAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayProperties2KHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == 
Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getDisplayPropertiesKHR( uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::DisplayPropertiesKHR* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, pPropertyCount, reinterpret_cast< VkDisplayPropertiesKHR *>( pProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPropertiesKHR( Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPropertiesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getDisplayPropertiesKHR( DisplayPropertiesKHRAllocator & displayPropertiesKHRAllocator, Dispatch const & d ) const + { + std::vector properties( displayPropertiesKHRAllocator ); + uint32_t propertyCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && propertyCount ) + { + properties.resize( propertyCount ); + result = static_cast( d.vkGetPhysicalDeviceDisplayPropertiesKHR( m_physicalDevice, &propertyCount, reinterpret_cast( properties.data() ) ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( propertyCount < properties.size() ) ) + { + properties.resize( propertyCount ); + } + return createResultValue( result, properties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getDisplayPropertiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast( pExternalBufferInfo ), reinterpret_cast< VkExternalBufferProperties *>( pExternalBufferProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferProperties( const PhysicalDeviceExternalBufferInfo & 
externalBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalBufferProperties externalBufferProperties; + d.vkGetPhysicalDeviceExternalBufferProperties( m_physicalDevice, reinterpret_cast( &externalBufferInfo ), reinterpret_cast( &externalBufferProperties ) ); + return externalBufferProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalBufferPropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VULKAN_HPP_NAMESPACE::ExternalBufferProperties* pExternalBufferProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast( pExternalBufferInfo ), reinterpret_cast< VkExternalBufferProperties *>( pExternalBufferProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalBufferProperties PhysicalDevice::getExternalBufferPropertiesKHR( const PhysicalDeviceExternalBufferInfo & externalBufferInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalBufferProperties externalBufferProperties; + d.vkGetPhysicalDeviceExternalBufferPropertiesKHR( m_physicalDevice, reinterpret_cast( &externalBufferInfo ), reinterpret_cast( &externalBufferProperties ) ); + return externalBufferProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalFenceProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast( pExternalFenceInfo ), reinterpret_cast< VkExternalFenceProperties *>( pExternalFenceProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFenceProperties( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalFenceProperties externalFenceProperties; + d.vkGetPhysicalDeviceExternalFenceProperties( m_physicalDevice, reinterpret_cast( &externalFenceInfo ), reinterpret_cast( &externalFenceProperties ) ); + return externalFenceProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalFencePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VULKAN_HPP_NAMESPACE::ExternalFenceProperties* pExternalFenceProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast( pExternalFenceInfo ), reinterpret_cast< VkExternalFenceProperties *>( pExternalFenceProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalFenceProperties PhysicalDevice::getExternalFencePropertiesKHR( const PhysicalDeviceExternalFenceInfo & externalFenceInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalFenceProperties externalFenceProperties; + d.vkGetPhysicalDeviceExternalFencePropertiesKHR( m_physicalDevice, reinterpret_cast( &externalFenceInfo ), 
reinterpret_cast( &externalFenceProperties ) ); + return externalFenceProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV* pExternalImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( tiling ), static_cast( usage ), static_cast( flags ), static_cast( externalHandleType ), reinterpret_cast< VkExternalImageFormatPropertiesNV *>( pExternalImageFormatProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getExternalImageFormatPropertiesNV( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsNV externalHandleType, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ExternalImageFormatPropertiesNV externalImageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceExternalImageFormatPropertiesNV( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( tiling ), static_cast( usage ), static_cast( flags ), static_cast( externalHandleType ), reinterpret_cast( &externalImageFormatProperties ) ) ); + return createResultValue( result, externalImageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getExternalImageFormatPropertiesNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphoreProperties( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast( pExternalSemaphoreInfo ), reinterpret_cast< VkExternalSemaphoreProperties *>( pExternalSemaphoreProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphoreProperties( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties externalSemaphoreProperties; + d.vkGetPhysicalDeviceExternalSemaphoreProperties( m_physicalDevice, reinterpret_cast( &externalSemaphoreInfo ), reinterpret_cast( &externalSemaphoreProperties ) ); + return externalSemaphoreProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getExternalSemaphorePropertiesKHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties* pExternalSemaphoreProperties, Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast( pExternalSemaphoreInfo ), reinterpret_cast< VkExternalSemaphoreProperties *>( pExternalSemaphoreProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties PhysicalDevice::getExternalSemaphorePropertiesKHR( const PhysicalDeviceExternalSemaphoreInfo & externalSemaphoreInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::ExternalSemaphoreProperties externalSemaphoreProperties; + d.vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( m_physicalDevice, reinterpret_cast( &externalSemaphoreInfo ), reinterpret_cast( &externalSemaphoreProperties ) ); + return externalSemaphoreProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures* pFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceFeatures *>( pFeatures ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures PhysicalDevice::getFeatures( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures features; + d.vkGetPhysicalDeviceFeatures( m_physicalDevice, reinterpret_cast( &features ) ); + return features; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceFeatures2 *>( pFeatures ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 features; + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast( &features ) ); + return features; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getFeatures2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 & features = structureChain.template get(); + d.vkGetPhysicalDeviceFeatures2( m_physicalDevice, reinterpret_cast( &features ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFeatures2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2* pFeatures, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceFeatures2 *>( pFeatures ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 PhysicalDevice::getFeatures2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 features; + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast( &features ) ); + return features; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getFeatures2KHR( Dispatch const & d ) const 
VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceFeatures2 & features = structureChain.template get(); + d.vkGetPhysicalDeviceFeatures2KHR( m_physicalDevice, reinterpret_cast( &features ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties* pFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast( format ), reinterpret_cast< VkFormatProperties *>( pFormatProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties PhysicalDevice::getFormatProperties( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::FormatProperties formatProperties; + d.vkGetPhysicalDeviceFormatProperties( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return formatProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast( format ), reinterpret_cast< VkFormatProperties2 *>( pFormatProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties2 PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::FormatProperties2 formatProperties; + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return formatProperties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getFormatProperties2( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::FormatProperties2 & formatProperties = structureChain.template get(); + d.vkGetPhysicalDeviceFormatProperties2( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::FormatProperties2* pFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast( format ), reinterpret_cast< VkFormatProperties2 *>( pFormatProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::FormatProperties2 PhysicalDevice::getFormatProperties2KHR( VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::FormatProperties2 formatProperties; + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return formatProperties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getFormatProperties2KHR( 
VULKAN_HPP_NAMESPACE::Format format, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::FormatProperties2 & formatProperties = structureChain.template get(); + d.vkGetPhysicalDeviceFormatProperties2KHR( m_physicalDevice, static_cast( format ), reinterpret_cast( &formatProperties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getFragmentShadingRatesKHR( uint32_t* pFragmentShadingRateCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, pFragmentShadingRateCount, reinterpret_cast< VkPhysicalDeviceFragmentShadingRateKHR *>( pFragmentShadingRates ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getFragmentShadingRatesKHR( Dispatch const & d ) const + { + std::vector fragmentShadingRates; + uint32_t fragmentShadingRateCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && fragmentShadingRateCount ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + result = static_cast( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, reinterpret_cast( fragmentShadingRates.data() ) ) ); + VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( fragmentShadingRateCount < fragmentShadingRates.size() ) ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + } + return createResultValue( result, fragmentShadingRates, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getFragmentShadingRatesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getFragmentShadingRatesKHR( PhysicalDeviceFragmentShadingRateKHRAllocator & physicalDeviceFragmentShadingRateKHRAllocator, Dispatch const & d ) const + { + std::vector fragmentShadingRates( physicalDeviceFragmentShadingRateKHRAllocator ); + uint32_t fragmentShadingRateCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && fragmentShadingRateCount ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + result = static_cast( d.vkGetPhysicalDeviceFragmentShadingRatesKHR( m_physicalDevice, &fragmentShadingRateCount, reinterpret_cast( fragmentShadingRates.data() ) ) ); + VULKAN_HPP_ASSERT( fragmentShadingRateCount <= fragmentShadingRates.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( fragmentShadingRateCount < fragmentShadingRates.size() ) ) + { + fragmentShadingRates.resize( fragmentShadingRateCount ); + } + return createResultValue( result, fragmentShadingRates, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getFragmentShadingRatesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, 
VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, VULKAN_HPP_NAMESPACE::ImageFormatProperties* pImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( tiling ), static_cast( usage ), static_cast( flags ), reinterpret_cast< VkImageFormatProperties *>( pImageFormatProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::ImageTiling tiling, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageCreateFlags flags, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageFormatProperties imageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( tiling ), static_cast( usage ), static_cast( flags ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast( pImageFormatInfo ), reinterpret_cast< VkImageFormatProperties2 *>( pImageFormatProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 imageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getImageFormatProperties2( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 & imageFormatProperties = structureChain.template get(); + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getImageFormatProperties2KHR( const 
VULKAN_HPP_NAMESPACE::PhysicalDeviceImageFormatInfo2* pImageFormatInfo, VULKAN_HPP_NAMESPACE::ImageFormatProperties2* pImageFormatProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( pImageFormatInfo ), reinterpret_cast< VkImageFormatProperties2 *>( pImageFormatProperties ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 imageFormatProperties; + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, imageFormatProperties, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getImageFormatProperties2KHR" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getImageFormatProperties2KHR( const PhysicalDeviceImageFormatInfo2 & imageFormatInfo, Dispatch const & d ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::ImageFormatProperties2 & imageFormatProperties = structureChain.template get(); + Result result = static_cast( d.vkGetPhysicalDeviceImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &imageFormatInfo ), reinterpret_cast( &imageFormatProperties ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getImageFormatProperties2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* pMemoryProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceMemoryProperties *>( pMemoryProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties PhysicalDevice::getMemoryProperties( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties memoryProperties; + d.vkGetPhysicalDeviceMemoryProperties( m_physicalDevice, reinterpret_cast( &memoryProperties ) ); + return memoryProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceMemoryProperties2 *>( pMemoryProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 memoryProperties; + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast( &memoryProperties ) ); + return memoryProperties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getMemoryProperties2( Dispatch const 
& d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 & memoryProperties = structureChain.template get(); + d.vkGetPhysicalDeviceMemoryProperties2( m_physicalDevice, reinterpret_cast( &memoryProperties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getMemoryProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2* pMemoryProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceMemoryProperties2 *>( pMemoryProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 PhysicalDevice::getMemoryProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 memoryProperties; + d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast( &memoryProperties ) ); + return memoryProperties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getMemoryProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties2 & memoryProperties = structureChain.template get(); + d.vkGetPhysicalDeviceMemoryProperties2KHR( m_physicalDevice, reinterpret_cast( &memoryProperties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT* pMultisampleProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast( samples ), reinterpret_cast< VkMultisamplePropertiesEXT *>( pMultisampleProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT PhysicalDevice::getMultisamplePropertiesEXT( VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::MultisamplePropertiesEXT multisampleProperties; + d.vkGetPhysicalDeviceMultisamplePropertiesEXT( m_physicalDevice, static_cast( samples ), reinterpret_cast( &multisampleProperties ) ); + return multisampleProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pRectCount, VULKAN_HPP_NAMESPACE::Rect2D* pRects, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), pRectCount, reinterpret_cast< VkRect2D *>( pRects ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + std::vector rects; + uint32_t rectCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), &rectCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && 
rectCount ) + { + rects.resize( rectCount ); + result = static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), &rectCount, reinterpret_cast( rects.data() ) ) ); + VULKAN_HPP_ASSERT( rectCount <= rects.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( rectCount < rects.size() ) ) + { + rects.resize( rectCount ); + } + return createResultValue( result, rects, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getPresentRectanglesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getPresentRectanglesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Rect2DAllocator & rect2DAllocator, Dispatch const & d ) const + { + std::vector rects( rect2DAllocator ); + uint32_t rectCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), &rectCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && rectCount ) + { + rects.resize( rectCount ); + result = static_cast( d.vkGetPhysicalDevicePresentRectanglesKHR( m_physicalDevice, static_cast( surface ), &rectCount, reinterpret_cast( rects.data() ) ) ); + VULKAN_HPP_ASSERT( rectCount <= rects.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( rectCount < rects.size() ) ) + { + rects.resize( rectCount ); + } + return createResultValue( result, rects, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getPresentRectanglesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getProperties( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceProperties *>( pProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties PhysicalDevice::getProperties( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties properties; + d.vkGetPhysicalDeviceProperties( m_physicalDevice, reinterpret_cast( &properties ) ); + return properties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getProperties2( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceProperties2 *>( pProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 PhysicalDevice::getProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties; + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast( &properties ) ); + return properties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getProperties2( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 & properties = structureChain.template get(); + d.vkGetPhysicalDeviceProperties2( m_physicalDevice, reinterpret_cast( &properties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + 
VULKAN_HPP_INLINE void PhysicalDevice::getProperties2KHR( VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast< VkPhysicalDeviceProperties2 *>( pProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 PhysicalDevice::getProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 properties; + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast( &properties ) ); + return properties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE StructureChain PhysicalDevice::getProperties2KHR( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties2 & properties = structureChain.template get(); + d.vkGetPhysicalDeviceProperties2KHR( m_physicalDevice, reinterpret_cast( &properties ) ); + return structureChain; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( const VULKAN_HPP_NAMESPACE::QueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( m_physicalDevice, reinterpret_cast( pPerformanceQueryCreateInfo ), pNumPasses ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE uint32_t PhysicalDevice::getQueueFamilyPerformanceQueryPassesKHR( const QueryPoolPerformanceCreateInfoKHR & performanceQueryCreateInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + uint32_t numPasses; + d.vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( m_physicalDevice, reinterpret_cast( &performanceQueryCreateInfo ), &numPasses ); + return numPasses; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties* pQueueFamilyProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast< VkQueueFamilyProperties *>( pQueueFamilyProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties( Dispatch const & d ) const + { + std::vector queueFamilyProperties; + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties( QueueFamilyPropertiesAllocator & queueFamilyPropertiesAllocator, Dispatch const & d ) const + { + std::vector queueFamilyProperties( queueFamilyPropertiesAllocator ); + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties( 
m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast< VkQueueFamilyProperties2 *>( pQueueFamilyProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2( Dispatch const & d ) const + { + std::vector queueFamilyProperties; + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d ) const + { + std::vector queueFamilyProperties( queueFamilyProperties2Allocator ); + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2( Dispatch const & d ) const + { + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + std::vector returnVector( queueFamilyPropertyCount ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + queueFamilyProperties[i].pNext = + returnVector[i].template get().pNext; + } + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + returnVector[i].template get() = queueFamilyProperties[i]; + } + return returnVector; + } + + template ::value, int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2( StructureChainAllocator & structureChainAllocator, Dispatch const & d ) const + { + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + 
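+    // Note on the StructureChain overloads here: returnVector owns one complete chain per queue
+    // family, while the temporary queueFamilyProperties vector holds plain QueueFamilyProperties2
+    // heads whose pNext pointers are borrowed from the chained structs inside returnVector, so the
+    // driver fills the extension structs in place; the head structs are then copied back into the
+    // chains before returning.
+    //
+    // A minimal usage sketch (illustrative only, not part of the vendored header; assumes the
+    // default "vk" namespace, an initialized default dispatcher, and QueueFamilyCheckpointPropertiesNV
+    // as an example extension struct):
+    //
+    //   auto chains = physicalDevice.getQueueFamilyProperties2<
+    //       vk::StructureChain<vk::QueueFamilyProperties2, vk::QueueFamilyCheckpointPropertiesNV>>();
+    //   for ( auto const & chain : chains )
+    //   {
+    //     vk::QueueFamilyProperties2 const & props = chain.get<vk::QueueFamilyProperties2>();
+    //   }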
std::vector returnVector( queueFamilyPropertyCount, structureChainAllocator ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + queueFamilyProperties[i].pNext = + returnVector[i].template get().pNext; + } + d.vkGetPhysicalDeviceQueueFamilyProperties2( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + returnVector[i].template get() = queueFamilyProperties[i]; + } + return returnVector; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getQueueFamilyProperties2KHR( uint32_t* pQueueFamilyPropertyCount, VULKAN_HPP_NAMESPACE::QueueFamilyProperties2* pQueueFamilyProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, pQueueFamilyPropertyCount, reinterpret_cast< VkQueueFamilyProperties2 *>( pQueueFamilyProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2KHR( Dispatch const & d ) const + { + std::vector queueFamilyProperties; + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2KHR( QueueFamilyProperties2Allocator & queueFamilyProperties2Allocator, Dispatch const & d ) const + { + std::vector queueFamilyProperties( queueFamilyProperties2Allocator ); + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + queueFamilyProperties.resize( queueFamilyPropertyCount ); + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + return queueFamilyProperties; + } + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2KHR( Dispatch const & d ) const + { + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + std::vector returnVector( queueFamilyPropertyCount ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + queueFamilyProperties[i].pNext = + returnVector[i].template get().pNext; + } + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + returnVector[i].template get() = queueFamilyProperties[i]; + } + return returnVector; + } + + template ::value, 
int>::type> + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getQueueFamilyProperties2KHR( StructureChainAllocator & structureChainAllocator, Dispatch const & d ) const + { + uint32_t queueFamilyPropertyCount; + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, nullptr ); + std::vector returnVector( queueFamilyPropertyCount, structureChainAllocator ); + std::vector queueFamilyProperties( queueFamilyPropertyCount ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + queueFamilyProperties[i].pNext = + returnVector[i].template get().pNext; + } + d.vkGetPhysicalDeviceQueueFamilyProperties2KHR( m_physicalDevice, &queueFamilyPropertyCount, reinterpret_cast( queueFamilyProperties.data() ) ); + VULKAN_HPP_ASSERT( queueFamilyPropertyCount <= queueFamilyProperties.size() ); + for ( uint32_t i = 0; i < queueFamilyPropertyCount; i++ ) + { + returnVector[i].template get() = queueFamilyProperties[i]; + } + return returnVector; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( samples ), static_cast( usage ), static_cast( tiling ), pPropertyCount, reinterpret_cast< VkSparseImageFormatProperties *>( pProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( samples ), static_cast( usage ), static_cast( tiling ), &propertyCount, nullptr ); + properties.resize( propertyCount ); + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( samples ), static_cast( usage ), static_cast( tiling ), &propertyCount, reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getSparseImageFormatProperties( VULKAN_HPP_NAMESPACE::Format format, VULKAN_HPP_NAMESPACE::ImageType type, VULKAN_HPP_NAMESPACE::SampleCountFlagBits samples, VULKAN_HPP_NAMESPACE::ImageUsageFlags usage, VULKAN_HPP_NAMESPACE::ImageTiling tiling, SparseImageFormatPropertiesAllocator & sparseImageFormatPropertiesAllocator, Dispatch const & d ) const + { + std::vector properties( sparseImageFormatPropertiesAllocator ); + uint32_t propertyCount; + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( samples ), static_cast( usage ), static_cast( tiling ), &propertyCount, nullptr ); + 
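+    // vkGetPhysicalDeviceSparseImageFormatProperties returns no VkResult, so the enhanced wrappers
+    // use a single count/fill pair instead of the VK_INCOMPLETE retry loop used elsewhere: query the
+    // element count with a null data pointer, resize the output vector, then fetch the data.
+    //
+    // A minimal usage sketch (illustrative only, not part of the vendored header; assumes the
+    // default "vk" namespace and dispatcher, and an arbitrary format/usage combination):
+    //
+    //   std::vector<vk::SparseImageFormatProperties> props =
+    //       physicalDevice.getSparseImageFormatProperties( vk::Format::eR8G8B8A8Unorm,
+    //                                                       vk::ImageType::e2D,
+    //                                                       vk::SampleCountFlagBits::e1,
+    //                                                       vk::ImageUsageFlagBits::eSampled,
+    //                                                       vk::ImageTiling::eOptimal );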
properties.resize( propertyCount ); + d.vkGetPhysicalDeviceSparseImageFormatProperties( m_physicalDevice, static_cast( format ), static_cast( type ), static_cast( samples ), static_cast( usage ), static_cast( tiling ), &propertyCount, reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast( pFormatInfo ), pPropertyCount, reinterpret_cast< VkSparseImageFormatProperties2 *>( pProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, nullptr ); + properties.resize( propertyCount ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getSparseImageFormatProperties2( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d ) const + { + std::vector properties( sparseImageFormatProperties2Allocator ); + uint32_t propertyCount; + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, nullptr ); + properties.resize( propertyCount ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + template + VULKAN_HPP_INLINE void PhysicalDevice::getSparseImageFormatProperties2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VULKAN_HPP_NAMESPACE::SparseImageFormatProperties2* pProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( pFormatInfo ), pPropertyCount, reinterpret_cast< VkSparseImageFormatProperties2 *>( pProperties ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, Dispatch const & d ) const + { + std::vector properties; + uint32_t propertyCount; + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, nullptr ); + properties.resize( propertyCount ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( 
&formatInfo ), &propertyCount, reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector PhysicalDevice::getSparseImageFormatProperties2KHR( const PhysicalDeviceSparseImageFormatInfo2 & formatInfo, SparseImageFormatProperties2Allocator & sparseImageFormatProperties2Allocator, Dispatch const & d ) const + { + std::vector properties( sparseImageFormatProperties2Allocator ); + uint32_t propertyCount; + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, nullptr ); + properties.resize( propertyCount ); + d.vkGetPhysicalDeviceSparseImageFormatProperties2KHR( m_physicalDevice, reinterpret_cast( &formatInfo ), &propertyCount, reinterpret_cast( properties.data() ) ); + VULKAN_HPP_ASSERT( propertyCount <= properties.size() ); + return properties; + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( uint32_t* pCombinationCount, VULKAN_HPP_NAMESPACE::FramebufferMixedSamplesCombinationNV* pCombinations, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, pCombinationCount, reinterpret_cast< VkFramebufferMixedSamplesCombinationNV *>( pCombinations ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( Dispatch const & d ) const + { + std::vector combinations; + uint32_t combinationCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && combinationCount ) + { + combinations.resize( combinationCount ); + result = static_cast( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, reinterpret_cast( combinations.data() ) ) ); + VULKAN_HPP_ASSERT( combinationCount <= combinations.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( combinationCount < combinations.size() ) ) + { + combinations.resize( combinationCount ); + } + return createResultValue( result, combinations, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV( FramebufferMixedSamplesCombinationNVAllocator & framebufferMixedSamplesCombinationNVAllocator, Dispatch const & d ) const + { + std::vector combinations( framebufferMixedSamplesCombinationNVAllocator ); + uint32_t combinationCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && combinationCount ) + { + combinations.resize( combinationCount ); + result = static_cast( d.vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( m_physicalDevice, &combinationCount, reinterpret_cast( combinations.data() ) ) ); + VULKAN_HPP_ASSERT( combinationCount 
<= combinations.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( combinationCount < combinations.size() ) ) + { + combinations.resize( combinationCount ); + } + return createResultValue( result, combinations, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSupportedFramebufferMixedSamplesCombinationsNV" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT* pSurfaceCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast( surface ), reinterpret_cast< VkSurfaceCapabilities2EXT *>( pSurfaceCapabilities ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getSurfaceCapabilities2EXT( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2EXT surfaceCapabilities; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2EXT( m_physicalDevice, static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2EXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilities2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR* pSurfaceCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( pSurfaceInfo ), reinterpret_cast< VkSurfaceCapabilities2KHR *>( pSurfaceCapabilities ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR surfaceCapabilities; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilities2KHR" ); + } + + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceCapabilities2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const + { + StructureChain structureChain; + VULKAN_HPP_NAMESPACE::SurfaceCapabilities2KHR & surfaceCapabilities = structureChain.template get(); + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilities2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, structureChain, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceCapabilities2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceCapabilitiesKHR( 
VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR* pSurfaceCapabilities, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast( surface ), reinterpret_cast< VkSurfaceCapabilitiesKHR *>( pSurfaceCapabilities ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getSurfaceCapabilitiesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::SurfaceCapabilitiesKHR surfaceCapabilities; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceCapabilitiesKHR( m_physicalDevice, static_cast( surface ), reinterpret_cast( &surfaceCapabilities ) ) ); + return createResultValue( result, surfaceCapabilities, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceCapabilitiesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormats2KHR( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormat2KHR* pSurfaceFormats, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( pSurfaceInfo ), pSurfaceFormatCount, reinterpret_cast< VkSurfaceFormat2KHR *>( pSurfaceFormats ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const + { + std::vector surfaceFormats; + uint32_t surfaceFormatCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) + { + surfaceFormats.resize( surfaceFormatCount ); + } + return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormats2KHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceFormats2KHR( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, SurfaceFormat2KHRAllocator & surfaceFormat2KHRAllocator, Dispatch const & d ) const + { + std::vector surfaceFormats( surfaceFormat2KHRAllocator ); + uint32_t surfaceFormatCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &surfaceFormatCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormats2KHR( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &surfaceFormatCount, reinterpret_cast( 
surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) + { + surfaceFormats.resize( surfaceFormatCount ); + } + return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormats2KHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pSurfaceFormatCount, VULKAN_HPP_NAMESPACE::SurfaceFormatKHR* pSurfaceFormats, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), pSurfaceFormatCount, reinterpret_cast< VkSurfaceFormatKHR *>( pSurfaceFormats ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + std::vector surfaceFormats; + uint32_t surfaceFormatCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), &surfaceFormatCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), &surfaceFormatCount, reinterpret_cast( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) + { + surfaceFormats.resize( surfaceFormatCount ); + } + return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormatsKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfaceFormatsKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, SurfaceFormatKHRAllocator & surfaceFormatKHRAllocator, Dispatch const & d ) const + { + std::vector surfaceFormats( surfaceFormatKHRAllocator ); + uint32_t surfaceFormatCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), &surfaceFormatCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && surfaceFormatCount ) + { + surfaceFormats.resize( surfaceFormatCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfaceFormatsKHR( m_physicalDevice, static_cast( surface ), &surfaceFormatCount, reinterpret_cast( surfaceFormats.data() ) ) ); + VULKAN_HPP_ASSERT( surfaceFormatCount <= surfaceFormats.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( surfaceFormatCount < surfaceFormats.size() ) ) + { + surfaceFormats.resize( surfaceFormatCount ); + } + return createResultValue( result, surfaceFormats, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfaceFormatsKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + +#ifdef VK_USE_PLATFORM_WIN32_KHR + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModes2EXT( const VULKAN_HPP_NAMESPACE::PhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, 
uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast( pSurfaceInfo ), pPresentModeCount, reinterpret_cast< VkPresentModeKHR *>( pPresentModes ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, Dispatch const & d ) const + { + std::vector presentModes; + uint32_t presentModeCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &presentModeCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &presentModeCount, reinterpret_cast( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) + { + presentModes.resize( presentModeCount ); + } + return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModes2EXT" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfacePresentModes2EXT( const PhysicalDeviceSurfaceInfo2KHR & surfaceInfo, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d ) const + { + std::vector presentModes( presentModeKHRAllocator ); + uint32_t presentModeCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &presentModeCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModes2EXT( m_physicalDevice, reinterpret_cast( &surfaceInfo ), &presentModeCount, reinterpret_cast( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) + { + presentModes.resize( presentModeCount ); + } + return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModes2EXT" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, uint32_t* pPresentModeCount, VULKAN_HPP_NAMESPACE::PresentModeKHR* pPresentModes, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), pPresentModeCount, reinterpret_cast< VkPresentModeKHR *>( pPresentModes ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + std::vector presentModes; + 
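+    // vkGetPhysicalDeviceSurfacePresentModesKHR may report VK_INCOMPLETE if the number of present
+    // modes changes between the count query and the data query, so the wrapper below loops until it
+    // sees eSuccess and then shrinks the vector if fewer elements were actually written.
+    //
+    // A minimal usage sketch (illustrative only, not part of the vendored header; assumes exceptions
+    // are enabled, the default "vk" namespace/dispatcher, and a previously created vk::SurfaceKHR
+    // named "surface"):
+    //
+    //   std::vector<vk::PresentModeKHR> modes = physicalDevice.getSurfacePresentModesKHR( surface );
+    //   bool hasMailbox =
+    //       std::find( modes.begin(), modes.end(), vk::PresentModeKHR::eMailbox ) != modes.end();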
uint32_t presentModeCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), &presentModeCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), &presentModeCount, reinterpret_cast( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) + { + presentModes.resize( presentModeCount ); + } + return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModesKHR" ); + } + + template ::value, int>::type > + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType>::type PhysicalDevice::getSurfacePresentModesKHR( VULKAN_HPP_NAMESPACE::SurfaceKHR surface, PresentModeKHRAllocator & presentModeKHRAllocator, Dispatch const & d ) const + { + std::vector presentModes( presentModeKHRAllocator ); + uint32_t presentModeCount; + Result result; + do + { + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), &presentModeCount, nullptr ) ); + if ( ( result == Result::eSuccess ) && presentModeCount ) + { + presentModes.resize( presentModeCount ); + result = static_cast( d.vkGetPhysicalDeviceSurfacePresentModesKHR( m_physicalDevice, static_cast( surface ), &presentModeCount, reinterpret_cast( presentModes.data() ) ) ); + VULKAN_HPP_ASSERT( presentModeCount <= presentModes.size() ); + } + } while ( result == Result::eIncomplete ); + if ( ( result == Result::eSuccess ) && ( presentModeCount < presentModes.size() ) ) + { + presentModes.resize( presentModeCount ); + } + return createResultValue( result, presentModes, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getSurfacePresentModesKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, VULKAN_HPP_NAMESPACE::Bool32* pSupported, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast( surface ), reinterpret_cast< VkBool32 *>( pSupported ) ) ); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType::type PhysicalDevice::getSurfaceSupportKHR( uint32_t queueFamilyIndex, VULKAN_HPP_NAMESPACE::SurfaceKHR surface, Dispatch const & d ) const + { + VULKAN_HPP_NAMESPACE::Bool32 supported; + Result result = static_cast( d.vkGetPhysicalDeviceSurfaceSupportKHR( m_physicalDevice, queueFamilyIndex, static_cast( surface ), reinterpret_cast( &supported ) ) ); + return createResultValue( result, supported, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getSurfaceSupportKHR" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + + + template + VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getToolPropertiesEXT( uint32_t* pToolCount, VULKAN_HPP_NAMESPACE::PhysicalDeviceToolPropertiesEXT* pToolProperties, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT + { + return static_cast( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, pToolCount, reinterpret_cast< 
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename PhysicalDeviceToolPropertiesEXTAllocator, typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator>>::type PhysicalDevice::getToolPropertiesEXT( Dispatch const & d ) const
+  {
+    std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator> toolProperties;
+    uint32_t toolCount;
+    Result result;
+    do
+    {
+      result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, nullptr ) );
+      if ( ( result == Result::eSuccess ) && toolCount )
+      {
+        toolProperties.resize( toolCount );
+        result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT *>( toolProperties.data() ) ) );
+        VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
+      }
+    } while ( result == Result::eIncomplete );
+    if ( ( result == Result::eSuccess ) && ( toolCount < toolProperties.size() ) )
+    {
+      toolProperties.resize( toolCount );
+    }
+    return createResultValue( result, toolProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getToolPropertiesEXT" );
+  }
+
+  template <typename PhysicalDeviceToolPropertiesEXTAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, PhysicalDeviceToolPropertiesEXT>::value, int>::type >
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE typename ResultValueType<std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator>>::type PhysicalDevice::getToolPropertiesEXT( PhysicalDeviceToolPropertiesEXTAllocator & physicalDeviceToolPropertiesEXTAllocator, Dispatch const & d ) const
+  {
+    std::vector<PhysicalDeviceToolPropertiesEXT, PhysicalDeviceToolPropertiesEXTAllocator> toolProperties( physicalDeviceToolPropertiesEXTAllocator );
+    uint32_t toolCount;
+    Result result;
+    do
+    {
+      result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, nullptr ) );
+      if ( ( result == Result::eSuccess ) && toolCount )
+      {
+        toolProperties.resize( toolCount );
+        result = static_cast<Result>( d.vkGetPhysicalDeviceToolPropertiesEXT( m_physicalDevice, &toolCount, reinterpret_cast<VkPhysicalDeviceToolPropertiesEXT *>( toolProperties.data() ) ) );
+        VULKAN_HPP_ASSERT( toolCount <= toolProperties.size() );
+      }
+    } while ( result == Result::eIncomplete );
+    if ( ( result == Result::eSuccess ) && ( toolCount < toolProperties.size() ) )
+    {
+      toolProperties.resize( toolCount );
+    }
+    return createResultValue( result, toolProperties, VULKAN_HPP_NAMESPACE_STRING"::PhysicalDevice::getToolPropertiesEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display* display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Bool32>( d.vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, display ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWaylandPresentationSupportKHR( uint32_t queueFamilyIndex, struct wl_display & display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return d.vkGetPhysicalDeviceWaylandPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &display );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/
+
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getWin32PresentationSupportKHR( uint32_t queueFamilyIndex, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Bool32>( d.vkGetPhysicalDeviceWin32PresentationSupportKHR( m_physicalDevice, queueFamilyIndex ) );
+  }
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Bool32>( d.vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, connection, visual_id ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXcbPresentationSupportKHR( uint32_t queueFamilyIndex, xcb_connection_t & connection, xcb_visualid_t visual_id, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return d.vkGetPhysicalDeviceXcbPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &connection, visual_id );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XCB_KHR*/
+
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display* dpy, VisualID visualID, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Bool32>( d.vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, dpy, visualID ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Bool32 PhysicalDevice::getXlibPresentationSupportKHR( uint32_t queueFamilyIndex, Display & dpy, VisualID visualID, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return d.vkGetPhysicalDeviceXlibPresentationSupportKHR( m_physicalDevice, queueFamilyIndex, &dpy, visualID );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_KHR*/
+
+
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result PhysicalDevice::getRandROutputDisplayEXT( Display* dpy, RROutput rrOutput, VULKAN_HPP_NAMESPACE::DisplayKHR* pDisplay, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, dpy, rrOutput, reinterpret_cast< VkDisplayKHR *>( pDisplay ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<VULKAN_HPP_NAMESPACE::DisplayKHR>::type PhysicalDevice::getRandROutputDisplayEXT( Display & dpy, RROutput rrOutput, Dispatch const & d ) const
+  {
+    VULKAN_HPP_NAMESPACE::DisplayKHR display;
+    Result result = static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast<VkDisplayKHR *>( &display ) ) );
+    return createResultValue( result, display, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXT" );
+  }
+
+# ifndef VULKAN_HPP_NO_SMART_HANDLE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<UniqueHandle<DisplayKHR, Dispatch>>::type PhysicalDevice::getRandROutputDisplayEXTUnique( Display & dpy, RROutput rrOutput, Dispatch const & d ) const
+  {
+    VULKAN_HPP_NAMESPACE::DisplayKHR display;
+    Result result = static_cast<Result>( d.vkGetRandROutputDisplayEXT( m_physicalDevice, &dpy, rrOutput, reinterpret_cast<VkDisplayKHR *>( &display ) ) );
+    ObjectRelease<PhysicalDevice, Dispatch> deleter( *this, d );
+    return createResultValue<DisplayKHR, Dispatch>( result, display, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::getRandROutputDisplayEXTUnique", deleter );
+  }
+# endif /*VULKAN_HPP_NO_SMART_HANDLE*/
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE Result PhysicalDevice::releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkReleaseDisplayEXT( m_physicalDevice, static_cast<VkDisplayKHR>( display ) ) );
+  }
+#else
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE typename ResultValueType<void>::type PhysicalDevice::releaseDisplayEXT( VULKAN_HPP_NAMESPACE::DisplayKHR display, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkReleaseDisplayEXT( m_physicalDevice, static_cast<VkDisplayKHR>( display ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::PhysicalDevice::releaseDisplayEXT" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Queue::getCheckpointDataNV( uint32_t* pCheckpointDataCount, VULKAN_HPP_NAMESPACE::CheckpointDataNV* pCheckpointData, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkGetQueueCheckpointDataNV( m_queue, pCheckpointDataCount, reinterpret_cast< VkCheckpointDataNV *>( pCheckpointData ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename CheckpointDataNVAllocator, typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<CheckpointDataNV, CheckpointDataNVAllocator> Queue::getCheckpointDataNV( Dispatch const & d ) const
+  {
+    std::vector<CheckpointDataNV, CheckpointDataNVAllocator> checkpointData;
+    uint32_t checkpointDataCount;
+    d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, nullptr );
+    checkpointData.resize( checkpointDataCount );
+    d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast<VkCheckpointDataNV *>( checkpointData.data() ) );
+    VULKAN_HPP_ASSERT( checkpointDataCount <= checkpointData.size() );
+    return checkpointData;
+  }
+
+  template <typename CheckpointDataNVAllocator, typename Dispatch, typename B, typename std::enable_if<std::is_same<typename B::value_type, CheckpointDataNV>::value, int>::type >
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE std::vector<CheckpointDataNV, CheckpointDataNVAllocator> Queue::getCheckpointDataNV( CheckpointDataNVAllocator & checkpointDataNVAllocator, Dispatch const & d ) const
+  {
+    std::vector<CheckpointDataNV, CheckpointDataNVAllocator> checkpointData( checkpointDataNVAllocator );
+    uint32_t checkpointDataCount;
+    d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, nullptr );
+    checkpointData.resize( checkpointDataCount );
+    d.vkGetQueueCheckpointDataNV( m_queue, &checkpointDataCount, reinterpret_cast<VkCheckpointDataNV *>( checkpointData.data() ) );
+    VULKAN_HPP_ASSERT( checkpointDataCount <= checkpointData.size() );
+    return checkpointData;
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( pLabelInfo ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Queue::beginDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkQueueBeginDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( &labelInfo ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::bindSparse( uint32_t bindInfoCount, const VULKAN_HPP_NAMESPACE::BindSparseInfo* pBindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkQueueBindSparse( m_queue, bindInfoCount, reinterpret_cast<const VkBindSparseInfo *>( pBindInfo ), static_cast<VkFence>( fence ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::bindSparse( ArrayProxy<const VULKAN_HPP_NAMESPACE::BindSparseInfo> const & bindInfo, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkQueueBindSparse( m_queue, bindInfo.size(), reinterpret_cast<const VkBindSparseInfo *>( bindInfo.data() ), static_cast<VkFence>( fence ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::bindSparse" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Queue::endDebugUtilsLabelEXT( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkQueueEndDebugUtilsLabelEXT( m_queue );
+  }
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const VULKAN_HPP_NAMESPACE::DebugUtilsLabelEXT* pLabelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( pLabelInfo ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_INLINE void Queue::insertDebugUtilsLabelEXT( const DebugUtilsLabelEXT & labelInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    d.vkQueueInsertDebugUtilsLabelEXT( m_queue, reinterpret_cast<const VkDebugUtilsLabelEXT *>( &labelInfo ) );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::presentKHR( const VULKAN_HPP_NAMESPACE::PresentInfoKHR* pPresentInfo, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR *>( pPresentInfo ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::presentKHR( const PresentInfoKHR & presentInfo, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkQueuePresentKHR( m_queue, reinterpret_cast<const VkPresentInfoKHR *>( &presentInfo ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::presentKHR", { VULKAN_HPP_NAMESPACE::Result::eSuccess, VULKAN_HPP_NAMESPACE::Result::eSuboptimalKHR } );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkQueueSetPerformanceConfigurationINTEL( m_queue, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) );
+  }
+#else
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::setPerformanceConfigurationINTEL( VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL configuration, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkQueueSetPerformanceConfigurationINTEL( m_queue, static_cast<VkPerformanceConfigurationINTEL>( configuration ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::setPerformanceConfigurationINTEL" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::submit( uint32_t submitCount, const VULKAN_HPP_NAMESPACE::SubmitInfo* pSubmits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkQueueSubmit( m_queue, submitCount, reinterpret_cast<const VkSubmitInfo *>( pSubmits ), static_cast<VkFence>( fence ) ) );
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::submit( ArrayProxy<const VULKAN_HPP_NAMESPACE::SubmitInfo> const & submits, VULKAN_HPP_NAMESPACE::Fence fence, Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkQueueSubmit( m_queue, submits.size(), reinterpret_cast<const VkSubmitInfo *>( submits.data() ), static_cast<VkFence>( fence ) ) );
+    return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::submit" );
+  }
+#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/
+
+
+#ifdef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD VULKAN_HPP_INLINE Result Queue::waitIdle( Dispatch const & d ) const VULKAN_HPP_NOEXCEPT
+  {
+    return static_cast<Result>( d.vkQueueWaitIdle( m_queue ) );
+  }
+#else
+  template <typename Dispatch>
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_INLINE typename ResultValueType<void>::type Queue::waitIdle( Dispatch const & d ) const
+  {
+    Result result = static_cast<Result>( d.vkQueueWaitIdle( m_queue
) ); + return createResultValue( result, VULKAN_HPP_NAMESPACE_STRING "::Queue::waitIdle" ); + } +#endif /*VULKAN_HPP_DISABLE_ENHANCED_MODE*/ + +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif 
/*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_ANDROID_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + 
template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct 
StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_ENABLE_BETA_EXTENSIONS + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ +#ifdef VK_ENABLE_BETA_EXTENSIONS + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_ENABLE_BETA_EXTENSIONS*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct 
StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum 
{ value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_GGP + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_GGP*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + 
template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + template <> struct StructExtends{ enum { value = true }; }; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + template <> struct StructExtends{ enum { value = true }; }; + +#if VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL + class DynamicLoader + { + public: +# ifdef VULKAN_HPP_NO_EXCEPTIONS + DynamicLoader( std::string const & vulkanLibraryName = {} ) VULKAN_HPP_NOEXCEPT +# else + DynamicLoader( std::string const & vulkanLibraryName = {} ) +# endif + { + if ( !vulkanLibraryName.empty() ) + { +# if defined( __linux__ ) || defined( __APPLE__ ) + m_library = dlopen( vulkanLibraryName.c_str(), RTLD_NOW | RTLD_LOCAL ); +# elif defined( _WIN32 ) + m_library = ::LoadLibraryA( vulkanLibraryName.c_str() ); +# else +# error unsupported platform +# endif + } + else + { +# if defined( __linux__ ) + m_library = dlopen( "libvulkan.so", RTLD_NOW | RTLD_LOCAL ); + if ( m_library == nullptr ) + { + m_library = dlopen( "libvulkan.so.1", RTLD_NOW | RTLD_LOCAL ); + } +# elif defined( __APPLE__ ) + m_library = dlopen( "libvulkan.dylib", RTLD_NOW | RTLD_LOCAL ); +# elif defined( _WIN32 ) + m_library = ::LoadLibraryA( "vulkan-1.dll" ); +# else +# error unsupported platform +# endif + } + +#ifndef VULKAN_HPP_NO_EXCEPTIONS + if ( m_library == nullptr ) + { + // NOTE there should be an InitializationFailedError, but msvc insists on the symbol does not exist within the scope of this function. + throw std::runtime_error( "Failed to load vulkan library!" 
); + } +#endif + } + + DynamicLoader( DynamicLoader const& ) = delete; + + DynamicLoader( DynamicLoader && other ) VULKAN_HPP_NOEXCEPT : m_library(other.m_library) + { + other.m_library = nullptr; + } + + DynamicLoader &operator=( DynamicLoader const& ) = delete; + + DynamicLoader &operator=( DynamicLoader && other ) VULKAN_HPP_NOEXCEPT + { + std::swap(m_library, other.m_library); + return *this; + } + + ~DynamicLoader() VULKAN_HPP_NOEXCEPT + { + if ( m_library ) + { +# if defined( __linux__ ) || defined( __APPLE__ ) + dlclose( m_library ); +# elif defined( _WIN32 ) + ::FreeLibrary( m_library ); +# else +# error unsupported platform +# endif + } + } + + template + T getProcAddress( const char* function ) const VULKAN_HPP_NOEXCEPT + { +# if defined( __linux__ ) || defined( __APPLE__ ) + return (T)dlsym( m_library, function ); +# elif defined( _WIN32 ) + return (T)::GetProcAddress( m_library, function ); +# else +# error unsupported platform +# endif + } + + bool success() const VULKAN_HPP_NOEXCEPT { return m_library != nullptr; } + + private: +# if defined( __linux__ ) || defined( __APPLE__ ) + void * m_library; +# elif defined( _WIN32 ) + ::HINSTANCE m_library; +# else +# error unsupported platform +# endif + }; +#endif + + + class DispatchLoaderDynamic + { + public: +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR = 0; + PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR = 0; + PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL = 0; + PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR = 0; +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT = 0; +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers = 0; + PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets = 0; + PFN_vkAllocateMemory vkAllocateMemory = 0; + PFN_vkBeginCommandBuffer vkBeginCommandBuffer = 0; + PFN_vkBindAccelerationStructureMemoryNV vkBindAccelerationStructureMemoryNV = 0; + PFN_vkBindBufferMemory vkBindBufferMemory = 0; + PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR = 0; + PFN_vkBindBufferMemory2 vkBindBufferMemory2 = 0; + PFN_vkBindImageMemory vkBindImageMemory = 0; + PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR = 0; + PFN_vkBindImageMemory2 vkBindImageMemory2 = 0; + PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR = 0; + PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT = 0; + PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT = 0; + PFN_vkCmdBeginQuery vkCmdBeginQuery = 0; + PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT = 0; + PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass = 0; + PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR = 0; + PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2 = 0; + PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT = 0; + PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets = 0; + PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer = 0; + PFN_vkCmdBindPipeline vkCmdBindPipeline = 0; + PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV = 0; + PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV = 0; + PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT = 0; + PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers = 0; + PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT = 0; + 
PFN_vkCmdBlitImage vkCmdBlitImage = 0; + PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR = 0; + PFN_vkCmdBuildAccelerationStructureNV vkCmdBuildAccelerationStructureNV = 0; + PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR = 0; + PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR = 0; + PFN_vkCmdClearAttachments vkCmdClearAttachments = 0; + PFN_vkCmdClearColorImage vkCmdClearColorImage = 0; + PFN_vkCmdClearDepthStencilImage vkCmdClearDepthStencilImage = 0; + PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR = 0; + PFN_vkCmdCopyAccelerationStructureNV vkCmdCopyAccelerationStructureNV = 0; + PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR = 0; + PFN_vkCmdCopyBuffer vkCmdCopyBuffer = 0; + PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR = 0; + PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage = 0; + PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR = 0; + PFN_vkCmdCopyImage vkCmdCopyImage = 0; + PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR = 0; + PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer = 0; + PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR = 0; + PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR = 0; + PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults = 0; + PFN_vkCmdDebugMarkerBeginEXT vkCmdDebugMarkerBeginEXT = 0; + PFN_vkCmdDebugMarkerEndEXT vkCmdDebugMarkerEndEXT = 0; + PFN_vkCmdDebugMarkerInsertEXT vkCmdDebugMarkerInsertEXT = 0; + PFN_vkCmdDispatch vkCmdDispatch = 0; + PFN_vkCmdDispatchBaseKHR vkCmdDispatchBaseKHR = 0; + PFN_vkCmdDispatchBase vkCmdDispatchBase = 0; + PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect = 0; + PFN_vkCmdDraw vkCmdDraw = 0; + PFN_vkCmdDrawIndexed vkCmdDrawIndexed = 0; + PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect = 0; + PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD = 0; + PFN_vkCmdDrawIndexedIndirectCountKHR vkCmdDrawIndexedIndirectCountKHR = 0; + PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount = 0; + PFN_vkCmdDrawIndirect vkCmdDrawIndirect = 0; + PFN_vkCmdDrawIndirectByteCountEXT vkCmdDrawIndirectByteCountEXT = 0; + PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD = 0; + PFN_vkCmdDrawIndirectCountKHR vkCmdDrawIndirectCountKHR = 0; + PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount = 0; + PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV = 0; + PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV = 0; + PFN_vkCmdDrawMeshTasksNV vkCmdDrawMeshTasksNV = 0; + PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT = 0; + PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT = 0; + PFN_vkCmdEndQuery vkCmdEndQuery = 0; + PFN_vkCmdEndQueryIndexedEXT vkCmdEndQueryIndexedEXT = 0; + PFN_vkCmdEndRenderPass vkCmdEndRenderPass = 0; + PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR = 0; + PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2 = 0; + PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT = 0; + PFN_vkCmdExecuteCommands vkCmdExecuteCommands = 0; + PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV = 0; + PFN_vkCmdFillBuffer vkCmdFillBuffer = 0; + PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT = 0; + PFN_vkCmdNextSubpass vkCmdNextSubpass = 0; + PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR = 0; + PFN_vkCmdNextSubpass2 vkCmdNextSubpass2 = 0; + PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier = 0; + PFN_vkCmdPreprocessGeneratedCommandsNV 
vkCmdPreprocessGeneratedCommandsNV = 0; + PFN_vkCmdPushConstants vkCmdPushConstants = 0; + PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = 0; + PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR = 0; + PFN_vkCmdResetEvent vkCmdResetEvent = 0; + PFN_vkCmdResetQueryPool vkCmdResetQueryPool = 0; + PFN_vkCmdResolveImage vkCmdResolveImage = 0; + PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR = 0; + PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants = 0; + PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV = 0; + PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV = 0; + PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT = 0; + PFN_vkCmdSetDepthBias vkCmdSetDepthBias = 0; + PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds = 0; + PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT = 0; + PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT = 0; + PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT = 0; + PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT = 0; + PFN_vkCmdSetDeviceMaskKHR vkCmdSetDeviceMaskKHR = 0; + PFN_vkCmdSetDeviceMask vkCmdSetDeviceMask = 0; + PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT = 0; + PFN_vkCmdSetEvent vkCmdSetEvent = 0; + PFN_vkCmdSetExclusiveScissorNV vkCmdSetExclusiveScissorNV = 0; + PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV = 0; + PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR = 0; + PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT = 0; + PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT = 0; + PFN_vkCmdSetLineWidth vkCmdSetLineWidth = 0; + PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL = 0; + PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL = 0; + PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL = 0; + PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT = 0; + PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR = 0; + PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT = 0; + PFN_vkCmdSetScissor vkCmdSetScissor = 0; + PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT = 0; + PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask = 0; + PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT = 0; + PFN_vkCmdSetStencilReference vkCmdSetStencilReference = 0; + PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT = 0; + PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask = 0; + PFN_vkCmdSetViewport vkCmdSetViewport = 0; + PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV = 0; + PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV = 0; + PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT = 0; + PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR = 0; + PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR = 0; + PFN_vkCmdTraceRaysNV vkCmdTraceRaysNV = 0; + PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer = 0; + PFN_vkCmdWaitEvents vkCmdWaitEvents = 0; + PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR = 0; + PFN_vkCmdWriteAccelerationStructuresPropertiesNV vkCmdWriteAccelerationStructuresPropertiesNV = 0; + PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD = 0; + PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp = 0; + PFN_vkCompileDeferredNV vkCompileDeferredNV = 0; + PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR = 0; + PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR = 0; + 
PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR = 0; + PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR = 0; + PFN_vkCreateAccelerationStructureNV vkCreateAccelerationStructureNV = 0; +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + PFN_vkCreateBuffer vkCreateBuffer = 0; + PFN_vkCreateBufferView vkCreateBufferView = 0; + PFN_vkCreateCommandPool vkCreateCommandPool = 0; + PFN_vkCreateComputePipelines vkCreateComputePipelines = 0; + PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT = 0; + PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT = 0; + PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR = 0; + PFN_vkCreateDescriptorPool vkCreateDescriptorPool = 0; + PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout = 0; + PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = 0; + PFN_vkCreateDescriptorUpdateTemplate vkCreateDescriptorUpdateTemplate = 0; + PFN_vkCreateDevice vkCreateDevice = 0; +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT = 0; +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + PFN_vkCreateDisplayModeKHR vkCreateDisplayModeKHR = 0; + PFN_vkCreateDisplayPlaneSurfaceKHR vkCreateDisplayPlaneSurfaceKHR = 0; + PFN_vkCreateEvent vkCreateEvent = 0; + PFN_vkCreateFence vkCreateFence = 0; + PFN_vkCreateFramebuffer vkCreateFramebuffer = 0; + PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines = 0; + PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT = 0; +#ifdef VK_USE_PLATFORM_IOS_MVK + PFN_vkCreateIOSSurfaceMVK vkCreateIOSSurfaceMVK = 0; +#endif /*VK_USE_PLATFORM_IOS_MVK*/ + PFN_vkCreateImage vkCreateImage = 0; +#ifdef VK_USE_PLATFORM_FUCHSIA + PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA = 0; +#endif /*VK_USE_PLATFORM_FUCHSIA*/ + PFN_vkCreateImageView vkCreateImageView = 0; + PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV = 0; + PFN_vkCreateInstance vkCreateInstance = 0; +#ifdef VK_USE_PLATFORM_MACOS_MVK + PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK = 0; +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ +#ifdef VK_USE_PLATFORM_METAL_EXT + PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT = 0; +#endif /*VK_USE_PLATFORM_METAL_EXT*/ + PFN_vkCreatePipelineCache vkCreatePipelineCache = 0; + PFN_vkCreatePipelineLayout vkCreatePipelineLayout = 0; + PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT = 0; + PFN_vkCreateQueryPool vkCreateQueryPool = 0; + PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR = 0; + PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = 0; + PFN_vkCreateRenderPass vkCreateRenderPass = 0; + PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR = 0; + PFN_vkCreateRenderPass2 vkCreateRenderPass2 = 0; + PFN_vkCreateSampler vkCreateSampler = 0; + PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR = 0; + PFN_vkCreateSamplerYcbcrConversion vkCreateSamplerYcbcrConversion = 0; + PFN_vkCreateSemaphore vkCreateSemaphore = 0; + PFN_vkCreateShaderModule vkCreateShaderModule = 0; + PFN_vkCreateSharedSwapchainsKHR vkCreateSharedSwapchainsKHR = 0; +#ifdef VK_USE_PLATFORM_GGP + PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP = 0; +#endif /*VK_USE_PLATFORM_GGP*/ + PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR = 0; + PFN_vkCreateValidationCacheEXT vkCreateValidationCacheEXT = 0; +#ifdef 
VK_USE_PLATFORM_VI_NN + PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN = 0; +#endif /*VK_USE_PLATFORM_VI_NN*/ +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkCreateWin32SurfaceKHR vkCreateWin32SurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkCreateXcbSurfaceKHR vkCreateXcbSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkCreateXlibSurfaceKHR vkCreateXlibSurfaceKHR = 0; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT = 0; + PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT = 0; + PFN_vkDebugReportMessageEXT vkDebugReportMessageEXT = 0; + PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR = 0; + PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR = 0; + PFN_vkDestroyAccelerationStructureNV vkDestroyAccelerationStructureNV = 0; + PFN_vkDestroyBuffer vkDestroyBuffer = 0; + PFN_vkDestroyBufferView vkDestroyBufferView = 0; + PFN_vkDestroyCommandPool vkDestroyCommandPool = 0; + PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT = 0; + PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT = 0; + PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR = 0; + PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool = 0; + PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout = 0; + PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = 0; + PFN_vkDestroyDescriptorUpdateTemplate vkDestroyDescriptorUpdateTemplate = 0; + PFN_vkDestroyDevice vkDestroyDevice = 0; + PFN_vkDestroyEvent vkDestroyEvent = 0; + PFN_vkDestroyFence vkDestroyFence = 0; + PFN_vkDestroyFramebuffer vkDestroyFramebuffer = 0; + PFN_vkDestroyImage vkDestroyImage = 0; + PFN_vkDestroyImageView vkDestroyImageView = 0; + PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV = 0; + PFN_vkDestroyInstance vkDestroyInstance = 0; + PFN_vkDestroyPipeline vkDestroyPipeline = 0; + PFN_vkDestroyPipelineCache vkDestroyPipelineCache = 0; + PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout = 0; + PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT = 0; + PFN_vkDestroyQueryPool vkDestroyQueryPool = 0; + PFN_vkDestroyRenderPass vkDestroyRenderPass = 0; + PFN_vkDestroySampler vkDestroySampler = 0; + PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR = 0; + PFN_vkDestroySamplerYcbcrConversion vkDestroySamplerYcbcrConversion = 0; + PFN_vkDestroySemaphore vkDestroySemaphore = 0; + PFN_vkDestroyShaderModule vkDestroyShaderModule = 0; + PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR = 0; + PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR = 0; + PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT = 0; + PFN_vkDeviceWaitIdle vkDeviceWaitIdle = 0; + PFN_vkDisplayPowerControlEXT vkDisplayPowerControlEXT = 0; + PFN_vkEndCommandBuffer vkEndCommandBuffer = 0; + PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties = 0; + PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties = 0; + PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties = 0; + PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties = 0; + PFN_vkEnumerateInstanceVersion vkEnumerateInstanceVersion = 0; + PFN_vkEnumeratePhysicalDeviceGroupsKHR vkEnumeratePhysicalDeviceGroupsKHR = 0; + 
PFN_vkEnumeratePhysicalDeviceGroups vkEnumeratePhysicalDeviceGroups = 0; + PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = 0; + PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices = 0; + PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges = 0; + PFN_vkFreeCommandBuffers vkFreeCommandBuffers = 0; + PFN_vkFreeDescriptorSets vkFreeDescriptorSets = 0; + PFN_vkFreeMemory vkFreeMemory = 0; + PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR = 0; + PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR = 0; + PFN_vkGetAccelerationStructureHandleNV vkGetAccelerationStructureHandleNV = 0; + PFN_vkGetAccelerationStructureMemoryRequirementsNV vkGetAccelerationStructureMemoryRequirementsNV = 0; +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID = 0; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT = 0; + PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR = 0; + PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress = 0; + PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements = 0; + PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR = 0; + PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2 = 0; + PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR = 0; + PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress = 0; + PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT = 0; + PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR = 0; + PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR = 0; + PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR = 0; + PFN_vkGetDescriptorSetLayoutSupport vkGetDescriptorSetLayoutSupport = 0; + PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR = 0; + PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR vkGetDeviceGroupPeerMemoryFeaturesKHR = 0; + PFN_vkGetDeviceGroupPeerMemoryFeatures vkGetDeviceGroupPeerMemoryFeatures = 0; + PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR = 0; + PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment = 0; + PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR = 0; + PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress = 0; + PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr = 0; + PFN_vkGetDeviceQueue vkGetDeviceQueue = 0; + PFN_vkGetDeviceQueue2 vkGetDeviceQueue2 = 0; + PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR = 0; + PFN_vkGetDisplayModePropertiesKHR vkGetDisplayModePropertiesKHR = 0; + PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR = 0; + PFN_vkGetDisplayPlaneCapabilitiesKHR vkGetDisplayPlaneCapabilitiesKHR = 0; + PFN_vkGetDisplayPlaneSupportedDisplaysKHR vkGetDisplayPlaneSupportedDisplaysKHR = 0; + PFN_vkGetEventStatus vkGetEventStatus = 0; + PFN_vkGetFenceFdKHR vkGetFenceFdKHR = 0; + PFN_vkGetFenceStatus vkGetFenceStatus = 0; +#ifdef 
VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetFenceWin32HandleKHR vkGetFenceWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV = 0; + PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT = 0; + PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements = 0; + PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR = 0; + PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2 = 0; + PFN_vkGetImageSparseMemoryRequirements vkGetImageSparseMemoryRequirements = 0; + PFN_vkGetImageSparseMemoryRequirements2KHR vkGetImageSparseMemoryRequirements2KHR = 0; + PFN_vkGetImageSparseMemoryRequirements2 vkGetImageSparseMemoryRequirements2 = 0; + PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout = 0; + PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX = 0; + PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX = 0; + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = 0; +#ifdef VK_USE_PLATFORM_ANDROID_KHR + PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID = 0; +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + PFN_vkGetMemoryFdKHR vkGetMemoryFdKHR = 0; + PFN_vkGetMemoryFdPropertiesKHR vkGetMemoryFdPropertiesKHR = 0; + PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleKHR vkGetMemoryWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetMemoryWin32HandlePropertiesKHR vkGetMemoryWin32HandlePropertiesKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE = 0; + PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL = 0; + PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = 0; + PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = 0; +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT = 0; +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR vkGetPhysicalDeviceDisplayPlaneProperties2KHR = 0; + PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR vkGetPhysicalDeviceDisplayPlanePropertiesKHR = 0; + PFN_vkGetPhysicalDeviceDisplayProperties2KHR vkGetPhysicalDeviceDisplayProperties2KHR = 0; + PFN_vkGetPhysicalDeviceDisplayPropertiesKHR vkGetPhysicalDeviceDisplayPropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalBufferProperties vkGetPhysicalDeviceExternalBufferProperties = 0; + PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR vkGetPhysicalDeviceExternalFencePropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalFenceProperties vkGetPhysicalDeviceExternalFenceProperties = 0; + PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV = 0; + PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = 0; + PFN_vkGetPhysicalDeviceExternalSemaphoreProperties vkGetPhysicalDeviceExternalSemaphoreProperties = 0; + PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures = 0; + 
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = 0; + PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 = 0; + PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties = 0; + PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR = 0; + PFN_vkGetPhysicalDeviceFormatProperties2 vkGetPhysicalDeviceFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR = 0; + PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties = 0; + PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR = 0; + PFN_vkGetPhysicalDeviceImageFormatProperties2 vkGetPhysicalDeviceImageFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties = 0; + PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR = 0; + PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2 = 0; + PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT = 0; + PFN_vkGetPhysicalDevicePresentRectanglesKHR vkGetPhysicalDevicePresentRectanglesKHR = 0; + PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties = 0; + PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = 0; + PFN_vkGetPhysicalDeviceProperties2 vkGetPhysicalDeviceProperties2 = 0; + PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = 0; + PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties = 0; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR = 0; + PFN_vkGetPhysicalDeviceQueueFamilyProperties2 vkGetPhysicalDeviceQueueFamilyProperties2 = 0; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties vkGetPhysicalDeviceSparseImageFormatProperties = 0; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR vkGetPhysicalDeviceSparseImageFormatProperties2KHR = 0; + PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSparseImageFormatProperties2 = 0; + PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = 0; + PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT = 0; + PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR = 0; + PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR = 0; + PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR = 0; + PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR = 0; + PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR = 0; + PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT = 0; +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR vkGetPhysicalDeviceWin32PresentationSupportKHR = 
0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR vkGetPhysicalDeviceXcbPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR vkGetPhysicalDeviceXlibPresentationSupportKHR = 0; +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + PFN_vkGetPipelineCacheData vkGetPipelineCacheData = 0; + PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR = 0; + PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR = 0; + PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR = 0; + PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT = 0; + PFN_vkGetQueryPoolResults vkGetQueryPoolResults = 0; + PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV = 0; +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT = 0; +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = 0; + PFN_vkGetRayTracingShaderGroupHandlesNV vkGetRayTracingShaderGroupHandlesNV = 0; + PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR = 0; + PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR = 0; + PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE = 0; + PFN_vkGetRenderAreaGranularity vkGetRenderAreaGranularity = 0; + PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR = 0; + PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue = 0; + PFN_vkGetSemaphoreFdKHR vkGetSemaphoreFdKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD = 0; + PFN_vkGetSwapchainCounterEXT vkGetSwapchainCounterEXT = 0; + PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR = 0; + PFN_vkGetSwapchainStatusKHR vkGetSwapchainStatusKHR = 0; + PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT = 0; + PFN_vkImportFenceFdKHR vkImportFenceFdKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportFenceWin32HandleKHR vkImportFenceWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR = 0; +#ifdef VK_USE_PLATFORM_WIN32_KHR + PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR = 0; +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL = 0; + PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges = 0; + PFN_vkMapMemory vkMapMemory = 0; + PFN_vkMergePipelineCaches vkMergePipelineCaches = 0; + PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT = 0; + PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT = 0; + PFN_vkQueueBindSparse vkQueueBindSparse = 0; + PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT = 0; + PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT = 0; + PFN_vkQueuePresentKHR vkQueuePresentKHR = 0; + PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL = 0; + PFN_vkQueueSubmit vkQueueSubmit = 0; + PFN_vkQueueWaitIdle vkQueueWaitIdle = 0; + PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT = 0; + PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT = 0; + PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT = 0; +#ifdef 
VK_USE_PLATFORM_WIN32_KHR
+    PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT = 0;
+#endif /*VK_USE_PLATFORM_WIN32_KHR*/
+    PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL = 0;
+    PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR = 0;
+    PFN_vkResetCommandBuffer vkResetCommandBuffer = 0;
+    PFN_vkResetCommandPool vkResetCommandPool = 0;
+    PFN_vkResetDescriptorPool vkResetDescriptorPool = 0;
+    PFN_vkResetEvent vkResetEvent = 0;
+    PFN_vkResetFences vkResetFences = 0;
+    PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT = 0;
+    PFN_vkResetQueryPool vkResetQueryPool = 0;
+    PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT = 0;
+    PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT = 0;
+    PFN_vkSetEvent vkSetEvent = 0;
+    PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT = 0;
+    PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD = 0;
+    PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT = 0;
+    PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR = 0;
+    PFN_vkSignalSemaphore vkSignalSemaphore = 0;
+    PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT = 0;
+    PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR = 0;
+    PFN_vkTrimCommandPool vkTrimCommandPool = 0;
+    PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL = 0;
+    PFN_vkUnmapMemory vkUnmapMemory = 0;
+    PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR = 0;
+    PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate = 0;
+    PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets = 0;
+    PFN_vkWaitForFences vkWaitForFences = 0;
+    PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR = 0;
+    PFN_vkWaitSemaphores vkWaitSemaphores = 0;
+    PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR = 0;
+
+  public:
+    DispatchLoaderDynamic() VULKAN_HPP_NOEXCEPT = default;
+
+#if !defined(VK_NO_PROTOTYPES)
+    // This interface is designed to be used for per-device function pointers in combination with a linked vulkan library.
+    template <typename DynamicLoader>
+    void init(VULKAN_HPP_NAMESPACE::Instance const& instance, VULKAN_HPP_NAMESPACE::Device const& device, DynamicLoader const& dl) VULKAN_HPP_NOEXCEPT
+    {
+      PFN_vkGetInstanceProcAddr getInstanceProcAddr = dl.template getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr");
+      PFN_vkGetDeviceProcAddr getDeviceProcAddr = dl.template getProcAddress<PFN_vkGetDeviceProcAddr>("vkGetDeviceProcAddr");
+      init(static_cast<VkInstance>(instance), getInstanceProcAddr, static_cast<VkDevice>(device), device ? getDeviceProcAddr : nullptr);
+    }
+
+    // This interface is designed to be used for per-device function pointers in combination with a linked vulkan library.
+    template <typename DynamicLoader
+#if VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL
+              = VULKAN_HPP_NAMESPACE::DynamicLoader
+#endif
+              >
+    void init(VULKAN_HPP_NAMESPACE::Instance const& instance, VULKAN_HPP_NAMESPACE::Device const& device) VULKAN_HPP_NOEXCEPT
+    {
+      static DynamicLoader dl;
+      init(instance, device, dl);
+    }
+#endif // !defined(VK_NO_PROTOTYPES)
+
+    DispatchLoaderDynamic(PFN_vkGetInstanceProcAddr getInstanceProcAddr) VULKAN_HPP_NOEXCEPT
+    {
+      init(getInstanceProcAddr);
+    }
+
+    void init( PFN_vkGetInstanceProcAddr getInstanceProcAddr ) VULKAN_HPP_NOEXCEPT
+    {
+      VULKAN_HPP_ASSERT(getInstanceProcAddr);
+
+      vkGetInstanceProcAddr = getInstanceProcAddr;
+      vkCreateInstance = PFN_vkCreateInstance( vkGetInstanceProcAddr( NULL, "vkCreateInstance" ) );
+      vkEnumerateInstanceExtensionProperties = PFN_vkEnumerateInstanceExtensionProperties( vkGetInstanceProcAddr( NULL, "vkEnumerateInstanceExtensionProperties" ) );
+      vkEnumerateInstanceLayerProperties = PFN_vkEnumerateInstanceLayerProperties( vkGetInstanceProcAddr( NULL, "vkEnumerateInstanceLayerProperties" ) );
+      vkEnumerateInstanceVersion = PFN_vkEnumerateInstanceVersion( vkGetInstanceProcAddr( NULL, "vkEnumerateInstanceVersion" ) );
+    }
+
+    // This interface does not require a linked vulkan library.
+    DispatchLoaderDynamic( VkInstance instance, PFN_vkGetInstanceProcAddr getInstanceProcAddr, VkDevice device = VK_NULL_HANDLE, PFN_vkGetDeviceProcAddr getDeviceProcAddr = nullptr ) VULKAN_HPP_NOEXCEPT
+    {
+      init( instance, getInstanceProcAddr, device, getDeviceProcAddr );
+    }
+
+    // This interface does not require a linked vulkan library.
+    void init( VkInstance instance, PFN_vkGetInstanceProcAddr getInstanceProcAddr, VkDevice device = VK_NULL_HANDLE, PFN_vkGetDeviceProcAddr /*getDeviceProcAddr*/ = nullptr ) VULKAN_HPP_NOEXCEPT
+    {
+      VULKAN_HPP_ASSERT(instance && getInstanceProcAddr);
+      vkGetInstanceProcAddr = getInstanceProcAddr;
+      init( VULKAN_HPP_NAMESPACE::Instance(instance) );
+      if (device) {
+        init( VULKAN_HPP_NAMESPACE::Device(device) );
+      }
+    }
+
+    void init( VULKAN_HPP_NAMESPACE::Instance instanceCpp ) VULKAN_HPP_NOEXCEPT
+    {
+      VkInstance instance = static_cast<VkInstance>(instanceCpp);
+#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
+      vkAcquireXlibDisplayEXT = PFN_vkAcquireXlibDisplayEXT( vkGetInstanceProcAddr( instance, "vkAcquireXlibDisplayEXT" ) );
+#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+      vkCreateAndroidSurfaceKHR = PFN_vkCreateAndroidSurfaceKHR( vkGetInstanceProcAddr( instance, "vkCreateAndroidSurfaceKHR" ) );
+#endif /*VK_USE_PLATFORM_ANDROID_KHR*/
+      vkCreateDebugReportCallbackEXT = PFN_vkCreateDebugReportCallbackEXT( vkGetInstanceProcAddr( instance, "vkCreateDebugReportCallbackEXT" ) );
+      vkCreateDebugUtilsMessengerEXT = PFN_vkCreateDebugUtilsMessengerEXT( vkGetInstanceProcAddr( instance, "vkCreateDebugUtilsMessengerEXT" ) );
+      vkCreateDevice = PFN_vkCreateDevice( vkGetInstanceProcAddr( instance, "vkCreateDevice" ) );
+#ifdef VK_USE_PLATFORM_DIRECTFB_EXT
+      vkCreateDirectFBSurfaceEXT = PFN_vkCreateDirectFBSurfaceEXT( vkGetInstanceProcAddr( instance, "vkCreateDirectFBSurfaceEXT" ) );
+#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/
+      vkCreateDisplayModeKHR = PFN_vkCreateDisplayModeKHR( vkGetInstanceProcAddr( instance, "vkCreateDisplayModeKHR" ) );
+      vkCreateDisplayPlaneSurfaceKHR = PFN_vkCreateDisplayPlaneSurfaceKHR( vkGetInstanceProcAddr( instance, "vkCreateDisplayPlaneSurfaceKHR" ) );
+      vkCreateHeadlessSurfaceEXT = PFN_vkCreateHeadlessSurfaceEXT( vkGetInstanceProcAddr( instance, "vkCreateHeadlessSurfaceEXT" ) );
+#ifdef VK_USE_PLATFORM_IOS_MVK
+      vkCreateIOSSurfaceMVK = PFN_vkCreateIOSSurfaceMVK( vkGetInstanceProcAddr( instance,
"vkCreateIOSSurfaceMVK" ) ); +#endif /*VK_USE_PLATFORM_IOS_MVK*/ +#ifdef VK_USE_PLATFORM_FUCHSIA + vkCreateImagePipeSurfaceFUCHSIA = PFN_vkCreateImagePipeSurfaceFUCHSIA( vkGetInstanceProcAddr( instance, "vkCreateImagePipeSurfaceFUCHSIA" ) ); +#endif /*VK_USE_PLATFORM_FUCHSIA*/ +#ifdef VK_USE_PLATFORM_MACOS_MVK + vkCreateMacOSSurfaceMVK = PFN_vkCreateMacOSSurfaceMVK( vkGetInstanceProcAddr( instance, "vkCreateMacOSSurfaceMVK" ) ); +#endif /*VK_USE_PLATFORM_MACOS_MVK*/ +#ifdef VK_USE_PLATFORM_METAL_EXT + vkCreateMetalSurfaceEXT = PFN_vkCreateMetalSurfaceEXT( vkGetInstanceProcAddr( instance, "vkCreateMetalSurfaceEXT" ) ); +#endif /*VK_USE_PLATFORM_METAL_EXT*/ +#ifdef VK_USE_PLATFORM_GGP + vkCreateStreamDescriptorSurfaceGGP = PFN_vkCreateStreamDescriptorSurfaceGGP( vkGetInstanceProcAddr( instance, "vkCreateStreamDescriptorSurfaceGGP" ) ); +#endif /*VK_USE_PLATFORM_GGP*/ +#ifdef VK_USE_PLATFORM_VI_NN + vkCreateViSurfaceNN = PFN_vkCreateViSurfaceNN( vkGetInstanceProcAddr( instance, "vkCreateViSurfaceNN" ) ); +#endif /*VK_USE_PLATFORM_VI_NN*/ +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + vkCreateWaylandSurfaceKHR = PFN_vkCreateWaylandSurfaceKHR( vkGetInstanceProcAddr( instance, "vkCreateWaylandSurfaceKHR" ) ); +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkCreateWin32SurfaceKHR = PFN_vkCreateWin32SurfaceKHR( vkGetInstanceProcAddr( instance, "vkCreateWin32SurfaceKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + vkCreateXcbSurfaceKHR = PFN_vkCreateXcbSurfaceKHR( vkGetInstanceProcAddr( instance, "vkCreateXcbSurfaceKHR" ) ); +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + vkCreateXlibSurfaceKHR = PFN_vkCreateXlibSurfaceKHR( vkGetInstanceProcAddr( instance, "vkCreateXlibSurfaceKHR" ) ); +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ + vkDebugReportMessageEXT = PFN_vkDebugReportMessageEXT( vkGetInstanceProcAddr( instance, "vkDebugReportMessageEXT" ) ); + vkDestroyDebugReportCallbackEXT = PFN_vkDestroyDebugReportCallbackEXT( vkGetInstanceProcAddr( instance, "vkDestroyDebugReportCallbackEXT" ) ); + vkDestroyDebugUtilsMessengerEXT = PFN_vkDestroyDebugUtilsMessengerEXT( vkGetInstanceProcAddr( instance, "vkDestroyDebugUtilsMessengerEXT" ) ); + vkDestroyInstance = PFN_vkDestroyInstance( vkGetInstanceProcAddr( instance, "vkDestroyInstance" ) ); + vkDestroySurfaceKHR = PFN_vkDestroySurfaceKHR( vkGetInstanceProcAddr( instance, "vkDestroySurfaceKHR" ) ); + vkEnumerateDeviceExtensionProperties = PFN_vkEnumerateDeviceExtensionProperties( vkGetInstanceProcAddr( instance, "vkEnumerateDeviceExtensionProperties" ) ); + vkEnumerateDeviceLayerProperties = PFN_vkEnumerateDeviceLayerProperties( vkGetInstanceProcAddr( instance, "vkEnumerateDeviceLayerProperties" ) ); + vkEnumeratePhysicalDeviceGroupsKHR = PFN_vkEnumeratePhysicalDeviceGroupsKHR( vkGetInstanceProcAddr( instance, "vkEnumeratePhysicalDeviceGroupsKHR" ) ); + vkEnumeratePhysicalDeviceGroups = PFN_vkEnumeratePhysicalDeviceGroups( vkGetInstanceProcAddr( instance, "vkEnumeratePhysicalDeviceGroups" ) ); + if ( !vkEnumeratePhysicalDeviceGroups ) vkEnumeratePhysicalDeviceGroups = vkEnumeratePhysicalDeviceGroupsKHR; + vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( vkGetInstanceProcAddr( instance, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR" ) ); + vkEnumeratePhysicalDevices = PFN_vkEnumeratePhysicalDevices( vkGetInstanceProcAddr( instance, "vkEnumeratePhysicalDevices" ) ); + 
vkGetDisplayModeProperties2KHR = PFN_vkGetDisplayModeProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetDisplayModeProperties2KHR" ) ); + vkGetDisplayModePropertiesKHR = PFN_vkGetDisplayModePropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetDisplayModePropertiesKHR" ) ); + vkGetDisplayPlaneCapabilities2KHR = PFN_vkGetDisplayPlaneCapabilities2KHR( vkGetInstanceProcAddr( instance, "vkGetDisplayPlaneCapabilities2KHR" ) ); + vkGetDisplayPlaneCapabilitiesKHR = PFN_vkGetDisplayPlaneCapabilitiesKHR( vkGetInstanceProcAddr( instance, "vkGetDisplayPlaneCapabilitiesKHR" ) ); + vkGetDisplayPlaneSupportedDisplaysKHR = PFN_vkGetDisplayPlaneSupportedDisplaysKHR( vkGetInstanceProcAddr( instance, "vkGetDisplayPlaneSupportedDisplaysKHR" ) ); + vkGetInstanceProcAddr = PFN_vkGetInstanceProcAddr( vkGetInstanceProcAddr( instance, "vkGetInstanceProcAddr" ) ); + vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT" ) ); + vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV" ) ); +#ifdef VK_USE_PLATFORM_DIRECTFB_EXT + vkGetPhysicalDeviceDirectFBPresentationSupportEXT = PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT" ) ); +#endif /*VK_USE_PLATFORM_DIRECTFB_EXT*/ + vkGetPhysicalDeviceDisplayPlaneProperties2KHR = PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR" ) ); + vkGetPhysicalDeviceDisplayPlanePropertiesKHR = PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR" ) ); + vkGetPhysicalDeviceDisplayProperties2KHR = PFN_vkGetPhysicalDeviceDisplayProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceDisplayProperties2KHR" ) ); + vkGetPhysicalDeviceDisplayPropertiesKHR = PFN_vkGetPhysicalDeviceDisplayPropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceDisplayPropertiesKHR" ) ); + vkGetPhysicalDeviceExternalBufferPropertiesKHR = PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalBufferPropertiesKHR" ) ); + vkGetPhysicalDeviceExternalBufferProperties = PFN_vkGetPhysicalDeviceExternalBufferProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalBufferProperties" ) ); + if ( !vkGetPhysicalDeviceExternalBufferProperties ) vkGetPhysicalDeviceExternalBufferProperties = vkGetPhysicalDeviceExternalBufferPropertiesKHR; + vkGetPhysicalDeviceExternalFencePropertiesKHR = PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalFencePropertiesKHR" ) ); + vkGetPhysicalDeviceExternalFenceProperties = PFN_vkGetPhysicalDeviceExternalFenceProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalFenceProperties" ) ); + if ( !vkGetPhysicalDeviceExternalFenceProperties ) vkGetPhysicalDeviceExternalFenceProperties = vkGetPhysicalDeviceExternalFencePropertiesKHR; + vkGetPhysicalDeviceExternalImageFormatPropertiesNV = PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV" ) ); + vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = 
PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR" ) ); + vkGetPhysicalDeviceExternalSemaphoreProperties = PFN_vkGetPhysicalDeviceExternalSemaphoreProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceExternalSemaphoreProperties" ) ); + if ( !vkGetPhysicalDeviceExternalSemaphoreProperties ) vkGetPhysicalDeviceExternalSemaphoreProperties = vkGetPhysicalDeviceExternalSemaphorePropertiesKHR; + vkGetPhysicalDeviceFeatures = PFN_vkGetPhysicalDeviceFeatures( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFeatures" ) ); + vkGetPhysicalDeviceFeatures2KHR = PFN_vkGetPhysicalDeviceFeatures2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFeatures2KHR" ) ); + vkGetPhysicalDeviceFeatures2 = PFN_vkGetPhysicalDeviceFeatures2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFeatures2" ) ); + if ( !vkGetPhysicalDeviceFeatures2 ) vkGetPhysicalDeviceFeatures2 = vkGetPhysicalDeviceFeatures2KHR; + vkGetPhysicalDeviceFormatProperties = PFN_vkGetPhysicalDeviceFormatProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFormatProperties" ) ); + vkGetPhysicalDeviceFormatProperties2KHR = PFN_vkGetPhysicalDeviceFormatProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFormatProperties2KHR" ) ); + vkGetPhysicalDeviceFormatProperties2 = PFN_vkGetPhysicalDeviceFormatProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFormatProperties2" ) ); + if ( !vkGetPhysicalDeviceFormatProperties2 ) vkGetPhysicalDeviceFormatProperties2 = vkGetPhysicalDeviceFormatProperties2KHR; + vkGetPhysicalDeviceFragmentShadingRatesKHR = PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceFragmentShadingRatesKHR" ) ); + vkGetPhysicalDeviceImageFormatProperties = PFN_vkGetPhysicalDeviceImageFormatProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceImageFormatProperties" ) ); + vkGetPhysicalDeviceImageFormatProperties2KHR = PFN_vkGetPhysicalDeviceImageFormatProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceImageFormatProperties2KHR" ) ); + vkGetPhysicalDeviceImageFormatProperties2 = PFN_vkGetPhysicalDeviceImageFormatProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceImageFormatProperties2" ) ); + if ( !vkGetPhysicalDeviceImageFormatProperties2 ) vkGetPhysicalDeviceImageFormatProperties2 = vkGetPhysicalDeviceImageFormatProperties2KHR; + vkGetPhysicalDeviceMemoryProperties = PFN_vkGetPhysicalDeviceMemoryProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceMemoryProperties" ) ); + vkGetPhysicalDeviceMemoryProperties2KHR = PFN_vkGetPhysicalDeviceMemoryProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceMemoryProperties2KHR" ) ); + vkGetPhysicalDeviceMemoryProperties2 = PFN_vkGetPhysicalDeviceMemoryProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceMemoryProperties2" ) ); + if ( !vkGetPhysicalDeviceMemoryProperties2 ) vkGetPhysicalDeviceMemoryProperties2 = vkGetPhysicalDeviceMemoryProperties2KHR; + vkGetPhysicalDeviceMultisamplePropertiesEXT = PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceMultisamplePropertiesEXT" ) ); + vkGetPhysicalDevicePresentRectanglesKHR = PFN_vkGetPhysicalDevicePresentRectanglesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDevicePresentRectanglesKHR" ) ); + vkGetPhysicalDeviceProperties = PFN_vkGetPhysicalDeviceProperties( vkGetInstanceProcAddr( instance, 
"vkGetPhysicalDeviceProperties" ) ); + vkGetPhysicalDeviceProperties2KHR = PFN_vkGetPhysicalDeviceProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceProperties2KHR" ) ); + vkGetPhysicalDeviceProperties2 = PFN_vkGetPhysicalDeviceProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceProperties2" ) ); + if ( !vkGetPhysicalDeviceProperties2 ) vkGetPhysicalDeviceProperties2 = vkGetPhysicalDeviceProperties2KHR; + vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR" ) ); + vkGetPhysicalDeviceQueueFamilyProperties = PFN_vkGetPhysicalDeviceQueueFamilyProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceQueueFamilyProperties" ) ); + vkGetPhysicalDeviceQueueFamilyProperties2KHR = PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceQueueFamilyProperties2KHR" ) ); + vkGetPhysicalDeviceQueueFamilyProperties2 = PFN_vkGetPhysicalDeviceQueueFamilyProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceQueueFamilyProperties2" ) ); + if ( !vkGetPhysicalDeviceQueueFamilyProperties2 ) vkGetPhysicalDeviceQueueFamilyProperties2 = vkGetPhysicalDeviceQueueFamilyProperties2KHR; + vkGetPhysicalDeviceSparseImageFormatProperties = PFN_vkGetPhysicalDeviceSparseImageFormatProperties( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSparseImageFormatProperties" ) ); + vkGetPhysicalDeviceSparseImageFormatProperties2KHR = PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR" ) ); + vkGetPhysicalDeviceSparseImageFormatProperties2 = PFN_vkGetPhysicalDeviceSparseImageFormatProperties2( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSparseImageFormatProperties2" ) ); + if ( !vkGetPhysicalDeviceSparseImageFormatProperties2 ) vkGetPhysicalDeviceSparseImageFormatProperties2 = vkGetPhysicalDeviceSparseImageFormatProperties2KHR; + vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV" ) ); + vkGetPhysicalDeviceSurfaceCapabilities2EXT = PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfaceCapabilities2EXT" ) ); + vkGetPhysicalDeviceSurfaceCapabilities2KHR = PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfaceCapabilities2KHR" ) ); + vkGetPhysicalDeviceSurfaceCapabilitiesKHR = PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR" ) ); + vkGetPhysicalDeviceSurfaceFormats2KHR = PFN_vkGetPhysicalDeviceSurfaceFormats2KHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfaceFormats2KHR" ) ); + vkGetPhysicalDeviceSurfaceFormatsKHR = PFN_vkGetPhysicalDeviceSurfaceFormatsKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfaceFormatsKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetPhysicalDeviceSurfacePresentModes2EXT = PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfacePresentModes2EXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetPhysicalDeviceSurfacePresentModesKHR = PFN_vkGetPhysicalDeviceSurfacePresentModesKHR( vkGetInstanceProcAddr( instance, 
"vkGetPhysicalDeviceSurfacePresentModesKHR" ) ); + vkGetPhysicalDeviceSurfaceSupportKHR = PFN_vkGetPhysicalDeviceSurfaceSupportKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceSurfaceSupportKHR" ) ); + vkGetPhysicalDeviceToolPropertiesEXT = PFN_vkGetPhysicalDeviceToolPropertiesEXT( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceToolPropertiesEXT" ) ); +#ifdef VK_USE_PLATFORM_WAYLAND_KHR + vkGetPhysicalDeviceWaylandPresentationSupportKHR = PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR" ) ); +#endif /*VK_USE_PLATFORM_WAYLAND_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetPhysicalDeviceWin32PresentationSupportKHR = PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_XCB_KHR + vkGetPhysicalDeviceXcbPresentationSupportKHR = PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR" ) ); +#endif /*VK_USE_PLATFORM_XCB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_KHR + vkGetPhysicalDeviceXlibPresentationSupportKHR = PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR( vkGetInstanceProcAddr( instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR" ) ); +#endif /*VK_USE_PLATFORM_XLIB_KHR*/ +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT + vkGetRandROutputDisplayEXT = PFN_vkGetRandROutputDisplayEXT( vkGetInstanceProcAddr( instance, "vkGetRandROutputDisplayEXT" ) ); +#endif /*VK_USE_PLATFORM_XLIB_XRANDR_EXT*/ + vkReleaseDisplayEXT = PFN_vkReleaseDisplayEXT( vkGetInstanceProcAddr( instance, "vkReleaseDisplayEXT" ) ); + vkSubmitDebugUtilsMessageEXT = PFN_vkSubmitDebugUtilsMessageEXT( vkGetInstanceProcAddr( instance, "vkSubmitDebugUtilsMessageEXT" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkAcquireFullScreenExclusiveModeEXT = PFN_vkAcquireFullScreenExclusiveModeEXT( vkGetInstanceProcAddr( instance, "vkAcquireFullScreenExclusiveModeEXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkAcquireNextImage2KHR = PFN_vkAcquireNextImage2KHR( vkGetInstanceProcAddr( instance, "vkAcquireNextImage2KHR" ) ); + vkAcquireNextImageKHR = PFN_vkAcquireNextImageKHR( vkGetInstanceProcAddr( instance, "vkAcquireNextImageKHR" ) ); + vkAcquirePerformanceConfigurationINTEL = PFN_vkAcquirePerformanceConfigurationINTEL( vkGetInstanceProcAddr( instance, "vkAcquirePerformanceConfigurationINTEL" ) ); + vkAcquireProfilingLockKHR = PFN_vkAcquireProfilingLockKHR( vkGetInstanceProcAddr( instance, "vkAcquireProfilingLockKHR" ) ); + vkAllocateCommandBuffers = PFN_vkAllocateCommandBuffers( vkGetInstanceProcAddr( instance, "vkAllocateCommandBuffers" ) ); + vkAllocateDescriptorSets = PFN_vkAllocateDescriptorSets( vkGetInstanceProcAddr( instance, "vkAllocateDescriptorSets" ) ); + vkAllocateMemory = PFN_vkAllocateMemory( vkGetInstanceProcAddr( instance, "vkAllocateMemory" ) ); + vkBeginCommandBuffer = PFN_vkBeginCommandBuffer( vkGetInstanceProcAddr( instance, "vkBeginCommandBuffer" ) ); + vkBindAccelerationStructureMemoryNV = PFN_vkBindAccelerationStructureMemoryNV( vkGetInstanceProcAddr( instance, "vkBindAccelerationStructureMemoryNV" ) ); + vkBindBufferMemory = PFN_vkBindBufferMemory( vkGetInstanceProcAddr( instance, "vkBindBufferMemory" ) ); + vkBindBufferMemory2KHR = PFN_vkBindBufferMemory2KHR( vkGetInstanceProcAddr( instance, "vkBindBufferMemory2KHR" ) ); + vkBindBufferMemory2 = PFN_vkBindBufferMemory2( vkGetInstanceProcAddr( instance, 
"vkBindBufferMemory2" ) ); + if ( !vkBindBufferMemory2 ) vkBindBufferMemory2 = vkBindBufferMemory2KHR; + vkBindImageMemory = PFN_vkBindImageMemory( vkGetInstanceProcAddr( instance, "vkBindImageMemory" ) ); + vkBindImageMemory2KHR = PFN_vkBindImageMemory2KHR( vkGetInstanceProcAddr( instance, "vkBindImageMemory2KHR" ) ); + vkBindImageMemory2 = PFN_vkBindImageMemory2( vkGetInstanceProcAddr( instance, "vkBindImageMemory2" ) ); + if ( !vkBindImageMemory2 ) vkBindImageMemory2 = vkBindImageMemory2KHR; + vkBuildAccelerationStructuresKHR = PFN_vkBuildAccelerationStructuresKHR( vkGetInstanceProcAddr( instance, "vkBuildAccelerationStructuresKHR" ) ); + vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginConditionalRenderingEXT" ) ); + vkCmdBeginDebugUtilsLabelEXT = PFN_vkCmdBeginDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginDebugUtilsLabelEXT" ) ); + vkCmdBeginQuery = PFN_vkCmdBeginQuery( vkGetInstanceProcAddr( instance, "vkCmdBeginQuery" ) ); + vkCmdBeginQueryIndexedEXT = PFN_vkCmdBeginQueryIndexedEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginQueryIndexedEXT" ) ); + vkCmdBeginRenderPass = PFN_vkCmdBeginRenderPass( vkGetInstanceProcAddr( instance, "vkCmdBeginRenderPass" ) ); + vkCmdBeginRenderPass2KHR = PFN_vkCmdBeginRenderPass2KHR( vkGetInstanceProcAddr( instance, "vkCmdBeginRenderPass2KHR" ) ); + vkCmdBeginRenderPass2 = PFN_vkCmdBeginRenderPass2( vkGetInstanceProcAddr( instance, "vkCmdBeginRenderPass2" ) ); + if ( !vkCmdBeginRenderPass2 ) vkCmdBeginRenderPass2 = vkCmdBeginRenderPass2KHR; + vkCmdBeginTransformFeedbackEXT = PFN_vkCmdBeginTransformFeedbackEXT( vkGetInstanceProcAddr( instance, "vkCmdBeginTransformFeedbackEXT" ) ); + vkCmdBindDescriptorSets = PFN_vkCmdBindDescriptorSets( vkGetInstanceProcAddr( instance, "vkCmdBindDescriptorSets" ) ); + vkCmdBindIndexBuffer = PFN_vkCmdBindIndexBuffer( vkGetInstanceProcAddr( instance, "vkCmdBindIndexBuffer" ) ); + vkCmdBindPipeline = PFN_vkCmdBindPipeline( vkGetInstanceProcAddr( instance, "vkCmdBindPipeline" ) ); + vkCmdBindPipelineShaderGroupNV = PFN_vkCmdBindPipelineShaderGroupNV( vkGetInstanceProcAddr( instance, "vkCmdBindPipelineShaderGroupNV" ) ); + vkCmdBindShadingRateImageNV = PFN_vkCmdBindShadingRateImageNV( vkGetInstanceProcAddr( instance, "vkCmdBindShadingRateImageNV" ) ); + vkCmdBindTransformFeedbackBuffersEXT = PFN_vkCmdBindTransformFeedbackBuffersEXT( vkGetInstanceProcAddr( instance, "vkCmdBindTransformFeedbackBuffersEXT" ) ); + vkCmdBindVertexBuffers = PFN_vkCmdBindVertexBuffers( vkGetInstanceProcAddr( instance, "vkCmdBindVertexBuffers" ) ); + vkCmdBindVertexBuffers2EXT = PFN_vkCmdBindVertexBuffers2EXT( vkGetInstanceProcAddr( instance, "vkCmdBindVertexBuffers2EXT" ) ); + vkCmdBlitImage = PFN_vkCmdBlitImage( vkGetInstanceProcAddr( instance, "vkCmdBlitImage" ) ); + vkCmdBlitImage2KHR = PFN_vkCmdBlitImage2KHR( vkGetInstanceProcAddr( instance, "vkCmdBlitImage2KHR" ) ); + vkCmdBuildAccelerationStructureNV = PFN_vkCmdBuildAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructureNV" ) ); + vkCmdBuildAccelerationStructuresIndirectKHR = PFN_vkCmdBuildAccelerationStructuresIndirectKHR( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructuresIndirectKHR" ) ); + vkCmdBuildAccelerationStructuresKHR = PFN_vkCmdBuildAccelerationStructuresKHR( vkGetInstanceProcAddr( instance, "vkCmdBuildAccelerationStructuresKHR" ) ); + vkCmdClearAttachments = PFN_vkCmdClearAttachments( vkGetInstanceProcAddr( instance, "vkCmdClearAttachments" ) 
); + vkCmdClearColorImage = PFN_vkCmdClearColorImage( vkGetInstanceProcAddr( instance, "vkCmdClearColorImage" ) ); + vkCmdClearDepthStencilImage = PFN_vkCmdClearDepthStencilImage( vkGetInstanceProcAddr( instance, "vkCmdClearDepthStencilImage" ) ); + vkCmdCopyAccelerationStructureKHR = PFN_vkCmdCopyAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCmdCopyAccelerationStructureKHR" ) ); + vkCmdCopyAccelerationStructureNV = PFN_vkCmdCopyAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkCmdCopyAccelerationStructureNV" ) ); + vkCmdCopyAccelerationStructureToMemoryKHR = PFN_vkCmdCopyAccelerationStructureToMemoryKHR( vkGetInstanceProcAddr( instance, "vkCmdCopyAccelerationStructureToMemoryKHR" ) ); + vkCmdCopyBuffer = PFN_vkCmdCopyBuffer( vkGetInstanceProcAddr( instance, "vkCmdCopyBuffer" ) ); + vkCmdCopyBuffer2KHR = PFN_vkCmdCopyBuffer2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyBuffer2KHR" ) ); + vkCmdCopyBufferToImage = PFN_vkCmdCopyBufferToImage( vkGetInstanceProcAddr( instance, "vkCmdCopyBufferToImage" ) ); + vkCmdCopyBufferToImage2KHR = PFN_vkCmdCopyBufferToImage2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyBufferToImage2KHR" ) ); + vkCmdCopyImage = PFN_vkCmdCopyImage( vkGetInstanceProcAddr( instance, "vkCmdCopyImage" ) ); + vkCmdCopyImage2KHR = PFN_vkCmdCopyImage2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyImage2KHR" ) ); + vkCmdCopyImageToBuffer = PFN_vkCmdCopyImageToBuffer( vkGetInstanceProcAddr( instance, "vkCmdCopyImageToBuffer" ) ); + vkCmdCopyImageToBuffer2KHR = PFN_vkCmdCopyImageToBuffer2KHR( vkGetInstanceProcAddr( instance, "vkCmdCopyImageToBuffer2KHR" ) ); + vkCmdCopyMemoryToAccelerationStructureKHR = PFN_vkCmdCopyMemoryToAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCmdCopyMemoryToAccelerationStructureKHR" ) ); + vkCmdCopyQueryPoolResults = PFN_vkCmdCopyQueryPoolResults( vkGetInstanceProcAddr( instance, "vkCmdCopyQueryPoolResults" ) ); + vkCmdDebugMarkerBeginEXT = PFN_vkCmdDebugMarkerBeginEXT( vkGetInstanceProcAddr( instance, "vkCmdDebugMarkerBeginEXT" ) ); + vkCmdDebugMarkerEndEXT = PFN_vkCmdDebugMarkerEndEXT( vkGetInstanceProcAddr( instance, "vkCmdDebugMarkerEndEXT" ) ); + vkCmdDebugMarkerInsertEXT = PFN_vkCmdDebugMarkerInsertEXT( vkGetInstanceProcAddr( instance, "vkCmdDebugMarkerInsertEXT" ) ); + vkCmdDispatch = PFN_vkCmdDispatch( vkGetInstanceProcAddr( instance, "vkCmdDispatch" ) ); + vkCmdDispatchBaseKHR = PFN_vkCmdDispatchBaseKHR( vkGetInstanceProcAddr( instance, "vkCmdDispatchBaseKHR" ) ); + vkCmdDispatchBase = PFN_vkCmdDispatchBase( vkGetInstanceProcAddr( instance, "vkCmdDispatchBase" ) ); + if ( !vkCmdDispatchBase ) vkCmdDispatchBase = vkCmdDispatchBaseKHR; + vkCmdDispatchIndirect = PFN_vkCmdDispatchIndirect( vkGetInstanceProcAddr( instance, "vkCmdDispatchIndirect" ) ); + vkCmdDraw = PFN_vkCmdDraw( vkGetInstanceProcAddr( instance, "vkCmdDraw" ) ); + vkCmdDrawIndexed = PFN_vkCmdDrawIndexed( vkGetInstanceProcAddr( instance, "vkCmdDrawIndexed" ) ); + vkCmdDrawIndexedIndirect = PFN_vkCmdDrawIndexedIndirect( vkGetInstanceProcAddr( instance, "vkCmdDrawIndexedIndirect" ) ); + vkCmdDrawIndexedIndirectCountAMD = PFN_vkCmdDrawIndexedIndirectCountAMD( vkGetInstanceProcAddr( instance, "vkCmdDrawIndexedIndirectCountAMD" ) ); + vkCmdDrawIndexedIndirectCountKHR = PFN_vkCmdDrawIndexedIndirectCountKHR( vkGetInstanceProcAddr( instance, "vkCmdDrawIndexedIndirectCountKHR" ) ); + vkCmdDrawIndexedIndirectCount = PFN_vkCmdDrawIndexedIndirectCount( vkGetInstanceProcAddr( instance, "vkCmdDrawIndexedIndirectCount" ) ); + if ( 
!vkCmdDrawIndexedIndirectCount ) vkCmdDrawIndexedIndirectCount = vkCmdDrawIndexedIndirectCountKHR; + if ( !vkCmdDrawIndexedIndirectCount ) vkCmdDrawIndexedIndirectCount = vkCmdDrawIndexedIndirectCountAMD; + vkCmdDrawIndirect = PFN_vkCmdDrawIndirect( vkGetInstanceProcAddr( instance, "vkCmdDrawIndirect" ) ); + vkCmdDrawIndirectByteCountEXT = PFN_vkCmdDrawIndirectByteCountEXT( vkGetInstanceProcAddr( instance, "vkCmdDrawIndirectByteCountEXT" ) ); + vkCmdDrawIndirectCountAMD = PFN_vkCmdDrawIndirectCountAMD( vkGetInstanceProcAddr( instance, "vkCmdDrawIndirectCountAMD" ) ); + vkCmdDrawIndirectCountKHR = PFN_vkCmdDrawIndirectCountKHR( vkGetInstanceProcAddr( instance, "vkCmdDrawIndirectCountKHR" ) ); + vkCmdDrawIndirectCount = PFN_vkCmdDrawIndirectCount( vkGetInstanceProcAddr( instance, "vkCmdDrawIndirectCount" ) ); + if ( !vkCmdDrawIndirectCount ) vkCmdDrawIndirectCount = vkCmdDrawIndirectCountKHR; + if ( !vkCmdDrawIndirectCount ) vkCmdDrawIndirectCount = vkCmdDrawIndirectCountAMD; + vkCmdDrawMeshTasksIndirectCountNV = PFN_vkCmdDrawMeshTasksIndirectCountNV( vkGetInstanceProcAddr( instance, "vkCmdDrawMeshTasksIndirectCountNV" ) ); + vkCmdDrawMeshTasksIndirectNV = PFN_vkCmdDrawMeshTasksIndirectNV( vkGetInstanceProcAddr( instance, "vkCmdDrawMeshTasksIndirectNV" ) ); + vkCmdDrawMeshTasksNV = PFN_vkCmdDrawMeshTasksNV( vkGetInstanceProcAddr( instance, "vkCmdDrawMeshTasksNV" ) ); + vkCmdEndConditionalRenderingEXT = PFN_vkCmdEndConditionalRenderingEXT( vkGetInstanceProcAddr( instance, "vkCmdEndConditionalRenderingEXT" ) ); + vkCmdEndDebugUtilsLabelEXT = PFN_vkCmdEndDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkCmdEndDebugUtilsLabelEXT" ) ); + vkCmdEndQuery = PFN_vkCmdEndQuery( vkGetInstanceProcAddr( instance, "vkCmdEndQuery" ) ); + vkCmdEndQueryIndexedEXT = PFN_vkCmdEndQueryIndexedEXT( vkGetInstanceProcAddr( instance, "vkCmdEndQueryIndexedEXT" ) ); + vkCmdEndRenderPass = PFN_vkCmdEndRenderPass( vkGetInstanceProcAddr( instance, "vkCmdEndRenderPass" ) ); + vkCmdEndRenderPass2KHR = PFN_vkCmdEndRenderPass2KHR( vkGetInstanceProcAddr( instance, "vkCmdEndRenderPass2KHR" ) ); + vkCmdEndRenderPass2 = PFN_vkCmdEndRenderPass2( vkGetInstanceProcAddr( instance, "vkCmdEndRenderPass2" ) ); + if ( !vkCmdEndRenderPass2 ) vkCmdEndRenderPass2 = vkCmdEndRenderPass2KHR; + vkCmdEndTransformFeedbackEXT = PFN_vkCmdEndTransformFeedbackEXT( vkGetInstanceProcAddr( instance, "vkCmdEndTransformFeedbackEXT" ) ); + vkCmdExecuteCommands = PFN_vkCmdExecuteCommands( vkGetInstanceProcAddr( instance, "vkCmdExecuteCommands" ) ); + vkCmdExecuteGeneratedCommandsNV = PFN_vkCmdExecuteGeneratedCommandsNV( vkGetInstanceProcAddr( instance, "vkCmdExecuteGeneratedCommandsNV" ) ); + vkCmdFillBuffer = PFN_vkCmdFillBuffer( vkGetInstanceProcAddr( instance, "vkCmdFillBuffer" ) ); + vkCmdInsertDebugUtilsLabelEXT = PFN_vkCmdInsertDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkCmdInsertDebugUtilsLabelEXT" ) ); + vkCmdNextSubpass = PFN_vkCmdNextSubpass( vkGetInstanceProcAddr( instance, "vkCmdNextSubpass" ) ); + vkCmdNextSubpass2KHR = PFN_vkCmdNextSubpass2KHR( vkGetInstanceProcAddr( instance, "vkCmdNextSubpass2KHR" ) ); + vkCmdNextSubpass2 = PFN_vkCmdNextSubpass2( vkGetInstanceProcAddr( instance, "vkCmdNextSubpass2" ) ); + if ( !vkCmdNextSubpass2 ) vkCmdNextSubpass2 = vkCmdNextSubpass2KHR; + vkCmdPipelineBarrier = PFN_vkCmdPipelineBarrier( vkGetInstanceProcAddr( instance, "vkCmdPipelineBarrier" ) ); + vkCmdPreprocessGeneratedCommandsNV = PFN_vkCmdPreprocessGeneratedCommandsNV( vkGetInstanceProcAddr( instance, 
"vkCmdPreprocessGeneratedCommandsNV" ) ); + vkCmdPushConstants = PFN_vkCmdPushConstants( vkGetInstanceProcAddr( instance, "vkCmdPushConstants" ) ); + vkCmdPushDescriptorSetKHR = PFN_vkCmdPushDescriptorSetKHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetKHR" ) ); + vkCmdPushDescriptorSetWithTemplateKHR = PFN_vkCmdPushDescriptorSetWithTemplateKHR( vkGetInstanceProcAddr( instance, "vkCmdPushDescriptorSetWithTemplateKHR" ) ); + vkCmdResetEvent = PFN_vkCmdResetEvent( vkGetInstanceProcAddr( instance, "vkCmdResetEvent" ) ); + vkCmdResetQueryPool = PFN_vkCmdResetQueryPool( vkGetInstanceProcAddr( instance, "vkCmdResetQueryPool" ) ); + vkCmdResolveImage = PFN_vkCmdResolveImage( vkGetInstanceProcAddr( instance, "vkCmdResolveImage" ) ); + vkCmdResolveImage2KHR = PFN_vkCmdResolveImage2KHR( vkGetInstanceProcAddr( instance, "vkCmdResolveImage2KHR" ) ); + vkCmdSetBlendConstants = PFN_vkCmdSetBlendConstants( vkGetInstanceProcAddr( instance, "vkCmdSetBlendConstants" ) ); + vkCmdSetCheckpointNV = PFN_vkCmdSetCheckpointNV( vkGetInstanceProcAddr( instance, "vkCmdSetCheckpointNV" ) ); + vkCmdSetCoarseSampleOrderNV = PFN_vkCmdSetCoarseSampleOrderNV( vkGetInstanceProcAddr( instance, "vkCmdSetCoarseSampleOrderNV" ) ); + vkCmdSetCullModeEXT = PFN_vkCmdSetCullModeEXT( vkGetInstanceProcAddr( instance, "vkCmdSetCullModeEXT" ) ); + vkCmdSetDepthBias = PFN_vkCmdSetDepthBias( vkGetInstanceProcAddr( instance, "vkCmdSetDepthBias" ) ); + vkCmdSetDepthBounds = PFN_vkCmdSetDepthBounds( vkGetInstanceProcAddr( instance, "vkCmdSetDepthBounds" ) ); + vkCmdSetDepthBoundsTestEnableEXT = PFN_vkCmdSetDepthBoundsTestEnableEXT( vkGetInstanceProcAddr( instance, "vkCmdSetDepthBoundsTestEnableEXT" ) ); + vkCmdSetDepthCompareOpEXT = PFN_vkCmdSetDepthCompareOpEXT( vkGetInstanceProcAddr( instance, "vkCmdSetDepthCompareOpEXT" ) ); + vkCmdSetDepthTestEnableEXT = PFN_vkCmdSetDepthTestEnableEXT( vkGetInstanceProcAddr( instance, "vkCmdSetDepthTestEnableEXT" ) ); + vkCmdSetDepthWriteEnableEXT = PFN_vkCmdSetDepthWriteEnableEXT( vkGetInstanceProcAddr( instance, "vkCmdSetDepthWriteEnableEXT" ) ); + vkCmdSetDeviceMaskKHR = PFN_vkCmdSetDeviceMaskKHR( vkGetInstanceProcAddr( instance, "vkCmdSetDeviceMaskKHR" ) ); + vkCmdSetDeviceMask = PFN_vkCmdSetDeviceMask( vkGetInstanceProcAddr( instance, "vkCmdSetDeviceMask" ) ); + if ( !vkCmdSetDeviceMask ) vkCmdSetDeviceMask = vkCmdSetDeviceMaskKHR; + vkCmdSetDiscardRectangleEXT = PFN_vkCmdSetDiscardRectangleEXT( vkGetInstanceProcAddr( instance, "vkCmdSetDiscardRectangleEXT" ) ); + vkCmdSetEvent = PFN_vkCmdSetEvent( vkGetInstanceProcAddr( instance, "vkCmdSetEvent" ) ); + vkCmdSetExclusiveScissorNV = PFN_vkCmdSetExclusiveScissorNV( vkGetInstanceProcAddr( instance, "vkCmdSetExclusiveScissorNV" ) ); + vkCmdSetFragmentShadingRateEnumNV = PFN_vkCmdSetFragmentShadingRateEnumNV( vkGetInstanceProcAddr( instance, "vkCmdSetFragmentShadingRateEnumNV" ) ); + vkCmdSetFragmentShadingRateKHR = PFN_vkCmdSetFragmentShadingRateKHR( vkGetInstanceProcAddr( instance, "vkCmdSetFragmentShadingRateKHR" ) ); + vkCmdSetFrontFaceEXT = PFN_vkCmdSetFrontFaceEXT( vkGetInstanceProcAddr( instance, "vkCmdSetFrontFaceEXT" ) ); + vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetInstanceProcAddr( instance, "vkCmdSetLineStippleEXT" ) ); + vkCmdSetLineWidth = PFN_vkCmdSetLineWidth( vkGetInstanceProcAddr( instance, "vkCmdSetLineWidth" ) ); + vkCmdSetPerformanceMarkerINTEL = PFN_vkCmdSetPerformanceMarkerINTEL( vkGetInstanceProcAddr( instance, "vkCmdSetPerformanceMarkerINTEL" ) ); + vkCmdSetPerformanceOverrideINTEL = 
PFN_vkCmdSetPerformanceOverrideINTEL( vkGetInstanceProcAddr( instance, "vkCmdSetPerformanceOverrideINTEL" ) ); + vkCmdSetPerformanceStreamMarkerINTEL = PFN_vkCmdSetPerformanceStreamMarkerINTEL( vkGetInstanceProcAddr( instance, "vkCmdSetPerformanceStreamMarkerINTEL" ) ); + vkCmdSetPrimitiveTopologyEXT = PFN_vkCmdSetPrimitiveTopologyEXT( vkGetInstanceProcAddr( instance, "vkCmdSetPrimitiveTopologyEXT" ) ); + vkCmdSetRayTracingPipelineStackSizeKHR = PFN_vkCmdSetRayTracingPipelineStackSizeKHR( vkGetInstanceProcAddr( instance, "vkCmdSetRayTracingPipelineStackSizeKHR" ) ); + vkCmdSetSampleLocationsEXT = PFN_vkCmdSetSampleLocationsEXT( vkGetInstanceProcAddr( instance, "vkCmdSetSampleLocationsEXT" ) ); + vkCmdSetScissor = PFN_vkCmdSetScissor( vkGetInstanceProcAddr( instance, "vkCmdSetScissor" ) ); + vkCmdSetScissorWithCountEXT = PFN_vkCmdSetScissorWithCountEXT( vkGetInstanceProcAddr( instance, "vkCmdSetScissorWithCountEXT" ) ); + vkCmdSetStencilCompareMask = PFN_vkCmdSetStencilCompareMask( vkGetInstanceProcAddr( instance, "vkCmdSetStencilCompareMask" ) ); + vkCmdSetStencilOpEXT = PFN_vkCmdSetStencilOpEXT( vkGetInstanceProcAddr( instance, "vkCmdSetStencilOpEXT" ) ); + vkCmdSetStencilReference = PFN_vkCmdSetStencilReference( vkGetInstanceProcAddr( instance, "vkCmdSetStencilReference" ) ); + vkCmdSetStencilTestEnableEXT = PFN_vkCmdSetStencilTestEnableEXT( vkGetInstanceProcAddr( instance, "vkCmdSetStencilTestEnableEXT" ) ); + vkCmdSetStencilWriteMask = PFN_vkCmdSetStencilWriteMask( vkGetInstanceProcAddr( instance, "vkCmdSetStencilWriteMask" ) ); + vkCmdSetViewport = PFN_vkCmdSetViewport( vkGetInstanceProcAddr( instance, "vkCmdSetViewport" ) ); + vkCmdSetViewportShadingRatePaletteNV = PFN_vkCmdSetViewportShadingRatePaletteNV( vkGetInstanceProcAddr( instance, "vkCmdSetViewportShadingRatePaletteNV" ) ); + vkCmdSetViewportWScalingNV = PFN_vkCmdSetViewportWScalingNV( vkGetInstanceProcAddr( instance, "vkCmdSetViewportWScalingNV" ) ); + vkCmdSetViewportWithCountEXT = PFN_vkCmdSetViewportWithCountEXT( vkGetInstanceProcAddr( instance, "vkCmdSetViewportWithCountEXT" ) ); + vkCmdTraceRaysIndirectKHR = PFN_vkCmdTraceRaysIndirectKHR( vkGetInstanceProcAddr( instance, "vkCmdTraceRaysIndirectKHR" ) ); + vkCmdTraceRaysKHR = PFN_vkCmdTraceRaysKHR( vkGetInstanceProcAddr( instance, "vkCmdTraceRaysKHR" ) ); + vkCmdTraceRaysNV = PFN_vkCmdTraceRaysNV( vkGetInstanceProcAddr( instance, "vkCmdTraceRaysNV" ) ); + vkCmdUpdateBuffer = PFN_vkCmdUpdateBuffer( vkGetInstanceProcAddr( instance, "vkCmdUpdateBuffer" ) ); + vkCmdWaitEvents = PFN_vkCmdWaitEvents( vkGetInstanceProcAddr( instance, "vkCmdWaitEvents" ) ); + vkCmdWriteAccelerationStructuresPropertiesKHR = PFN_vkCmdWriteAccelerationStructuresPropertiesKHR( vkGetInstanceProcAddr( instance, "vkCmdWriteAccelerationStructuresPropertiesKHR" ) ); + vkCmdWriteAccelerationStructuresPropertiesNV = PFN_vkCmdWriteAccelerationStructuresPropertiesNV( vkGetInstanceProcAddr( instance, "vkCmdWriteAccelerationStructuresPropertiesNV" ) ); + vkCmdWriteBufferMarkerAMD = PFN_vkCmdWriteBufferMarkerAMD( vkGetInstanceProcAddr( instance, "vkCmdWriteBufferMarkerAMD" ) ); + vkCmdWriteTimestamp = PFN_vkCmdWriteTimestamp( vkGetInstanceProcAddr( instance, "vkCmdWriteTimestamp" ) ); + vkCompileDeferredNV = PFN_vkCompileDeferredNV( vkGetInstanceProcAddr( instance, "vkCompileDeferredNV" ) ); + vkCopyAccelerationStructureKHR = PFN_vkCopyAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCopyAccelerationStructureKHR" ) ); + vkCopyAccelerationStructureToMemoryKHR = 
PFN_vkCopyAccelerationStructureToMemoryKHR( vkGetInstanceProcAddr( instance, "vkCopyAccelerationStructureToMemoryKHR" ) ); + vkCopyMemoryToAccelerationStructureKHR = PFN_vkCopyMemoryToAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCopyMemoryToAccelerationStructureKHR" ) ); + vkCreateAccelerationStructureKHR = PFN_vkCreateAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkCreateAccelerationStructureKHR" ) ); + vkCreateAccelerationStructureNV = PFN_vkCreateAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkCreateAccelerationStructureNV" ) ); + vkCreateBuffer = PFN_vkCreateBuffer( vkGetInstanceProcAddr( instance, "vkCreateBuffer" ) ); + vkCreateBufferView = PFN_vkCreateBufferView( vkGetInstanceProcAddr( instance, "vkCreateBufferView" ) ); + vkCreateCommandPool = PFN_vkCreateCommandPool( vkGetInstanceProcAddr( instance, "vkCreateCommandPool" ) ); + vkCreateComputePipelines = PFN_vkCreateComputePipelines( vkGetInstanceProcAddr( instance, "vkCreateComputePipelines" ) ); + vkCreateDeferredOperationKHR = PFN_vkCreateDeferredOperationKHR( vkGetInstanceProcAddr( instance, "vkCreateDeferredOperationKHR" ) ); + vkCreateDescriptorPool = PFN_vkCreateDescriptorPool( vkGetInstanceProcAddr( instance, "vkCreateDescriptorPool" ) ); + vkCreateDescriptorSetLayout = PFN_vkCreateDescriptorSetLayout( vkGetInstanceProcAddr( instance, "vkCreateDescriptorSetLayout" ) ); + vkCreateDescriptorUpdateTemplateKHR = PFN_vkCreateDescriptorUpdateTemplateKHR( vkGetInstanceProcAddr( instance, "vkCreateDescriptorUpdateTemplateKHR" ) ); + vkCreateDescriptorUpdateTemplate = PFN_vkCreateDescriptorUpdateTemplate( vkGetInstanceProcAddr( instance, "vkCreateDescriptorUpdateTemplate" ) ); + if ( !vkCreateDescriptorUpdateTemplate ) vkCreateDescriptorUpdateTemplate = vkCreateDescriptorUpdateTemplateKHR; + vkCreateEvent = PFN_vkCreateEvent( vkGetInstanceProcAddr( instance, "vkCreateEvent" ) ); + vkCreateFence = PFN_vkCreateFence( vkGetInstanceProcAddr( instance, "vkCreateFence" ) ); + vkCreateFramebuffer = PFN_vkCreateFramebuffer( vkGetInstanceProcAddr( instance, "vkCreateFramebuffer" ) ); + vkCreateGraphicsPipelines = PFN_vkCreateGraphicsPipelines( vkGetInstanceProcAddr( instance, "vkCreateGraphicsPipelines" ) ); + vkCreateImage = PFN_vkCreateImage( vkGetInstanceProcAddr( instance, "vkCreateImage" ) ); + vkCreateImageView = PFN_vkCreateImageView( vkGetInstanceProcAddr( instance, "vkCreateImageView" ) ); + vkCreateIndirectCommandsLayoutNV = PFN_vkCreateIndirectCommandsLayoutNV( vkGetInstanceProcAddr( instance, "vkCreateIndirectCommandsLayoutNV" ) ); + vkCreatePipelineCache = PFN_vkCreatePipelineCache( vkGetInstanceProcAddr( instance, "vkCreatePipelineCache" ) ); + vkCreatePipelineLayout = PFN_vkCreatePipelineLayout( vkGetInstanceProcAddr( instance, "vkCreatePipelineLayout" ) ); + vkCreatePrivateDataSlotEXT = PFN_vkCreatePrivateDataSlotEXT( vkGetInstanceProcAddr( instance, "vkCreatePrivateDataSlotEXT" ) ); + vkCreateQueryPool = PFN_vkCreateQueryPool( vkGetInstanceProcAddr( instance, "vkCreateQueryPool" ) ); + vkCreateRayTracingPipelinesKHR = PFN_vkCreateRayTracingPipelinesKHR( vkGetInstanceProcAddr( instance, "vkCreateRayTracingPipelinesKHR" ) ); + vkCreateRayTracingPipelinesNV = PFN_vkCreateRayTracingPipelinesNV( vkGetInstanceProcAddr( instance, "vkCreateRayTracingPipelinesNV" ) ); + vkCreateRenderPass = PFN_vkCreateRenderPass( vkGetInstanceProcAddr( instance, "vkCreateRenderPass" ) ); + vkCreateRenderPass2KHR = PFN_vkCreateRenderPass2KHR( vkGetInstanceProcAddr( instance, "vkCreateRenderPass2KHR" ) ); + 
vkCreateRenderPass2 = PFN_vkCreateRenderPass2( vkGetInstanceProcAddr( instance, "vkCreateRenderPass2" ) ); + if ( !vkCreateRenderPass2 ) vkCreateRenderPass2 = vkCreateRenderPass2KHR; + vkCreateSampler = PFN_vkCreateSampler( vkGetInstanceProcAddr( instance, "vkCreateSampler" ) ); + vkCreateSamplerYcbcrConversionKHR = PFN_vkCreateSamplerYcbcrConversionKHR( vkGetInstanceProcAddr( instance, "vkCreateSamplerYcbcrConversionKHR" ) ); + vkCreateSamplerYcbcrConversion = PFN_vkCreateSamplerYcbcrConversion( vkGetInstanceProcAddr( instance, "vkCreateSamplerYcbcrConversion" ) ); + if ( !vkCreateSamplerYcbcrConversion ) vkCreateSamplerYcbcrConversion = vkCreateSamplerYcbcrConversionKHR; + vkCreateSemaphore = PFN_vkCreateSemaphore( vkGetInstanceProcAddr( instance, "vkCreateSemaphore" ) ); + vkCreateShaderModule = PFN_vkCreateShaderModule( vkGetInstanceProcAddr( instance, "vkCreateShaderModule" ) ); + vkCreateSharedSwapchainsKHR = PFN_vkCreateSharedSwapchainsKHR( vkGetInstanceProcAddr( instance, "vkCreateSharedSwapchainsKHR" ) ); + vkCreateSwapchainKHR = PFN_vkCreateSwapchainKHR( vkGetInstanceProcAddr( instance, "vkCreateSwapchainKHR" ) ); + vkCreateValidationCacheEXT = PFN_vkCreateValidationCacheEXT( vkGetInstanceProcAddr( instance, "vkCreateValidationCacheEXT" ) ); + vkDebugMarkerSetObjectNameEXT = PFN_vkDebugMarkerSetObjectNameEXT( vkGetInstanceProcAddr( instance, "vkDebugMarkerSetObjectNameEXT" ) ); + vkDebugMarkerSetObjectTagEXT = PFN_vkDebugMarkerSetObjectTagEXT( vkGetInstanceProcAddr( instance, "vkDebugMarkerSetObjectTagEXT" ) ); + vkDeferredOperationJoinKHR = PFN_vkDeferredOperationJoinKHR( vkGetInstanceProcAddr( instance, "vkDeferredOperationJoinKHR" ) ); + vkDestroyAccelerationStructureKHR = PFN_vkDestroyAccelerationStructureKHR( vkGetInstanceProcAddr( instance, "vkDestroyAccelerationStructureKHR" ) ); + vkDestroyAccelerationStructureNV = PFN_vkDestroyAccelerationStructureNV( vkGetInstanceProcAddr( instance, "vkDestroyAccelerationStructureNV" ) ); + vkDestroyBuffer = PFN_vkDestroyBuffer( vkGetInstanceProcAddr( instance, "vkDestroyBuffer" ) ); + vkDestroyBufferView = PFN_vkDestroyBufferView( vkGetInstanceProcAddr( instance, "vkDestroyBufferView" ) ); + vkDestroyCommandPool = PFN_vkDestroyCommandPool( vkGetInstanceProcAddr( instance, "vkDestroyCommandPool" ) ); + vkDestroyDeferredOperationKHR = PFN_vkDestroyDeferredOperationKHR( vkGetInstanceProcAddr( instance, "vkDestroyDeferredOperationKHR" ) ); + vkDestroyDescriptorPool = PFN_vkDestroyDescriptorPool( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorPool" ) ); + vkDestroyDescriptorSetLayout = PFN_vkDestroyDescriptorSetLayout( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorSetLayout" ) ); + vkDestroyDescriptorUpdateTemplateKHR = PFN_vkDestroyDescriptorUpdateTemplateKHR( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorUpdateTemplateKHR" ) ); + vkDestroyDescriptorUpdateTemplate = PFN_vkDestroyDescriptorUpdateTemplate( vkGetInstanceProcAddr( instance, "vkDestroyDescriptorUpdateTemplate" ) ); + if ( !vkDestroyDescriptorUpdateTemplate ) vkDestroyDescriptorUpdateTemplate = vkDestroyDescriptorUpdateTemplateKHR; + vkDestroyDevice = PFN_vkDestroyDevice( vkGetInstanceProcAddr( instance, "vkDestroyDevice" ) ); + vkDestroyEvent = PFN_vkDestroyEvent( vkGetInstanceProcAddr( instance, "vkDestroyEvent" ) ); + vkDestroyFence = PFN_vkDestroyFence( vkGetInstanceProcAddr( instance, "vkDestroyFence" ) ); + vkDestroyFramebuffer = PFN_vkDestroyFramebuffer( vkGetInstanceProcAddr( instance, "vkDestroyFramebuffer" ) ); + vkDestroyImage = PFN_vkDestroyImage( 
vkGetInstanceProcAddr( instance, "vkDestroyImage" ) ); + vkDestroyImageView = PFN_vkDestroyImageView( vkGetInstanceProcAddr( instance, "vkDestroyImageView" ) ); + vkDestroyIndirectCommandsLayoutNV = PFN_vkDestroyIndirectCommandsLayoutNV( vkGetInstanceProcAddr( instance, "vkDestroyIndirectCommandsLayoutNV" ) ); + vkDestroyPipeline = PFN_vkDestroyPipeline( vkGetInstanceProcAddr( instance, "vkDestroyPipeline" ) ); + vkDestroyPipelineCache = PFN_vkDestroyPipelineCache( vkGetInstanceProcAddr( instance, "vkDestroyPipelineCache" ) ); + vkDestroyPipelineLayout = PFN_vkDestroyPipelineLayout( vkGetInstanceProcAddr( instance, "vkDestroyPipelineLayout" ) ); + vkDestroyPrivateDataSlotEXT = PFN_vkDestroyPrivateDataSlotEXT( vkGetInstanceProcAddr( instance, "vkDestroyPrivateDataSlotEXT" ) ); + vkDestroyQueryPool = PFN_vkDestroyQueryPool( vkGetInstanceProcAddr( instance, "vkDestroyQueryPool" ) ); + vkDestroyRenderPass = PFN_vkDestroyRenderPass( vkGetInstanceProcAddr( instance, "vkDestroyRenderPass" ) ); + vkDestroySampler = PFN_vkDestroySampler( vkGetInstanceProcAddr( instance, "vkDestroySampler" ) ); + vkDestroySamplerYcbcrConversionKHR = PFN_vkDestroySamplerYcbcrConversionKHR( vkGetInstanceProcAddr( instance, "vkDestroySamplerYcbcrConversionKHR" ) ); + vkDestroySamplerYcbcrConversion = PFN_vkDestroySamplerYcbcrConversion( vkGetInstanceProcAddr( instance, "vkDestroySamplerYcbcrConversion" ) ); + if ( !vkDestroySamplerYcbcrConversion ) vkDestroySamplerYcbcrConversion = vkDestroySamplerYcbcrConversionKHR; + vkDestroySemaphore = PFN_vkDestroySemaphore( vkGetInstanceProcAddr( instance, "vkDestroySemaphore" ) ); + vkDestroyShaderModule = PFN_vkDestroyShaderModule( vkGetInstanceProcAddr( instance, "vkDestroyShaderModule" ) ); + vkDestroySwapchainKHR = PFN_vkDestroySwapchainKHR( vkGetInstanceProcAddr( instance, "vkDestroySwapchainKHR" ) ); + vkDestroyValidationCacheEXT = PFN_vkDestroyValidationCacheEXT( vkGetInstanceProcAddr( instance, "vkDestroyValidationCacheEXT" ) ); + vkDeviceWaitIdle = PFN_vkDeviceWaitIdle( vkGetInstanceProcAddr( instance, "vkDeviceWaitIdle" ) ); + vkDisplayPowerControlEXT = PFN_vkDisplayPowerControlEXT( vkGetInstanceProcAddr( instance, "vkDisplayPowerControlEXT" ) ); + vkEndCommandBuffer = PFN_vkEndCommandBuffer( vkGetInstanceProcAddr( instance, "vkEndCommandBuffer" ) ); + vkFlushMappedMemoryRanges = PFN_vkFlushMappedMemoryRanges( vkGetInstanceProcAddr( instance, "vkFlushMappedMemoryRanges" ) ); + vkFreeCommandBuffers = PFN_vkFreeCommandBuffers( vkGetInstanceProcAddr( instance, "vkFreeCommandBuffers" ) ); + vkFreeDescriptorSets = PFN_vkFreeDescriptorSets( vkGetInstanceProcAddr( instance, "vkFreeDescriptorSets" ) ); + vkFreeMemory = PFN_vkFreeMemory( vkGetInstanceProcAddr( instance, "vkFreeMemory" ) ); + vkGetAccelerationStructureBuildSizesKHR = PFN_vkGetAccelerationStructureBuildSizesKHR( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureBuildSizesKHR" ) ); + vkGetAccelerationStructureDeviceAddressKHR = PFN_vkGetAccelerationStructureDeviceAddressKHR( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureDeviceAddressKHR" ) ); + vkGetAccelerationStructureHandleNV = PFN_vkGetAccelerationStructureHandleNV( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureHandleNV" ) ); + vkGetAccelerationStructureMemoryRequirementsNV = PFN_vkGetAccelerationStructureMemoryRequirementsNV( vkGetInstanceProcAddr( instance, "vkGetAccelerationStructureMemoryRequirementsNV" ) ); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + vkGetAndroidHardwareBufferPropertiesANDROID = 
PFN_vkGetAndroidHardwareBufferPropertiesANDROID( vkGetInstanceProcAddr( instance, "vkGetAndroidHardwareBufferPropertiesANDROID" ) ); +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + vkGetBufferDeviceAddressEXT = PFN_vkGetBufferDeviceAddressEXT( vkGetInstanceProcAddr( instance, "vkGetBufferDeviceAddressEXT" ) ); + vkGetBufferDeviceAddressKHR = PFN_vkGetBufferDeviceAddressKHR( vkGetInstanceProcAddr( instance, "vkGetBufferDeviceAddressKHR" ) ); + vkGetBufferDeviceAddress = PFN_vkGetBufferDeviceAddress( vkGetInstanceProcAddr( instance, "vkGetBufferDeviceAddress" ) ); + if ( !vkGetBufferDeviceAddress ) vkGetBufferDeviceAddress = vkGetBufferDeviceAddressKHR; + if ( !vkGetBufferDeviceAddress ) vkGetBufferDeviceAddress = vkGetBufferDeviceAddressEXT; + vkGetBufferMemoryRequirements = PFN_vkGetBufferMemoryRequirements( vkGetInstanceProcAddr( instance, "vkGetBufferMemoryRequirements" ) ); + vkGetBufferMemoryRequirements2KHR = PFN_vkGetBufferMemoryRequirements2KHR( vkGetInstanceProcAddr( instance, "vkGetBufferMemoryRequirements2KHR" ) ); + vkGetBufferMemoryRequirements2 = PFN_vkGetBufferMemoryRequirements2( vkGetInstanceProcAddr( instance, "vkGetBufferMemoryRequirements2" ) ); + if ( !vkGetBufferMemoryRequirements2 ) vkGetBufferMemoryRequirements2 = vkGetBufferMemoryRequirements2KHR; + vkGetBufferOpaqueCaptureAddressKHR = PFN_vkGetBufferOpaqueCaptureAddressKHR( vkGetInstanceProcAddr( instance, "vkGetBufferOpaqueCaptureAddressKHR" ) ); + vkGetBufferOpaqueCaptureAddress = PFN_vkGetBufferOpaqueCaptureAddress( vkGetInstanceProcAddr( instance, "vkGetBufferOpaqueCaptureAddress" ) ); + if ( !vkGetBufferOpaqueCaptureAddress ) vkGetBufferOpaqueCaptureAddress = vkGetBufferOpaqueCaptureAddressKHR; + vkGetCalibratedTimestampsEXT = PFN_vkGetCalibratedTimestampsEXT( vkGetInstanceProcAddr( instance, "vkGetCalibratedTimestampsEXT" ) ); + vkGetDeferredOperationMaxConcurrencyKHR = PFN_vkGetDeferredOperationMaxConcurrencyKHR( vkGetInstanceProcAddr( instance, "vkGetDeferredOperationMaxConcurrencyKHR" ) ); + vkGetDeferredOperationResultKHR = PFN_vkGetDeferredOperationResultKHR( vkGetInstanceProcAddr( instance, "vkGetDeferredOperationResultKHR" ) ); + vkGetDescriptorSetLayoutSupportKHR = PFN_vkGetDescriptorSetLayoutSupportKHR( vkGetInstanceProcAddr( instance, "vkGetDescriptorSetLayoutSupportKHR" ) ); + vkGetDescriptorSetLayoutSupport = PFN_vkGetDescriptorSetLayoutSupport( vkGetInstanceProcAddr( instance, "vkGetDescriptorSetLayoutSupport" ) ); + if ( !vkGetDescriptorSetLayoutSupport ) vkGetDescriptorSetLayoutSupport = vkGetDescriptorSetLayoutSupportKHR; + vkGetDeviceAccelerationStructureCompatibilityKHR = PFN_vkGetDeviceAccelerationStructureCompatibilityKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceAccelerationStructureCompatibilityKHR" ) ); + vkGetDeviceGroupPeerMemoryFeaturesKHR = PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceGroupPeerMemoryFeaturesKHR" ) ); + vkGetDeviceGroupPeerMemoryFeatures = PFN_vkGetDeviceGroupPeerMemoryFeatures( vkGetInstanceProcAddr( instance, "vkGetDeviceGroupPeerMemoryFeatures" ) ); + if ( !vkGetDeviceGroupPeerMemoryFeatures ) vkGetDeviceGroupPeerMemoryFeatures = vkGetDeviceGroupPeerMemoryFeaturesKHR; + vkGetDeviceGroupPresentCapabilitiesKHR = PFN_vkGetDeviceGroupPresentCapabilitiesKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceGroupPresentCapabilitiesKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetDeviceGroupSurfacePresentModes2EXT = PFN_vkGetDeviceGroupSurfacePresentModes2EXT( vkGetInstanceProcAddr( instance, 
"vkGetDeviceGroupSurfacePresentModes2EXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetDeviceGroupSurfacePresentModesKHR = PFN_vkGetDeviceGroupSurfacePresentModesKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceGroupSurfacePresentModesKHR" ) ); + vkGetDeviceMemoryCommitment = PFN_vkGetDeviceMemoryCommitment( vkGetInstanceProcAddr( instance, "vkGetDeviceMemoryCommitment" ) ); + vkGetDeviceMemoryOpaqueCaptureAddressKHR = PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR( vkGetInstanceProcAddr( instance, "vkGetDeviceMemoryOpaqueCaptureAddressKHR" ) ); + vkGetDeviceMemoryOpaqueCaptureAddress = PFN_vkGetDeviceMemoryOpaqueCaptureAddress( vkGetInstanceProcAddr( instance, "vkGetDeviceMemoryOpaqueCaptureAddress" ) ); + if ( !vkGetDeviceMemoryOpaqueCaptureAddress ) vkGetDeviceMemoryOpaqueCaptureAddress = vkGetDeviceMemoryOpaqueCaptureAddressKHR; + vkGetDeviceProcAddr = PFN_vkGetDeviceProcAddr( vkGetInstanceProcAddr( instance, "vkGetDeviceProcAddr" ) ); + vkGetDeviceQueue = PFN_vkGetDeviceQueue( vkGetInstanceProcAddr( instance, "vkGetDeviceQueue" ) ); + vkGetDeviceQueue2 = PFN_vkGetDeviceQueue2( vkGetInstanceProcAddr( instance, "vkGetDeviceQueue2" ) ); + vkGetEventStatus = PFN_vkGetEventStatus( vkGetInstanceProcAddr( instance, "vkGetEventStatus" ) ); + vkGetFenceFdKHR = PFN_vkGetFenceFdKHR( vkGetInstanceProcAddr( instance, "vkGetFenceFdKHR" ) ); + vkGetFenceStatus = PFN_vkGetFenceStatus( vkGetInstanceProcAddr( instance, "vkGetFenceStatus" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetFenceWin32HandleKHR = PFN_vkGetFenceWin32HandleKHR( vkGetInstanceProcAddr( instance, "vkGetFenceWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetGeneratedCommandsMemoryRequirementsNV = PFN_vkGetGeneratedCommandsMemoryRequirementsNV( vkGetInstanceProcAddr( instance, "vkGetGeneratedCommandsMemoryRequirementsNV" ) ); + vkGetImageDrmFormatModifierPropertiesEXT = PFN_vkGetImageDrmFormatModifierPropertiesEXT( vkGetInstanceProcAddr( instance, "vkGetImageDrmFormatModifierPropertiesEXT" ) ); + vkGetImageMemoryRequirements = PFN_vkGetImageMemoryRequirements( vkGetInstanceProcAddr( instance, "vkGetImageMemoryRequirements" ) ); + vkGetImageMemoryRequirements2KHR = PFN_vkGetImageMemoryRequirements2KHR( vkGetInstanceProcAddr( instance, "vkGetImageMemoryRequirements2KHR" ) ); + vkGetImageMemoryRequirements2 = PFN_vkGetImageMemoryRequirements2( vkGetInstanceProcAddr( instance, "vkGetImageMemoryRequirements2" ) ); + if ( !vkGetImageMemoryRequirements2 ) vkGetImageMemoryRequirements2 = vkGetImageMemoryRequirements2KHR; + vkGetImageSparseMemoryRequirements = PFN_vkGetImageSparseMemoryRequirements( vkGetInstanceProcAddr( instance, "vkGetImageSparseMemoryRequirements" ) ); + vkGetImageSparseMemoryRequirements2KHR = PFN_vkGetImageSparseMemoryRequirements2KHR( vkGetInstanceProcAddr( instance, "vkGetImageSparseMemoryRequirements2KHR" ) ); + vkGetImageSparseMemoryRequirements2 = PFN_vkGetImageSparseMemoryRequirements2( vkGetInstanceProcAddr( instance, "vkGetImageSparseMemoryRequirements2" ) ); + if ( !vkGetImageSparseMemoryRequirements2 ) vkGetImageSparseMemoryRequirements2 = vkGetImageSparseMemoryRequirements2KHR; + vkGetImageSubresourceLayout = PFN_vkGetImageSubresourceLayout( vkGetInstanceProcAddr( instance, "vkGetImageSubresourceLayout" ) ); + vkGetImageViewAddressNVX = PFN_vkGetImageViewAddressNVX( vkGetInstanceProcAddr( instance, "vkGetImageViewAddressNVX" ) ); + vkGetImageViewHandleNVX = PFN_vkGetImageViewHandleNVX( vkGetInstanceProcAddr( instance, "vkGetImageViewHandleNVX" ) ); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + 
vkGetMemoryAndroidHardwareBufferANDROID = PFN_vkGetMemoryAndroidHardwareBufferANDROID( vkGetInstanceProcAddr( instance, "vkGetMemoryAndroidHardwareBufferANDROID" ) ); +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + vkGetMemoryFdKHR = PFN_vkGetMemoryFdKHR( vkGetInstanceProcAddr( instance, "vkGetMemoryFdKHR" ) ); + vkGetMemoryFdPropertiesKHR = PFN_vkGetMemoryFdPropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetMemoryFdPropertiesKHR" ) ); + vkGetMemoryHostPointerPropertiesEXT = PFN_vkGetMemoryHostPointerPropertiesEXT( vkGetInstanceProcAddr( instance, "vkGetMemoryHostPointerPropertiesEXT" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandleKHR = PFN_vkGetMemoryWin32HandleKHR( vkGetInstanceProcAddr( instance, "vkGetMemoryWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandleNV = PFN_vkGetMemoryWin32HandleNV( vkGetInstanceProcAddr( instance, "vkGetMemoryWin32HandleNV" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandlePropertiesKHR = PFN_vkGetMemoryWin32HandlePropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetMemoryWin32HandlePropertiesKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetPastPresentationTimingGOOGLE = PFN_vkGetPastPresentationTimingGOOGLE( vkGetInstanceProcAddr( instance, "vkGetPastPresentationTimingGOOGLE" ) ); + vkGetPerformanceParameterINTEL = PFN_vkGetPerformanceParameterINTEL( vkGetInstanceProcAddr( instance, "vkGetPerformanceParameterINTEL" ) ); + vkGetPipelineCacheData = PFN_vkGetPipelineCacheData( vkGetInstanceProcAddr( instance, "vkGetPipelineCacheData" ) ); + vkGetPipelineExecutableInternalRepresentationsKHR = PFN_vkGetPipelineExecutableInternalRepresentationsKHR( vkGetInstanceProcAddr( instance, "vkGetPipelineExecutableInternalRepresentationsKHR" ) ); + vkGetPipelineExecutablePropertiesKHR = PFN_vkGetPipelineExecutablePropertiesKHR( vkGetInstanceProcAddr( instance, "vkGetPipelineExecutablePropertiesKHR" ) ); + vkGetPipelineExecutableStatisticsKHR = PFN_vkGetPipelineExecutableStatisticsKHR( vkGetInstanceProcAddr( instance, "vkGetPipelineExecutableStatisticsKHR" ) ); + vkGetPrivateDataEXT = PFN_vkGetPrivateDataEXT( vkGetInstanceProcAddr( instance, "vkGetPrivateDataEXT" ) ); + vkGetQueryPoolResults = PFN_vkGetQueryPoolResults( vkGetInstanceProcAddr( instance, "vkGetQueryPoolResults" ) ); + vkGetQueueCheckpointDataNV = PFN_vkGetQueueCheckpointDataNV( vkGetInstanceProcAddr( instance, "vkGetQueueCheckpointDataNV" ) ); + vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( vkGetInstanceProcAddr( instance, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR" ) ); + vkGetRayTracingShaderGroupHandlesNV = PFN_vkGetRayTracingShaderGroupHandlesNV( vkGetInstanceProcAddr( instance, "vkGetRayTracingShaderGroupHandlesNV" ) ); + vkGetRayTracingShaderGroupHandlesKHR = PFN_vkGetRayTracingShaderGroupHandlesKHR( vkGetInstanceProcAddr( instance, "vkGetRayTracingShaderGroupHandlesKHR" ) ); + if ( !vkGetRayTracingShaderGroupHandlesKHR ) vkGetRayTracingShaderGroupHandlesKHR = vkGetRayTracingShaderGroupHandlesNV; + vkGetRayTracingShaderGroupStackSizeKHR = PFN_vkGetRayTracingShaderGroupStackSizeKHR( vkGetInstanceProcAddr( instance, "vkGetRayTracingShaderGroupStackSizeKHR" ) ); + vkGetRefreshCycleDurationGOOGLE = PFN_vkGetRefreshCycleDurationGOOGLE( vkGetInstanceProcAddr( instance, "vkGetRefreshCycleDurationGOOGLE" ) ); + vkGetRenderAreaGranularity = PFN_vkGetRenderAreaGranularity( vkGetInstanceProcAddr( instance, 
"vkGetRenderAreaGranularity" ) ); + vkGetSemaphoreCounterValueKHR = PFN_vkGetSemaphoreCounterValueKHR( vkGetInstanceProcAddr( instance, "vkGetSemaphoreCounterValueKHR" ) ); + vkGetSemaphoreCounterValue = PFN_vkGetSemaphoreCounterValue( vkGetInstanceProcAddr( instance, "vkGetSemaphoreCounterValue" ) ); + if ( !vkGetSemaphoreCounterValue ) vkGetSemaphoreCounterValue = vkGetSemaphoreCounterValueKHR; + vkGetSemaphoreFdKHR = PFN_vkGetSemaphoreFdKHR( vkGetInstanceProcAddr( instance, "vkGetSemaphoreFdKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetSemaphoreWin32HandleKHR = PFN_vkGetSemaphoreWin32HandleKHR( vkGetInstanceProcAddr( instance, "vkGetSemaphoreWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetShaderInfoAMD = PFN_vkGetShaderInfoAMD( vkGetInstanceProcAddr( instance, "vkGetShaderInfoAMD" ) ); + vkGetSwapchainCounterEXT = PFN_vkGetSwapchainCounterEXT( vkGetInstanceProcAddr( instance, "vkGetSwapchainCounterEXT" ) ); + vkGetSwapchainImagesKHR = PFN_vkGetSwapchainImagesKHR( vkGetInstanceProcAddr( instance, "vkGetSwapchainImagesKHR" ) ); + vkGetSwapchainStatusKHR = PFN_vkGetSwapchainStatusKHR( vkGetInstanceProcAddr( instance, "vkGetSwapchainStatusKHR" ) ); + vkGetValidationCacheDataEXT = PFN_vkGetValidationCacheDataEXT( vkGetInstanceProcAddr( instance, "vkGetValidationCacheDataEXT" ) ); + vkImportFenceFdKHR = PFN_vkImportFenceFdKHR( vkGetInstanceProcAddr( instance, "vkImportFenceFdKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkImportFenceWin32HandleKHR = PFN_vkImportFenceWin32HandleKHR( vkGetInstanceProcAddr( instance, "vkImportFenceWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkImportSemaphoreFdKHR = PFN_vkImportSemaphoreFdKHR( vkGetInstanceProcAddr( instance, "vkImportSemaphoreFdKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkImportSemaphoreWin32HandleKHR = PFN_vkImportSemaphoreWin32HandleKHR( vkGetInstanceProcAddr( instance, "vkImportSemaphoreWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkInitializePerformanceApiINTEL = PFN_vkInitializePerformanceApiINTEL( vkGetInstanceProcAddr( instance, "vkInitializePerformanceApiINTEL" ) ); + vkInvalidateMappedMemoryRanges = PFN_vkInvalidateMappedMemoryRanges( vkGetInstanceProcAddr( instance, "vkInvalidateMappedMemoryRanges" ) ); + vkMapMemory = PFN_vkMapMemory( vkGetInstanceProcAddr( instance, "vkMapMemory" ) ); + vkMergePipelineCaches = PFN_vkMergePipelineCaches( vkGetInstanceProcAddr( instance, "vkMergePipelineCaches" ) ); + vkMergeValidationCachesEXT = PFN_vkMergeValidationCachesEXT( vkGetInstanceProcAddr( instance, "vkMergeValidationCachesEXT" ) ); + vkQueueBeginDebugUtilsLabelEXT = PFN_vkQueueBeginDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkQueueBeginDebugUtilsLabelEXT" ) ); + vkQueueBindSparse = PFN_vkQueueBindSparse( vkGetInstanceProcAddr( instance, "vkQueueBindSparse" ) ); + vkQueueEndDebugUtilsLabelEXT = PFN_vkQueueEndDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkQueueEndDebugUtilsLabelEXT" ) ); + vkQueueInsertDebugUtilsLabelEXT = PFN_vkQueueInsertDebugUtilsLabelEXT( vkGetInstanceProcAddr( instance, "vkQueueInsertDebugUtilsLabelEXT" ) ); + vkQueuePresentKHR = PFN_vkQueuePresentKHR( vkGetInstanceProcAddr( instance, "vkQueuePresentKHR" ) ); + vkQueueSetPerformanceConfigurationINTEL = PFN_vkQueueSetPerformanceConfigurationINTEL( vkGetInstanceProcAddr( instance, "vkQueueSetPerformanceConfigurationINTEL" ) ); + vkQueueSubmit = PFN_vkQueueSubmit( vkGetInstanceProcAddr( instance, "vkQueueSubmit" ) ); + vkQueueWaitIdle = PFN_vkQueueWaitIdle( vkGetInstanceProcAddr( instance, 
"vkQueueWaitIdle" ) ); + vkRegisterDeviceEventEXT = PFN_vkRegisterDeviceEventEXT( vkGetInstanceProcAddr( instance, "vkRegisterDeviceEventEXT" ) ); + vkRegisterDisplayEventEXT = PFN_vkRegisterDisplayEventEXT( vkGetInstanceProcAddr( instance, "vkRegisterDisplayEventEXT" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkReleaseFullScreenExclusiveModeEXT = PFN_vkReleaseFullScreenExclusiveModeEXT( vkGetInstanceProcAddr( instance, "vkReleaseFullScreenExclusiveModeEXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkReleasePerformanceConfigurationINTEL = PFN_vkReleasePerformanceConfigurationINTEL( vkGetInstanceProcAddr( instance, "vkReleasePerformanceConfigurationINTEL" ) ); + vkReleaseProfilingLockKHR = PFN_vkReleaseProfilingLockKHR( vkGetInstanceProcAddr( instance, "vkReleaseProfilingLockKHR" ) ); + vkResetCommandBuffer = PFN_vkResetCommandBuffer( vkGetInstanceProcAddr( instance, "vkResetCommandBuffer" ) ); + vkResetCommandPool = PFN_vkResetCommandPool( vkGetInstanceProcAddr( instance, "vkResetCommandPool" ) ); + vkResetDescriptorPool = PFN_vkResetDescriptorPool( vkGetInstanceProcAddr( instance, "vkResetDescriptorPool" ) ); + vkResetEvent = PFN_vkResetEvent( vkGetInstanceProcAddr( instance, "vkResetEvent" ) ); + vkResetFences = PFN_vkResetFences( vkGetInstanceProcAddr( instance, "vkResetFences" ) ); + vkResetQueryPoolEXT = PFN_vkResetQueryPoolEXT( vkGetInstanceProcAddr( instance, "vkResetQueryPoolEXT" ) ); + vkResetQueryPool = PFN_vkResetQueryPool( vkGetInstanceProcAddr( instance, "vkResetQueryPool" ) ); + if ( !vkResetQueryPool ) vkResetQueryPool = vkResetQueryPoolEXT; + vkSetDebugUtilsObjectNameEXT = PFN_vkSetDebugUtilsObjectNameEXT( vkGetInstanceProcAddr( instance, "vkSetDebugUtilsObjectNameEXT" ) ); + vkSetDebugUtilsObjectTagEXT = PFN_vkSetDebugUtilsObjectTagEXT( vkGetInstanceProcAddr( instance, "vkSetDebugUtilsObjectTagEXT" ) ); + vkSetEvent = PFN_vkSetEvent( vkGetInstanceProcAddr( instance, "vkSetEvent" ) ); + vkSetHdrMetadataEXT = PFN_vkSetHdrMetadataEXT( vkGetInstanceProcAddr( instance, "vkSetHdrMetadataEXT" ) ); + vkSetLocalDimmingAMD = PFN_vkSetLocalDimmingAMD( vkGetInstanceProcAddr( instance, "vkSetLocalDimmingAMD" ) ); + vkSetPrivateDataEXT = PFN_vkSetPrivateDataEXT( vkGetInstanceProcAddr( instance, "vkSetPrivateDataEXT" ) ); + vkSignalSemaphoreKHR = PFN_vkSignalSemaphoreKHR( vkGetInstanceProcAddr( instance, "vkSignalSemaphoreKHR" ) ); + vkSignalSemaphore = PFN_vkSignalSemaphore( vkGetInstanceProcAddr( instance, "vkSignalSemaphore" ) ); + if ( !vkSignalSemaphore ) vkSignalSemaphore = vkSignalSemaphoreKHR; + vkTrimCommandPoolKHR = PFN_vkTrimCommandPoolKHR( vkGetInstanceProcAddr( instance, "vkTrimCommandPoolKHR" ) ); + vkTrimCommandPool = PFN_vkTrimCommandPool( vkGetInstanceProcAddr( instance, "vkTrimCommandPool" ) ); + if ( !vkTrimCommandPool ) vkTrimCommandPool = vkTrimCommandPoolKHR; + vkUninitializePerformanceApiINTEL = PFN_vkUninitializePerformanceApiINTEL( vkGetInstanceProcAddr( instance, "vkUninitializePerformanceApiINTEL" ) ); + vkUnmapMemory = PFN_vkUnmapMemory( vkGetInstanceProcAddr( instance, "vkUnmapMemory" ) ); + vkUpdateDescriptorSetWithTemplateKHR = PFN_vkUpdateDescriptorSetWithTemplateKHR( vkGetInstanceProcAddr( instance, "vkUpdateDescriptorSetWithTemplateKHR" ) ); + vkUpdateDescriptorSetWithTemplate = PFN_vkUpdateDescriptorSetWithTemplate( vkGetInstanceProcAddr( instance, "vkUpdateDescriptorSetWithTemplate" ) ); + if ( !vkUpdateDescriptorSetWithTemplate ) vkUpdateDescriptorSetWithTemplate = vkUpdateDescriptorSetWithTemplateKHR; + vkUpdateDescriptorSets = 
PFN_vkUpdateDescriptorSets( vkGetInstanceProcAddr( instance, "vkUpdateDescriptorSets" ) ); + vkWaitForFences = PFN_vkWaitForFences( vkGetInstanceProcAddr( instance, "vkWaitForFences" ) ); + vkWaitSemaphoresKHR = PFN_vkWaitSemaphoresKHR( vkGetInstanceProcAddr( instance, "vkWaitSemaphoresKHR" ) ); + vkWaitSemaphores = PFN_vkWaitSemaphores( vkGetInstanceProcAddr( instance, "vkWaitSemaphores" ) ); + if ( !vkWaitSemaphores ) vkWaitSemaphores = vkWaitSemaphoresKHR; + vkWriteAccelerationStructuresPropertiesKHR = PFN_vkWriteAccelerationStructuresPropertiesKHR( vkGetInstanceProcAddr( instance, "vkWriteAccelerationStructuresPropertiesKHR" ) ); + } + + void init( VULKAN_HPP_NAMESPACE::Device deviceCpp ) VULKAN_HPP_NOEXCEPT + { + VkDevice device = static_cast<VkDevice>( deviceCpp ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkAcquireFullScreenExclusiveModeEXT = PFN_vkAcquireFullScreenExclusiveModeEXT( vkGetDeviceProcAddr( device, "vkAcquireFullScreenExclusiveModeEXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkAcquireNextImage2KHR = PFN_vkAcquireNextImage2KHR( vkGetDeviceProcAddr( device, "vkAcquireNextImage2KHR" ) ); + vkAcquireNextImageKHR = PFN_vkAcquireNextImageKHR( vkGetDeviceProcAddr( device, "vkAcquireNextImageKHR" ) ); + vkAcquirePerformanceConfigurationINTEL = PFN_vkAcquirePerformanceConfigurationINTEL( vkGetDeviceProcAddr( device, "vkAcquirePerformanceConfigurationINTEL" ) ); + vkAcquireProfilingLockKHR = PFN_vkAcquireProfilingLockKHR( vkGetDeviceProcAddr( device, "vkAcquireProfilingLockKHR" ) ); + vkAllocateCommandBuffers = PFN_vkAllocateCommandBuffers( vkGetDeviceProcAddr( device, "vkAllocateCommandBuffers" ) ); + vkAllocateDescriptorSets = PFN_vkAllocateDescriptorSets( vkGetDeviceProcAddr( device, "vkAllocateDescriptorSets" ) ); + vkAllocateMemory = PFN_vkAllocateMemory( vkGetDeviceProcAddr( device, "vkAllocateMemory" ) ); + vkBeginCommandBuffer = PFN_vkBeginCommandBuffer( vkGetDeviceProcAddr( device, "vkBeginCommandBuffer" ) ); + vkBindAccelerationStructureMemoryNV = PFN_vkBindAccelerationStructureMemoryNV( vkGetDeviceProcAddr( device, "vkBindAccelerationStructureMemoryNV" ) ); + vkBindBufferMemory = PFN_vkBindBufferMemory( vkGetDeviceProcAddr( device, "vkBindBufferMemory" ) ); + vkBindBufferMemory2KHR = PFN_vkBindBufferMemory2KHR( vkGetDeviceProcAddr( device, "vkBindBufferMemory2KHR" ) ); + vkBindBufferMemory2 = PFN_vkBindBufferMemory2( vkGetDeviceProcAddr( device, "vkBindBufferMemory2" ) ); + if ( !vkBindBufferMemory2 ) vkBindBufferMemory2 = vkBindBufferMemory2KHR; + vkBindImageMemory = PFN_vkBindImageMemory( vkGetDeviceProcAddr( device, "vkBindImageMemory" ) ); + vkBindImageMemory2KHR = PFN_vkBindImageMemory2KHR( vkGetDeviceProcAddr( device, "vkBindImageMemory2KHR" ) ); + vkBindImageMemory2 = PFN_vkBindImageMemory2( vkGetDeviceProcAddr( device, "vkBindImageMemory2" ) ); + if ( !vkBindImageMemory2 ) vkBindImageMemory2 = vkBindImageMemory2KHR; + vkBuildAccelerationStructuresKHR = PFN_vkBuildAccelerationStructuresKHR( vkGetDeviceProcAddr( device, "vkBuildAccelerationStructuresKHR" ) ); + vkCmdBeginConditionalRenderingEXT = PFN_vkCmdBeginConditionalRenderingEXT( vkGetDeviceProcAddr( device, "vkCmdBeginConditionalRenderingEXT" ) ); + vkCmdBeginDebugUtilsLabelEXT = PFN_vkCmdBeginDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkCmdBeginDebugUtilsLabelEXT" ) ); + vkCmdBeginQuery = PFN_vkCmdBeginQuery( vkGetDeviceProcAddr( device, "vkCmdBeginQuery" ) ); + vkCmdBeginQueryIndexedEXT = PFN_vkCmdBeginQueryIndexedEXT( vkGetDeviceProcAddr( device, "vkCmdBeginQueryIndexedEXT" ) ); + vkCmdBeginRenderPass = 
PFN_vkCmdBeginRenderPass( vkGetDeviceProcAddr( device, "vkCmdBeginRenderPass" ) ); + vkCmdBeginRenderPass2KHR = PFN_vkCmdBeginRenderPass2KHR( vkGetDeviceProcAddr( device, "vkCmdBeginRenderPass2KHR" ) ); + vkCmdBeginRenderPass2 = PFN_vkCmdBeginRenderPass2( vkGetDeviceProcAddr( device, "vkCmdBeginRenderPass2" ) ); + if ( !vkCmdBeginRenderPass2 ) vkCmdBeginRenderPass2 = vkCmdBeginRenderPass2KHR; + vkCmdBeginTransformFeedbackEXT = PFN_vkCmdBeginTransformFeedbackEXT( vkGetDeviceProcAddr( device, "vkCmdBeginTransformFeedbackEXT" ) ); + vkCmdBindDescriptorSets = PFN_vkCmdBindDescriptorSets( vkGetDeviceProcAddr( device, "vkCmdBindDescriptorSets" ) ); + vkCmdBindIndexBuffer = PFN_vkCmdBindIndexBuffer( vkGetDeviceProcAddr( device, "vkCmdBindIndexBuffer" ) ); + vkCmdBindPipeline = PFN_vkCmdBindPipeline( vkGetDeviceProcAddr( device, "vkCmdBindPipeline" ) ); + vkCmdBindPipelineShaderGroupNV = PFN_vkCmdBindPipelineShaderGroupNV( vkGetDeviceProcAddr( device, "vkCmdBindPipelineShaderGroupNV" ) ); + vkCmdBindShadingRateImageNV = PFN_vkCmdBindShadingRateImageNV( vkGetDeviceProcAddr( device, "vkCmdBindShadingRateImageNV" ) ); + vkCmdBindTransformFeedbackBuffersEXT = PFN_vkCmdBindTransformFeedbackBuffersEXT( vkGetDeviceProcAddr( device, "vkCmdBindTransformFeedbackBuffersEXT" ) ); + vkCmdBindVertexBuffers = PFN_vkCmdBindVertexBuffers( vkGetDeviceProcAddr( device, "vkCmdBindVertexBuffers" ) ); + vkCmdBindVertexBuffers2EXT = PFN_vkCmdBindVertexBuffers2EXT( vkGetDeviceProcAddr( device, "vkCmdBindVertexBuffers2EXT" ) ); + vkCmdBlitImage = PFN_vkCmdBlitImage( vkGetDeviceProcAddr( device, "vkCmdBlitImage" ) ); + vkCmdBlitImage2KHR = PFN_vkCmdBlitImage2KHR( vkGetDeviceProcAddr( device, "vkCmdBlitImage2KHR" ) ); + vkCmdBuildAccelerationStructureNV = PFN_vkCmdBuildAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructureNV" ) ); + vkCmdBuildAccelerationStructuresIndirectKHR = PFN_vkCmdBuildAccelerationStructuresIndirectKHR( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructuresIndirectKHR" ) ); + vkCmdBuildAccelerationStructuresKHR = PFN_vkCmdBuildAccelerationStructuresKHR( vkGetDeviceProcAddr( device, "vkCmdBuildAccelerationStructuresKHR" ) ); + vkCmdClearAttachments = PFN_vkCmdClearAttachments( vkGetDeviceProcAddr( device, "vkCmdClearAttachments" ) ); + vkCmdClearColorImage = PFN_vkCmdClearColorImage( vkGetDeviceProcAddr( device, "vkCmdClearColorImage" ) ); + vkCmdClearDepthStencilImage = PFN_vkCmdClearDepthStencilImage( vkGetDeviceProcAddr( device, "vkCmdClearDepthStencilImage" ) ); + vkCmdCopyAccelerationStructureKHR = PFN_vkCmdCopyAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCmdCopyAccelerationStructureKHR" ) ); + vkCmdCopyAccelerationStructureNV = PFN_vkCmdCopyAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkCmdCopyAccelerationStructureNV" ) ); + vkCmdCopyAccelerationStructureToMemoryKHR = PFN_vkCmdCopyAccelerationStructureToMemoryKHR( vkGetDeviceProcAddr( device, "vkCmdCopyAccelerationStructureToMemoryKHR" ) ); + vkCmdCopyBuffer = PFN_vkCmdCopyBuffer( vkGetDeviceProcAddr( device, "vkCmdCopyBuffer" ) ); + vkCmdCopyBuffer2KHR = PFN_vkCmdCopyBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyBuffer2KHR" ) ); + vkCmdCopyBufferToImage = PFN_vkCmdCopyBufferToImage( vkGetDeviceProcAddr( device, "vkCmdCopyBufferToImage" ) ); + vkCmdCopyBufferToImage2KHR = PFN_vkCmdCopyBufferToImage2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyBufferToImage2KHR" ) ); + vkCmdCopyImage = PFN_vkCmdCopyImage( vkGetDeviceProcAddr( device, "vkCmdCopyImage" ) ); + vkCmdCopyImage2KHR = 
PFN_vkCmdCopyImage2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyImage2KHR" ) ); + vkCmdCopyImageToBuffer = PFN_vkCmdCopyImageToBuffer( vkGetDeviceProcAddr( device, "vkCmdCopyImageToBuffer" ) ); + vkCmdCopyImageToBuffer2KHR = PFN_vkCmdCopyImageToBuffer2KHR( vkGetDeviceProcAddr( device, "vkCmdCopyImageToBuffer2KHR" ) ); + vkCmdCopyMemoryToAccelerationStructureKHR = PFN_vkCmdCopyMemoryToAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCmdCopyMemoryToAccelerationStructureKHR" ) ); + vkCmdCopyQueryPoolResults = PFN_vkCmdCopyQueryPoolResults( vkGetDeviceProcAddr( device, "vkCmdCopyQueryPoolResults" ) ); + vkCmdDebugMarkerBeginEXT = PFN_vkCmdDebugMarkerBeginEXT( vkGetDeviceProcAddr( device, "vkCmdDebugMarkerBeginEXT" ) ); + vkCmdDebugMarkerEndEXT = PFN_vkCmdDebugMarkerEndEXT( vkGetDeviceProcAddr( device, "vkCmdDebugMarkerEndEXT" ) ); + vkCmdDebugMarkerInsertEXT = PFN_vkCmdDebugMarkerInsertEXT( vkGetDeviceProcAddr( device, "vkCmdDebugMarkerInsertEXT" ) ); + vkCmdDispatch = PFN_vkCmdDispatch( vkGetDeviceProcAddr( device, "vkCmdDispatch" ) ); + vkCmdDispatchBaseKHR = PFN_vkCmdDispatchBaseKHR( vkGetDeviceProcAddr( device, "vkCmdDispatchBaseKHR" ) ); + vkCmdDispatchBase = PFN_vkCmdDispatchBase( vkGetDeviceProcAddr( device, "vkCmdDispatchBase" ) ); + if ( !vkCmdDispatchBase ) vkCmdDispatchBase = vkCmdDispatchBaseKHR; + vkCmdDispatchIndirect = PFN_vkCmdDispatchIndirect( vkGetDeviceProcAddr( device, "vkCmdDispatchIndirect" ) ); + vkCmdDraw = PFN_vkCmdDraw( vkGetDeviceProcAddr( device, "vkCmdDraw" ) ); + vkCmdDrawIndexed = PFN_vkCmdDrawIndexed( vkGetDeviceProcAddr( device, "vkCmdDrawIndexed" ) ); + vkCmdDrawIndexedIndirect = PFN_vkCmdDrawIndexedIndirect( vkGetDeviceProcAddr( device, "vkCmdDrawIndexedIndirect" ) ); + vkCmdDrawIndexedIndirectCountAMD = PFN_vkCmdDrawIndexedIndirectCountAMD( vkGetDeviceProcAddr( device, "vkCmdDrawIndexedIndirectCountAMD" ) ); + vkCmdDrawIndexedIndirectCountKHR = PFN_vkCmdDrawIndexedIndirectCountKHR( vkGetDeviceProcAddr( device, "vkCmdDrawIndexedIndirectCountKHR" ) ); + vkCmdDrawIndexedIndirectCount = PFN_vkCmdDrawIndexedIndirectCount( vkGetDeviceProcAddr( device, "vkCmdDrawIndexedIndirectCount" ) ); + if ( !vkCmdDrawIndexedIndirectCount ) vkCmdDrawIndexedIndirectCount = vkCmdDrawIndexedIndirectCountKHR; + if ( !vkCmdDrawIndexedIndirectCount ) vkCmdDrawIndexedIndirectCount = vkCmdDrawIndexedIndirectCountAMD; + vkCmdDrawIndirect = PFN_vkCmdDrawIndirect( vkGetDeviceProcAddr( device, "vkCmdDrawIndirect" ) ); + vkCmdDrawIndirectByteCountEXT = PFN_vkCmdDrawIndirectByteCountEXT( vkGetDeviceProcAddr( device, "vkCmdDrawIndirectByteCountEXT" ) ); + vkCmdDrawIndirectCountAMD = PFN_vkCmdDrawIndirectCountAMD( vkGetDeviceProcAddr( device, "vkCmdDrawIndirectCountAMD" ) ); + vkCmdDrawIndirectCountKHR = PFN_vkCmdDrawIndirectCountKHR( vkGetDeviceProcAddr( device, "vkCmdDrawIndirectCountKHR" ) ); + vkCmdDrawIndirectCount = PFN_vkCmdDrawIndirectCount( vkGetDeviceProcAddr( device, "vkCmdDrawIndirectCount" ) ); + if ( !vkCmdDrawIndirectCount ) vkCmdDrawIndirectCount = vkCmdDrawIndirectCountKHR; + if ( !vkCmdDrawIndirectCount ) vkCmdDrawIndirectCount = vkCmdDrawIndirectCountAMD; + vkCmdDrawMeshTasksIndirectCountNV = PFN_vkCmdDrawMeshTasksIndirectCountNV( vkGetDeviceProcAddr( device, "vkCmdDrawMeshTasksIndirectCountNV" ) ); + vkCmdDrawMeshTasksIndirectNV = PFN_vkCmdDrawMeshTasksIndirectNV( vkGetDeviceProcAddr( device, "vkCmdDrawMeshTasksIndirectNV" ) ); + vkCmdDrawMeshTasksNV = PFN_vkCmdDrawMeshTasksNV( vkGetDeviceProcAddr( device, "vkCmdDrawMeshTasksNV" ) ); + 
vkCmdEndConditionalRenderingEXT = PFN_vkCmdEndConditionalRenderingEXT( vkGetDeviceProcAddr( device, "vkCmdEndConditionalRenderingEXT" ) ); + vkCmdEndDebugUtilsLabelEXT = PFN_vkCmdEndDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkCmdEndDebugUtilsLabelEXT" ) ); + vkCmdEndQuery = PFN_vkCmdEndQuery( vkGetDeviceProcAddr( device, "vkCmdEndQuery" ) ); + vkCmdEndQueryIndexedEXT = PFN_vkCmdEndQueryIndexedEXT( vkGetDeviceProcAddr( device, "vkCmdEndQueryIndexedEXT" ) ); + vkCmdEndRenderPass = PFN_vkCmdEndRenderPass( vkGetDeviceProcAddr( device, "vkCmdEndRenderPass" ) ); + vkCmdEndRenderPass2KHR = PFN_vkCmdEndRenderPass2KHR( vkGetDeviceProcAddr( device, "vkCmdEndRenderPass2KHR" ) ); + vkCmdEndRenderPass2 = PFN_vkCmdEndRenderPass2( vkGetDeviceProcAddr( device, "vkCmdEndRenderPass2" ) ); + if ( !vkCmdEndRenderPass2 ) vkCmdEndRenderPass2 = vkCmdEndRenderPass2KHR; + vkCmdEndTransformFeedbackEXT = PFN_vkCmdEndTransformFeedbackEXT( vkGetDeviceProcAddr( device, "vkCmdEndTransformFeedbackEXT" ) ); + vkCmdExecuteCommands = PFN_vkCmdExecuteCommands( vkGetDeviceProcAddr( device, "vkCmdExecuteCommands" ) ); + vkCmdExecuteGeneratedCommandsNV = PFN_vkCmdExecuteGeneratedCommandsNV( vkGetDeviceProcAddr( device, "vkCmdExecuteGeneratedCommandsNV" ) ); + vkCmdFillBuffer = PFN_vkCmdFillBuffer( vkGetDeviceProcAddr( device, "vkCmdFillBuffer" ) ); + vkCmdInsertDebugUtilsLabelEXT = PFN_vkCmdInsertDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkCmdInsertDebugUtilsLabelEXT" ) ); + vkCmdNextSubpass = PFN_vkCmdNextSubpass( vkGetDeviceProcAddr( device, "vkCmdNextSubpass" ) ); + vkCmdNextSubpass2KHR = PFN_vkCmdNextSubpass2KHR( vkGetDeviceProcAddr( device, "vkCmdNextSubpass2KHR" ) ); + vkCmdNextSubpass2 = PFN_vkCmdNextSubpass2( vkGetDeviceProcAddr( device, "vkCmdNextSubpass2" ) ); + if ( !vkCmdNextSubpass2 ) vkCmdNextSubpass2 = vkCmdNextSubpass2KHR; + vkCmdPipelineBarrier = PFN_vkCmdPipelineBarrier( vkGetDeviceProcAddr( device, "vkCmdPipelineBarrier" ) ); + vkCmdPreprocessGeneratedCommandsNV = PFN_vkCmdPreprocessGeneratedCommandsNV( vkGetDeviceProcAddr( device, "vkCmdPreprocessGeneratedCommandsNV" ) ); + vkCmdPushConstants = PFN_vkCmdPushConstants( vkGetDeviceProcAddr( device, "vkCmdPushConstants" ) ); + vkCmdPushDescriptorSetKHR = PFN_vkCmdPushDescriptorSetKHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetKHR" ) ); + vkCmdPushDescriptorSetWithTemplateKHR = PFN_vkCmdPushDescriptorSetWithTemplateKHR( vkGetDeviceProcAddr( device, "vkCmdPushDescriptorSetWithTemplateKHR" ) ); + vkCmdResetEvent = PFN_vkCmdResetEvent( vkGetDeviceProcAddr( device, "vkCmdResetEvent" ) ); + vkCmdResetQueryPool = PFN_vkCmdResetQueryPool( vkGetDeviceProcAddr( device, "vkCmdResetQueryPool" ) ); + vkCmdResolveImage = PFN_vkCmdResolveImage( vkGetDeviceProcAddr( device, "vkCmdResolveImage" ) ); + vkCmdResolveImage2KHR = PFN_vkCmdResolveImage2KHR( vkGetDeviceProcAddr( device, "vkCmdResolveImage2KHR" ) ); + vkCmdSetBlendConstants = PFN_vkCmdSetBlendConstants( vkGetDeviceProcAddr( device, "vkCmdSetBlendConstants" ) ); + vkCmdSetCheckpointNV = PFN_vkCmdSetCheckpointNV( vkGetDeviceProcAddr( device, "vkCmdSetCheckpointNV" ) ); + vkCmdSetCoarseSampleOrderNV = PFN_vkCmdSetCoarseSampleOrderNV( vkGetDeviceProcAddr( device, "vkCmdSetCoarseSampleOrderNV" ) ); + vkCmdSetCullModeEXT = PFN_vkCmdSetCullModeEXT( vkGetDeviceProcAddr( device, "vkCmdSetCullModeEXT" ) ); + vkCmdSetDepthBias = PFN_vkCmdSetDepthBias( vkGetDeviceProcAddr( device, "vkCmdSetDepthBias" ) ); + vkCmdSetDepthBounds = PFN_vkCmdSetDepthBounds( vkGetDeviceProcAddr( device, "vkCmdSetDepthBounds" ) 
); + vkCmdSetDepthBoundsTestEnableEXT = PFN_vkCmdSetDepthBoundsTestEnableEXT( vkGetDeviceProcAddr( device, "vkCmdSetDepthBoundsTestEnableEXT" ) ); + vkCmdSetDepthCompareOpEXT = PFN_vkCmdSetDepthCompareOpEXT( vkGetDeviceProcAddr( device, "vkCmdSetDepthCompareOpEXT" ) ); + vkCmdSetDepthTestEnableEXT = PFN_vkCmdSetDepthTestEnableEXT( vkGetDeviceProcAddr( device, "vkCmdSetDepthTestEnableEXT" ) ); + vkCmdSetDepthWriteEnableEXT = PFN_vkCmdSetDepthWriteEnableEXT( vkGetDeviceProcAddr( device, "vkCmdSetDepthWriteEnableEXT" ) ); + vkCmdSetDeviceMaskKHR = PFN_vkCmdSetDeviceMaskKHR( vkGetDeviceProcAddr( device, "vkCmdSetDeviceMaskKHR" ) ); + vkCmdSetDeviceMask = PFN_vkCmdSetDeviceMask( vkGetDeviceProcAddr( device, "vkCmdSetDeviceMask" ) ); + if ( !vkCmdSetDeviceMask ) vkCmdSetDeviceMask = vkCmdSetDeviceMaskKHR; + vkCmdSetDiscardRectangleEXT = PFN_vkCmdSetDiscardRectangleEXT( vkGetDeviceProcAddr( device, "vkCmdSetDiscardRectangleEXT" ) ); + vkCmdSetEvent = PFN_vkCmdSetEvent( vkGetDeviceProcAddr( device, "vkCmdSetEvent" ) ); + vkCmdSetExclusiveScissorNV = PFN_vkCmdSetExclusiveScissorNV( vkGetDeviceProcAddr( device, "vkCmdSetExclusiveScissorNV" ) ); + vkCmdSetFragmentShadingRateEnumNV = PFN_vkCmdSetFragmentShadingRateEnumNV( vkGetDeviceProcAddr( device, "vkCmdSetFragmentShadingRateEnumNV" ) ); + vkCmdSetFragmentShadingRateKHR = PFN_vkCmdSetFragmentShadingRateKHR( vkGetDeviceProcAddr( device, "vkCmdSetFragmentShadingRateKHR" ) ); + vkCmdSetFrontFaceEXT = PFN_vkCmdSetFrontFaceEXT( vkGetDeviceProcAddr( device, "vkCmdSetFrontFaceEXT" ) ); + vkCmdSetLineStippleEXT = PFN_vkCmdSetLineStippleEXT( vkGetDeviceProcAddr( device, "vkCmdSetLineStippleEXT" ) ); + vkCmdSetLineWidth = PFN_vkCmdSetLineWidth( vkGetDeviceProcAddr( device, "vkCmdSetLineWidth" ) ); + vkCmdSetPerformanceMarkerINTEL = PFN_vkCmdSetPerformanceMarkerINTEL( vkGetDeviceProcAddr( device, "vkCmdSetPerformanceMarkerINTEL" ) ); + vkCmdSetPerformanceOverrideINTEL = PFN_vkCmdSetPerformanceOverrideINTEL( vkGetDeviceProcAddr( device, "vkCmdSetPerformanceOverrideINTEL" ) ); + vkCmdSetPerformanceStreamMarkerINTEL = PFN_vkCmdSetPerformanceStreamMarkerINTEL( vkGetDeviceProcAddr( device, "vkCmdSetPerformanceStreamMarkerINTEL" ) ); + vkCmdSetPrimitiveTopologyEXT = PFN_vkCmdSetPrimitiveTopologyEXT( vkGetDeviceProcAddr( device, "vkCmdSetPrimitiveTopologyEXT" ) ); + vkCmdSetRayTracingPipelineStackSizeKHR = PFN_vkCmdSetRayTracingPipelineStackSizeKHR( vkGetDeviceProcAddr( device, "vkCmdSetRayTracingPipelineStackSizeKHR" ) ); + vkCmdSetSampleLocationsEXT = PFN_vkCmdSetSampleLocationsEXT( vkGetDeviceProcAddr( device, "vkCmdSetSampleLocationsEXT" ) ); + vkCmdSetScissor = PFN_vkCmdSetScissor( vkGetDeviceProcAddr( device, "vkCmdSetScissor" ) ); + vkCmdSetScissorWithCountEXT = PFN_vkCmdSetScissorWithCountEXT( vkGetDeviceProcAddr( device, "vkCmdSetScissorWithCountEXT" ) ); + vkCmdSetStencilCompareMask = PFN_vkCmdSetStencilCompareMask( vkGetDeviceProcAddr( device, "vkCmdSetStencilCompareMask" ) ); + vkCmdSetStencilOpEXT = PFN_vkCmdSetStencilOpEXT( vkGetDeviceProcAddr( device, "vkCmdSetStencilOpEXT" ) ); + vkCmdSetStencilReference = PFN_vkCmdSetStencilReference( vkGetDeviceProcAddr( device, "vkCmdSetStencilReference" ) ); + vkCmdSetStencilTestEnableEXT = PFN_vkCmdSetStencilTestEnableEXT( vkGetDeviceProcAddr( device, "vkCmdSetStencilTestEnableEXT" ) ); + vkCmdSetStencilWriteMask = PFN_vkCmdSetStencilWriteMask( vkGetDeviceProcAddr( device, "vkCmdSetStencilWriteMask" ) ); + vkCmdSetViewport = PFN_vkCmdSetViewport( vkGetDeviceProcAddr( device, "vkCmdSetViewport" ) ); + 
vkCmdSetViewportShadingRatePaletteNV = PFN_vkCmdSetViewportShadingRatePaletteNV( vkGetDeviceProcAddr( device, "vkCmdSetViewportShadingRatePaletteNV" ) ); + vkCmdSetViewportWScalingNV = PFN_vkCmdSetViewportWScalingNV( vkGetDeviceProcAddr( device, "vkCmdSetViewportWScalingNV" ) ); + vkCmdSetViewportWithCountEXT = PFN_vkCmdSetViewportWithCountEXT( vkGetDeviceProcAddr( device, "vkCmdSetViewportWithCountEXT" ) ); + vkCmdTraceRaysIndirectKHR = PFN_vkCmdTraceRaysIndirectKHR( vkGetDeviceProcAddr( device, "vkCmdTraceRaysIndirectKHR" ) ); + vkCmdTraceRaysKHR = PFN_vkCmdTraceRaysKHR( vkGetDeviceProcAddr( device, "vkCmdTraceRaysKHR" ) ); + vkCmdTraceRaysNV = PFN_vkCmdTraceRaysNV( vkGetDeviceProcAddr( device, "vkCmdTraceRaysNV" ) ); + vkCmdUpdateBuffer = PFN_vkCmdUpdateBuffer( vkGetDeviceProcAddr( device, "vkCmdUpdateBuffer" ) ); + vkCmdWaitEvents = PFN_vkCmdWaitEvents( vkGetDeviceProcAddr( device, "vkCmdWaitEvents" ) ); + vkCmdWriteAccelerationStructuresPropertiesKHR = PFN_vkCmdWriteAccelerationStructuresPropertiesKHR( vkGetDeviceProcAddr( device, "vkCmdWriteAccelerationStructuresPropertiesKHR" ) ); + vkCmdWriteAccelerationStructuresPropertiesNV = PFN_vkCmdWriteAccelerationStructuresPropertiesNV( vkGetDeviceProcAddr( device, "vkCmdWriteAccelerationStructuresPropertiesNV" ) ); + vkCmdWriteBufferMarkerAMD = PFN_vkCmdWriteBufferMarkerAMD( vkGetDeviceProcAddr( device, "vkCmdWriteBufferMarkerAMD" ) ); + vkCmdWriteTimestamp = PFN_vkCmdWriteTimestamp( vkGetDeviceProcAddr( device, "vkCmdWriteTimestamp" ) ); + vkCompileDeferredNV = PFN_vkCompileDeferredNV( vkGetDeviceProcAddr( device, "vkCompileDeferredNV" ) ); + vkCopyAccelerationStructureKHR = PFN_vkCopyAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCopyAccelerationStructureKHR" ) ); + vkCopyAccelerationStructureToMemoryKHR = PFN_vkCopyAccelerationStructureToMemoryKHR( vkGetDeviceProcAddr( device, "vkCopyAccelerationStructureToMemoryKHR" ) ); + vkCopyMemoryToAccelerationStructureKHR = PFN_vkCopyMemoryToAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCopyMemoryToAccelerationStructureKHR" ) ); + vkCreateAccelerationStructureKHR = PFN_vkCreateAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkCreateAccelerationStructureKHR" ) ); + vkCreateAccelerationStructureNV = PFN_vkCreateAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkCreateAccelerationStructureNV" ) ); + vkCreateBuffer = PFN_vkCreateBuffer( vkGetDeviceProcAddr( device, "vkCreateBuffer" ) ); + vkCreateBufferView = PFN_vkCreateBufferView( vkGetDeviceProcAddr( device, "vkCreateBufferView" ) ); + vkCreateCommandPool = PFN_vkCreateCommandPool( vkGetDeviceProcAddr( device, "vkCreateCommandPool" ) ); + vkCreateComputePipelines = PFN_vkCreateComputePipelines( vkGetDeviceProcAddr( device, "vkCreateComputePipelines" ) ); + vkCreateDeferredOperationKHR = PFN_vkCreateDeferredOperationKHR( vkGetDeviceProcAddr( device, "vkCreateDeferredOperationKHR" ) ); + vkCreateDescriptorPool = PFN_vkCreateDescriptorPool( vkGetDeviceProcAddr( device, "vkCreateDescriptorPool" ) ); + vkCreateDescriptorSetLayout = PFN_vkCreateDescriptorSetLayout( vkGetDeviceProcAddr( device, "vkCreateDescriptorSetLayout" ) ); + vkCreateDescriptorUpdateTemplateKHR = PFN_vkCreateDescriptorUpdateTemplateKHR( vkGetDeviceProcAddr( device, "vkCreateDescriptorUpdateTemplateKHR" ) ); + vkCreateDescriptorUpdateTemplate = PFN_vkCreateDescriptorUpdateTemplate( vkGetDeviceProcAddr( device, "vkCreateDescriptorUpdateTemplate" ) ); + if ( !vkCreateDescriptorUpdateTemplate ) vkCreateDescriptorUpdateTemplate = 
vkCreateDescriptorUpdateTemplateKHR; + vkCreateEvent = PFN_vkCreateEvent( vkGetDeviceProcAddr( device, "vkCreateEvent" ) ); + vkCreateFence = PFN_vkCreateFence( vkGetDeviceProcAddr( device, "vkCreateFence" ) ); + vkCreateFramebuffer = PFN_vkCreateFramebuffer( vkGetDeviceProcAddr( device, "vkCreateFramebuffer" ) ); + vkCreateGraphicsPipelines = PFN_vkCreateGraphicsPipelines( vkGetDeviceProcAddr( device, "vkCreateGraphicsPipelines" ) ); + vkCreateImage = PFN_vkCreateImage( vkGetDeviceProcAddr( device, "vkCreateImage" ) ); + vkCreateImageView = PFN_vkCreateImageView( vkGetDeviceProcAddr( device, "vkCreateImageView" ) ); + vkCreateIndirectCommandsLayoutNV = PFN_vkCreateIndirectCommandsLayoutNV( vkGetDeviceProcAddr( device, "vkCreateIndirectCommandsLayoutNV" ) ); + vkCreatePipelineCache = PFN_vkCreatePipelineCache( vkGetDeviceProcAddr( device, "vkCreatePipelineCache" ) ); + vkCreatePipelineLayout = PFN_vkCreatePipelineLayout( vkGetDeviceProcAddr( device, "vkCreatePipelineLayout" ) ); + vkCreatePrivateDataSlotEXT = PFN_vkCreatePrivateDataSlotEXT( vkGetDeviceProcAddr( device, "vkCreatePrivateDataSlotEXT" ) ); + vkCreateQueryPool = PFN_vkCreateQueryPool( vkGetDeviceProcAddr( device, "vkCreateQueryPool" ) ); + vkCreateRayTracingPipelinesKHR = PFN_vkCreateRayTracingPipelinesKHR( vkGetDeviceProcAddr( device, "vkCreateRayTracingPipelinesKHR" ) ); + vkCreateRayTracingPipelinesNV = PFN_vkCreateRayTracingPipelinesNV( vkGetDeviceProcAddr( device, "vkCreateRayTracingPipelinesNV" ) ); + vkCreateRenderPass = PFN_vkCreateRenderPass( vkGetDeviceProcAddr( device, "vkCreateRenderPass" ) ); + vkCreateRenderPass2KHR = PFN_vkCreateRenderPass2KHR( vkGetDeviceProcAddr( device, "vkCreateRenderPass2KHR" ) ); + vkCreateRenderPass2 = PFN_vkCreateRenderPass2( vkGetDeviceProcAddr( device, "vkCreateRenderPass2" ) ); + if ( !vkCreateRenderPass2 ) vkCreateRenderPass2 = vkCreateRenderPass2KHR; + vkCreateSampler = PFN_vkCreateSampler( vkGetDeviceProcAddr( device, "vkCreateSampler" ) ); + vkCreateSamplerYcbcrConversionKHR = PFN_vkCreateSamplerYcbcrConversionKHR( vkGetDeviceProcAddr( device, "vkCreateSamplerYcbcrConversionKHR" ) ); + vkCreateSamplerYcbcrConversion = PFN_vkCreateSamplerYcbcrConversion( vkGetDeviceProcAddr( device, "vkCreateSamplerYcbcrConversion" ) ); + if ( !vkCreateSamplerYcbcrConversion ) vkCreateSamplerYcbcrConversion = vkCreateSamplerYcbcrConversionKHR; + vkCreateSemaphore = PFN_vkCreateSemaphore( vkGetDeviceProcAddr( device, "vkCreateSemaphore" ) ); + vkCreateShaderModule = PFN_vkCreateShaderModule( vkGetDeviceProcAddr( device, "vkCreateShaderModule" ) ); + vkCreateSharedSwapchainsKHR = PFN_vkCreateSharedSwapchainsKHR( vkGetDeviceProcAddr( device, "vkCreateSharedSwapchainsKHR" ) ); + vkCreateSwapchainKHR = PFN_vkCreateSwapchainKHR( vkGetDeviceProcAddr( device, "vkCreateSwapchainKHR" ) ); + vkCreateValidationCacheEXT = PFN_vkCreateValidationCacheEXT( vkGetDeviceProcAddr( device, "vkCreateValidationCacheEXT" ) ); + vkDebugMarkerSetObjectNameEXT = PFN_vkDebugMarkerSetObjectNameEXT( vkGetDeviceProcAddr( device, "vkDebugMarkerSetObjectNameEXT" ) ); + vkDebugMarkerSetObjectTagEXT = PFN_vkDebugMarkerSetObjectTagEXT( vkGetDeviceProcAddr( device, "vkDebugMarkerSetObjectTagEXT" ) ); + vkDeferredOperationJoinKHR = PFN_vkDeferredOperationJoinKHR( vkGetDeviceProcAddr( device, "vkDeferredOperationJoinKHR" ) ); + vkDestroyAccelerationStructureKHR = PFN_vkDestroyAccelerationStructureKHR( vkGetDeviceProcAddr( device, "vkDestroyAccelerationStructureKHR" ) ); + vkDestroyAccelerationStructureNV = 
PFN_vkDestroyAccelerationStructureNV( vkGetDeviceProcAddr( device, "vkDestroyAccelerationStructureNV" ) ); + vkDestroyBuffer = PFN_vkDestroyBuffer( vkGetDeviceProcAddr( device, "vkDestroyBuffer" ) ); + vkDestroyBufferView = PFN_vkDestroyBufferView( vkGetDeviceProcAddr( device, "vkDestroyBufferView" ) ); + vkDestroyCommandPool = PFN_vkDestroyCommandPool( vkGetDeviceProcAddr( device, "vkDestroyCommandPool" ) ); + vkDestroyDeferredOperationKHR = PFN_vkDestroyDeferredOperationKHR( vkGetDeviceProcAddr( device, "vkDestroyDeferredOperationKHR" ) ); + vkDestroyDescriptorPool = PFN_vkDestroyDescriptorPool( vkGetDeviceProcAddr( device, "vkDestroyDescriptorPool" ) ); + vkDestroyDescriptorSetLayout = PFN_vkDestroyDescriptorSetLayout( vkGetDeviceProcAddr( device, "vkDestroyDescriptorSetLayout" ) ); + vkDestroyDescriptorUpdateTemplateKHR = PFN_vkDestroyDescriptorUpdateTemplateKHR( vkGetDeviceProcAddr( device, "vkDestroyDescriptorUpdateTemplateKHR" ) ); + vkDestroyDescriptorUpdateTemplate = PFN_vkDestroyDescriptorUpdateTemplate( vkGetDeviceProcAddr( device, "vkDestroyDescriptorUpdateTemplate" ) ); + if ( !vkDestroyDescriptorUpdateTemplate ) vkDestroyDescriptorUpdateTemplate = vkDestroyDescriptorUpdateTemplateKHR; + vkDestroyDevice = PFN_vkDestroyDevice( vkGetDeviceProcAddr( device, "vkDestroyDevice" ) ); + vkDestroyEvent = PFN_vkDestroyEvent( vkGetDeviceProcAddr( device, "vkDestroyEvent" ) ); + vkDestroyFence = PFN_vkDestroyFence( vkGetDeviceProcAddr( device, "vkDestroyFence" ) ); + vkDestroyFramebuffer = PFN_vkDestroyFramebuffer( vkGetDeviceProcAddr( device, "vkDestroyFramebuffer" ) ); + vkDestroyImage = PFN_vkDestroyImage( vkGetDeviceProcAddr( device, "vkDestroyImage" ) ); + vkDestroyImageView = PFN_vkDestroyImageView( vkGetDeviceProcAddr( device, "vkDestroyImageView" ) ); + vkDestroyIndirectCommandsLayoutNV = PFN_vkDestroyIndirectCommandsLayoutNV( vkGetDeviceProcAddr( device, "vkDestroyIndirectCommandsLayoutNV" ) ); + vkDestroyPipeline = PFN_vkDestroyPipeline( vkGetDeviceProcAddr( device, "vkDestroyPipeline" ) ); + vkDestroyPipelineCache = PFN_vkDestroyPipelineCache( vkGetDeviceProcAddr( device, "vkDestroyPipelineCache" ) ); + vkDestroyPipelineLayout = PFN_vkDestroyPipelineLayout( vkGetDeviceProcAddr( device, "vkDestroyPipelineLayout" ) ); + vkDestroyPrivateDataSlotEXT = PFN_vkDestroyPrivateDataSlotEXT( vkGetDeviceProcAddr( device, "vkDestroyPrivateDataSlotEXT" ) ); + vkDestroyQueryPool = PFN_vkDestroyQueryPool( vkGetDeviceProcAddr( device, "vkDestroyQueryPool" ) ); + vkDestroyRenderPass = PFN_vkDestroyRenderPass( vkGetDeviceProcAddr( device, "vkDestroyRenderPass" ) ); + vkDestroySampler = PFN_vkDestroySampler( vkGetDeviceProcAddr( device, "vkDestroySampler" ) ); + vkDestroySamplerYcbcrConversionKHR = PFN_vkDestroySamplerYcbcrConversionKHR( vkGetDeviceProcAddr( device, "vkDestroySamplerYcbcrConversionKHR" ) ); + vkDestroySamplerYcbcrConversion = PFN_vkDestroySamplerYcbcrConversion( vkGetDeviceProcAddr( device, "vkDestroySamplerYcbcrConversion" ) ); + if ( !vkDestroySamplerYcbcrConversion ) vkDestroySamplerYcbcrConversion = vkDestroySamplerYcbcrConversionKHR; + vkDestroySemaphore = PFN_vkDestroySemaphore( vkGetDeviceProcAddr( device, "vkDestroySemaphore" ) ); + vkDestroyShaderModule = PFN_vkDestroyShaderModule( vkGetDeviceProcAddr( device, "vkDestroyShaderModule" ) ); + vkDestroySwapchainKHR = PFN_vkDestroySwapchainKHR( vkGetDeviceProcAddr( device, "vkDestroySwapchainKHR" ) ); + vkDestroyValidationCacheEXT = PFN_vkDestroyValidationCacheEXT( vkGetDeviceProcAddr( device, "vkDestroyValidationCacheEXT" ) 
); + vkDeviceWaitIdle = PFN_vkDeviceWaitIdle( vkGetDeviceProcAddr( device, "vkDeviceWaitIdle" ) ); + vkDisplayPowerControlEXT = PFN_vkDisplayPowerControlEXT( vkGetDeviceProcAddr( device, "vkDisplayPowerControlEXT" ) ); + vkEndCommandBuffer = PFN_vkEndCommandBuffer( vkGetDeviceProcAddr( device, "vkEndCommandBuffer" ) ); + vkFlushMappedMemoryRanges = PFN_vkFlushMappedMemoryRanges( vkGetDeviceProcAddr( device, "vkFlushMappedMemoryRanges" ) ); + vkFreeCommandBuffers = PFN_vkFreeCommandBuffers( vkGetDeviceProcAddr( device, "vkFreeCommandBuffers" ) ); + vkFreeDescriptorSets = PFN_vkFreeDescriptorSets( vkGetDeviceProcAddr( device, "vkFreeDescriptorSets" ) ); + vkFreeMemory = PFN_vkFreeMemory( vkGetDeviceProcAddr( device, "vkFreeMemory" ) ); + vkGetAccelerationStructureBuildSizesKHR = PFN_vkGetAccelerationStructureBuildSizesKHR( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureBuildSizesKHR" ) ); + vkGetAccelerationStructureDeviceAddressKHR = PFN_vkGetAccelerationStructureDeviceAddressKHR( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureDeviceAddressKHR" ) ); + vkGetAccelerationStructureHandleNV = PFN_vkGetAccelerationStructureHandleNV( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureHandleNV" ) ); + vkGetAccelerationStructureMemoryRequirementsNV = PFN_vkGetAccelerationStructureMemoryRequirementsNV( vkGetDeviceProcAddr( device, "vkGetAccelerationStructureMemoryRequirementsNV" ) ); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + vkGetAndroidHardwareBufferPropertiesANDROID = PFN_vkGetAndroidHardwareBufferPropertiesANDROID( vkGetDeviceProcAddr( device, "vkGetAndroidHardwareBufferPropertiesANDROID" ) ); +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + vkGetBufferDeviceAddressEXT = PFN_vkGetBufferDeviceAddressEXT( vkGetDeviceProcAddr( device, "vkGetBufferDeviceAddressEXT" ) ); + vkGetBufferDeviceAddressKHR = PFN_vkGetBufferDeviceAddressKHR( vkGetDeviceProcAddr( device, "vkGetBufferDeviceAddressKHR" ) ); + vkGetBufferDeviceAddress = PFN_vkGetBufferDeviceAddress( vkGetDeviceProcAddr( device, "vkGetBufferDeviceAddress" ) ); + if ( !vkGetBufferDeviceAddress ) vkGetBufferDeviceAddress = vkGetBufferDeviceAddressKHR; + if ( !vkGetBufferDeviceAddress ) vkGetBufferDeviceAddress = vkGetBufferDeviceAddressEXT; + vkGetBufferMemoryRequirements = PFN_vkGetBufferMemoryRequirements( vkGetDeviceProcAddr( device, "vkGetBufferMemoryRequirements" ) ); + vkGetBufferMemoryRequirements2KHR = PFN_vkGetBufferMemoryRequirements2KHR( vkGetDeviceProcAddr( device, "vkGetBufferMemoryRequirements2KHR" ) ); + vkGetBufferMemoryRequirements2 = PFN_vkGetBufferMemoryRequirements2( vkGetDeviceProcAddr( device, "vkGetBufferMemoryRequirements2" ) ); + if ( !vkGetBufferMemoryRequirements2 ) vkGetBufferMemoryRequirements2 = vkGetBufferMemoryRequirements2KHR; + vkGetBufferOpaqueCaptureAddressKHR = PFN_vkGetBufferOpaqueCaptureAddressKHR( vkGetDeviceProcAddr( device, "vkGetBufferOpaqueCaptureAddressKHR" ) ); + vkGetBufferOpaqueCaptureAddress = PFN_vkGetBufferOpaqueCaptureAddress( vkGetDeviceProcAddr( device, "vkGetBufferOpaqueCaptureAddress" ) ); + if ( !vkGetBufferOpaqueCaptureAddress ) vkGetBufferOpaqueCaptureAddress = vkGetBufferOpaqueCaptureAddressKHR; + vkGetCalibratedTimestampsEXT = PFN_vkGetCalibratedTimestampsEXT( vkGetDeviceProcAddr( device, "vkGetCalibratedTimestampsEXT" ) ); + vkGetDeferredOperationMaxConcurrencyKHR = PFN_vkGetDeferredOperationMaxConcurrencyKHR( vkGetDeviceProcAddr( device, "vkGetDeferredOperationMaxConcurrencyKHR" ) ); + vkGetDeferredOperationResultKHR = PFN_vkGetDeferredOperationResultKHR( vkGetDeviceProcAddr( 
device, "vkGetDeferredOperationResultKHR" ) ); + vkGetDescriptorSetLayoutSupportKHR = PFN_vkGetDescriptorSetLayoutSupportKHR( vkGetDeviceProcAddr( device, "vkGetDescriptorSetLayoutSupportKHR" ) ); + vkGetDescriptorSetLayoutSupport = PFN_vkGetDescriptorSetLayoutSupport( vkGetDeviceProcAddr( device, "vkGetDescriptorSetLayoutSupport" ) ); + if ( !vkGetDescriptorSetLayoutSupport ) vkGetDescriptorSetLayoutSupport = vkGetDescriptorSetLayoutSupportKHR; + vkGetDeviceAccelerationStructureCompatibilityKHR = PFN_vkGetDeviceAccelerationStructureCompatibilityKHR( vkGetDeviceProcAddr( device, "vkGetDeviceAccelerationStructureCompatibilityKHR" ) ); + vkGetDeviceGroupPeerMemoryFeaturesKHR = PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR( vkGetDeviceProcAddr( device, "vkGetDeviceGroupPeerMemoryFeaturesKHR" ) ); + vkGetDeviceGroupPeerMemoryFeatures = PFN_vkGetDeviceGroupPeerMemoryFeatures( vkGetDeviceProcAddr( device, "vkGetDeviceGroupPeerMemoryFeatures" ) ); + if ( !vkGetDeviceGroupPeerMemoryFeatures ) vkGetDeviceGroupPeerMemoryFeatures = vkGetDeviceGroupPeerMemoryFeaturesKHR; + vkGetDeviceGroupPresentCapabilitiesKHR = PFN_vkGetDeviceGroupPresentCapabilitiesKHR( vkGetDeviceProcAddr( device, "vkGetDeviceGroupPresentCapabilitiesKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetDeviceGroupSurfacePresentModes2EXT = PFN_vkGetDeviceGroupSurfacePresentModes2EXT( vkGetDeviceProcAddr( device, "vkGetDeviceGroupSurfacePresentModes2EXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetDeviceGroupSurfacePresentModesKHR = PFN_vkGetDeviceGroupSurfacePresentModesKHR( vkGetDeviceProcAddr( device, "vkGetDeviceGroupSurfacePresentModesKHR" ) ); + vkGetDeviceMemoryCommitment = PFN_vkGetDeviceMemoryCommitment( vkGetDeviceProcAddr( device, "vkGetDeviceMemoryCommitment" ) ); + vkGetDeviceMemoryOpaqueCaptureAddressKHR = PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR( vkGetDeviceProcAddr( device, "vkGetDeviceMemoryOpaqueCaptureAddressKHR" ) ); + vkGetDeviceMemoryOpaqueCaptureAddress = PFN_vkGetDeviceMemoryOpaqueCaptureAddress( vkGetDeviceProcAddr( device, "vkGetDeviceMemoryOpaqueCaptureAddress" ) ); + if ( !vkGetDeviceMemoryOpaqueCaptureAddress ) vkGetDeviceMemoryOpaqueCaptureAddress = vkGetDeviceMemoryOpaqueCaptureAddressKHR; + vkGetDeviceProcAddr = PFN_vkGetDeviceProcAddr( vkGetDeviceProcAddr( device, "vkGetDeviceProcAddr" ) ); + vkGetDeviceQueue = PFN_vkGetDeviceQueue( vkGetDeviceProcAddr( device, "vkGetDeviceQueue" ) ); + vkGetDeviceQueue2 = PFN_vkGetDeviceQueue2( vkGetDeviceProcAddr( device, "vkGetDeviceQueue2" ) ); + vkGetEventStatus = PFN_vkGetEventStatus( vkGetDeviceProcAddr( device, "vkGetEventStatus" ) ); + vkGetFenceFdKHR = PFN_vkGetFenceFdKHR( vkGetDeviceProcAddr( device, "vkGetFenceFdKHR" ) ); + vkGetFenceStatus = PFN_vkGetFenceStatus( vkGetDeviceProcAddr( device, "vkGetFenceStatus" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetFenceWin32HandleKHR = PFN_vkGetFenceWin32HandleKHR( vkGetDeviceProcAddr( device, "vkGetFenceWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetGeneratedCommandsMemoryRequirementsNV = PFN_vkGetGeneratedCommandsMemoryRequirementsNV( vkGetDeviceProcAddr( device, "vkGetGeneratedCommandsMemoryRequirementsNV" ) ); + vkGetImageDrmFormatModifierPropertiesEXT = PFN_vkGetImageDrmFormatModifierPropertiesEXT( vkGetDeviceProcAddr( device, "vkGetImageDrmFormatModifierPropertiesEXT" ) ); + vkGetImageMemoryRequirements = PFN_vkGetImageMemoryRequirements( vkGetDeviceProcAddr( device, "vkGetImageMemoryRequirements" ) ); + vkGetImageMemoryRequirements2KHR = PFN_vkGetImageMemoryRequirements2KHR( 
vkGetDeviceProcAddr( device, "vkGetImageMemoryRequirements2KHR" ) ); + vkGetImageMemoryRequirements2 = PFN_vkGetImageMemoryRequirements2( vkGetDeviceProcAddr( device, "vkGetImageMemoryRequirements2" ) ); + if ( !vkGetImageMemoryRequirements2 ) vkGetImageMemoryRequirements2 = vkGetImageMemoryRequirements2KHR; + vkGetImageSparseMemoryRequirements = PFN_vkGetImageSparseMemoryRequirements( vkGetDeviceProcAddr( device, "vkGetImageSparseMemoryRequirements" ) ); + vkGetImageSparseMemoryRequirements2KHR = PFN_vkGetImageSparseMemoryRequirements2KHR( vkGetDeviceProcAddr( device, "vkGetImageSparseMemoryRequirements2KHR" ) ); + vkGetImageSparseMemoryRequirements2 = PFN_vkGetImageSparseMemoryRequirements2( vkGetDeviceProcAddr( device, "vkGetImageSparseMemoryRequirements2" ) ); + if ( !vkGetImageSparseMemoryRequirements2 ) vkGetImageSparseMemoryRequirements2 = vkGetImageSparseMemoryRequirements2KHR; + vkGetImageSubresourceLayout = PFN_vkGetImageSubresourceLayout( vkGetDeviceProcAddr( device, "vkGetImageSubresourceLayout" ) ); + vkGetImageViewAddressNVX = PFN_vkGetImageViewAddressNVX( vkGetDeviceProcAddr( device, "vkGetImageViewAddressNVX" ) ); + vkGetImageViewHandleNVX = PFN_vkGetImageViewHandleNVX( vkGetDeviceProcAddr( device, "vkGetImageViewHandleNVX" ) ); +#ifdef VK_USE_PLATFORM_ANDROID_KHR + vkGetMemoryAndroidHardwareBufferANDROID = PFN_vkGetMemoryAndroidHardwareBufferANDROID( vkGetDeviceProcAddr( device, "vkGetMemoryAndroidHardwareBufferANDROID" ) ); +#endif /*VK_USE_PLATFORM_ANDROID_KHR*/ + vkGetMemoryFdKHR = PFN_vkGetMemoryFdKHR( vkGetDeviceProcAddr( device, "vkGetMemoryFdKHR" ) ); + vkGetMemoryFdPropertiesKHR = PFN_vkGetMemoryFdPropertiesKHR( vkGetDeviceProcAddr( device, "vkGetMemoryFdPropertiesKHR" ) ); + vkGetMemoryHostPointerPropertiesEXT = PFN_vkGetMemoryHostPointerPropertiesEXT( vkGetDeviceProcAddr( device, "vkGetMemoryHostPointerPropertiesEXT" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandleKHR = PFN_vkGetMemoryWin32HandleKHR( vkGetDeviceProcAddr( device, "vkGetMemoryWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandleNV = PFN_vkGetMemoryWin32HandleNV( vkGetDeviceProcAddr( device, "vkGetMemoryWin32HandleNV" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetMemoryWin32HandlePropertiesKHR = PFN_vkGetMemoryWin32HandlePropertiesKHR( vkGetDeviceProcAddr( device, "vkGetMemoryWin32HandlePropertiesKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetPastPresentationTimingGOOGLE = PFN_vkGetPastPresentationTimingGOOGLE( vkGetDeviceProcAddr( device, "vkGetPastPresentationTimingGOOGLE" ) ); + vkGetPerformanceParameterINTEL = PFN_vkGetPerformanceParameterINTEL( vkGetDeviceProcAddr( device, "vkGetPerformanceParameterINTEL" ) ); + vkGetPipelineCacheData = PFN_vkGetPipelineCacheData( vkGetDeviceProcAddr( device, "vkGetPipelineCacheData" ) ); + vkGetPipelineExecutableInternalRepresentationsKHR = PFN_vkGetPipelineExecutableInternalRepresentationsKHR( vkGetDeviceProcAddr( device, "vkGetPipelineExecutableInternalRepresentationsKHR" ) ); + vkGetPipelineExecutablePropertiesKHR = PFN_vkGetPipelineExecutablePropertiesKHR( vkGetDeviceProcAddr( device, "vkGetPipelineExecutablePropertiesKHR" ) ); + vkGetPipelineExecutableStatisticsKHR = PFN_vkGetPipelineExecutableStatisticsKHR( vkGetDeviceProcAddr( device, "vkGetPipelineExecutableStatisticsKHR" ) ); + vkGetPrivateDataEXT = PFN_vkGetPrivateDataEXT( vkGetDeviceProcAddr( device, "vkGetPrivateDataEXT" ) ); + vkGetQueryPoolResults = 
PFN_vkGetQueryPoolResults( vkGetDeviceProcAddr( device, "vkGetQueryPoolResults" ) ); + vkGetQueueCheckpointDataNV = PFN_vkGetQueueCheckpointDataNV( vkGetDeviceProcAddr( device, "vkGetQueueCheckpointDataNV" ) ); + vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( vkGetDeviceProcAddr( device, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR" ) ); + vkGetRayTracingShaderGroupHandlesNV = PFN_vkGetRayTracingShaderGroupHandlesNV( vkGetDeviceProcAddr( device, "vkGetRayTracingShaderGroupHandlesNV" ) ); + vkGetRayTracingShaderGroupHandlesKHR = PFN_vkGetRayTracingShaderGroupHandlesKHR( vkGetDeviceProcAddr( device, "vkGetRayTracingShaderGroupHandlesKHR" ) ); + if ( !vkGetRayTracingShaderGroupHandlesKHR ) vkGetRayTracingShaderGroupHandlesKHR = vkGetRayTracingShaderGroupHandlesNV; + vkGetRayTracingShaderGroupStackSizeKHR = PFN_vkGetRayTracingShaderGroupStackSizeKHR( vkGetDeviceProcAddr( device, "vkGetRayTracingShaderGroupStackSizeKHR" ) ); + vkGetRefreshCycleDurationGOOGLE = PFN_vkGetRefreshCycleDurationGOOGLE( vkGetDeviceProcAddr( device, "vkGetRefreshCycleDurationGOOGLE" ) ); + vkGetRenderAreaGranularity = PFN_vkGetRenderAreaGranularity( vkGetDeviceProcAddr( device, "vkGetRenderAreaGranularity" ) ); + vkGetSemaphoreCounterValueKHR = PFN_vkGetSemaphoreCounterValueKHR( vkGetDeviceProcAddr( device, "vkGetSemaphoreCounterValueKHR" ) ); + vkGetSemaphoreCounterValue = PFN_vkGetSemaphoreCounterValue( vkGetDeviceProcAddr( device, "vkGetSemaphoreCounterValue" ) ); + if ( !vkGetSemaphoreCounterValue ) vkGetSemaphoreCounterValue = vkGetSemaphoreCounterValueKHR; + vkGetSemaphoreFdKHR = PFN_vkGetSemaphoreFdKHR( vkGetDeviceProcAddr( device, "vkGetSemaphoreFdKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkGetSemaphoreWin32HandleKHR = PFN_vkGetSemaphoreWin32HandleKHR( vkGetDeviceProcAddr( device, "vkGetSemaphoreWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkGetShaderInfoAMD = PFN_vkGetShaderInfoAMD( vkGetDeviceProcAddr( device, "vkGetShaderInfoAMD" ) ); + vkGetSwapchainCounterEXT = PFN_vkGetSwapchainCounterEXT( vkGetDeviceProcAddr( device, "vkGetSwapchainCounterEXT" ) ); + vkGetSwapchainImagesKHR = PFN_vkGetSwapchainImagesKHR( vkGetDeviceProcAddr( device, "vkGetSwapchainImagesKHR" ) ); + vkGetSwapchainStatusKHR = PFN_vkGetSwapchainStatusKHR( vkGetDeviceProcAddr( device, "vkGetSwapchainStatusKHR" ) ); + vkGetValidationCacheDataEXT = PFN_vkGetValidationCacheDataEXT( vkGetDeviceProcAddr( device, "vkGetValidationCacheDataEXT" ) ); + vkImportFenceFdKHR = PFN_vkImportFenceFdKHR( vkGetDeviceProcAddr( device, "vkImportFenceFdKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkImportFenceWin32HandleKHR = PFN_vkImportFenceWin32HandleKHR( vkGetDeviceProcAddr( device, "vkImportFenceWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkImportSemaphoreFdKHR = PFN_vkImportSemaphoreFdKHR( vkGetDeviceProcAddr( device, "vkImportSemaphoreFdKHR" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkImportSemaphoreWin32HandleKHR = PFN_vkImportSemaphoreWin32HandleKHR( vkGetDeviceProcAddr( device, "vkImportSemaphoreWin32HandleKHR" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkInitializePerformanceApiINTEL = PFN_vkInitializePerformanceApiINTEL( vkGetDeviceProcAddr( device, "vkInitializePerformanceApiINTEL" ) ); + vkInvalidateMappedMemoryRanges = PFN_vkInvalidateMappedMemoryRanges( vkGetDeviceProcAddr( device, "vkInvalidateMappedMemoryRanges" ) ); + vkMapMemory = PFN_vkMapMemory( vkGetDeviceProcAddr( device, "vkMapMemory" ) ); + vkMergePipelineCaches = 
PFN_vkMergePipelineCaches( vkGetDeviceProcAddr( device, "vkMergePipelineCaches" ) ); + vkMergeValidationCachesEXT = PFN_vkMergeValidationCachesEXT( vkGetDeviceProcAddr( device, "vkMergeValidationCachesEXT" ) ); + vkQueueBeginDebugUtilsLabelEXT = PFN_vkQueueBeginDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkQueueBeginDebugUtilsLabelEXT" ) ); + vkQueueBindSparse = PFN_vkQueueBindSparse( vkGetDeviceProcAddr( device, "vkQueueBindSparse" ) ); + vkQueueEndDebugUtilsLabelEXT = PFN_vkQueueEndDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkQueueEndDebugUtilsLabelEXT" ) ); + vkQueueInsertDebugUtilsLabelEXT = PFN_vkQueueInsertDebugUtilsLabelEXT( vkGetDeviceProcAddr( device, "vkQueueInsertDebugUtilsLabelEXT" ) ); + vkQueuePresentKHR = PFN_vkQueuePresentKHR( vkGetDeviceProcAddr( device, "vkQueuePresentKHR" ) ); + vkQueueSetPerformanceConfigurationINTEL = PFN_vkQueueSetPerformanceConfigurationINTEL( vkGetDeviceProcAddr( device, "vkQueueSetPerformanceConfigurationINTEL" ) ); + vkQueueSubmit = PFN_vkQueueSubmit( vkGetDeviceProcAddr( device, "vkQueueSubmit" ) ); + vkQueueWaitIdle = PFN_vkQueueWaitIdle( vkGetDeviceProcAddr( device, "vkQueueWaitIdle" ) ); + vkRegisterDeviceEventEXT = PFN_vkRegisterDeviceEventEXT( vkGetDeviceProcAddr( device, "vkRegisterDeviceEventEXT" ) ); + vkRegisterDisplayEventEXT = PFN_vkRegisterDisplayEventEXT( vkGetDeviceProcAddr( device, "vkRegisterDisplayEventEXT" ) ); +#ifdef VK_USE_PLATFORM_WIN32_KHR + vkReleaseFullScreenExclusiveModeEXT = PFN_vkReleaseFullScreenExclusiveModeEXT( vkGetDeviceProcAddr( device, "vkReleaseFullScreenExclusiveModeEXT" ) ); +#endif /*VK_USE_PLATFORM_WIN32_KHR*/ + vkReleasePerformanceConfigurationINTEL = PFN_vkReleasePerformanceConfigurationINTEL( vkGetDeviceProcAddr( device, "vkReleasePerformanceConfigurationINTEL" ) ); + vkReleaseProfilingLockKHR = PFN_vkReleaseProfilingLockKHR( vkGetDeviceProcAddr( device, "vkReleaseProfilingLockKHR" ) ); + vkResetCommandBuffer = PFN_vkResetCommandBuffer( vkGetDeviceProcAddr( device, "vkResetCommandBuffer" ) ); + vkResetCommandPool = PFN_vkResetCommandPool( vkGetDeviceProcAddr( device, "vkResetCommandPool" ) ); + vkResetDescriptorPool = PFN_vkResetDescriptorPool( vkGetDeviceProcAddr( device, "vkResetDescriptorPool" ) ); + vkResetEvent = PFN_vkResetEvent( vkGetDeviceProcAddr( device, "vkResetEvent" ) ); + vkResetFences = PFN_vkResetFences( vkGetDeviceProcAddr( device, "vkResetFences" ) ); + vkResetQueryPoolEXT = PFN_vkResetQueryPoolEXT( vkGetDeviceProcAddr( device, "vkResetQueryPoolEXT" ) ); + vkResetQueryPool = PFN_vkResetQueryPool( vkGetDeviceProcAddr( device, "vkResetQueryPool" ) ); + if ( !vkResetQueryPool ) vkResetQueryPool = vkResetQueryPoolEXT; + vkSetDebugUtilsObjectNameEXT = PFN_vkSetDebugUtilsObjectNameEXT( vkGetDeviceProcAddr( device, "vkSetDebugUtilsObjectNameEXT" ) ); + vkSetDebugUtilsObjectTagEXT = PFN_vkSetDebugUtilsObjectTagEXT( vkGetDeviceProcAddr( device, "vkSetDebugUtilsObjectTagEXT" ) ); + vkSetEvent = PFN_vkSetEvent( vkGetDeviceProcAddr( device, "vkSetEvent" ) ); + vkSetHdrMetadataEXT = PFN_vkSetHdrMetadataEXT( vkGetDeviceProcAddr( device, "vkSetHdrMetadataEXT" ) ); + vkSetLocalDimmingAMD = PFN_vkSetLocalDimmingAMD( vkGetDeviceProcAddr( device, "vkSetLocalDimmingAMD" ) ); + vkSetPrivateDataEXT = PFN_vkSetPrivateDataEXT( vkGetDeviceProcAddr( device, "vkSetPrivateDataEXT" ) ); + vkSignalSemaphoreKHR = PFN_vkSignalSemaphoreKHR( vkGetDeviceProcAddr( device, "vkSignalSemaphoreKHR" ) ); + vkSignalSemaphore = PFN_vkSignalSemaphore( vkGetDeviceProcAddr( device, "vkSignalSemaphore" ) ); + if ( 
!vkSignalSemaphore ) vkSignalSemaphore = vkSignalSemaphoreKHR; + vkTrimCommandPoolKHR = PFN_vkTrimCommandPoolKHR( vkGetDeviceProcAddr( device, "vkTrimCommandPoolKHR" ) ); + vkTrimCommandPool = PFN_vkTrimCommandPool( vkGetDeviceProcAddr( device, "vkTrimCommandPool" ) ); + if ( !vkTrimCommandPool ) vkTrimCommandPool = vkTrimCommandPoolKHR; + vkUninitializePerformanceApiINTEL = PFN_vkUninitializePerformanceApiINTEL( vkGetDeviceProcAddr( device, "vkUninitializePerformanceApiINTEL" ) ); + vkUnmapMemory = PFN_vkUnmapMemory( vkGetDeviceProcAddr( device, "vkUnmapMemory" ) ); + vkUpdateDescriptorSetWithTemplateKHR = PFN_vkUpdateDescriptorSetWithTemplateKHR( vkGetDeviceProcAddr( device, "vkUpdateDescriptorSetWithTemplateKHR" ) ); + vkUpdateDescriptorSetWithTemplate = PFN_vkUpdateDescriptorSetWithTemplate( vkGetDeviceProcAddr( device, "vkUpdateDescriptorSetWithTemplate" ) ); + if ( !vkUpdateDescriptorSetWithTemplate ) vkUpdateDescriptorSetWithTemplate = vkUpdateDescriptorSetWithTemplateKHR; + vkUpdateDescriptorSets = PFN_vkUpdateDescriptorSets( vkGetDeviceProcAddr( device, "vkUpdateDescriptorSets" ) ); + vkWaitForFences = PFN_vkWaitForFences( vkGetDeviceProcAddr( device, "vkWaitForFences" ) ); + vkWaitSemaphoresKHR = PFN_vkWaitSemaphoresKHR( vkGetDeviceProcAddr( device, "vkWaitSemaphoresKHR" ) ); + vkWaitSemaphores = PFN_vkWaitSemaphores( vkGetDeviceProcAddr( device, "vkWaitSemaphores" ) ); + if ( !vkWaitSemaphores ) vkWaitSemaphores = vkWaitSemaphoresKHR; + vkWriteAccelerationStructuresPropertiesKHR = PFN_vkWriteAccelerationStructuresPropertiesKHR( vkGetDeviceProcAddr( device, "vkWriteAccelerationStructuresPropertiesKHR" ) ); + } + }; + +} // namespace VULKAN_HPP_NAMESPACE + +namespace std +{
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::AccelerationStructureKHR> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::AccelerationStructureKHR const& accelerationStructureKHR) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkAccelerationStructureKHR>{}(static_cast<VkAccelerationStructureKHR>(accelerationStructureKHR)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::AccelerationStructureNV> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::AccelerationStructureNV const& accelerationStructureNV) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkAccelerationStructureNV>{}(static_cast<VkAccelerationStructureNV>(accelerationStructureNV)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Buffer> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Buffer const& buffer) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkBuffer>{}(static_cast<VkBuffer>(buffer)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::BufferView> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::BufferView const& bufferView) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkBufferView>{}(static_cast<VkBufferView>(bufferView)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::CommandBuffer> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::CommandBuffer const& commandBuffer) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkCommandBuffer>{}(static_cast<VkCommandBuffer>(commandBuffer)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::CommandPool> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::CommandPool const& commandPool) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkCommandPool>{}(static_cast<VkCommandPool>(commandPool)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DebugReportCallbackEXT const& debugReportCallbackEXT) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDebugReportCallbackEXT>{}(static_cast<VkDebugReportCallbackEXT>(debugReportCallbackEXT)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DebugUtilsMessengerEXT const& debugUtilsMessengerEXT) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDebugUtilsMessengerEXT>{}(static_cast<VkDebugUtilsMessengerEXT>(debugUtilsMessengerEXT)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DeferredOperationKHR> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DeferredOperationKHR const& deferredOperationKHR) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDeferredOperationKHR>{}(static_cast<VkDeferredOperationKHR>(deferredOperationKHR)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DescriptorPool> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DescriptorPool const& descriptorPool) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDescriptorPool>{}(static_cast<VkDescriptorPool>(descriptorPool)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DescriptorSet> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DescriptorSet const& descriptorSet) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDescriptorSet>{}(static_cast<VkDescriptorSet>(descriptorSet)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DescriptorSetLayout> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DescriptorSetLayout const& descriptorSetLayout) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDescriptorSetLayout>{}(static_cast<VkDescriptorSetLayout>(descriptorSetLayout)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DescriptorUpdateTemplate const& descriptorUpdateTemplate) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDescriptorUpdateTemplate>{}(static_cast<VkDescriptorUpdateTemplate>(descriptorUpdateTemplate)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Device> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Device const& device) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDevice>{}(static_cast<VkDevice>(device)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DeviceMemory> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DeviceMemory const& deviceMemory) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDeviceMemory>{}(static_cast<VkDeviceMemory>(deviceMemory)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DisplayKHR> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DisplayKHR const& displayKHR) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDisplayKHR>{}(static_cast<VkDisplayKHR>(displayKHR)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::DisplayModeKHR> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::DisplayModeKHR const& displayModeKHR) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkDisplayModeKHR>{}(static_cast<VkDisplayModeKHR>(displayModeKHR)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Event> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Event const& event) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkEvent>{}(static_cast<VkEvent>(event)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Fence> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Fence const& fence) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkFence>{}(static_cast<VkFence>(fence)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Framebuffer> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Framebuffer const& framebuffer) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkFramebuffer>{}(static_cast<VkFramebuffer>(framebuffer)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Image> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Image const& image) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkImage>{}(static_cast<VkImage>(image)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::ImageView> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::ImageView const& imageView) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkImageView>{}(static_cast<VkImageView>(imageView)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::IndirectCommandsLayoutNV const& indirectCommandsLayoutNV) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkIndirectCommandsLayoutNV>{}(static_cast<VkIndirectCommandsLayoutNV>(indirectCommandsLayoutNV)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Instance> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Instance const& instance) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkInstance>{}(static_cast<VkInstance>(instance)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::PerformanceConfigurationINTEL const& performanceConfigurationINTEL) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkPerformanceConfigurationINTEL>{}(static_cast<VkPerformanceConfigurationINTEL>(performanceConfigurationINTEL)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::PhysicalDevice> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::PhysicalDevice const& physicalDevice) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkPhysicalDevice>{}(static_cast<VkPhysicalDevice>(physicalDevice)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Pipeline> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Pipeline const& pipeline) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkPipeline>{}(static_cast<VkPipeline>(pipeline)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::PipelineCache> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::PipelineCache const& pipelineCache) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkPipelineCache>{}(static_cast<VkPipelineCache>(pipelineCache)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::PipelineLayout> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::PipelineLayout const& pipelineLayout) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkPipelineLayout>{}(static_cast<VkPipelineLayout>(pipelineLayout)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::PrivateDataSlotEXT const& privateDataSlotEXT) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkPrivateDataSlotEXT>{}(static_cast<VkPrivateDataSlotEXT>(privateDataSlotEXT)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::QueryPool> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::QueryPool const& queryPool) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkQueryPool>{}(static_cast<VkQueryPool>(queryPool)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Queue> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Queue const& queue) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkQueue>{}(static_cast<VkQueue>(queue)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::RenderPass> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::RenderPass const& renderPass) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkRenderPass>{}(static_cast<VkRenderPass>(renderPass)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Sampler> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Sampler const& sampler) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkSampler>{}(static_cast<VkSampler>(sampler)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::SamplerYcbcrConversion const& samplerYcbcrConversion) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkSamplerYcbcrConversion>{}(static_cast<VkSamplerYcbcrConversion>(samplerYcbcrConversion)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::Semaphore> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::Semaphore const& semaphore) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkSemaphore>{}(static_cast<VkSemaphore>(semaphore)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::ShaderModule> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::ShaderModule const& shaderModule) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkShaderModule>{}(static_cast<VkShaderModule>(shaderModule)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::SurfaceKHR> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::SurfaceKHR const& surfaceKHR) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkSurfaceKHR>{}(static_cast<VkSurfaceKHR>(surfaceKHR)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::SwapchainKHR> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::SwapchainKHR const& swapchainKHR) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkSwapchainKHR>{}(static_cast<VkSwapchainKHR>(swapchainKHR)); + } + };
+ + template <> struct hash<VULKAN_HPP_NAMESPACE::ValidationCacheEXT> + { + std::size_t operator()(VULKAN_HPP_NAMESPACE::ValidationCacheEXT const& validationCacheEXT) const VULKAN_HPP_NOEXCEPT + { + return std::hash<VkValidationCacheEXT>{}(static_cast<VkValidationCacheEXT>(validationCacheEXT)); + } + };
+} +#endif diff --git a/src/third_party/vulkan/vulkan_android.h b/src/third_party/vulkan/vulkan_android.h new file mode 100644 index 0000000..50ef85f --- /dev/null +++ b/src/third_party/vulkan/vulkan_android.h @@ -0,0 +1,112 @@ +#ifndef VULKAN_ANDROID_H_ +#define VULKAN_ANDROID_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry.
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_android_surface 1 +struct ANativeWindow; +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_ANDROID_external_memory_android_hardware_buffer 1 +struct AHardwareBuffer; +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3 +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer" +typedef struct VkAndroidHardwareBufferUsageANDROID { + VkStructureType sType; + void* pNext; + uint64_t androidHardwareBufferUsage; +} VkAndroidHardwareBufferUsageANDROID; + +typedef struct VkAndroidHardwareBufferPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeBits; +} VkAndroidHardwareBufferPropertiesANDROID; + +typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkFormat format; + uint64_t externalFormat; + VkFormatFeatureFlags formatFeatures; + VkComponentMapping samplerYcbcrConversionComponents; + VkSamplerYcbcrModelConversion suggestedYcbcrModel; + VkSamplerYcbcrRange suggestedYcbcrRange; + VkChromaLocation suggestedXChromaOffset; + VkChromaLocation suggestedYChromaOffset; +} VkAndroidHardwareBufferFormatPropertiesANDROID; + +typedef struct VkImportAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + struct AHardwareBuffer* buffer; +} VkImportAndroidHardwareBufferInfoANDROID; + +typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkMemoryGetAndroidHardwareBufferInfoANDROID; + +typedef struct VkExternalFormatANDROID { + VkStructureType sType; + void* pNext; + uint64_t externalFormat; +} VkExternalFormatANDROID; + +typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_beta.h b/src/third_party/vulkan/vulkan_beta.h new file mode 100644 index 
0000000..23513b3 --- /dev/null +++ b/src/third_party/vulkan/vulkan_beta.h @@ -0,0 +1,56 @@ +#ifndef VULKAN_BETA_H_ +#define VULKAN_BETA_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_portability_subset 1 +#define VK_KHR_PORTABILITY_SUBSET_SPEC_VERSION 1 +#define VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME "VK_KHR_portability_subset" +typedef struct VkPhysicalDevicePortabilitySubsetFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 constantAlphaColorBlendFactors; + VkBool32 events; + VkBool32 imageViewFormatReinterpretation; + VkBool32 imageViewFormatSwizzle; + VkBool32 imageView2DOn3DImage; + VkBool32 multisampleArrayImage; + VkBool32 mutableComparisonSamplers; + VkBool32 pointPolygons; + VkBool32 samplerMipLodBias; + VkBool32 separateStencilMaskRef; + VkBool32 shaderSampleRateInterpolationFunctions; + VkBool32 tessellationIsolines; + VkBool32 tessellationPointMode; + VkBool32 triangleFans; + VkBool32 vertexAttributeAccessBeyondStride; +} VkPhysicalDevicePortabilitySubsetFeaturesKHR; + +typedef struct VkPhysicalDevicePortabilitySubsetPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t minVertexInputBindingStrideAlignment; +} VkPhysicalDevicePortabilitySubsetPropertiesKHR; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_core.h b/src/third_party/vulkan/vulkan_core.h new file mode 100644 index 0000000..cc0851f --- /dev/null +++ b/src/third_party/vulkan/vulkan_core.h @@ -0,0 +1,11847 @@ +#ifndef VULKAN_CORE_H_ +#define VULKAN_CORE_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE) +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; +#else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +#endif + +#define VK_MAKE_VERSION(major, minor, patch) \ + ((((uint32_t)(major)) << 22) | (((uint32_t)(minor)) << 12) | ((uint32_t)(patch))) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. 
+//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 + +// Version of this file +#define VK_HEADER_VERSION 162 + +// Complete version of this file +#define VK_HEADER_VERSION_COMPLETE VK_MAKE_VERSION(1, 2, VK_HEADER_VERSION) + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) + +#define VK_NULL_HANDLE 0 + +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceAddress; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkFlags; +typedef uint32_t VkSampleMask; +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_FALSE 0 +#define VK_LOD_CLAMP_NONE 1000.0f +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_REMAINING_MIP_LEVELS (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_TRUE 1 +#define VK_WHOLE_SIZE (~0ULL) +#define VK_MAX_MEMORY_TYPES 32 +#define VK_MAX_MEMORY_HEAPS 16 +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 +#define VK_UUID_SIZE 16 +#define VK_MAX_EXTENSION_NAME_SIZE 256 +#define VK_MAX_DESCRIPTION_SIZE 256 + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_UNKNOWN = -13, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_FRAGMENTATION = -1000161000, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + 
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, + VK_ERROR_NOT_PERMITTED_EXT = -1000174001, + VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, + VK_THREAD_IDLE_KHR = 1000268000, + VK_THREAD_DONE_KHR = 1000268001, + VK_OPERATION_DEFERRED_KHR = 1000268002, + VK_OPERATION_NOT_DEFERRED_KHR = 1000268003, + VK_PIPELINE_COMPILE_REQUIRED_EXT = 1000297000, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, + VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = VK_PIPELINE_COMPILE_REQUIRED_EXT, + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + 
VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + 
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002, + VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001, + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, + VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002, + VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003, + VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 
1000116005, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010, + VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011, + VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR = 1000163000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR = 1000163001, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, + VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000, + VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000, + 
VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000, + VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, + VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, + VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000192000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = 1000203000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000, + VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001, + VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002, + VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003, + VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004, + VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000, + VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, + VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, + VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = 1000215000, + VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, + VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002, + VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 1000226003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT 
= 1000234000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, + VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, + VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = 1000245000, + VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000, + VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001, + VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, + VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000, + VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005, + VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000, + 
VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000, + VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001, + VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002, + VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = 1000295000, + VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = 1000295001, + VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = 1000295002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = 1000297000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000, + VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001, + VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001, + VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = 1000335000, + VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = 1000337000, + VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = 1000337001, + VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = 1000337002, + VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = 1000337003, + VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = 1000337004, + VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = 1000337005, + VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = 1000337006, + VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = 1000337007, + VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = 1000337008, + VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = 1000337009, + VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = 1000337010, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000, + VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + 
VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = 
VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = 
VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES, + VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkStructureType; + +typedef enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001, + VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002, + VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002, + VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000, + VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = 1000164003, + VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, + VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, + 
VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, + VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF +} VkImageLayout; + +typedef enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000, + VK_OBJECT_TYPE_SURFACE_KHR = 1000000000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000, + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000, + VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1000268000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1000277000, + VK_OBJECT_TYPE_PRIVATE_DATA_SLOT_EXT = 1000295000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, + VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkObjectType; + +typedef enum VkVendorId { + VK_VENDOR_ID_VIV = 0x10001, + VK_VENDOR_ID_VSI = 0x10002, + VK_VENDOR_ID_KAZAN = 0x10003, + VK_VENDOR_ID_CODEPLAY = 0x10004, + VK_VENDOR_ID_MESA = 0x10005, + VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF +} VkVendorId; + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkSystemAllocationScope { + VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1, + VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4, + VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF +} VkSystemAllocationScope; + +typedef enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, + VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkInternalAllocationType; + +typedef enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + 
VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + 
VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + VK_FORMAT_BC5_SNORM_BLOCK = 142, + VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, + VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, + VK_FORMAT_BC7_UNORM_BLOCK = 145, + VK_FORMAT_BC7_SRGB_BLOCK = 146, + VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, + VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, + VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, + VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, + VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, + VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, + VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, + VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, + VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, + VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, + VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, + VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, + VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, + VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, + VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, + VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, + VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, + VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, + VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, + VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, + VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, + VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, + VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, + VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, + VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, + VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, + VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, + VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, + VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, + VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, + VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, + VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, + VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, + VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, + VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, + VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, + VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, + VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184, + VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000, + VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006, + VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016, + 
VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026, + VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027, + VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033, + VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000, + VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001, + VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002, + VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003, + VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004, + VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005, + VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006, + VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007, + VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = 1000066000, + VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = 1000066001, + VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = 1000066002, + VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = 1000066003, + VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = 1000066004, + VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = 1000066005, + VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = 1000066006, + VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = 1000066007, + VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = 1000066008, + VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = 1000066009, + VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 1000066010, + VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = 1000066011, + VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = 1000066012, + VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = 1000066013, + VK_FORMAT_A4R4G4B4_UNORM_PACK16_EXT = 1000340000, + VK_FORMAT_A4B4G4R4_UNORM_PACK16_EXT = 1000340001, + VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM, + VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM, + VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM, + VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM, + VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16, + VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16, + VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16, + VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16, + VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = 
VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16, + VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16, + VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16, + VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16, + VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16, + VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16, + VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM, + VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM, + VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM, + VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM, + VK_FORMAT_MAX_ENUM = 0x7FFFFFFF +} VkFormat; + +typedef enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, + VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000, + VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF +} VkImageTiling; + +typedef enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, + VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageType; + +typedef enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, + VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkPhysicalDeviceType; + +typedef enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, + VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, + VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1000150000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1000150001, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, + VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000, + VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkQueryType; + +typedef enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, + VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSharingMode; + +typedef enum VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + 
VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, + VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF +} VkComponentSwizzle; + +typedef enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, + VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkImageViewType; + +typedef enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, + VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF +} VkBlendFactor; + +typedef enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, + VK_BLEND_OP_ZERO_EXT = 1000148000, + VK_BLEND_OP_SRC_EXT = 1000148001, + VK_BLEND_OP_DST_EXT = 1000148002, + VK_BLEND_OP_SRC_OVER_EXT = 1000148003, + VK_BLEND_OP_DST_OVER_EXT = 1000148004, + VK_BLEND_OP_SRC_IN_EXT = 1000148005, + VK_BLEND_OP_DST_IN_EXT = 1000148006, + VK_BLEND_OP_SRC_OUT_EXT = 1000148007, + VK_BLEND_OP_DST_OUT_EXT = 1000148008, + VK_BLEND_OP_SRC_ATOP_EXT = 1000148009, + VK_BLEND_OP_DST_ATOP_EXT = 1000148010, + VK_BLEND_OP_XOR_EXT = 1000148011, + VK_BLEND_OP_MULTIPLY_EXT = 1000148012, + VK_BLEND_OP_SCREEN_EXT = 1000148013, + VK_BLEND_OP_OVERLAY_EXT = 1000148014, + VK_BLEND_OP_DARKEN_EXT = 1000148015, + VK_BLEND_OP_LIGHTEN_EXT = 1000148016, + VK_BLEND_OP_COLORDODGE_EXT = 1000148017, + VK_BLEND_OP_COLORBURN_EXT = 1000148018, + VK_BLEND_OP_HARDLIGHT_EXT = 1000148019, + VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020, + VK_BLEND_OP_DIFFERENCE_EXT = 1000148021, + VK_BLEND_OP_EXCLUSION_EXT = 1000148022, + VK_BLEND_OP_INVERT_EXT = 1000148023, + VK_BLEND_OP_INVERT_RGB_EXT = 1000148024, + VK_BLEND_OP_LINEARDODGE_EXT = 1000148025, + VK_BLEND_OP_LINEARBURN_EXT = 1000148026, + VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027, + VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028, + VK_BLEND_OP_PINLIGHT_EXT = 1000148029, + VK_BLEND_OP_HARDMIX_EXT = 1000148030, + VK_BLEND_OP_HSL_HUE_EXT = 1000148031, + VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032, + VK_BLEND_OP_HSL_COLOR_EXT = 1000148033, + VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034, + VK_BLEND_OP_PLUS_EXT = 1000148035, + VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036, + VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037, + VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038, + VK_BLEND_OP_MINUS_EXT = 1000148039, + VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040, + VK_BLEND_OP_CONTRAST_EXT = 1000148041, + VK_BLEND_OP_INVERT_OVG_EXT = 1000148042, + VK_BLEND_OP_RED_EXT = 1000148043, + VK_BLEND_OP_GREEN_EXT = 1000148044, + VK_BLEND_OP_BLUE_EXT = 1000148045, + VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF +} VkBlendOp; + +typedef enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + 
VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, + VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF +} VkCompareOp; + +typedef enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000, + VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1000347000, + VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004, + VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006, + VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001, + VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR = 1000226000, + VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000, + VK_DYNAMIC_STATE_CULL_MODE_EXT = 1000267000, + VK_DYNAMIC_STATE_FRONT_FACE_EXT = 1000267001, + VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT = 1000267002, + VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT = 1000267003, + VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT = 1000267004, + VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT = 1000267005, + VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT = 1000267006, + VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT = 1000267007, + VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT = 1000267008, + VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT = 1000267009, + VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT = 1000267010, + VK_DYNAMIC_STATE_STENCIL_OP_EXT = 1000267011, + VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF +} VkDynamicState; + +typedef enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, + VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF +} VkFrontFace; + +typedef enum VkVertexInputRate { + VK_VERTEX_INPUT_RATE_VERTEX = 0, + VK_VERTEX_INPUT_RATE_INSTANCE = 1, + VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF +} VkVertexInputRate; + +typedef enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, + VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF +} VkPrimitiveTopology; + +typedef enum VkPolygonMode { + VK_POLYGON_MODE_FILL = 0, + VK_POLYGON_MODE_LINE = 1, + VK_POLYGON_MODE_POINT = 2, + VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000, + VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF +} VkPolygonMode; + +typedef enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, + VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF +} VkStencilOp; + +typedef enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR 
= 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, + VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF +} VkLogicOp; + +typedef enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, + VK_BORDER_COLOR_FLOAT_CUSTOM_EXT = 1000287003, + VK_BORDER_COLOR_INT_CUSTOM_EXT = 1000287004, + VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF +} VkBorderColor; + +typedef enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, + VK_FILTER_CUBIC_IMG = 1000015000, + VK_FILTER_CUBIC_EXT = VK_FILTER_CUBIC_IMG, + VK_FILTER_MAX_ENUM = 0x7FFFFFFF +} VkFilter; + +typedef enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, + VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerAddressMode; + +typedef enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, + VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerMipmapMode; + +typedef enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1000150000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorType; + +typedef enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, + VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentLoadOp; + +typedef enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, + VK_ATTACHMENT_STORE_OP_NONE_QCOM = 1000301000, + VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentStoreOp; + +typedef enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1000165000, + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF +} VkPipelineBindPoint; + +typedef enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, + VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferLevel; + +typedef enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_NONE_KHR = 1000165000, + VK_INDEX_TYPE_UINT8_EXT = 1000265000, + VK_INDEX_TYPE_NONE_NV = VK_INDEX_TYPE_NONE_KHR, + VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkIndexType; + +typedef enum 
VkSubpassContents { + VK_SUBPASS_CONTENTS_INLINE = 0, + VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1, + VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassContents; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x00400000, + VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000, + VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000, + VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 0x00020000, + VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 0x00040000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR, + VK_ACCESS_FRAGMENT_SHADING_RATE_ATTACHMENT_READ_BIT_KHR = VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkImageAspectFlagBits { + VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008, + VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010, + VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020, + VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040, + VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080, + VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100, + VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200, + VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400, + VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT, + VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT, + VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT, + VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageAspectFlagBits; +typedef VkFlags VkImageAspectFlags; + +typedef enum VkFormatFeatureFlagBits { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, + 
VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000, + VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000, + VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR = 0x20000000, + VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000, + VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = 0x40000000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT, + VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT, + VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, + VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFormatFeatureFlagBits; +typedef VkFlags VkFormatFeatureFlags; + +typedef enum VkImageCreateFlagBits { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200, + VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000, + VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000, + 
VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT, + VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT, + VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT, + VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageCreateFlagBits; +typedef VkFlags VkImageCreateFlags; + +typedef enum VkSampleCountFlagBits { + VK_SAMPLE_COUNT_1_BIT = 0x00000001, + VK_SAMPLE_COUNT_2_BIT = 0x00000002, + VK_SAMPLE_COUNT_4_BIT = 0x00000004, + VK_SAMPLE_COUNT_8_BIT = 0x00000008, + VK_SAMPLE_COUNT_16_BIT = 0x00000010, + VK_SAMPLE_COUNT_32_BIT = 0x00000020, + VK_SAMPLE_COUNT_64_BIT = 0x00000040, + VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSampleCountFlagBits; +typedef VkFlags VkSampleCountFlags; + +typedef enum VkImageUsageFlagBits { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080, + VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00000100, + VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200, + VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, + VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageUsageFlagBits; +typedef VkFlags VkImageUsageFlags; +typedef VkFlags VkInstanceCreateFlags; + +typedef enum VkMemoryHeapFlagBits { + VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002, + VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT, + VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryHeapFlagBits; +typedef VkFlags VkMemoryHeapFlags; + +typedef enum VkMemoryPropertyFlagBits { + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002, + VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004, + VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008, + VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010, + VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020, + VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040, + VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080, + VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryPropertyFlagBits; +typedef VkFlags VkMemoryPropertyFlags; + +typedef enum VkQueueFlagBits { + VK_QUEUE_GRAPHICS_BIT = 0x00000001, + VK_QUEUE_COMPUTE_BIT = 0x00000002, + VK_QUEUE_TRANSFER_BIT = 0x00000004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008, + VK_QUEUE_PROTECTED_BIT = 0x00000010, + VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueueFlagBits; +typedef VkFlags VkQueueFlags; +typedef VkFlags VkDeviceCreateFlags; + +typedef enum VkDeviceQueueCreateFlagBits { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001, + VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDeviceQueueCreateFlagBits; +typedef VkFlags VkDeviceQueueCreateFlags; + +typedef enum VkPipelineStageFlagBits { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 
0x00000004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000, + VK_PIPELINE_STAGE_HOST_BIT = 0x00004000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000, + VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x02000000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x00200000, + VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, + VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000, + VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV = 0x00020000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR, + VK_PIPELINE_STAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR = VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineStageFlagBits; +typedef VkFlags VkPipelineStageFlags; +typedef VkFlags VkMemoryMapFlags; + +typedef enum VkSparseMemoryBindFlagBits { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001, + VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseMemoryBindFlagBits; +typedef VkFlags VkSparseMemoryBindFlags; + +typedef enum VkSparseImageFormatFlagBits { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004, + VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSparseImageFormatFlagBits; +typedef VkFlags VkSparseImageFormatFlags; + +typedef enum VkFenceCreateFlagBits { + VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001, + VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceCreateFlagBits; +typedef VkFlags VkFenceCreateFlags; +typedef VkFlags VkSemaphoreCreateFlags; +typedef VkFlags VkEventCreateFlags; + +typedef enum VkQueryPipelineStatisticFlagBits { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200, + VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400, + 
VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryPipelineStatisticFlagBits; +typedef VkFlags VkQueryPipelineStatisticFlags; +typedef VkFlags VkQueryPoolCreateFlags; + +typedef enum VkQueryResultFlagBits { + VK_QUERY_RESULT_64_BIT = 0x00000001, + VK_QUERY_RESULT_WAIT_BIT = 0x00000002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008, + VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryResultFlagBits; +typedef VkFlags VkQueryResultFlags; + +typedef enum VkBufferCreateFlagBits { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001, + VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferCreateFlagBits; +typedef VkFlags VkBufferCreateFlags; + +typedef enum VkBufferUsageFlagBits { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x00080000, + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x00100000, + VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x00000400, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkBufferUsageFlagBits; +typedef VkFlags VkBufferUsageFlags; +typedef VkFlags VkBufferViewCreateFlags; + +typedef enum VkImageViewCreateFlagBits { + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001, + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT = 0x00000002, + VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkImageViewCreateFlagBits; +typedef VkFlags VkImageViewCreateFlags; + +typedef enum VkShaderModuleCreateFlagBits { + VK_SHADER_MODULE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderModuleCreateFlagBits; +typedef VkFlags VkShaderModuleCreateFlags; + +typedef enum VkPipelineCacheCreateFlagBits { + VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT = 0x00000001, + VK_PIPELINE_CACHE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheCreateFlagBits; +typedef VkFlags VkPipelineCacheCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + 
VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; + +typedef enum VkPipelineCreateFlagBits { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, + VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x00004000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x00008000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x00010000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x00020000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x00001000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x00002000, + VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x00080000, + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, + VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040, + VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080, + VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x00040000, + VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x00000800, + VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = 0x00000100, + VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = 0x00000200, + VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, + VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, + VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCreateFlagBits; +typedef VkFlags VkPipelineCreateFlags; + +typedef enum VkPipelineShaderStageCreateFlagBits { + VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000001, + VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000002, + VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineShaderStageCreateFlagBits; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_RAYGEN_BIT_KHR = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_KHR = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x00002000, + VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, + VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, + VK_SHADER_STAGE_RAYGEN_BIT_NV = VK_SHADER_STAGE_RAYGEN_BIT_KHR, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, + VK_SHADER_STAGE_MISS_BIT_NV = VK_SHADER_STAGE_MISS_BIT_KHR, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, + VK_SHADER_STAGE_CALLABLE_BIT_NV = VK_SHADER_STAGE_CALLABLE_BIT_KHR, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; + +typedef enum VkCullModeFlagBits { + 
VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; + +typedef enum VkSamplerCreateFlagBits { + VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001, + VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002, + VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSamplerCreateFlagBits; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkFramebufferCreateFlagBits { + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001, + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, + VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFramebufferCreateFlagBits; +typedef VkFlags VkFramebufferCreateFlags; + +typedef enum VkRenderPassCreateFlagBits { + VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM = 0x00000002, + VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkRenderPassCreateFlagBits; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM = 0x00000004, + 
VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM = 0x00000008, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolResetFlagBits; +typedef VkFlags VkCommandPoolResetFlags; + +typedef enum VkCommandBufferUsageFlagBits { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, + VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferUsageFlagBits; +typedef VkFlags VkCommandBufferUsageFlags; + +typedef enum VkQueryControlFlagBits { + VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001, + VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkQueryControlFlagBits; +typedef VkFlags VkQueryControlFlags; + +typedef enum VkCommandBufferResetFlagBits { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandBufferResetFlagBits; +typedef VkFlags VkCommandBufferResetFlags; + +typedef enum VkStencilFaceFlagBits { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003, + VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK, + VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkStencilFaceFlagBits; +typedef VkFlags VkStencilFaceFlags; +typedef struct VkExtent2D { + uint32_t width; + uint32_t height; +} VkExtent2D; + +typedef struct VkExtent3D { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkExtent3D; + +typedef struct VkOffset2D { + int32_t x; + int32_t y; +} VkOffset2D; + +typedef struct VkOffset3D { + int32_t x; + int32_t y; + int32_t z; +} VkOffset3D; + +typedef struct VkRect2D { + VkOffset2D offset; + VkExtent2D extent; +} VkRect2D; + +typedef struct VkBaseInStructure { + VkStructureType sType; + const struct VkBaseInStructure* pNext; +} VkBaseInStructure; + +typedef struct VkBaseOutStructure { + VkStructureType sType; + struct VkBaseOutStructure* pNext; +} VkBaseOutStructure; + +typedef struct VkBufferMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; +} VkBufferMemoryBarrier; + +typedef struct VkDispatchIndirectCommand { + uint32_t x; + uint32_t y; + uint32_t z; +} VkDispatchIndirectCommand; + +typedef struct VkDrawIndexedIndirectCommand { + uint32_t indexCount; + uint32_t instanceCount; + uint32_t firstIndex; + int32_t vertexOffset; + uint32_t firstInstance; +} VkDrawIndexedIndirectCommand; + +typedef struct VkDrawIndirectCommand { + uint32_t vertexCount; + uint32_t instanceCount; + uint32_t firstVertex; + uint32_t firstInstance; +} VkDrawIndirectCommand; + +typedef struct VkImageSubresourceRange { + VkImageAspectFlags aspectMask; + uint32_t baseMipLevel; + uint32_t levelCount; + uint32_t baseArrayLayer; + 
uint32_t layerCount; +} VkImageSubresourceRange; + +typedef struct VkImageMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkImageLayout oldLayout; + VkImageLayout newLayout; + uint32_t srcQueueFamilyIndex; + uint32_t dstQueueFamilyIndex; + VkImage image; + VkImageSubresourceRange subresourceRange; +} VkImageMemoryBarrier; + +typedef struct VkMemoryBarrier { + VkStructureType sType; + const void* pNext; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; +} VkMemoryBarrier; + +typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)( + void* pUserData, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkFreeFunction)( + void* pUserData, + void* pMemory); + +typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)( + void* pUserData, + size_t size, + VkInternalAllocationType allocationType, + VkSystemAllocationScope allocationScope); + +typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)( + void* pUserData, + void* pOriginal, + size_t size, + size_t alignment, + VkSystemAllocationScope allocationScope); + +typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void); +typedef struct VkAllocationCallbacks { + void* pUserData; + PFN_vkAllocationFunction pfnAllocation; + PFN_vkReallocationFunction pfnReallocation; + PFN_vkFreeFunction pfnFree; + PFN_vkInternalAllocationNotification pfnInternalAllocation; + PFN_vkInternalFreeNotification pfnInternalFree; +} VkAllocationCallbacks; + +typedef struct VkApplicationInfo { + VkStructureType sType; + const void* pNext; + const char* pApplicationName; + uint32_t applicationVersion; + const char* pEngineName; + uint32_t engineVersion; + uint32_t apiVersion; +} VkApplicationInfo; + +typedef struct VkFormatProperties { + VkFormatFeatureFlags linearTilingFeatures; + VkFormatFeatureFlags optimalTilingFeatures; + VkFormatFeatureFlags bufferFeatures; +} VkFormatProperties; + +typedef struct VkImageFormatProperties { + VkExtent3D maxExtent; + uint32_t maxMipLevels; + uint32_t maxArrayLayers; + VkSampleCountFlags sampleCounts; + VkDeviceSize maxResourceSize; +} VkImageFormatProperties; + +typedef struct VkInstanceCreateInfo { + VkStructureType sType; + const void* pNext; + VkInstanceCreateFlags flags; + const VkApplicationInfo* pApplicationInfo; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; +} VkInstanceCreateInfo; + +typedef struct VkMemoryHeap { + VkDeviceSize size; + VkMemoryHeapFlags flags; +} VkMemoryHeap; + +typedef struct VkMemoryType { + VkMemoryPropertyFlags propertyFlags; + uint32_t heapIndex; +} VkMemoryType; + +typedef struct VkPhysicalDeviceFeatures { + VkBool32 robustBufferAccess; + VkBool32 fullDrawIndexUint32; + VkBool32 imageCubeArray; + VkBool32 independentBlend; + VkBool32 geometryShader; + VkBool32 tessellationShader; + VkBool32 sampleRateShading; + VkBool32 dualSrcBlend; + VkBool32 logicOp; + VkBool32 multiDrawIndirect; + VkBool32 drawIndirectFirstInstance; + VkBool32 depthClamp; + VkBool32 depthBiasClamp; + VkBool32 fillModeNonSolid; + VkBool32 depthBounds; + VkBool32 wideLines; + VkBool32 largePoints; + VkBool32 alphaToOne; + VkBool32 multiViewport; + VkBool32 samplerAnisotropy; + VkBool32 textureCompressionETC2; + 
VkBool32 textureCompressionASTC_LDR; + VkBool32 textureCompressionBC; + VkBool32 occlusionQueryPrecise; + VkBool32 pipelineStatisticsQuery; + VkBool32 vertexPipelineStoresAndAtomics; + VkBool32 fragmentStoresAndAtomics; + VkBool32 shaderTessellationAndGeometryPointSize; + VkBool32 shaderImageGatherExtended; + VkBool32 shaderStorageImageExtendedFormats; + VkBool32 shaderStorageImageMultisample; + VkBool32 shaderStorageImageReadWithoutFormat; + VkBool32 shaderStorageImageWriteWithoutFormat; + VkBool32 shaderUniformBufferArrayDynamicIndexing; + VkBool32 shaderSampledImageArrayDynamicIndexing; + VkBool32 shaderStorageBufferArrayDynamicIndexing; + VkBool32 shaderStorageImageArrayDynamicIndexing; + VkBool32 shaderClipDistance; + VkBool32 shaderCullDistance; + VkBool32 shaderFloat64; + VkBool32 shaderInt64; + VkBool32 shaderInt16; + VkBool32 shaderResourceResidency; + VkBool32 shaderResourceMinLod; + VkBool32 sparseBinding; + VkBool32 sparseResidencyBuffer; + VkBool32 sparseResidencyImage2D; + VkBool32 sparseResidencyImage3D; + VkBool32 sparseResidency2Samples; + VkBool32 sparseResidency4Samples; + VkBool32 sparseResidency8Samples; + VkBool32 sparseResidency16Samples; + VkBool32 sparseResidencyAliased; + VkBool32 variableMultisampleRate; + VkBool32 inheritedQueries; +} VkPhysicalDeviceFeatures; + +typedef struct VkPhysicalDeviceLimits { + uint32_t maxImageDimension1D; + uint32_t maxImageDimension2D; + uint32_t maxImageDimension3D; + uint32_t maxImageDimensionCube; + uint32_t maxImageArrayLayers; + uint32_t maxTexelBufferElements; + uint32_t maxUniformBufferRange; + uint32_t maxStorageBufferRange; + uint32_t maxPushConstantsSize; + uint32_t maxMemoryAllocationCount; + uint32_t maxSamplerAllocationCount; + VkDeviceSize bufferImageGranularity; + VkDeviceSize sparseAddressSpaceSize; + uint32_t maxBoundDescriptorSets; + uint32_t maxPerStageDescriptorSamplers; + uint32_t maxPerStageDescriptorUniformBuffers; + uint32_t maxPerStageDescriptorStorageBuffers; + uint32_t maxPerStageDescriptorSampledImages; + uint32_t maxPerStageDescriptorStorageImages; + uint32_t maxPerStageDescriptorInputAttachments; + uint32_t maxPerStageResources; + uint32_t maxDescriptorSetSamplers; + uint32_t maxDescriptorSetUniformBuffers; + uint32_t maxDescriptorSetUniformBuffersDynamic; + uint32_t maxDescriptorSetStorageBuffers; + uint32_t maxDescriptorSetStorageBuffersDynamic; + uint32_t maxDescriptorSetSampledImages; + uint32_t maxDescriptorSetStorageImages; + uint32_t maxDescriptorSetInputAttachments; + uint32_t maxVertexInputAttributes; + uint32_t maxVertexInputBindings; + uint32_t maxVertexInputAttributeOffset; + uint32_t maxVertexInputBindingStride; + uint32_t maxVertexOutputComponents; + uint32_t maxTessellationGenerationLevel; + uint32_t maxTessellationPatchSize; + uint32_t maxTessellationControlPerVertexInputComponents; + uint32_t maxTessellationControlPerVertexOutputComponents; + uint32_t maxTessellationControlPerPatchOutputComponents; + uint32_t maxTessellationControlTotalOutputComponents; + uint32_t maxTessellationEvaluationInputComponents; + uint32_t maxTessellationEvaluationOutputComponents; + uint32_t maxGeometryShaderInvocations; + uint32_t maxGeometryInputComponents; + uint32_t maxGeometryOutputComponents; + uint32_t maxGeometryOutputVertices; + uint32_t maxGeometryTotalOutputComponents; + uint32_t maxFragmentInputComponents; + uint32_t maxFragmentOutputAttachments; + uint32_t maxFragmentDualSrcAttachments; + uint32_t maxFragmentCombinedOutputResources; + uint32_t maxComputeSharedMemorySize; + uint32_t 
maxComputeWorkGroupCount[3]; + uint32_t maxComputeWorkGroupInvocations; + uint32_t maxComputeWorkGroupSize[3]; + uint32_t subPixelPrecisionBits; + uint32_t subTexelPrecisionBits; + uint32_t mipmapPrecisionBits; + uint32_t maxDrawIndexedIndexValue; + uint32_t maxDrawIndirectCount; + float maxSamplerLodBias; + float maxSamplerAnisotropy; + uint32_t maxViewports; + uint32_t maxViewportDimensions[2]; + float viewportBoundsRange[2]; + uint32_t viewportSubPixelBits; + size_t minMemoryMapAlignment; + VkDeviceSize minTexelBufferOffsetAlignment; + VkDeviceSize minUniformBufferOffsetAlignment; + VkDeviceSize minStorageBufferOffsetAlignment; + int32_t minTexelOffset; + uint32_t maxTexelOffset; + int32_t minTexelGatherOffset; + uint32_t maxTexelGatherOffset; + float minInterpolationOffset; + float maxInterpolationOffset; + uint32_t subPixelInterpolationOffsetBits; + uint32_t maxFramebufferWidth; + uint32_t maxFramebufferHeight; + uint32_t maxFramebufferLayers; + VkSampleCountFlags framebufferColorSampleCounts; + VkSampleCountFlags framebufferDepthSampleCounts; + VkSampleCountFlags framebufferStencilSampleCounts; + VkSampleCountFlags framebufferNoAttachmentsSampleCounts; + uint32_t maxColorAttachments; + VkSampleCountFlags sampledImageColorSampleCounts; + VkSampleCountFlags sampledImageIntegerSampleCounts; + VkSampleCountFlags sampledImageDepthSampleCounts; + VkSampleCountFlags sampledImageStencilSampleCounts; + VkSampleCountFlags storageImageSampleCounts; + uint32_t maxSampleMaskWords; + VkBool32 timestampComputeAndGraphics; + float timestampPeriod; + uint32_t maxClipDistances; + uint32_t maxCullDistances; + uint32_t maxCombinedClipAndCullDistances; + uint32_t discreteQueuePriorities; + float pointSizeRange[2]; + float lineWidthRange[2]; + float pointSizeGranularity; + float lineWidthGranularity; + VkBool32 strictLines; + VkBool32 standardSampleLocations; + VkDeviceSize optimalBufferCopyOffsetAlignment; + VkDeviceSize optimalBufferCopyRowPitchAlignment; + VkDeviceSize nonCoherentAtomSize; +} VkPhysicalDeviceLimits; + +typedef struct VkPhysicalDeviceMemoryProperties { + uint32_t memoryTypeCount; + VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES]; + uint32_t memoryHeapCount; + VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryProperties; + +typedef struct VkPhysicalDeviceSparseProperties { + VkBool32 residencyStandard2DBlockShape; + VkBool32 residencyStandard2DMultisampleBlockShape; + VkBool32 residencyStandard3DBlockShape; + VkBool32 residencyAlignedMipSize; + VkBool32 residencyNonResidentStrict; +} VkPhysicalDeviceSparseProperties; + +typedef struct VkPhysicalDeviceProperties { + uint32_t apiVersion; + uint32_t driverVersion; + uint32_t vendorID; + uint32_t deviceID; + VkPhysicalDeviceType deviceType; + char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE]; + uint8_t pipelineCacheUUID[VK_UUID_SIZE]; + VkPhysicalDeviceLimits limits; + VkPhysicalDeviceSparseProperties sparseProperties; +} VkPhysicalDeviceProperties; + +typedef struct VkQueueFamilyProperties { + VkQueueFlags queueFlags; + uint32_t queueCount; + uint32_t timestampValidBits; + VkExtent3D minImageTransferGranularity; +} VkQueueFamilyProperties; + +typedef struct VkDeviceQueueCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueCount; + const float* pQueuePriorities; +} VkDeviceQueueCreateInfo; + +typedef struct VkDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceCreateFlags flags; + uint32_t queueCreateInfoCount; + 
const VkDeviceQueueCreateInfo* pQueueCreateInfos; + uint32_t enabledLayerCount; + const char* const* ppEnabledLayerNames; + uint32_t enabledExtensionCount; + const char* const* ppEnabledExtensionNames; + const VkPhysicalDeviceFeatures* pEnabledFeatures; +} VkDeviceCreateInfo; + +typedef struct VkExtensionProperties { + char extensionName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; +} VkExtensionProperties; + +typedef struct VkLayerProperties { + char layerName[VK_MAX_EXTENSION_NAME_SIZE]; + uint32_t specVersion; + uint32_t implementationVersion; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkLayerProperties; + +typedef struct VkSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + const VkPipelineStageFlags* pWaitDstStageMask; + uint32_t commandBufferCount; + const VkCommandBuffer* pCommandBuffers; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkSubmitInfo; + +typedef struct VkMappedMemoryRange { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkDeviceSize offset; + VkDeviceSize size; +} VkMappedMemoryRange; + +typedef struct VkMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeIndex; +} VkMemoryAllocateInfo; + +typedef struct VkMemoryRequirements { + VkDeviceSize size; + VkDeviceSize alignment; + uint32_t memoryTypeBits; +} VkMemoryRequirements; + +typedef struct VkSparseMemoryBind { + VkDeviceSize resourceOffset; + VkDeviceSize size; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseMemoryBind; + +typedef struct VkSparseBufferMemoryBindInfo { + VkBuffer buffer; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseBufferMemoryBindInfo; + +typedef struct VkSparseImageOpaqueMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseMemoryBind* pBinds; +} VkSparseImageOpaqueMemoryBindInfo; + +typedef struct VkImageSubresource { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t arrayLayer; +} VkImageSubresource; + +typedef struct VkSparseImageMemoryBind { + VkImageSubresource subresource; + VkOffset3D offset; + VkExtent3D extent; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + VkSparseMemoryBindFlags flags; +} VkSparseImageMemoryBind; + +typedef struct VkSparseImageMemoryBindInfo { + VkImage image; + uint32_t bindCount; + const VkSparseImageMemoryBind* pBinds; +} VkSparseImageMemoryBindInfo; + +typedef struct VkBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t bufferBindCount; + const VkSparseBufferMemoryBindInfo* pBufferBinds; + uint32_t imageOpaqueBindCount; + const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + uint32_t imageBindCount; + const VkSparseImageMemoryBindInfo* pImageBinds; + uint32_t signalSemaphoreCount; + const VkSemaphore* pSignalSemaphores; +} VkBindSparseInfo; + +typedef struct VkSparseImageFormatProperties { + VkImageAspectFlags aspectMask; + VkExtent3D imageGranularity; + VkSparseImageFormatFlags flags; +} VkSparseImageFormatProperties; + +typedef struct VkSparseImageMemoryRequirements { + VkSparseImageFormatProperties formatProperties; + uint32_t imageMipTailFirstLod; + VkDeviceSize imageMipTailSize; + VkDeviceSize imageMipTailOffset; + VkDeviceSize imageMipTailStride; +} VkSparseImageMemoryRequirements; + +typedef struct VkFenceCreateInfo { + VkStructureType 
sType; + const void* pNext; + VkFenceCreateFlags flags; +} VkFenceCreateInfo; + +typedef struct VkSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreCreateFlags flags; +} VkSemaphoreCreateInfo; + +typedef struct VkEventCreateInfo { + VkStructureType sType; + const void* pNext; + VkEventCreateFlags flags; +} VkEventCreateInfo; + +typedef struct VkQueryPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkQueryPoolCreateFlags flags; + VkQueryType queryType; + uint32_t queryCount; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkQueryPoolCreateInfo; + +typedef struct VkBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkDeviceSize size; + VkBufferUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkBufferCreateInfo; + +typedef struct VkBufferViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkBufferViewCreateFlags flags; + VkBuffer buffer; + VkFormat format; + VkDeviceSize offset; + VkDeviceSize range; +} VkBufferViewCreateInfo; + +typedef struct VkImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageType imageType; + VkFormat format; + VkExtent3D extent; + uint32_t mipLevels; + uint32_t arrayLayers; + VkSampleCountFlagBits samples; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkImageLayout initialLayout; +} VkImageCreateInfo; + +typedef struct VkSubresourceLayout { + VkDeviceSize offset; + VkDeviceSize size; + VkDeviceSize rowPitch; + VkDeviceSize arrayPitch; + VkDeviceSize depthPitch; +} VkSubresourceLayout; + +typedef struct VkComponentMapping { + VkComponentSwizzle r; + VkComponentSwizzle g; + VkComponentSwizzle b; + VkComponentSwizzle a; +} VkComponentMapping; + +typedef struct VkImageViewCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageViewCreateFlags flags; + VkImage image; + VkImageViewType viewType; + VkFormat format; + VkComponentMapping components; + VkImageSubresourceRange subresourceRange; +} VkImageViewCreateInfo; + +typedef struct VkShaderModuleCreateInfo { + VkStructureType sType; + const void* pNext; + VkShaderModuleCreateFlags flags; + size_t codeSize; + const uint32_t* pCode; +} VkShaderModuleCreateInfo; + +typedef struct VkPipelineCacheCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCacheCreateFlags flags; + size_t initialDataSize; + const void* pInitialData; +} VkPipelineCacheCreateInfo; + +typedef struct VkSpecializationMapEntry { + uint32_t constantID; + uint32_t offset; + size_t size; +} VkSpecializationMapEntry; + +typedef struct VkSpecializationInfo { + uint32_t mapEntryCount; + const VkSpecializationMapEntry* pMapEntries; + size_t dataSize; + const void* pData; +} VkSpecializationInfo; + +typedef struct VkPipelineShaderStageCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineShaderStageCreateFlags flags; + VkShaderStageFlagBits stage; + VkShaderModule module; + const char* pName; + const VkSpecializationInfo* pSpecializationInfo; +} VkPipelineShaderStageCreateInfo; + +typedef struct VkComputePipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + VkPipelineShaderStageCreateInfo stage; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkComputePipelineCreateInfo; + +typedef struct 
VkVertexInputBindingDescription { + uint32_t binding; + uint32_t stride; + VkVertexInputRate inputRate; +} VkVertexInputBindingDescription; + +typedef struct VkVertexInputAttributeDescription { + uint32_t location; + uint32_t binding; + VkFormat format; + uint32_t offset; +} VkVertexInputAttributeDescription; + +typedef struct VkPipelineVertexInputStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineVertexInputStateCreateFlags flags; + uint32_t vertexBindingDescriptionCount; + const VkVertexInputBindingDescription* pVertexBindingDescriptions; + uint32_t vertexAttributeDescriptionCount; + const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; +} VkPipelineVertexInputStateCreateInfo; + +typedef struct VkPipelineInputAssemblyStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineInputAssemblyStateCreateFlags flags; + VkPrimitiveTopology topology; + VkBool32 primitiveRestartEnable; +} VkPipelineInputAssemblyStateCreateInfo; + +typedef struct VkPipelineTessellationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineTessellationStateCreateFlags flags; + uint32_t patchControlPoints; +} VkPipelineTessellationStateCreateInfo; + +typedef struct VkViewport { + float x; + float y; + float width; + float height; + float minDepth; + float maxDepth; +} VkViewport; + +typedef struct VkPipelineViewportStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineViewportStateCreateFlags flags; + uint32_t viewportCount; + const VkViewport* pViewports; + uint32_t scissorCount; + const VkRect2D* pScissors; +} VkPipelineViewportStateCreateInfo; + +typedef struct VkPipelineRasterizationStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateCreateFlags flags; + VkBool32 depthClampEnable; + VkBool32 rasterizerDiscardEnable; + VkPolygonMode polygonMode; + VkCullModeFlags cullMode; + VkFrontFace frontFace; + VkBool32 depthBiasEnable; + float depthBiasConstantFactor; + float depthBiasClamp; + float depthBiasSlopeFactor; + float lineWidth; +} VkPipelineRasterizationStateCreateInfo; + +typedef struct VkPipelineMultisampleStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineMultisampleStateCreateFlags flags; + VkSampleCountFlagBits rasterizationSamples; + VkBool32 sampleShadingEnable; + float minSampleShading; + const VkSampleMask* pSampleMask; + VkBool32 alphaToCoverageEnable; + VkBool32 alphaToOneEnable; +} VkPipelineMultisampleStateCreateInfo; + +typedef struct VkStencilOpState { + VkStencilOp failOp; + VkStencilOp passOp; + VkStencilOp depthFailOp; + VkCompareOp compareOp; + uint32_t compareMask; + uint32_t writeMask; + uint32_t reference; +} VkStencilOpState; + +typedef struct VkPipelineDepthStencilStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDepthStencilStateCreateFlags flags; + VkBool32 depthTestEnable; + VkBool32 depthWriteEnable; + VkCompareOp depthCompareOp; + VkBool32 depthBoundsTestEnable; + VkBool32 stencilTestEnable; + VkStencilOpState front; + VkStencilOpState back; + float minDepthBounds; + float maxDepthBounds; +} VkPipelineDepthStencilStateCreateInfo; + +typedef struct VkPipelineColorBlendAttachmentState { + VkBool32 blendEnable; + VkBlendFactor srcColorBlendFactor; + VkBlendFactor dstColorBlendFactor; + VkBlendOp colorBlendOp; + VkBlendFactor srcAlphaBlendFactor; + VkBlendFactor dstAlphaBlendFactor; + VkBlendOp alphaBlendOp; + VkColorComponentFlags colorWriteMask; +} VkPipelineColorBlendAttachmentState; + 
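(Illustrative aside, not part of the vendored header: the fixed-function state structures declared here are what vkCreateGraphicsPipelines consumes further below. A minimal sketch of how a single opaque color attachment with blending disabled might be described, assuming the full vulkan_core.h is included; the variable names blend_attachment and blend_state are hypothetical.)

/* Sketch only -- not upstream Vulkan SDK code. Assumes <vulkan/vulkan_core.h>. */
VkPipelineColorBlendAttachmentState blend_attachment = {0};
blend_attachment.blendEnable         = VK_FALSE;            /* pass source color through unmodified */
blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE; /* blend factors are ignored while blendEnable is false */
blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
blend_attachment.colorBlendOp        = VK_BLEND_OP_ADD;
blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
blend_attachment.alphaBlendOp        = VK_BLEND_OP_ADD;
blend_attachment.colorWriteMask      = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                                       VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;

VkPipelineColorBlendStateCreateInfo blend_state = {0};
blend_state.sType           = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
blend_state.logicOpEnable   = VK_FALSE;
blend_state.logicOp         = VK_LOGIC_OP_COPY;
blend_state.attachmentCount = 1;                 /* one entry per color attachment in the subpass */
blend_state.pAttachments    = &blend_attachment; /* later assigned to VkGraphicsPipelineCreateInfo::pColorBlendState */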
+typedef struct VkPipelineColorBlendStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineColorBlendStateCreateFlags flags; + VkBool32 logicOpEnable; + VkLogicOp logicOp; + uint32_t attachmentCount; + const VkPipelineColorBlendAttachmentState* pAttachments; + float blendConstants[4]; +} VkPipelineColorBlendStateCreateInfo; + +typedef struct VkPipelineDynamicStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineDynamicStateCreateFlags flags; + uint32_t dynamicStateCount; + const VkDynamicState* pDynamicStates; +} VkPipelineDynamicStateCreateInfo; + +typedef struct VkGraphicsPipelineCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; + const VkPipelineViewportStateCreateInfo* pViewportState; + const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkRenderPass renderPass; + uint32_t subpass; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkGraphicsPipelineCreateInfo; + +typedef struct VkPushConstantRange { + VkShaderStageFlags stageFlags; + uint32_t offset; + uint32_t size; +} VkPushConstantRange; + +typedef struct VkPipelineLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkPipelineLayoutCreateFlags flags; + uint32_t setLayoutCount; + const VkDescriptorSetLayout* pSetLayouts; + uint32_t pushConstantRangeCount; + const VkPushConstantRange* pPushConstantRanges; +} VkPipelineLayoutCreateInfo; + +typedef struct VkSamplerCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerCreateFlags flags; + VkFilter magFilter; + VkFilter minFilter; + VkSamplerMipmapMode mipmapMode; + VkSamplerAddressMode addressModeU; + VkSamplerAddressMode addressModeV; + VkSamplerAddressMode addressModeW; + float mipLodBias; + VkBool32 anisotropyEnable; + float maxAnisotropy; + VkBool32 compareEnable; + VkCompareOp compareOp; + float minLod; + float maxLod; + VkBorderColor borderColor; + VkBool32 unnormalizedCoordinates; +} VkSamplerCreateInfo; + +typedef struct VkCopyDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet srcSet; + uint32_t srcBinding; + uint32_t srcArrayElement; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; +} VkCopyDescriptorSet; + +typedef struct VkDescriptorBufferInfo { + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize range; +} VkDescriptorBufferInfo; + +typedef struct VkDescriptorImageInfo { + VkSampler sampler; + VkImageView imageView; + VkImageLayout imageLayout; +} VkDescriptorImageInfo; + +typedef struct VkDescriptorPoolSize { + VkDescriptorType type; + uint32_t descriptorCount; +} VkDescriptorPoolSize; + +typedef struct VkDescriptorPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorPoolCreateFlags flags; + uint32_t maxSets; + uint32_t poolSizeCount; + const VkDescriptorPoolSize* pPoolSizes; +} VkDescriptorPoolCreateInfo; + +typedef struct VkDescriptorSetAllocateInfo { + VkStructureType sType; 
+ const void* pNext; + VkDescriptorPool descriptorPool; + uint32_t descriptorSetCount; + const VkDescriptorSetLayout* pSetLayouts; +} VkDescriptorSetAllocateInfo; + +typedef struct VkDescriptorSetLayoutBinding { + uint32_t binding; + VkDescriptorType descriptorType; + uint32_t descriptorCount; + VkShaderStageFlags stageFlags; + const VkSampler* pImmutableSamplers; +} VkDescriptorSetLayoutBinding; + +typedef struct VkDescriptorSetLayoutCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorSetLayoutCreateFlags flags; + uint32_t bindingCount; + const VkDescriptorSetLayoutBinding* pBindings; +} VkDescriptorSetLayoutCreateInfo; + +typedef struct VkWriteDescriptorSet { + VkStructureType sType; + const void* pNext; + VkDescriptorSet dstSet; + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + const VkDescriptorImageInfo* pImageInfo; + const VkDescriptorBufferInfo* pBufferInfo; + const VkBufferView* pTexelBufferView; +} VkWriteDescriptorSet; + +typedef struct VkAttachmentDescription { + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription; + +typedef struct VkAttachmentReference { + uint32_t attachment; + VkImageLayout layout; +} VkAttachmentReference; + +typedef struct VkFramebufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkFramebufferCreateFlags flags; + VkRenderPass renderPass; + uint32_t attachmentCount; + const VkImageView* pAttachments; + uint32_t width; + uint32_t height; + uint32_t layers; +} VkFramebufferCreateInfo; + +typedef struct VkSubpassDescription { + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t inputAttachmentCount; + const VkAttachmentReference* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference* pColorAttachments; + const VkAttachmentReference* pResolveAttachments; + const VkAttachmentReference* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription; + +typedef struct VkSubpassDependency { + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; +} VkSubpassDependency; + +typedef struct VkRenderPassCreateInfo { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency* pDependencies; +} VkRenderPassCreateInfo; + +typedef struct VkCommandPoolCreateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPoolCreateFlags flags; + uint32_t queueFamilyIndex; +} VkCommandPoolCreateInfo; + +typedef struct VkCommandBufferAllocateInfo { + VkStructureType sType; + const void* pNext; + VkCommandPool commandPool; + VkCommandBufferLevel level; + uint32_t commandBufferCount; +} VkCommandBufferAllocateInfo; + +typedef struct VkCommandBufferInheritanceInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + uint32_t subpass; + VkFramebuffer framebuffer; + VkBool32 occlusionQueryEnable; + 
VkQueryControlFlags queryFlags; + VkQueryPipelineStatisticFlags pipelineStatistics; +} VkCommandBufferInheritanceInfo; + +typedef struct VkCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + VkCommandBufferUsageFlags flags; + const VkCommandBufferInheritanceInfo* pInheritanceInfo; +} VkCommandBufferBeginInfo; + +typedef struct VkBufferCopy { + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy; + +typedef struct VkImageSubresourceLayers { + VkImageAspectFlags aspectMask; + uint32_t mipLevel; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkImageSubresourceLayers; + +typedef struct VkBufferImageCopy { + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy; + +typedef union VkClearColorValue { + float float32[4]; + int32_t int32[4]; + uint32_t uint32[4]; +} VkClearColorValue; + +typedef struct VkClearDepthStencilValue { + float depth; + uint32_t stencil; +} VkClearDepthStencilValue; + +typedef union VkClearValue { + VkClearColorValue color; + VkClearDepthStencilValue depthStencil; +} VkClearValue; + +typedef struct VkClearAttachment { + VkImageAspectFlags aspectMask; + uint32_t colorAttachment; + VkClearValue clearValue; +} VkClearAttachment; + +typedef struct VkClearRect { + VkRect2D rect; + uint32_t baseArrayLayer; + uint32_t layerCount; +} VkClearRect; + +typedef struct VkImageBlit { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit; + +typedef struct VkImageCopy { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy; + +typedef struct VkImageResolve { + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve; + +typedef struct VkRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + VkRenderPass renderPass; + VkFramebuffer framebuffer; + VkRect2D renderArea; + uint32_t clearValueCount; + const VkClearValue* pClearValues; +} VkRenderPassBeginInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance); +typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice 
physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName); +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice); +typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue); +typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory); +typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData); +typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory); +typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges); +typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements); +typedef 
void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence); +typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore); +typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent); +typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event); +typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool); +typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage); +typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout); +typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const 
VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView); +typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule); +typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler); +typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets); +typedef VkResult (VKAPI_PTR 
*PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies); +typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer); +typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity); +typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool); +typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags); +typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers); +typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); +typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo); +typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer); +typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports); +typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor); +typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask); +typedef void (VKAPI_PTR 
*PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference); +typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data); +typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges); +typedef void 
(VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions); +typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdResetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL 
vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + 
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice 
device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + 
VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer 
commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const 
VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( + VkCommandBuffer commandBuffer, + VkQueryPool 
queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants( + VkCommandBuffer commandBuffer, + VkPipelineLayout layout, + VkShaderStageFlags stageFlags, + uint32_t offset, + uint32_t size, + const void* pValues); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass( + VkCommandBuffer commandBuffer, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands( + VkCommandBuffer commandBuffer, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); +#endif + + +#define VK_VERSION_1_1 1 +// Vulkan 1.1 version number +#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)// Patch version should always be set to 0 + +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate) +#define VK_MAX_DEVICE_GROUP_SIZE 32 +#define VK_LUID_SIZE 8 +#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1) + +typedef enum VkPointClippingBehavior { + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0, + VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1, + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, + VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF +} VkPointClippingBehavior; + +typedef enum VkTessellationDomainOrigin { + VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0, + VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1, + VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF +} VkTessellationDomainOrigin; + +typedef enum VkSamplerYcbcrModelConversion { + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrModelConversion; + +typedef enum VkSamplerYcbcrRange { + VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0, + VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1, + VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL, + 
VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW, + VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerYcbcrRange; + +typedef enum VkChromaLocation { + VK_CHROMA_LOCATION_COSITED_EVEN = 0, + VK_CHROMA_LOCATION_MIDPOINT = 1, + VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN, + VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT, + VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF +} VkChromaLocation; + +typedef enum VkDescriptorUpdateTemplateType { + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, + VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorUpdateTemplateType; + +typedef enum VkSubgroupFeatureFlagBits { + VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001, + VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002, + VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004, + VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008, + VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010, + VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020, + VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040, + VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080, + VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100, + VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubgroupFeatureFlagBits; +typedef VkFlags VkSubgroupFeatureFlags; + +typedef enum VkPeerMemoryFeatureFlagBits { + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008, + VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT, + VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT, + VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT, + VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPeerMemoryFeatureFlagBits; +typedef VkFlags VkPeerMemoryFeatureFlags; + +typedef enum VkMemoryAllocateFlagBits { + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004, + VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT, + VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT, + VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkMemoryAllocateFlagBits; +typedef VkFlags VkMemoryAllocateFlags; +typedef VkFlags VkCommandPoolTrimFlags; +typedef VkFlags VkDescriptorUpdateTemplateCreateFlags; + +typedef enum VkExternalMemoryHandleTypeFlagBits { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400, + 
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBits; +typedef VkFlags VkExternalMemoryHandleTypeFlags; + +typedef enum VkExternalMemoryFeatureFlagBits { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBits; +typedef VkFlags VkExternalMemoryFeatureFlags; + +typedef enum VkExternalFenceHandleTypeFlagBits { + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceHandleTypeFlagBits; +typedef VkFlags VkExternalFenceHandleTypeFlags; + +typedef enum VkExternalFenceFeatureFlagBits { + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalFenceFeatureFlagBits; +typedef VkFlags VkExternalFenceFeatureFlags; + +typedef enum VkFenceImportFlagBits { + VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT, + VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFenceImportFlagBits; +typedef VkFlags VkFenceImportFlags; + +typedef enum VkSemaphoreImportFlagBits { + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001, + VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT, + VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} 
VkSemaphoreImportFlagBits; +typedef VkFlags VkSemaphoreImportFlags; + +typedef enum VkExternalSemaphoreHandleTypeFlagBits { + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE_BIT = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreHandleTypeFlagBits; +typedef VkFlags VkExternalSemaphoreHandleTypeFlags; + +typedef enum VkExternalSemaphoreFeatureFlagBits { + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002, + VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT, + VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkExternalSemaphoreFeatureFlagBits; +typedef VkFlags VkExternalSemaphoreFeatureFlags; +typedef struct VkPhysicalDeviceSubgroupProperties { + VkStructureType sType; + void* pNext; + uint32_t subgroupSize; + VkShaderStageFlags supportedStages; + VkSubgroupFeatureFlags supportedOperations; + VkBool32 quadOperationsInAllStages; +} VkPhysicalDeviceSubgroupProperties; + +typedef struct VkBindBufferMemoryInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindBufferMemoryInfo; + +typedef struct VkBindImageMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; +} VkBindImageMemoryInfo; + +typedef struct VkPhysicalDevice16BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; +} VkPhysicalDevice16BitStorageFeatures; + +typedef struct VkMemoryDedicatedRequirements { + VkStructureType sType; + void* pNext; + VkBool32 prefersDedicatedAllocation; + VkBool32 requiresDedicatedAllocation; +} VkMemoryDedicatedRequirements; + +typedef struct VkMemoryDedicatedAllocateInfo { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkMemoryDedicatedAllocateInfo; + +typedef struct VkMemoryAllocateFlagsInfo { + VkStructureType sType; + const void* pNext; + VkMemoryAllocateFlags flags; + uint32_t deviceMask; +} VkMemoryAllocateFlagsInfo; + +typedef struct VkDeviceGroupRenderPassBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; + uint32_t deviceRenderAreaCount; + const VkRect2D* pDeviceRenderAreas; +} VkDeviceGroupRenderPassBeginInfo; + +typedef struct 
VkDeviceGroupCommandBufferBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceMask; +} VkDeviceGroupCommandBufferBeginInfo; + +typedef struct VkDeviceGroupSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const uint32_t* pWaitSemaphoreDeviceIndices; + uint32_t commandBufferCount; + const uint32_t* pCommandBufferDeviceMasks; + uint32_t signalSemaphoreCount; + const uint32_t* pSignalSemaphoreDeviceIndices; +} VkDeviceGroupSubmitInfo; + +typedef struct VkDeviceGroupBindSparseInfo { + VkStructureType sType; + const void* pNext; + uint32_t resourceDeviceIndex; + uint32_t memoryDeviceIndex; +} VkDeviceGroupBindSparseInfo; + +typedef struct VkBindBufferMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindBufferMemoryDeviceGroupInfo; + +typedef struct VkBindImageMemoryDeviceGroupInfo { + VkStructureType sType; + const void* pNext; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; + uint32_t splitInstanceBindRegionCount; + const VkRect2D* pSplitInstanceBindRegions; +} VkBindImageMemoryDeviceGroupInfo; + +typedef struct VkPhysicalDeviceGroupProperties { + VkStructureType sType; + void* pNext; + uint32_t physicalDeviceCount; + VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE]; + VkBool32 subsetAllocation; +} VkPhysicalDeviceGroupProperties; + +typedef struct VkDeviceGroupDeviceCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t physicalDeviceCount; + const VkPhysicalDevice* pPhysicalDevices; +} VkDeviceGroupDeviceCreateInfo; + +typedef struct VkBufferMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferMemoryRequirementsInfo2; + +typedef struct VkImageMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageMemoryRequirementsInfo2; + +typedef struct VkImageSparseMemoryRequirementsInfo2 { + VkStructureType sType; + const void* pNext; + VkImage image; +} VkImageSparseMemoryRequirementsInfo2; + +typedef struct VkMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkMemoryRequirements memoryRequirements; +} VkMemoryRequirements2; + +typedef struct VkSparseImageMemoryRequirements2 { + VkStructureType sType; + void* pNext; + VkSparseImageMemoryRequirements memoryRequirements; +} VkSparseImageMemoryRequirements2; + +typedef struct VkPhysicalDeviceFeatures2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceFeatures features; +} VkPhysicalDeviceFeatures2; + +typedef struct VkPhysicalDeviceProperties2 { + VkStructureType sType; + void* pNext; + VkPhysicalDeviceProperties properties; +} VkPhysicalDeviceProperties2; + +typedef struct VkFormatProperties2 { + VkStructureType sType; + void* pNext; + VkFormatProperties formatProperties; +} VkFormatProperties2; + +typedef struct VkImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkImageFormatProperties imageFormatProperties; +} VkImageFormatProperties2; + +typedef struct VkPhysicalDeviceImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkImageTiling tiling; + VkImageUsageFlags usage; + VkImageCreateFlags flags; +} VkPhysicalDeviceImageFormatInfo2; + +typedef struct VkQueueFamilyProperties2 { + VkStructureType sType; + void* pNext; + VkQueueFamilyProperties queueFamilyProperties; +} VkQueueFamilyProperties2; + +typedef struct VkPhysicalDeviceMemoryProperties2 { + VkStructureType sType; + void* 
pNext; + VkPhysicalDeviceMemoryProperties memoryProperties; +} VkPhysicalDeviceMemoryProperties2; + +typedef struct VkSparseImageFormatProperties2 { + VkStructureType sType; + void* pNext; + VkSparseImageFormatProperties properties; +} VkSparseImageFormatProperties2; + +typedef struct VkPhysicalDeviceSparseImageFormatInfo2 { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkImageType type; + VkSampleCountFlagBits samples; + VkImageUsageFlags usage; + VkImageTiling tiling; +} VkPhysicalDeviceSparseImageFormatInfo2; + +typedef struct VkPhysicalDevicePointClippingProperties { + VkStructureType sType; + void* pNext; + VkPointClippingBehavior pointClippingBehavior; +} VkPhysicalDevicePointClippingProperties; + +typedef struct VkInputAttachmentAspectReference { + uint32_t subpass; + uint32_t inputAttachmentIndex; + VkImageAspectFlags aspectMask; +} VkInputAttachmentAspectReference; + +typedef struct VkRenderPassInputAttachmentAspectCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t aspectReferenceCount; + const VkInputAttachmentAspectReference* pAspectReferences; +} VkRenderPassInputAttachmentAspectCreateInfo; + +typedef struct VkImageViewUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags usage; +} VkImageViewUsageCreateInfo; + +typedef struct VkPipelineTessellationDomainOriginStateCreateInfo { + VkStructureType sType; + const void* pNext; + VkTessellationDomainOrigin domainOrigin; +} VkPipelineTessellationDomainOriginStateCreateInfo; + +typedef struct VkRenderPassMultiviewCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t subpassCount; + const uint32_t* pViewMasks; + uint32_t dependencyCount; + const int32_t* pViewOffsets; + uint32_t correlationMaskCount; + const uint32_t* pCorrelationMasks; +} VkRenderPassMultiviewCreateInfo; + +typedef struct VkPhysicalDeviceMultiviewFeatures { + VkStructureType sType; + void* pNext; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; + VkBool32 multiviewTessellationShader; +} VkPhysicalDeviceMultiviewFeatures; + +typedef struct VkPhysicalDeviceMultiviewProperties { + VkStructureType sType; + void* pNext; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; +} VkPhysicalDeviceMultiviewProperties; + +typedef struct VkPhysicalDeviceVariablePointersFeatures { + VkStructureType sType; + void* pNext; + VkBool32 variablePointersStorageBuffer; + VkBool32 variablePointers; +} VkPhysicalDeviceVariablePointersFeatures; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryFeatures { + VkStructureType sType; + void* pNext; + VkBool32 protectedMemory; +} VkPhysicalDeviceProtectedMemoryFeatures; + +typedef struct VkPhysicalDeviceProtectedMemoryProperties { + VkStructureType sType; + void* pNext; + VkBool32 protectedNoFault; +} VkPhysicalDeviceProtectedMemoryProperties; + +typedef struct VkDeviceQueueInfo2 { + VkStructureType sType; + const void* pNext; + VkDeviceQueueCreateFlags flags; + uint32_t queueFamilyIndex; + uint32_t queueIndex; +} VkDeviceQueueInfo2; + +typedef struct VkProtectedSubmitInfo { + VkStructureType sType; + const void* pNext; + VkBool32 protectedSubmit; +} VkProtectedSubmitInfo; + +typedef struct VkSamplerYcbcrConversionCreateInfo { + VkStructureType sType; + const void* pNext; + VkFormat format; + VkSamplerYcbcrModelConversion ycbcrModel; + VkSamplerYcbcrRange ycbcrRange; + VkComponentMapping components; + VkChromaLocation xChromaOffset; + 
VkChromaLocation yChromaOffset; + VkFilter chromaFilter; + VkBool32 forceExplicitReconstruction; +} VkSamplerYcbcrConversionCreateInfo; + +typedef struct VkSamplerYcbcrConversionInfo { + VkStructureType sType; + const void* pNext; + VkSamplerYcbcrConversion conversion; +} VkSamplerYcbcrConversionInfo; + +typedef struct VkBindImagePlaneMemoryInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkBindImagePlaneMemoryInfo; + +typedef struct VkImagePlaneMemoryRequirementsInfo { + VkStructureType sType; + const void* pNext; + VkImageAspectFlagBits planeAspect; +} VkImagePlaneMemoryRequirementsInfo; + +typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures { + VkStructureType sType; + void* pNext; + VkBool32 samplerYcbcrConversion; +} VkPhysicalDeviceSamplerYcbcrConversionFeatures; + +typedef struct VkSamplerYcbcrConversionImageFormatProperties { + VkStructureType sType; + void* pNext; + uint32_t combinedImageSamplerDescriptorCount; +} VkSamplerYcbcrConversionImageFormatProperties; + +typedef struct VkDescriptorUpdateTemplateEntry { + uint32_t dstBinding; + uint32_t dstArrayElement; + uint32_t descriptorCount; + VkDescriptorType descriptorType; + size_t offset; + size_t stride; +} VkDescriptorUpdateTemplateEntry; + +typedef struct VkDescriptorUpdateTemplateCreateInfo { + VkStructureType sType; + const void* pNext; + VkDescriptorUpdateTemplateCreateFlags flags; + uint32_t descriptorUpdateEntryCount; + const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries; + VkDescriptorUpdateTemplateType templateType; + VkDescriptorSetLayout descriptorSetLayout; + VkPipelineBindPoint pipelineBindPoint; + VkPipelineLayout pipelineLayout; + uint32_t set; +} VkDescriptorUpdateTemplateCreateInfo; + +typedef struct VkExternalMemoryProperties { + VkExternalMemoryFeatureFlags externalMemoryFeatures; + VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlags compatibleHandleTypes; +} VkExternalMemoryProperties; + +typedef struct VkPhysicalDeviceExternalImageFormatInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalImageFormatInfo; + +typedef struct VkExternalImageFormatProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalImageFormatProperties; + +typedef struct VkPhysicalDeviceExternalBufferInfo { + VkStructureType sType; + const void* pNext; + VkBufferCreateFlags flags; + VkBufferUsageFlags usage; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalBufferInfo; + +typedef struct VkExternalBufferProperties { + VkStructureType sType; + void* pNext; + VkExternalMemoryProperties externalMemoryProperties; +} VkExternalBufferProperties; + +typedef struct VkPhysicalDeviceIDProperties { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; +} VkPhysicalDeviceIDProperties; + +typedef struct VkExternalMemoryImageCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryImageCreateInfo; + +typedef struct VkExternalMemoryBufferCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlags handleTypes; +} VkExternalMemoryBufferCreateInfo; + +typedef struct VkExportMemoryAllocateInfo { + VkStructureType sType; + const void* pNext; + 
VkExternalMemoryHandleTypeFlags handleTypes; +} VkExportMemoryAllocateInfo; + +typedef struct VkPhysicalDeviceExternalFenceInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalFenceInfo; + +typedef struct VkExternalFenceProperties { + VkStructureType sType; + void* pNext; + VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes; + VkExternalFenceHandleTypeFlags compatibleHandleTypes; + VkExternalFenceFeatureFlags externalFenceFeatures; +} VkExternalFenceProperties; + +typedef struct VkExportFenceCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalFenceHandleTypeFlags handleTypes; +} VkExportFenceCreateInfo; + +typedef struct VkExportSemaphoreCreateInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlags handleTypes; +} VkExportSemaphoreCreateInfo; + +typedef struct VkPhysicalDeviceExternalSemaphoreInfo { + VkStructureType sType; + const void* pNext; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkPhysicalDeviceExternalSemaphoreInfo; + +typedef struct VkExternalSemaphoreProperties { + VkStructureType sType; + void* pNext; + VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes; + VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes; + VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures; +} VkExternalSemaphoreProperties; + +typedef struct VkPhysicalDeviceMaintenance3Properties { + VkStructureType sType; + void* pNext; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceMaintenance3Properties; + +typedef struct VkDescriptorSetLayoutSupport { + VkStructureType sType; + void* pNext; + VkBool32 supported; +} VkDescriptorSetLayoutSupport; + +typedef struct VkPhysicalDeviceShaderDrawParametersFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceShaderDrawParametersFeatures; + +typedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion); +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const 
VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); +typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue); +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL 
vkEnumerateInstanceVersion( + uint32_t* pApiVersion); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2( + VkDevice device, + const VkDeviceQueueInfo2* pQueueInfo, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( + VkDevice device, + 
VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_VERSION_1_2 1 +// Vulkan 1.2 version number +#define VK_API_VERSION_1_2 VK_MAKE_VERSION(1, 2, 0)// Patch version should always be set to 0 + +#define VK_MAX_DRIVER_NAME_SIZE 256 +#define VK_MAX_DRIVER_INFO_SIZE 256 + +typedef enum VkDriverId { + VK_DRIVER_ID_AMD_PROPRIETARY = 1, + VK_DRIVER_ID_AMD_OPEN_SOURCE = 2, + VK_DRIVER_ID_MESA_RADV = 3, + VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8, + VK_DRIVER_ID_ARM_PROPRIETARY = 9, + VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10, + VK_DRIVER_ID_GGP_PROPRIETARY = 11, + VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12, + VK_DRIVER_ID_MESA_LLVMPIPE = 13, + VK_DRIVER_ID_MOLTENVK = 14, + VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY, + VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE, + VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV, + VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY, + VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY, + VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER, + VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY, + VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = VK_DRIVER_ID_BROADCOM_PROPRIETARY, + VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF +} VkDriverId; + +typedef enum VkShaderFloatControlsIndependence { + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2, + 
VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF +} VkShaderFloatControlsIndependence; + +typedef enum VkSamplerReductionMode { + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0, + VK_SAMPLER_REDUCTION_MODE_MIN = 1, + VK_SAMPLER_REDUCTION_MODE_MAX = 2, + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE, + VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN, + VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX, + VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerReductionMode; + +typedef enum VkSemaphoreType { + VK_SEMAPHORE_TYPE_BINARY = 0, + VK_SEMAPHORE_TYPE_TIMELINE = 1, + VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY, + VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE, + VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreType; + +typedef enum VkResolveModeFlagBits { + VK_RESOLVE_MODE_NONE = 0, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001, + VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002, + VK_RESOLVE_MODE_MIN_BIT = 0x00000004, + VK_RESOLVE_MODE_MAX_BIT = 0x00000008, + VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT, + VK_RESOLVE_MODE_AVERAGE_BIT_KHR = VK_RESOLVE_MODE_AVERAGE_BIT, + VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT, + VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT, + VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkResolveModeFlagBits; +typedef VkFlags VkResolveModeFlags; + +typedef enum VkDescriptorBindingFlagBits { + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008, + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT, + VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorBindingFlagBits; +typedef VkFlags VkDescriptorBindingFlags; + +typedef enum VkSemaphoreWaitFlagBits { + VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001, + VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT, + VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreWaitFlagBits; +typedef VkFlags VkSemaphoreWaitFlags; +typedef struct VkPhysicalDeviceVulkan11Features { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; + VkBool32 multiviewTessellationShader; + VkBool32 variablePointersStorageBuffer; + VkBool32 variablePointers; + VkBool32 protectedMemory; + VkBool32 samplerYcbcrConversion; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceVulkan11Features; + +typedef struct VkPhysicalDeviceVulkan11Properties { + VkStructureType sType; + 
void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; + uint32_t subgroupSize; + VkShaderStageFlags subgroupSupportedStages; + VkSubgroupFeatureFlags subgroupSupportedOperations; + VkBool32 subgroupQuadOperationsInAllStages; + VkPointClippingBehavior pointClippingBehavior; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; + VkBool32 protectedNoFault; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceVulkan11Properties; + +typedef struct VkPhysicalDeviceVulkan12Features { + VkStructureType sType; + void* pNext; + VkBool32 samplerMirrorClampToEdge; + VkBool32 drawIndirectCount; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; + VkBool32 descriptorIndexing; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; + VkBool32 samplerFilterMinmax; + VkBool32 scalarBlockLayout; + VkBool32 imagelessFramebuffer; + VkBool32 uniformBufferStandardLayout; + VkBool32 shaderSubgroupExtendedTypes; + VkBool32 separateDepthStencilLayouts; + VkBool32 hostQueryReset; + VkBool32 timelineSemaphore; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; + VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; + VkBool32 shaderOutputViewportIndex; + VkBool32 shaderOutputLayer; + VkBool32 subgroupBroadcastDynamicId; +} VkPhysicalDeviceVulkan12Features; + +typedef struct VkConformanceVersion { + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t patch; +} VkConformanceVersion; + +typedef struct VkPhysicalDeviceVulkan12Properties { + VkStructureType sType; + void* pNext; + VkDriverId driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; + VkConformanceVersion conformanceVersion; + VkShaderFloatControlsIndependence denormBehaviorIndependence; + VkShaderFloatControlsIndependence roundingModeIndependence; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + 
VkBool32 shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; + VkResolveModeFlags supportedDepthResolveModes; + VkResolveModeFlags supportedStencilResolveModes; + VkBool32 independentResolveNone; + VkBool32 independentResolve; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; + uint64_t maxTimelineSemaphoreValueDifference; + VkSampleCountFlags framebufferIntegerColorSampleCounts; +} VkPhysicalDeviceVulkan12Properties; + +typedef struct VkImageFormatListCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkImageFormatListCreateInfo; + +typedef struct VkAttachmentDescription2 { + VkStructureType sType; + const void* pNext; + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription2; + +typedef struct VkAttachmentReference2 { + VkStructureType sType; + const void* pNext; + uint32_t attachment; + VkImageLayout layout; + VkImageAspectFlags aspectMask; +} VkAttachmentReference2; + +typedef struct VkSubpassDescription2 { + VkStructureType sType; + const void* pNext; + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t viewMask; + uint32_t inputAttachmentCount; + const VkAttachmentReference2* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference2* pColorAttachments; + const VkAttachmentReference2* pResolveAttachments; + const VkAttachmentReference2* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription2; + +typedef struct VkSubpassDependency2 { + 
VkStructureType sType; + const void* pNext; + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; + int32_t viewOffset; +} VkSubpassDependency2; + +typedef struct VkRenderPassCreateInfo2 { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription2* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription2* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency2* pDependencies; + uint32_t correlatedViewMaskCount; + const uint32_t* pCorrelatedViewMasks; +} VkRenderPassCreateInfo2; + +typedef struct VkSubpassBeginInfo { + VkStructureType sType; + const void* pNext; + VkSubpassContents contents; +} VkSubpassBeginInfo; + +typedef struct VkSubpassEndInfo { + VkStructureType sType; + const void* pNext; +} VkSubpassEndInfo; + +typedef struct VkPhysicalDevice8BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; +} VkPhysicalDevice8BitStorageFeatures; + +typedef struct VkPhysicalDeviceDriverProperties { + VkStructureType sType; + void* pNext; + VkDriverId driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; + VkConformanceVersion conformanceVersion; +} VkPhysicalDeviceDriverProperties; + +typedef struct VkPhysicalDeviceShaderAtomicInt64Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; +} VkPhysicalDeviceShaderAtomicInt64Features; + +typedef struct VkPhysicalDeviceShaderFloat16Int8Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; +} VkPhysicalDeviceShaderFloat16Int8Features; + +typedef struct VkPhysicalDeviceFloatControlsProperties { + VkStructureType sType; + void* pNext; + VkShaderFloatControlsIndependence denormBehaviorIndependence; + VkShaderFloatControlsIndependence roundingModeIndependence; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + VkBool32 shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; +} VkPhysicalDeviceFloatControlsProperties; + +typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t bindingCount; + const VkDescriptorBindingFlags* pBindingFlags; +} VkDescriptorSetLayoutBindingFlagsCreateInfo; + +typedef struct VkPhysicalDeviceDescriptorIndexingFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 
shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; +} VkPhysicalDeviceDescriptorIndexingFeatures; + +typedef struct VkPhysicalDeviceDescriptorIndexingProperties { + VkStructureType sType; + void* pNext; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; +} VkPhysicalDeviceDescriptorIndexingProperties; + +typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo { + VkStructureType sType; + const void* pNext; + uint32_t descriptorSetCount; + const uint32_t* pDescriptorCounts; +} VkDescriptorSetVariableDescriptorCountAllocateInfo; + +typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport { + VkStructureType sType; + void* pNext; + uint32_t maxVariableDescriptorCount; +} VkDescriptorSetVariableDescriptorCountLayoutSupport; + +typedef struct VkSubpassDescriptionDepthStencilResolve { + VkStructureType sType; + const void* pNext; + VkResolveModeFlagBits depthResolveMode; + VkResolveModeFlagBits stencilResolveMode; + const VkAttachmentReference2* pDepthStencilResolveAttachment; +} VkSubpassDescriptionDepthStencilResolve; + +typedef struct VkPhysicalDeviceDepthStencilResolveProperties { + VkStructureType sType; + void* pNext; + VkResolveModeFlags supportedDepthResolveModes; + VkResolveModeFlags supportedStencilResolveModes; + VkBool32 independentResolveNone; + VkBool32 independentResolve; +} VkPhysicalDeviceDepthStencilResolveProperties; + +typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures { + VkStructureType sType; + void* pNext; + VkBool32 scalarBlockLayout; +} VkPhysicalDeviceScalarBlockLayoutFeatures; + +typedef 
struct VkImageStencilUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags stencilUsage; +} VkImageStencilUsageCreateInfo; + +typedef struct VkSamplerReductionModeCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerReductionMode reductionMode; +} VkSamplerReductionModeCreateInfo; + +typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties { + VkStructureType sType; + void* pNext; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; +} VkPhysicalDeviceSamplerFilterMinmaxProperties; + +typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures { + VkStructureType sType; + void* pNext; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; + VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; +} VkPhysicalDeviceVulkanMemoryModelFeatures; + +typedef struct VkPhysicalDeviceImagelessFramebufferFeatures { + VkStructureType sType; + void* pNext; + VkBool32 imagelessFramebuffer; +} VkPhysicalDeviceImagelessFramebufferFeatures; + +typedef struct VkFramebufferAttachmentImageInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageUsageFlags usage; + uint32_t width; + uint32_t height; + uint32_t layerCount; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkFramebufferAttachmentImageInfo; + +typedef struct VkFramebufferAttachmentsCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t attachmentImageInfoCount; + const VkFramebufferAttachmentImageInfo* pAttachmentImageInfos; +} VkFramebufferAttachmentsCreateInfo; + +typedef struct VkRenderPassAttachmentBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t attachmentCount; + const VkImageView* pAttachments; +} VkRenderPassAttachmentBeginInfo; + +typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures { + VkStructureType sType; + void* pNext; + VkBool32 uniformBufferStandardLayout; +} VkPhysicalDeviceUniformBufferStandardLayoutFeatures; + +typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupExtendedTypes; +} VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures; + +typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures { + VkStructureType sType; + void* pNext; + VkBool32 separateDepthStencilLayouts; +} VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures; + +typedef struct VkAttachmentReferenceStencilLayout { + VkStructureType sType; + void* pNext; + VkImageLayout stencilLayout; +} VkAttachmentReferenceStencilLayout; + +typedef struct VkAttachmentDescriptionStencilLayout { + VkStructureType sType; + void* pNext; + VkImageLayout stencilInitialLayout; + VkImageLayout stencilFinalLayout; +} VkAttachmentDescriptionStencilLayout; + +typedef struct VkPhysicalDeviceHostQueryResetFeatures { + VkStructureType sType; + void* pNext; + VkBool32 hostQueryReset; +} VkPhysicalDeviceHostQueryResetFeatures; + +typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures { + VkStructureType sType; + void* pNext; + VkBool32 timelineSemaphore; +} VkPhysicalDeviceTimelineSemaphoreFeatures; + +typedef struct VkPhysicalDeviceTimelineSemaphoreProperties { + VkStructureType sType; + void* pNext; + uint64_t maxTimelineSemaphoreValueDifference; +} VkPhysicalDeviceTimelineSemaphoreProperties; + +typedef struct VkSemaphoreTypeCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreType semaphoreType; + uint64_t initialValue; +} VkSemaphoreTypeCreateInfo; + +typedef struct 
VkTimelineSemaphoreSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValueCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValueCount; + const uint64_t* pSignalSemaphoreValues; +} VkTimelineSemaphoreSubmitInfo; + +typedef struct VkSemaphoreWaitInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreWaitFlags flags; + uint32_t semaphoreCount; + const VkSemaphore* pSemaphores; + const uint64_t* pValues; +} VkSemaphoreWaitInfo; + +typedef struct VkSemaphoreSignalInfo { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + uint64_t value; +} VkSemaphoreSignalInfo; + +typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeatures; + +typedef struct VkBufferDeviceAddressInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferDeviceAddressInfo; + +typedef struct VkBufferOpaqueCaptureAddressCreateInfo { + VkStructureType sType; + const void* pNext; + uint64_t opaqueCaptureAddress; +} VkBufferOpaqueCaptureAddressCreateInfo; + +typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo { + VkStructureType sType; + const void* pNext; + uint64_t opaqueCaptureAddress; +} VkMemoryOpaqueCaptureAddressAllocateInfo; + +typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkDeviceMemoryOpaqueCaptureAddressInfo; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const 
VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkResetQueryPool( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000, + 
VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSurfaceTransformFlagBitsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult 
VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupSwapchainCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef 
void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif + + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) +#define VK_KHR_DISPLAY_SPEC_VERSION 23 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" +typedef VkFlags VkDisplayModeCreateFlagsKHR; + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t 
refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_KHR_display_swapchain 1 +#define VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* 
pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif + + +#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" +typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t 
baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + +#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif + + +#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif + + +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL 
vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif + + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; + + + +#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif + + +#define VK_KHR_external_semaphore 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + +typedef 
VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + + +#define VK_KHR_shader_float16_int8 1 +#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR; + +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR; + + + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1 +#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} 
VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + +typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif + + +#define VK_KHR_imageless_framebuffer 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME "VK_KHR_imageless_framebuffer" +typedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR; + +typedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR; + +typedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR; + +typedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR; + + + +#define VK_KHR_create_renderpass2 1 +#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 +#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" +typedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR; + +typedef VkAttachmentDescription2 VkAttachmentDescription2KHR; + +typedef VkAttachmentReference2 VkAttachmentReference2KHR; + +typedef VkSubpassDescription2 VkSubpassDescription2KHR; + +typedef VkSubpassDependency2 VkSubpassDependency2KHR; + +typedef VkSubpassBeginInfo VkSubpassBeginInfoKHR; + +typedef VkSubpassEndInfo VkSubpassEndInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* 
pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); +#endif + + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif + + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +#endif + + +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct 
VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_performance_query 1 +#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1 +#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME "VK_KHR_performance_query" + +typedef enum VkPerformanceCounterUnitKHR { + VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0, + VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1, + VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4, + VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5, + VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6, + VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7, + VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8, + VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9, + VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10, + VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterUnitKHR; + +typedef enum VkPerformanceCounterScopeKHR { + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, + VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, + VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterScopeKHR; + +typedef enum VkPerformanceCounterStorageKHR { + VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0, + VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1, + VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2, + VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5, + VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterStorageKHR; + +typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR = 0x00000001, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR = 0x00000002, + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_BIT_KHR, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_BIT_KHR, + VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterDescriptionFlagBitsKHR; +typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR; + +typedef enum VkAcquireProfilingLockFlagBitsKHR { + VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAcquireProfilingLockFlagBitsKHR; +typedef VkFlags VkAcquireProfilingLockFlagsKHR; +typedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 performanceCounterQueryPools; + VkBool32 performanceCounterMultipleQueryPools; +} 
VkPhysicalDevicePerformanceQueryFeaturesKHR; + +typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 allowCommandBufferQueryCopies; +} VkPhysicalDevicePerformanceQueryPropertiesKHR; + +typedef struct VkPerformanceCounterKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterUnitKHR unit; + VkPerformanceCounterScopeKHR scope; + VkPerformanceCounterStorageKHR storage; + uint8_t uuid[VK_UUID_SIZE]; +} VkPerformanceCounterKHR; + +typedef struct VkPerformanceCounterDescriptionKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterDescriptionFlagsKHR flags; + char name[VK_MAX_DESCRIPTION_SIZE]; + char category[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkPerformanceCounterDescriptionKHR; + +typedef struct VkQueryPoolPerformanceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + uint32_t counterIndexCount; + const uint32_t* pCounterIndices; +} VkQueryPoolPerformanceCreateInfoKHR; + +typedef union VkPerformanceCounterResultKHR { + int32_t int32; + int64_t int64; + uint32_t uint32; + uint64_t uint64; + float float32; + double float64; +} VkPerformanceCounterResultKHR; + +typedef struct VkAcquireProfilingLockInfoKHR { + VkStructureType sType; + const void* pNext; + VkAcquireProfilingLockFlagsKHR flags; + uint64_t timeout; +} VkAcquireProfilingLockInfoKHR; + +typedef struct VkPerformanceQuerySubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t counterPassIndex; +} VkPerformanceQuerySubmitInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR( + VkDevice device, + const VkAcquireProfilingLockInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( + VkDevice device); +#endif + + +#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2" +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef 
VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" +typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif + + +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR; + + + +#define VK_KHR_get_display_properties2 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2" +typedef struct VkDisplayProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPropertiesKHR displayProperties; +} VkDisplayProperties2KHR; + +typedef struct VkDisplayPlaneProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlanePropertiesKHR displayPlaneProperties; +} VkDisplayPlaneProperties2KHR; + +typedef struct VkDisplayModeProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayModePropertiesKHR displayModeProperties; +} VkDisplayModeProperties2KHR; + +typedef struct VkDisplayPlaneInfo2KHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeKHR mode; + uint32_t planeIndex; +} VkDisplayPlaneInfo2KHR; + +typedef struct VkDisplayPlaneCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlaneCapabilitiesKHR capabilities; +} VkDisplayPlaneCapabilities2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, 
VkDisplayPlaneProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlaneProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModeProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR* pCapabilities); +#endif + + +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkMemoryRequirements2 VkMemoryRequirements2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL 
vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif + + +#define VK_KHR_image_format_list 1 +#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" +typedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR; + + + +#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif + + +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif + + +#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3" +typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const 
VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_KHR_draw_indirect_count 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_KHR_shader_subgroup_extended_types 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types" +typedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR; + + + +#define VK_KHR_8bit_storage 1 +#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage" +typedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR; + + + +#define VK_KHR_shader_atomic_int64 1 +#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64" +typedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR; + + + +#define VK_KHR_shader_clock 1 +#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1 +#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock" +typedef struct VkPhysicalDeviceShaderClockFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupClock; + VkBool32 shaderDeviceClock; +} VkPhysicalDeviceShaderClockFeaturesKHR; + + + +#define VK_KHR_driver_properties 1 +#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties" +#define VK_MAX_DRIVER_NAME_SIZE_KHR VK_MAX_DRIVER_NAME_SIZE +#define VK_MAX_DRIVER_INFO_SIZE_KHR VK_MAX_DRIVER_INFO_SIZE +typedef VkDriverId VkDriverIdKHR; + +typedef VkConformanceVersion VkConformanceVersionKHR; + +typedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR; + + + +#define VK_KHR_shader_float_controls 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4 +#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls" +typedef VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR; + +typedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR; + + + +#define VK_KHR_depth_stencil_resolve 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1 +#define 
VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve" +typedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR; + +typedef VkResolveModeFlags VkResolveModeFlagsKHR; + +typedef VkSubpassDescriptionDepthStencilResolve VkSubpassDescriptionDepthStencilResolveKHR; + +typedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR; + + + +#define VK_KHR_swapchain_mutable_format 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format" + + +#define VK_KHR_timeline_semaphore 1 +#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2 +#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore" +typedef VkSemaphoreType VkSemaphoreTypeKHR; + +typedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR; + +typedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR; + +typedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR; + +typedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR; + +typedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR; + +typedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); +#endif + + +#define VK_KHR_vulkan_memory_model 1 +#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3 +#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model" +typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR; + + + +#define VK_KHR_shader_terminate_invocation 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_SPEC_VERSION 1 +#define VK_KHR_SHADER_TERMINATE_INVOCATION_EXTENSION_NAME "VK_KHR_shader_terminate_invocation" +typedef struct VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderTerminateInvocation; +} VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR; + + + +#define VK_KHR_fragment_shading_rate 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_SPEC_VERSION 1 +#define VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME "VK_KHR_fragment_shading_rate" + +typedef enum VkFragmentShadingRateCombinerOpKHR { + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR = 0, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR = 1, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MIN_KHR = 2, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_KHR = 3, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR = 4, + VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR = 0x7FFFFFFF +} VkFragmentShadingRateCombinerOpKHR; +typedef struct VkFragmentShadingRateAttachmentInfoKHR { + VkStructureType sType; + const void* pNext; 
+ const VkAttachmentReference2* pFragmentShadingRateAttachment; + VkExtent2D shadingRateAttachmentTexelSize; +} VkFragmentShadingRateAttachmentInfoKHR; + +typedef struct VkPipelineFragmentShadingRateStateCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkExtent2D fragmentSize; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateStateCreateInfoKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineFragmentShadingRate; + VkBool32 primitiveFragmentShadingRate; + VkBool32 attachmentFragmentShadingRate; +} VkPhysicalDeviceFragmentShadingRateFeaturesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRatePropertiesKHR { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentShadingRateAttachmentTexelSize; + VkExtent2D maxFragmentShadingRateAttachmentTexelSize; + uint32_t maxFragmentShadingRateAttachmentTexelSizeAspectRatio; + VkBool32 primitiveFragmentShadingRateWithMultipleViewports; + VkBool32 layeredShadingRateAttachments; + VkBool32 fragmentShadingRateNonTrivialCombinerOps; + VkExtent2D maxFragmentSize; + uint32_t maxFragmentSizeAspectRatio; + uint32_t maxFragmentShadingRateCoverageSamples; + VkSampleCountFlagBits maxFragmentShadingRateRasterizationSamples; + VkBool32 fragmentShadingRateWithShaderDepthStencilWrites; + VkBool32 fragmentShadingRateWithSampleMask; + VkBool32 fragmentShadingRateWithShaderSampleMask; + VkBool32 fragmentShadingRateWithConservativeRasterization; + VkBool32 fragmentShadingRateWithFragmentShaderInterlock; + VkBool32 fragmentShadingRateWithCustomSampleLocations; + VkBool32 fragmentShadingRateStrictMultiplyCombiner; +} VkPhysicalDeviceFragmentShadingRatePropertiesKHR; + +typedef struct VkPhysicalDeviceFragmentShadingRateKHR { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleCounts; + VkExtent2D fragmentSize; +} VkPhysicalDeviceFragmentShadingRateKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pFragmentShadingRateCount, VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); +typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateKHR)(VkCommandBuffer commandBuffer, const VkExtent2D* pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceFragmentShadingRatesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pFragmentShadingRateCount, + VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateKHR( + VkCommandBuffer commandBuffer, + const VkExtent2D* pFragmentSize, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif + + +#define VK_KHR_spirv_1_4 1 +#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1 +#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4" + + +#define VK_KHR_surface_protected_capabilities 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities" +typedef struct VkSurfaceProtectedCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + VkBool32 supportsProtected; +} VkSurfaceProtectedCapabilitiesKHR; + + + +#define VK_KHR_separate_depth_stencil_layouts 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME "VK_KHR_separate_depth_stencil_layouts" +typedef 
VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR; + +typedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR; + +typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR; + + + +#define VK_KHR_uniform_buffer_standard_layout 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout" +typedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR; + + + +#define VK_KHR_buffer_device_address 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_KHR_buffer_device_address" +typedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR; + +typedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR; + +typedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR; + +typedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_deferred_host_operations 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR) +#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 4 +#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations" +typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation); +typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator); +typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation); +typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR( + VkDevice device, + const VkAllocationCallbacks* pAllocator, + VkDeferredOperationKHR* pDeferredOperation); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR( + VkDevice device, + VkDeferredOperationKHR operation, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult 
VKAPI_CALL vkGetDeferredOperationResultKHR( + VkDevice device, + VkDeferredOperationKHR operation); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR( + VkDevice device, + VkDeferredOperationKHR operation); +#endif + + +#define VK_KHR_pipeline_executable_properties 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties" + +typedef enum VkPipelineExecutableStatisticFormatKHR { + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPipelineExecutableStatisticFormatKHR; +typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineExecutableInfo; +} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + +typedef struct VkPipelineInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; +} VkPipelineInfoKHR; + +typedef struct VkPipelineExecutablePropertiesKHR { + VkStructureType sType; + void* pNext; + VkShaderStageFlags stages; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + uint32_t subgroupSize; +} VkPipelineExecutablePropertiesKHR; + +typedef struct VkPipelineExecutableInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; + uint32_t executableIndex; +} VkPipelineExecutableInfoKHR; + +typedef union VkPipelineExecutableStatisticValueKHR { + VkBool32 b32; + int64_t i64; + uint64_t u64; + double f64; +} VkPipelineExecutableStatisticValueKHR; + +typedef struct VkPipelineExecutableStatisticKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkPipelineExecutableStatisticFormatKHR format; + VkPipelineExecutableStatisticValueKHR value; +} VkPipelineExecutableStatisticKHR; + +typedef struct VkPipelineExecutableInternalRepresentationKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkBool32 isText; + size_t dataSize; + void* pData; +} VkPipelineExecutableInternalRepresentationKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR( + VkDevice device, + const VkPipelineInfoKHR* pPipelineInfo, + uint32_t* pExecutableCount, + VkPipelineExecutablePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pStatisticCount, + VkPipelineExecutableStatisticKHR* pStatistics); + 
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pInternalRepresentationCount, + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); +#endif + + +#define VK_KHR_pipeline_library 1 +#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library" +typedef struct VkPipelineLibraryCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t libraryCount; + const VkPipeline* pLibraries; +} VkPipelineLibraryCreateInfoKHR; + + + +#define VK_KHR_shader_non_semantic_info 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_SPEC_VERSION 1 +#define VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME "VK_KHR_shader_non_semantic_info" + + +#define VK_KHR_copy_commands2 1 +#define VK_KHR_COPY_COMMANDS_2_SPEC_VERSION 1 +#define VK_KHR_COPY_COMMANDS_2_EXTENSION_NAME "VK_KHR_copy_commands2" +typedef struct VkBufferCopy2KHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; +} VkBufferCopy2KHR; + +typedef struct VkCopyBufferInfo2KHR { + VkStructureType sType; + const void* pNext; + VkBuffer srcBuffer; + VkBuffer dstBuffer; + uint32_t regionCount; + const VkBufferCopy2KHR* pRegions; +} VkCopyBufferInfo2KHR; + +typedef struct VkImageCopy2KHR { + VkStructureType sType; + const void* pNext; + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageCopy2KHR; + +typedef struct VkCopyImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageCopy2KHR* pRegions; +} VkCopyImageInfo2KHR; + +typedef struct VkBufferImageCopy2KHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize bufferOffset; + uint32_t bufferRowLength; + uint32_t bufferImageHeight; + VkImageSubresourceLayers imageSubresource; + VkOffset3D imageOffset; + VkExtent3D imageExtent; +} VkBufferImageCopy2KHR; + +typedef struct VkCopyBufferToImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkBuffer srcBuffer; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkBufferImageCopy2KHR* pRegions; +} VkCopyBufferToImageInfo2KHR; + +typedef struct VkCopyImageToBufferInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkBuffer dstBuffer; + uint32_t regionCount; + const VkBufferImageCopy2KHR* pRegions; +} VkCopyImageToBufferInfo2KHR; + +typedef struct VkImageBlit2KHR { + VkStructureType sType; + const void* pNext; + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffsets[2]; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffsets[2]; +} VkImageBlit2KHR; + +typedef struct VkBlitImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageBlit2KHR* pRegions; + VkFilter filter; +} VkBlitImageInfo2KHR; + +typedef struct VkImageResolve2KHR { + VkStructureType sType; + const void* pNext; + VkImageSubresourceLayers srcSubresource; + VkOffset3D srcOffset; + VkImageSubresourceLayers dstSubresource; + VkOffset3D dstOffset; + VkExtent3D extent; +} VkImageResolve2KHR; + +typedef struct 
VkResolveImageInfo2KHR { + VkStructureType sType; + const void* pNext; + VkImage srcImage; + VkImageLayout srcImageLayout; + VkImage dstImage; + VkImageLayout dstImageLayout; + uint32_t regionCount; + const VkImageResolve2KHR* pRegions; +} VkResolveImageInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferInfo2KHR* pCopyBufferInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR* pCopyImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage2KHR)(VkCommandBuffer commandBuffer, const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer2KHR)(VkCommandBuffer commandBuffer, const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBlitImage2KHR)(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR* pBlitImageInfo); +typedef void (VKAPI_PTR *PFN_vkCmdResolveImage2KHR)(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferInfo2KHR* pCopyBufferInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageInfo2KHR* pCopyImageInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage2KHR( + VkCommandBuffer commandBuffer, + const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer2KHR( + VkCommandBuffer commandBuffer, + const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage2KHR( + VkCommandBuffer commandBuffer, + const VkBlitImageInfo2KHR* pBlitImageInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage2KHR( + VkCommandBuffer commandBuffer, + const VkResolveImageInfo2KHR* pResolveImageInfo); +#endif + + +#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9 +#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + 
VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1000150000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT callback, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage); +#endif + + +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME 
"VK_NV_glsl_shader" + + +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} VkDebugMarkerMarkerInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif + + +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME 
"VK_AMD_gcn_shader" + + +#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +#define VK_EXT_transform_feedback 1 +#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME "VK_EXT_transform_feedback" +typedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT; +typedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 transformFeedback; + VkBool32 geometryStreams; +} VkPhysicalDeviceTransformFeedbackFeaturesEXT; + +typedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxTransformFeedbackStreams; + uint32_t maxTransformFeedbackBuffers; + VkDeviceSize maxTransformFeedbackBufferSize; + uint32_t maxTransformFeedbackStreamDataSize; + uint32_t maxTransformFeedbackBufferDataSize; + uint32_t maxTransformFeedbackBufferDataStride; + VkBool32 transformFeedbackQueries; + VkBool32 transformFeedbackStreamsLinesTriangles; + VkBool32 transformFeedbackRasterizationStreamSelect; + VkBool32 transformFeedbackDraw; +} VkPhysicalDeviceTransformFeedbackPropertiesEXT; + +typedef struct VkPipelineRasterizationStateStreamCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateStreamCreateFlagsEXT flags; + uint32_t rasterizationStream; +} VkPipelineRasterizationStateStreamCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes); +typedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const 
VkDeviceSize* pSizes); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride); +#endif + + +#define VK_NVX_image_view_handle 1 +#define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 2 +#define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME "VK_NVX_image_view_handle" +typedef struct VkImageViewHandleInfoNVX { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkDescriptorType descriptorType; + VkSampler sampler; +} VkImageViewHandleInfoNVX; + +typedef struct VkImageViewAddressPropertiesNVX { + VkStructureType sType; + void* pNext; + VkDeviceAddress deviceAddress; + VkDeviceSize size; +} VkImageViewAddressPropertiesNVX; + +typedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetImageViewAddressNVX)(VkDevice device, VkImageView imageView, VkImageViewAddressPropertiesNVX* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageViewAddressNVX( + VkDevice device, + VkImageView imageView, + VkImageViewAddressPropertiesNVX* pProperties); +#endif + + +#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_AMD_negative_viewport_height 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +#define 
VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod" +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif + + +#define VK_AMD_shader_image_load_store_lod 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +#define VK_NV_corner_sampled_image 1 +#define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2 +#define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME "VK_NV_corner_sampled_image" +typedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cornerSampledImage; +} VkPhysicalDeviceCornerSampledImageFeaturesNV; + + + +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + 
VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} VkExternalImageFormatPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif + + +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + +typedef enum VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + const VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +#define VK_EXT_texture_compression_astc_hdr 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr" +typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 textureCompressionASTC_HDR; +} VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + + + +#define VK_EXT_astc_decode_mode 1 +#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 +#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" +typedef struct VkImageViewASTCDecodeModeEXT { + VkStructureType sType; + const void* pNext; + VkFormat decodeMode; +} VkImageViewASTCDecodeModeEXT; + +typedef struct 
VkPhysicalDeviceASTCDecodeFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 decodeModeSharedExponent; +} VkPhysicalDeviceASTCDecodeFeaturesEXT; + + + +#define VK_EXT_conditional_rendering 1 +#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2 +#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" + +typedef enum VkConditionalRenderingFlagBitsEXT { + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, + VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConditionalRenderingFlagBitsEXT; +typedef VkFlags VkConditionalRenderingFlagsEXT; +typedef struct VkConditionalRenderingBeginInfoEXT { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceSize offset; + VkConditionalRenderingFlagsEXT flags; +} VkConditionalRenderingBeginInfoEXT; + +typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 conditionalRendering; + VkBool32 inheritedConditionalRendering; +} VkPhysicalDeviceConditionalRenderingFeaturesEXT; + +typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 conditionalRenderingEnable; +} VkCommandBufferInheritanceConditionalRenderingInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer); +#endif + + +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif + + +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif + + +#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_BIT_EXT = 0x00000001, + VK_SURFACE_COUNTER_VBLANK_EXT = 
VK_SURFACE_COUNTER_VBLANK_BIT_EXT, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags VkSurfaceCounterFlagsEXT; +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif + + +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + +typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult 
VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif + + +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif + + +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2" + + +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { + VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + + +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + 
VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + + +#define VK_EXT_conservative_rasterization 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float 
extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +#define VK_EXT_depth_clip_enable 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME "VK_EXT_depth_clip_enable" +typedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClipEnable; +} VkPhysicalDeviceDepthClipEnableFeaturesEXT; + +typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationDepthClipStateCreateFlagsEXT flags; + VkBool32 depthClipEnable; +} VkPipelineRasterizationDepthClipStateCreateInfoEXT; + + + +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 2 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif + + +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +#define VK_EXT_queue_family_foreign 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~0U-2) + + +#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 2 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; +typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; +typedef struct VkDebugUtilsLabelEXT { + VkStructureType sType; + const void* 
pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + const VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + const VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + const VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue 
queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); + +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif + + +#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" +typedef VkSamplerReductionMode VkSamplerReductionModeEXT; + +typedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT; + +typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +#define VK_AMD_gpu_shader_int16 1 +#define VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" + + +#define VK_AMD_shader_fragment_mask 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +#define VK_EXT_inline_uniform_block 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" +typedef struct VkPhysicalDeviceInlineUniformBlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 inlineUniformBlock; + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; +} VkPhysicalDeviceInlineUniformBlockFeaturesEXT; + +typedef struct VkPhysicalDeviceInlineUniformBlockPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxInlineUniformBlockSize; + uint32_t maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; +} VkPhysicalDeviceInlineUniformBlockPropertiesEXT; + +typedef struct VkWriteDescriptorSetInlineUniformBlockEXT { + VkStructureType sType; + const void* pNext; + uint32_t dataSize; + const void* pData; +} VkWriteDescriptorSetInlineUniformBlockEXT; + +typedef struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t maxInlineUniformBlockBindings; +} VkDescriptorPoolInlineUniformBlockCreateInfoEXT; + + + +#define VK_EXT_shader_stencil_export 1 +#define 
VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + 
void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + + +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_NV_shader_sm_builtins 1 +#define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1 +#define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME "VK_NV_shader_sm_builtins" +typedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderSMCount; + uint32_t shaderWarpsPerSM; +} VkPhysicalDeviceShaderSMBuiltinsPropertiesNV; + +typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderSMBuiltins; +} VkPhysicalDeviceShaderSMBuiltinsFeaturesNV; + + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_image_drm_format_modifier 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier" +typedef struct VkDrmFormatModifierPropertiesEXT { + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + VkFormatFeatureFlags drmFormatModifierTilingFeatures; +} VkDrmFormatModifierPropertiesEXT; + +typedef struct VkDrmFormatModifierPropertiesListEXT { + VkStructureType sType; + void* pNext; + uint32_t drmFormatModifierCount; + VkDrmFormatModifierPropertiesEXT* 
pDrmFormatModifierProperties; +} VkDrmFormatModifierPropertiesListEXT; + +typedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkPhysicalDeviceImageDrmFormatModifierInfoEXT; + +typedef struct VkImageDrmFormatModifierListCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t drmFormatModifierCount; + const uint64_t* pDrmFormatModifiers; +} VkImageDrmFormatModifierListCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + const VkSubresourceLayout* pPlaneLayouts; +} VkImageDrmFormatModifierExplicitCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t drmFormatModifier; +} VkImageDrmFormatModifierPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT( + VkDevice device, + VkImage image, + VkImageDrmFormatModifierPropertiesEXT* pProperties); +#endif + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; +typedef VkFlags VkValidationCacheCreateFlagsEXT; +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const 
VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + + +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" +typedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT; + +typedef VkDescriptorBindingFlags VkDescriptorBindingFlagsEXT; + +typedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +#define VK_NV_shading_rate_image 1 +#define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3 +#define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" + +typedef enum VkShadingRatePaletteEntryNV { + VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0, + VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1, + VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2, + VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3, + VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11, + VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF +} VkShadingRatePaletteEntryNV; + +typedef enum VkCoarseSampleOrderTypeNV { + VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 0, + VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1, + VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2, + VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, + VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoarseSampleOrderTypeNV; +typedef struct VkShadingRatePaletteNV { + uint32_t shadingRatePaletteEntryCount; + const VkShadingRatePaletteEntryNV* pShadingRatePaletteEntries; +} VkShadingRatePaletteNV; + +typedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 shadingRateImageEnable; + uint32_t viewportCount; + const VkShadingRatePaletteNV* pShadingRatePalettes; +} VkPipelineViewportShadingRateImageStateCreateInfoNV; + +typedef struct VkPhysicalDeviceShadingRateImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shadingRateImage; + VkBool32 shadingRateCoarseSampleOrder; +} VkPhysicalDeviceShadingRateImageFeaturesNV; + +typedef struct VkPhysicalDeviceShadingRateImagePropertiesNV { + VkStructureType sType; + void* pNext; + VkExtent2D shadingRateTexelSize; + uint32_t shadingRatePaletteSize; + uint32_t 
shadingRateMaxCoarseSamples; +} VkPhysicalDeviceShadingRateImagePropertiesNV; + +typedef struct VkCoarseSampleLocationNV { + uint32_t pixelX; + uint32_t pixelY; + uint32_t sample; +} VkCoarseSampleLocationNV; + +typedef struct VkCoarseSampleOrderCustomNV { + VkShadingRatePaletteEntryNV shadingRate; + uint32_t sampleCount; + uint32_t sampleLocationCount; + const VkCoarseSampleLocationNV* pSampleLocations; +} VkCoarseSampleOrderCustomNV; + +typedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkCoarseSampleOrderTypeNV sampleOrderType; + uint32_t customSampleOrderCount; + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders; +} VkPipelineViewportCoarseSampleOrderStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); +typedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkShadingRatePaletteNV* pShadingRatePalettes); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( + VkCommandBuffer commandBuffer, + VkCoarseSampleOrderTypeNV sampleOrderType, + uint32_t customSampleOrderCount, + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); +#endif + + +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) +#define VK_NV_RAY_TRACING_SPEC_VERSION 3 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_KHR (~0U) +#define VK_SHADER_UNUSED_NV VK_SHADER_UNUSED_KHR + +typedef enum VkRayTracingShaderGroupTypeKHR { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR, + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeKHR; +typedef VkRayTracingShaderGroupTypeKHR VkRayTracingShaderGroupTypeNV; + + +typedef enum VkGeometryTypeKHR { + VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, + VK_GEOMETRY_TYPE_AABBS_KHR = 1, + VK_GEOMETRY_TYPE_INSTANCES_KHR = 2, + VK_GEOMETRY_TYPE_TRIANGLES_NV = VK_GEOMETRY_TYPE_TRIANGLES_KHR, + VK_GEOMETRY_TYPE_AABBS_NV = VK_GEOMETRY_TYPE_AABBS_KHR, + VK_GEOMETRY_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryTypeKHR; +typedef VkGeometryTypeKHR VkGeometryTypeNV; + + +typedef enum VkAccelerationStructureTypeKHR { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1, + VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2, + 
VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureTypeKHR; +typedef VkAccelerationStructureTypeKHR VkAccelerationStructureTypeNV; + + +typedef enum VkCopyAccelerationStructureModeKHR { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2, + VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3, + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR, + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCopyAccelerationStructureModeKHR; +typedef VkCopyAccelerationStructureModeKHR VkCopyAccelerationStructureModeNV; + + +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; + +typedef enum VkGeometryFlagBitsKHR { + VK_GEOMETRY_OPAQUE_BIT_KHR = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x00000002, + VK_GEOMETRY_OPAQUE_BIT_NV = VK_GEOMETRY_OPAQUE_BIT_KHR, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryFlagBitsKHR; +typedef VkFlags VkGeometryFlagsKHR; +typedef VkGeometryFlagsKHR VkGeometryFlagsNV; + +typedef VkGeometryFlagBitsKHR VkGeometryFlagBitsNV; + + +typedef enum VkGeometryInstanceFlagBitsKHR { + VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x00000008, + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkGeometryInstanceFlagBitsKHR; +typedef VkFlags VkGeometryInstanceFlagsKHR; +typedef VkGeometryInstanceFlagsKHR VkGeometryInstanceFlagsNV; + +typedef VkGeometryInstanceFlagBitsKHR VkGeometryInstanceFlagBitsNV; + + +typedef enum VkBuildAccelerationStructureFlagBitsKHR { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR, + 
VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsKHR; +typedef VkFlags VkBuildAccelerationStructureFlagsKHR; +typedef VkBuildAccelerationStructureFlagsKHR VkBuildAccelerationStructureFlagsNV; + +typedef VkBuildAccelerationStructureFlagBitsKHR VkBuildAccelerationStructureFlagBitsNV; + +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { + VkStructureType sType; + const void* pNext; + VkBuffer vertexData; + VkDeviceSize vertexOffset; + uint32_t vertexCount; + VkDeviceSize vertexStride; + VkFormat vertexFormat; + VkBuffer indexData; + VkDeviceSize indexOffset; + uint32_t indexCount; + VkIndexType indexType; + VkBuffer transformData; + VkDeviceSize transformOffset; +} VkGeometryTrianglesNV; + +typedef struct VkGeometryAABBNV { + VkStructureType sType; + const void* pNext; + VkBuffer aabbData; + uint32_t numAABBs; + uint32_t stride; + VkDeviceSize offset; +} VkGeometryAABBNV; + +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; + +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsKHR flags; +} VkGeometryNV; + +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; + +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct 
VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + +typedef struct VkTransformMatrixKHR { + float matrix[3][4]; +} VkTransformMatrixKHR; + +typedef VkTransformMatrixKHR VkTransformMatrixNV; + +typedef struct VkAabbPositionsKHR { + float minX; + float minY; + float minZ; + float maxX; + float maxY; + float maxZ; +} VkAabbPositionsKHR; + +typedef VkAabbPositionsKHR VkAabbPositionsNV; + +typedef struct VkAccelerationStructureInstanceKHR { + VkTransformMatrixKHR transform; + uint32_t instanceCustomIndex:24; + uint32_t mask:8; + uint32_t instanceShaderBindingTableRecordOffset:24; + VkGeometryInstanceFlagsKHR flags:8; + uint64_t accelerationStructureReference; +} VkAccelerationStructureInstanceKHR; + +typedef VkAccelerationStructureInstanceKHR VkAccelerationStructureInstanceNV; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeKHR mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult 
(VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureNV* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2KHR* pMemoryRequirements); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkBuffer scratch, + VkDeviceSize scratchOffset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeKHR mode); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( + VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult 
VKAPI_CALL vkGetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader); +#endif + + +#define VK_NV_representative_fragment_test 1 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test" +typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 representativeFragmentTest; +} VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV; + +typedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 representativeFragmentTestEnable; +} VkPipelineRepresentativeFragmentTestStateCreateInfoNV; + + + +#define VK_EXT_filter_cubic 1 +#define VK_EXT_FILTER_CUBIC_SPEC_VERSION 3 +#define VK_EXT_FILTER_CUBIC_EXTENSION_NAME "VK_EXT_filter_cubic" +typedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT { + VkStructureType sType; + void* pNext; + VkImageViewType imageViewType; +} VkPhysicalDeviceImageViewImageFormatInfoEXT; + +typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterCubic; + VkBool32 filterCubicMinmax; +} VkFilterCubicImageViewImageFormatPropertiesEXT; + + + +#define VK_QCOM_render_pass_shader_resolve 1 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_SPEC_VERSION 4 +#define VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME "VK_QCOM_render_pass_shader_resolve" + + +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" + +typedef enum VkQueueGlobalPriorityEXT { + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024, + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF +} VkQueueGlobalPriorityEXT; +typedef struct VkDeviceQueueGlobalPriorityCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriorityEXT globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* 
pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif + + +#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + + +#define VK_AMD_pipeline_compiler_control 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control" + +typedef enum VkPipelineCompilerControlFlagBitsAMD { + VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkPipelineCompilerControlFlagBitsAMD; +typedef VkFlags VkPipelineCompilerControlFlagsAMD; +typedef struct VkPipelineCompilerControlCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkPipelineCompilerControlFlagsAMD compilerControlFlags; +} VkPipelineCompilerControlCreateInfoAMD; + + + +#define VK_EXT_calibrated_timestamps 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps" + +typedef enum VkTimeDomainEXT { + VK_TIME_DOMAIN_DEVICE_EXT = 0, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = 1, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = 2, + VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3, + VK_TIME_DOMAIN_MAX_ENUM_EXT = 0x7FFFFFFF +} VkTimeDomainEXT; +typedef struct VkCalibratedTimestampInfoEXT { + VkStructureType sType; + const void* pNext; + VkTimeDomainEXT timeDomain; +} VkCalibratedTimestampInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains); +typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainEXT* pTimeDomains); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoEXT* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation); +#endif + + +#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t 
minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +#define VK_AMD_memory_overallocation_behavior 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + +typedef enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF +} VkMemoryOverallocationBehaviorAMD; +typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkMemoryOverallocationBehaviorAMD overallocationBehavior; +} VkDeviceMemoryOverallocationCreateInfoAMD; + + + +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef struct VkVertexInputBindingDivisorDescriptionEXT { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescriptionEXT; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + + +#define VK_EXT_pipeline_creation_feedback 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback" + +typedef enum VkPipelineCreationFeedbackFlagBitsEXT { + VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = 0x00000001, + VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = 0x00000002, + VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = 0x00000004, + VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPipelineCreationFeedbackFlagBitsEXT; +typedef VkFlags VkPipelineCreationFeedbackFlagsEXT; +typedef struct VkPipelineCreationFeedbackEXT { + VkPipelineCreationFeedbackFlagsEXT flags; + uint64_t duration; +} VkPipelineCreationFeedbackEXT; + +typedef struct VkPipelineCreationFeedbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineCreationFeedbackEXT* pPipelineCreationFeedback; + uint32_t pipelineStageCreationFeedbackCount; + VkPipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks; +} VkPipelineCreationFeedbackCreateInfoEXT; + + + +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +#define VK_NV_compute_shader_derivatives 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" +typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV { + 
VkStructureType sType; + void* pNext; + VkBool32 computeDerivativeGroupQuads; + VkBool32 computeDerivativeGroupLinear; +} VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; + + + +#define VK_NV_mesh_shader 1 +#define VK_NV_MESH_SHADER_SPEC_VERSION 1 +#define VK_NV_MESH_SHADER_EXTENSION_NAME "VK_NV_mesh_shader" +typedef struct VkPhysicalDeviceMeshShaderFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 taskShader; + VkBool32 meshShader; +} VkPhysicalDeviceMeshShaderFeaturesNV; + +typedef struct VkPhysicalDeviceMeshShaderPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxDrawMeshTasksCount; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskTotalMemorySize; + uint32_t maxTaskOutputCount; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshTotalMemorySize; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; +} VkPhysicalDeviceMeshShaderPropertiesNV; + +typedef struct VkDrawMeshTasksIndirectCommandNV { + uint32_t taskCount; + uint32_t firstTask; +} VkDrawMeshTasksIndirectCommandNV; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV( + VkCommandBuffer commandBuffer, + uint32_t taskCount, + uint32_t firstTask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_NV_fragment_shader_barycentric 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric" +typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderBarycentric; +} VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV; + + + +#define VK_NV_shader_image_footprint 1 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint" +typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 imageFootprint; +} VkPhysicalDeviceShaderImageFootprintFeaturesNV; + + + +#define VK_NV_scissor_exclusive 1 +#define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 1 +#define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME "VK_NV_scissor_exclusive" +typedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t exclusiveScissorCount; + const VkRect2D* pExclusiveScissors; +} VkPipelineViewportExclusiveScissorStateCreateInfoNV; + +typedef 
struct VkPhysicalDeviceExclusiveScissorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 exclusiveScissor; +} VkPhysicalDeviceExclusiveScissorFeaturesNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkRect2D* pExclusiveScissors); +#endif + + +#define VK_NV_device_diagnostic_checkpoints 1 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" +typedef struct VkQueueFamilyCheckpointPropertiesNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags checkpointExecutionStageMask; +} VkQueueFamilyCheckpointPropertiesNV; + +typedef struct VkCheckpointDataNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlagBits stage; + void* pCheckpointMarker; +} VkCheckpointDataNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker); + +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData); +#endif + + +#define VK_INTEL_shader_integer_functions2 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME "VK_INTEL_shader_integer_functions2" +typedef struct VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + VkStructureType sType; + void* pNext; + VkBool32 shaderIntegerFunctions2; +} VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + + + +#define VK_INTEL_performance_query 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL) +#define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 2 +#define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME "VK_INTEL_performance_query" + +typedef enum VkPerformanceConfigurationTypeINTEL { + VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0, + VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceConfigurationTypeINTEL; + +typedef enum VkQueryPoolSamplingModeINTEL { + VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0, + VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkQueryPoolSamplingModeINTEL; + +typedef enum VkPerformanceOverrideTypeINTEL { + VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0, + VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1, + VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceOverrideTypeINTEL; + +typedef enum VkPerformanceParameterTypeINTEL { + VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0, + VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1, + VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceParameterTypeINTEL; + +typedef enum VkPerformanceValueTypeINTEL { + VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0, + VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1, + VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2, + 
VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3, + VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4, + VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceValueTypeINTEL; +typedef union VkPerformanceValueDataINTEL { + uint32_t value32; + uint64_t value64; + float valueFloat; + VkBool32 valueBool; + const char* valueString; +} VkPerformanceValueDataINTEL; + +typedef struct VkPerformanceValueINTEL { + VkPerformanceValueTypeINTEL type; + VkPerformanceValueDataINTEL data; +} VkPerformanceValueINTEL; + +typedef struct VkInitializePerformanceApiInfoINTEL { + VkStructureType sType; + const void* pNext; + void* pUserData; +} VkInitializePerformanceApiInfoINTEL; + +typedef struct VkQueryPoolPerformanceQueryCreateInfoINTEL { + VkStructureType sType; + const void* pNext; + VkQueryPoolSamplingModeINTEL performanceCountersSampling; +} VkQueryPoolPerformanceQueryCreateInfoINTEL; + +typedef VkQueryPoolPerformanceQueryCreateInfoINTEL VkQueryPoolCreateInfoINTEL; + +typedef struct VkPerformanceMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint64_t marker; +} VkPerformanceMarkerInfoINTEL; + +typedef struct VkPerformanceStreamMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint32_t marker; +} VkPerformanceStreamMarkerInfoINTEL; + +typedef struct VkPerformanceOverrideInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceOverrideTypeINTEL type; + VkBool32 enable; + uint64_t parameter; +} VkPerformanceOverrideInfoINTEL; + +typedef struct VkPerformanceConfigurationAcquireInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceConfigurationTypeINTEL type; +} VkPerformanceConfigurationAcquireInfoINTEL; + +typedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); +typedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); +typedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); +typedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL( + VkDevice device, + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); + +VKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL( + VkCommandBuffer commandBuffer, + const 
VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceOverrideInfoINTEL* pOverrideInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL( + VkDevice device, + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, + VkPerformanceConfigurationINTEL* pConfiguration); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL( + VkDevice device, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL( + VkQueue queue, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL( + VkDevice device, + VkPerformanceParameterTypeINTEL parameter, + VkPerformanceValueINTEL* pValue); +#endif + + +#define VK_EXT_pci_bus_info 1 +#define VK_EXT_PCI_BUS_INFO_SPEC_VERSION 2 +#define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME "VK_EXT_pci_bus_info" +typedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t pciDomain; + uint32_t pciBus; + uint32_t pciDevice; + uint32_t pciFunction; +} VkPhysicalDevicePCIBusInfoPropertiesEXT; + + + +#define VK_AMD_display_native_hdr 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME "VK_AMD_display_native_hdr" +typedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 localDimmingSupport; +} VkDisplayNativeHdrSurfaceCapabilitiesAMD; + +typedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkBool32 localDimmingEnable; +} VkSwapchainDisplayNativeHdrCreateInfoAMD; + +typedef void (VKAPI_PTR *PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD( + VkDevice device, + VkSwapchainKHR swapChain, + VkBool32 localDimmingEnable); +#endif + + +#define VK_EXT_fragment_density_map 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map" +typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMap; + VkBool32 fragmentDensityMapDynamic; + VkBool32 fragmentDensityMapNonSubsampledImages; +} VkPhysicalDeviceFragmentDensityMapFeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentDensityTexelSize; + VkExtent2D maxFragmentDensityTexelSize; + VkBool32 fragmentDensityInvocations; +} VkPhysicalDeviceFragmentDensityMapPropertiesEXT; + +typedef struct VkRenderPassFragmentDensityMapCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkAttachmentReference fragmentDensityMapAttachment; +} VkRenderPassFragmentDensityMapCreateInfoEXT; + + + +#define VK_EXT_scalar_block_layout 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME "VK_EXT_scalar_block_layout" +typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT; + + + +#define VK_GOOGLE_hlsl_functionality1 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" + + +#define VK_GOOGLE_decorate_string 1 +#define 
VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1 +#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string" + + +#define VK_EXT_subgroup_size_control 1 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control" +typedef struct VkPhysicalDeviceSubgroupSizeControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 subgroupSizeControl; + VkBool32 computeFullSubgroups; +} VkPhysicalDeviceSubgroupSizeControlFeaturesEXT; + +typedef struct VkPhysicalDeviceSubgroupSizeControlPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t minSubgroupSize; + uint32_t maxSubgroupSize; + uint32_t maxComputeWorkgroupSubgroups; + VkShaderStageFlags requiredSubgroupSizeStages; +} VkPhysicalDeviceSubgroupSizeControlPropertiesEXT; + +typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + VkStructureType sType; + void* pNext; + uint32_t requiredSubgroupSize; +} VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + + + +#define VK_AMD_shader_core_properties2 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2" + +typedef enum VkShaderCorePropertiesFlagBitsAMD { + VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderCorePropertiesFlagBitsAMD; +typedef VkFlags VkShaderCorePropertiesFlagsAMD; +typedef struct VkPhysicalDeviceShaderCoreProperties2AMD { + VkStructureType sType; + void* pNext; + VkShaderCorePropertiesFlagsAMD shaderCoreFeatures; + uint32_t activeComputeUnitCount; +} VkPhysicalDeviceShaderCoreProperties2AMD; + + + +#define VK_AMD_device_coherent_memory 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory" +typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD { + VkStructureType sType; + void* pNext; + VkBool32 deviceCoherentMemory; +} VkPhysicalDeviceCoherentMemoryFeaturesAMD; + + + +#define VK_EXT_shader_image_atomic_int64 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_EXT_SHADER_IMAGE_ATOMIC_INT64_EXTENSION_NAME "VK_EXT_shader_image_atomic_int64" +typedef struct VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderImageInt64Atomics; + VkBool32 sparseImageInt64Atomics; +} VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT; + + + +#define VK_EXT_memory_budget 1 +#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1 +#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget" +typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS]; + VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryBudgetPropertiesEXT; + + + +#define VK_EXT_memory_priority 1 +#define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1 +#define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME "VK_EXT_memory_priority" +typedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 memoryPriority; +} VkPhysicalDeviceMemoryPriorityFeaturesEXT; + +typedef struct VkMemoryPriorityAllocateInfoEXT { + VkStructureType sType; + const void* pNext; + float priority; +} VkMemoryPriorityAllocateInfoEXT; + + + +#define VK_NV_dedicated_allocation_image_aliasing 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME 
"VK_NV_dedicated_allocation_image_aliasing" +typedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 dedicatedAllocationImageAliasing; +} VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + + + +#define VK_EXT_buffer_device_address 1 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_EXT_buffer_device_address" +typedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeaturesEXT; + +typedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT; + +typedef struct VkBufferDeviceAddressCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceAddress deviceAddress; +} VkBufferDeviceAddressCreateInfoEXT; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); +#endif + + +#define VK_EXT_tooling_info 1 +#define VK_EXT_TOOLING_INFO_SPEC_VERSION 1 +#define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info" + +typedef enum VkToolPurposeFlagBitsEXT { + VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = 0x00000001, + VK_TOOL_PURPOSE_PROFILING_BIT_EXT = 0x00000002, + VK_TOOL_PURPOSE_TRACING_BIT_EXT = 0x00000004, + VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = 0x00000008, + VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = 0x00000010, + VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020, + VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040, + VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkToolPurposeFlagBitsEXT; +typedef VkFlags VkToolPurposeFlagsEXT; +typedef struct VkPhysicalDeviceToolPropertiesEXT { + VkStructureType sType; + void* pNext; + char name[VK_MAX_EXTENSION_NAME_SIZE]; + char version[VK_MAX_EXTENSION_NAME_SIZE]; + VkToolPurposeFlagsEXT purposes; + char description[VK_MAX_DESCRIPTION_SIZE]; + char layer[VK_MAX_EXTENSION_NAME_SIZE]; +} VkPhysicalDeviceToolPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties); +#endif + + +#define VK_EXT_separate_stencil_usage 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME "VK_EXT_separate_stencil_usage" +typedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT; + + + +#define VK_EXT_validation_features 1 +#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 4 +#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features" + +typedef enum VkValidationFeatureEnableEXT { + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0, + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1, + VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2, + VK_VALIDATION_FEATURE_ENABLE_DEBUG_PRINTF_EXT = 3, + 
VK_VALIDATION_FEATURE_ENABLE_SYNCHRONIZATION_VALIDATION_EXT = 4, + VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureEnableEXT; + +typedef enum VkValidationFeatureDisableEXT { + VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0, + VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1, + VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2, + VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3, + VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4, + VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5, + VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6, + VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureDisableEXT; +typedef struct VkValidationFeaturesEXT { + VkStructureType sType; + const void* pNext; + uint32_t enabledValidationFeatureCount; + const VkValidationFeatureEnableEXT* pEnabledValidationFeatures; + uint32_t disabledValidationFeatureCount; + const VkValidationFeatureDisableEXT* pDisabledValidationFeatures; +} VkValidationFeaturesEXT; + + + +#define VK_NV_cooperative_matrix 1 +#define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1 +#define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_NV_cooperative_matrix" + +typedef enum VkComponentTypeNV { + VK_COMPONENT_TYPE_FLOAT16_NV = 0, + VK_COMPONENT_TYPE_FLOAT32_NV = 1, + VK_COMPONENT_TYPE_FLOAT64_NV = 2, + VK_COMPONENT_TYPE_SINT8_NV = 3, + VK_COMPONENT_TYPE_SINT16_NV = 4, + VK_COMPONENT_TYPE_SINT32_NV = 5, + VK_COMPONENT_TYPE_SINT64_NV = 6, + VK_COMPONENT_TYPE_UINT8_NV = 7, + VK_COMPONENT_TYPE_UINT16_NV = 8, + VK_COMPONENT_TYPE_UINT32_NV = 9, + VK_COMPONENT_TYPE_UINT64_NV = 10, + VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkComponentTypeNV; + +typedef enum VkScopeNV { + VK_SCOPE_DEVICE_NV = 1, + VK_SCOPE_WORKGROUP_NV = 2, + VK_SCOPE_SUBGROUP_NV = 3, + VK_SCOPE_QUEUE_FAMILY_NV = 5, + VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkScopeNV; +typedef struct VkCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t MSize; + uint32_t NSize; + uint32_t KSize; + VkComponentTypeNV AType; + VkComponentTypeNV BType; + VkComponentTypeNV CType; + VkComponentTypeNV DType; + VkScopeNV scope; +} VkCooperativeMatrixPropertiesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cooperativeMatrix; + VkBool32 cooperativeMatrixRobustBufferAccess; +} VkPhysicalDeviceCooperativeMatrixFeaturesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + VkShaderStageFlags cooperativeMatrixSupportedStages; +} VkPhysicalDeviceCooperativeMatrixPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesNV* pProperties); +#endif + + +#define VK_NV_coverage_reduction_mode 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME "VK_NV_coverage_reduction_mode" + +typedef enum VkCoverageReductionModeNV { + VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0, + VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1, + VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageReductionModeNV; +typedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV; +typedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV { 
+ VkStructureType sType; + void* pNext; + VkBool32 coverageReductionMode; +} VkPhysicalDeviceCoverageReductionModeFeaturesNV; + +typedef struct VkPipelineCoverageReductionStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageReductionStateCreateFlagsNV flags; + VkCoverageReductionModeNV coverageReductionMode; +} VkPipelineCoverageReductionStateCreateInfoNV; + +typedef struct VkFramebufferMixedSamplesCombinationNV { + VkStructureType sType; + void* pNext; + VkCoverageReductionModeNV coverageReductionMode; + VkSampleCountFlagBits rasterizationSamples; + VkSampleCountFlags depthStencilSamples; + VkSampleCountFlags colorSamples; +} VkFramebufferMixedSamplesCombinationNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + VkPhysicalDevice physicalDevice, + uint32_t* pCombinationCount, + VkFramebufferMixedSamplesCombinationNV* pCombinations); +#endif + + +#define VK_EXT_fragment_shader_interlock 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME "VK_EXT_fragment_shader_interlock" +typedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderSampleInterlock; + VkBool32 fragmentShaderPixelInterlock; + VkBool32 fragmentShaderShadingRateInterlock; +} VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT; + + + +#define VK_EXT_ycbcr_image_arrays 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME "VK_EXT_ycbcr_image_arrays" +typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 ycbcrImageArrays; +} VkPhysicalDeviceYcbcrImageArraysFeaturesEXT; + + + +#define VK_EXT_headless_surface 1 +#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1 +#define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface" +typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT; +typedef struct VkHeadlessSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkHeadlessSurfaceCreateFlagsEXT flags; +} VkHeadlessSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT( + VkInstance instance, + const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_EXT_line_rasterization 1 +#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization" + +typedef enum VkLineRasterizationModeEXT { + VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1, + VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3, + VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkLineRasterizationModeEXT; +typedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 rectangularLines; + VkBool32 bresenhamLines; + VkBool32 smoothLines; + VkBool32 
stippledRectangularLines; + VkBool32 stippledBresenhamLines; + VkBool32 stippledSmoothLines; +} VkPhysicalDeviceLineRasterizationFeaturesEXT; + +typedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t lineSubPixelPrecisionBits; +} VkPhysicalDeviceLineRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationLineStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkLineRasterizationModeEXT lineRasterizationMode; + VkBool32 stippledLineEnable; + uint32_t lineStippleFactor; + uint16_t lineStipplePattern; +} VkPipelineRasterizationLineStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); +#endif + + +#define VK_EXT_shader_atomic_float 1 +#define VK_EXT_SHADER_ATOMIC_FLOAT_SPEC_VERSION 1 +#define VK_EXT_SHADER_ATOMIC_FLOAT_EXTENSION_NAME "VK_EXT_shader_atomic_float" +typedef struct VkPhysicalDeviceShaderAtomicFloatFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferFloat32Atomics; + VkBool32 shaderBufferFloat32AtomicAdd; + VkBool32 shaderBufferFloat64Atomics; + VkBool32 shaderBufferFloat64AtomicAdd; + VkBool32 shaderSharedFloat32Atomics; + VkBool32 shaderSharedFloat32AtomicAdd; + VkBool32 shaderSharedFloat64Atomics; + VkBool32 shaderSharedFloat64AtomicAdd; + VkBool32 shaderImageFloat32Atomics; + VkBool32 shaderImageFloat32AtomicAdd; + VkBool32 sparseImageFloat32Atomics; + VkBool32 sparseImageFloat32AtomicAdd; +} VkPhysicalDeviceShaderAtomicFloatFeaturesEXT; + + + +#define VK_EXT_host_query_reset 1 +#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1 +#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset" +typedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); +#endif + + +#define VK_EXT_index_type_uint8 1 +#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1 +#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8" +typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 indexTypeUint8; +} VkPhysicalDeviceIndexTypeUint8FeaturesEXT; + + + +#define VK_EXT_extended_dynamic_state 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_SPEC_VERSION 1 +#define VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME "VK_EXT_extended_dynamic_state" +typedef struct VkPhysicalDeviceExtendedDynamicStateFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 extendedDynamicState; +} VkPhysicalDeviceExtendedDynamicStateFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCullModeEXT)(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode); +typedef void (VKAPI_PTR *PFN_vkCmdSetFrontFaceEXT)(VkCommandBuffer commandBuffer, VkFrontFace frontFace); +typedef void (VKAPI_PTR *PFN_vkCmdSetPrimitiveTopologyEXT)(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport* pViewports); +typedef 
void (VKAPI_PTR *PFN_vkCmdSetScissorWithCountEXT)(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D* pScissors); +typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers2EXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes, const VkDeviceSize* pStrides); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthWriteEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthCompareOpEXT)(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp); +typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBoundsTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 depthBoundsTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilTestEnableEXT)(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable); +typedef void (VKAPI_PTR *PFN_vkCmdSetStencilOpEXT)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCullModeEXT( + VkCommandBuffer commandBuffer, + VkCullModeFlags cullMode); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetFrontFaceEXT( + VkCommandBuffer commandBuffer, + VkFrontFace frontFace); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetPrimitiveTopologyEXT( + VkCommandBuffer commandBuffer, + VkPrimitiveTopology primitiveTopology); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissorWithCountEXT( + VkCommandBuffer commandBuffer, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers2EXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes, + const VkDeviceSize* pStrides); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthTestEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthWriteEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthWriteEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthCompareOpEXT( + VkCommandBuffer commandBuffer, + VkCompareOp depthCompareOp); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBoundsTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 depthBoundsTestEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilTestEnableEXT( + VkCommandBuffer commandBuffer, + VkBool32 stencilTestEnable); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilOpEXT( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + VkStencilOp failOp, + VkStencilOp passOp, + VkStencilOp depthFailOp, + VkCompareOp compareOp); +#endif + + +#define VK_EXT_shader_demote_to_helper_invocation 1 +#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1 +#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation" +typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderDemoteToHelperInvocation; +} VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + + + +#define VK_NV_device_generated_commands 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNV) +#define 
VK_NV_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3 +#define VK_NV_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NV_device_generated_commands" + +typedef enum VkIndirectCommandsTokenTypeNV { + VK_INDIRECT_COMMANDS_TOKEN_TYPE_SHADER_GROUP_NV = 0, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_STATE_FLAGS_NV = 1, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NV = 2, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NV = 3, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NV = 4, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NV = 5, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NV = 6, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_TASKS_NV = 7, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkIndirectCommandsTokenTypeNV; + +typedef enum VkIndirectStateFlagBitsNV { + VK_INDIRECT_STATE_FLAG_FRONTFACE_BIT_NV = 0x00000001, + VK_INDIRECT_STATE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkIndirectStateFlagBitsNV; +typedef VkFlags VkIndirectStateFlagsNV; + +typedef enum VkIndirectCommandsLayoutUsageFlagBitsNV { + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EXPLICIT_PREPROCESS_BIT_NV = 0x00000001, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NV = 0x00000002, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NV = 0x00000004, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkIndirectCommandsLayoutUsageFlagBitsNV; +typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNV; +typedef struct VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxGraphicsShaderGroupCount; + uint32_t maxIndirectSequenceCount; + uint32_t maxIndirectCommandsTokenCount; + uint32_t maxIndirectCommandsStreamCount; + uint32_t maxIndirectCommandsTokenOffset; + uint32_t maxIndirectCommandsStreamStride; + uint32_t minSequencesCountBufferOffsetAlignment; + uint32_t minSequencesIndexBufferOffsetAlignment; + uint32_t minIndirectCommandsBufferOffsetAlignment; +} VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV; + +typedef struct VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 deviceGeneratedCommands; +} VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV; + +typedef struct VkGraphicsShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + const VkPipelineTessellationStateCreateInfo* pTessellationState; +} VkGraphicsShaderGroupCreateInfoNV; + +typedef struct VkGraphicsPipelineShaderGroupsCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t groupCount; + const VkGraphicsShaderGroupCreateInfoNV* pGroups; + uint32_t pipelineCount; + const VkPipeline* pPipelines; +} VkGraphicsPipelineShaderGroupsCreateInfoNV; + +typedef struct VkBindShaderGroupIndirectCommandNV { + uint32_t groupIndex; +} VkBindShaderGroupIndirectCommandNV; + +typedef struct VkBindIndexBufferIndirectCommandNV { + VkDeviceAddress bufferAddress; + uint32_t size; + VkIndexType indexType; +} VkBindIndexBufferIndirectCommandNV; + +typedef struct VkBindVertexBufferIndirectCommandNV { + VkDeviceAddress bufferAddress; + uint32_t size; + uint32_t stride; +} VkBindVertexBufferIndirectCommandNV; + +typedef struct VkSetStateFlagsIndirectCommandNV { + uint32_t data; +} VkSetStateFlagsIndirectCommandNV; + +typedef struct VkIndirectCommandsStreamNV { + VkBuffer buffer; + VkDeviceSize offset; +} VkIndirectCommandsStreamNV; + +typedef struct VkIndirectCommandsLayoutTokenNV { + VkStructureType sType; + 
const void* pNext; + VkIndirectCommandsTokenTypeNV tokenType; + uint32_t stream; + uint32_t offset; + uint32_t vertexBindingUnit; + VkBool32 vertexDynamicStride; + VkPipelineLayout pushconstantPipelineLayout; + VkShaderStageFlags pushconstantShaderStageFlags; + uint32_t pushconstantOffset; + uint32_t pushconstantSize; + VkIndirectStateFlagsNV indirectStateFlags; + uint32_t indexTypeCount; + const VkIndexType* pIndexTypes; + const uint32_t* pIndexTypeValues; +} VkIndirectCommandsLayoutTokenNV; + +typedef struct VkIndirectCommandsLayoutCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkIndirectCommandsLayoutUsageFlagsNV flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t tokenCount; + const VkIndirectCommandsLayoutTokenNV* pTokens; + uint32_t streamCount; + const uint32_t* pStreamStrides; +} VkIndirectCommandsLayoutCreateInfoNV; + +typedef struct VkGeneratedCommandsInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkPipeline pipeline; + VkIndirectCommandsLayoutNV indirectCommandsLayout; + uint32_t streamCount; + const VkIndirectCommandsStreamNV* pStreams; + uint32_t sequencesCount; + VkBuffer preprocessBuffer; + VkDeviceSize preprocessOffset; + VkDeviceSize preprocessSize; + VkBuffer sequencesCountBuffer; + VkDeviceSize sequencesCountOffset; + VkBuffer sequencesIndexBuffer; + VkDeviceSize sequencesIndexOffset; +} VkGeneratedCommandsInfoNV; + +typedef struct VkGeneratedCommandsMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkPipeline pipeline; + VkIndirectCommandsLayoutNV indirectCommandsLayout; + uint32_t maxSequencesCount; +} VkGeneratedCommandsMemoryRequirementsInfoNV; + +typedef void (VKAPI_PTR *PFN_vkGetGeneratedCommandsMemoryRequirementsNV)(VkDevice device, const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkCmdPreprocessGeneratedCommandsNV)(VkCommandBuffer commandBuffer, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteGeneratedCommandsNV)(VkCommandBuffer commandBuffer, VkBool32 isPreprocessed, const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBindPipelineShaderGroupNV)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline, uint32_t groupIndex); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNV)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNV* pIndirectCommandsLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNV)(VkDevice device, VkIndirectCommandsLayoutNV indirectCommandsLayout, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetGeneratedCommandsMemoryRequirementsNV( + VkDevice device, + const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkCmdPreprocessGeneratedCommandsNV( + VkCommandBuffer commandBuffer, + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteGeneratedCommandsNV( + VkCommandBuffer commandBuffer, + VkBool32 isPreprocessed, + const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipelineShaderGroupNV( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + 
VkPipeline pipeline, + uint32_t groupIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNV( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutNV* pIndirectCommandsLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNV( + VkDevice device, + VkIndirectCommandsLayoutNV indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator); +#endif + + +#define VK_EXT_texel_buffer_alignment 1 +#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1 +#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME "VK_EXT_texel_buffer_alignment" +typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 texelBufferAlignment; +} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT; + +typedef struct VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize storageTexelBufferOffsetAlignmentBytes; + VkBool32 storageTexelBufferOffsetSingleTexelAlignment; + VkDeviceSize uniformTexelBufferOffsetAlignmentBytes; + VkBool32 uniformTexelBufferOffsetSingleTexelAlignment; +} VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT; + + + +#define VK_QCOM_render_pass_transform 1 +#define VK_QCOM_RENDER_PASS_TRANSFORM_SPEC_VERSION 1 +#define VK_QCOM_RENDER_PASS_TRANSFORM_EXTENSION_NAME "VK_QCOM_render_pass_transform" +typedef struct VkRenderPassTransformBeginInfoQCOM { + VkStructureType sType; + void* pNext; + VkSurfaceTransformFlagBitsKHR transform; +} VkRenderPassTransformBeginInfoQCOM; + +typedef struct VkCommandBufferInheritanceRenderPassTransformInfoQCOM { + VkStructureType sType; + void* pNext; + VkSurfaceTransformFlagBitsKHR transform; + VkRect2D renderArea; +} VkCommandBufferInheritanceRenderPassTransformInfoQCOM; + + + +#define VK_EXT_device_memory_report 1 +#define VK_EXT_DEVICE_MEMORY_REPORT_SPEC_VERSION 1 +#define VK_EXT_DEVICE_MEMORY_REPORT_EXTENSION_NAME "VK_EXT_device_memory_report" + +typedef enum VkDeviceMemoryReportEventTypeEXT { + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT = 0, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT = 1, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT = 2, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT = 3, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT = 4, + VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceMemoryReportEventTypeEXT; +typedef VkFlags VkDeviceMemoryReportFlagsEXT; +typedef struct VkPhysicalDeviceDeviceMemoryReportFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 deviceMemoryReport; +} VkPhysicalDeviceDeviceMemoryReportFeaturesEXT; + +typedef struct VkDeviceMemoryReportCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDeviceMemoryReportFlagsEXT flags; + VkDeviceMemoryReportEventTypeEXT type; + uint64_t memoryObjectId; + VkDeviceSize size; + VkObjectType objectType; + uint64_t objectHandle; + uint32_t heapIndex; +} VkDeviceMemoryReportCallbackDataEXT; + +typedef void (VKAPI_PTR *PFN_vkDeviceMemoryReportCallbackEXT)( + const VkDeviceMemoryReportCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDeviceDeviceMemoryReportCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceMemoryReportFlagsEXT flags; + PFN_vkDeviceMemoryReportCallbackEXT pfnUserCallback; + void* pUserData; +} VkDeviceDeviceMemoryReportCreateInfoEXT; + + + +#define VK_EXT_robustness2 1 +#define VK_EXT_ROBUSTNESS_2_SPEC_VERSION 1 +#define 
VK_EXT_ROBUSTNESS_2_EXTENSION_NAME "VK_EXT_robustness2" +typedef struct VkPhysicalDeviceRobustness2FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 robustBufferAccess2; + VkBool32 robustImageAccess2; + VkBool32 nullDescriptor; +} VkPhysicalDeviceRobustness2FeaturesEXT; + +typedef struct VkPhysicalDeviceRobustness2PropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize robustStorageBufferAccessSizeAlignment; + VkDeviceSize robustUniformBufferAccessSizeAlignment; +} VkPhysicalDeviceRobustness2PropertiesEXT; + + + +#define VK_EXT_custom_border_color 1 +#define VK_EXT_CUSTOM_BORDER_COLOR_SPEC_VERSION 12 +#define VK_EXT_CUSTOM_BORDER_COLOR_EXTENSION_NAME "VK_EXT_custom_border_color" +typedef struct VkSamplerCustomBorderColorCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkClearColorValue customBorderColor; + VkFormat format; +} VkSamplerCustomBorderColorCreateInfoEXT; + +typedef struct VkPhysicalDeviceCustomBorderColorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxCustomBorderColorSamplers; +} VkPhysicalDeviceCustomBorderColorPropertiesEXT; + +typedef struct VkPhysicalDeviceCustomBorderColorFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 customBorderColors; + VkBool32 customBorderColorWithoutFormat; +} VkPhysicalDeviceCustomBorderColorFeaturesEXT; + + + +#define VK_GOOGLE_user_type 1 +#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1 +#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type" + + +#define VK_EXT_private_data 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPrivateDataSlotEXT) +#define VK_EXT_PRIVATE_DATA_SPEC_VERSION 1 +#define VK_EXT_PRIVATE_DATA_EXTENSION_NAME "VK_EXT_private_data" + +typedef enum VkPrivateDataSlotCreateFlagBitsEXT { + VK_PRIVATE_DATA_SLOT_CREATE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPrivateDataSlotCreateFlagBitsEXT; +typedef VkFlags VkPrivateDataSlotCreateFlagsEXT; +typedef struct VkPhysicalDevicePrivateDataFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 privateData; +} VkPhysicalDevicePrivateDataFeaturesEXT; + +typedef struct VkDevicePrivateDataCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t privateDataSlotRequestCount; +} VkDevicePrivateDataCreateInfoEXT; + +typedef struct VkPrivateDataSlotCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPrivateDataSlotCreateFlagsEXT flags; +} VkPrivateDataSlotCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreatePrivateDataSlotEXT)(VkDevice device, const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPrivateDataSlotEXT* pPrivateDataSlot); +typedef void (VKAPI_PTR *PFN_vkDestroyPrivateDataSlotEXT)(VkDevice device, VkPrivateDataSlotEXT privateDataSlot, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkSetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t data); +typedef void (VKAPI_PTR *PFN_vkGetPrivateDataEXT)(VkDevice device, VkObjectType objectType, uint64_t objectHandle, VkPrivateDataSlotEXT privateDataSlot, uint64_t* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePrivateDataSlotEXT( + VkDevice device, + const VkPrivateDataSlotCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPrivateDataSlotEXT* pPrivateDataSlot); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPrivateDataSlotEXT( + VkDevice device, + VkPrivateDataSlotEXT privateDataSlot, + const VkAllocationCallbacks* pAllocator); + 
+VKAPI_ATTR VkResult VKAPI_CALL vkSetPrivateDataEXT( + VkDevice device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t data); + +VKAPI_ATTR void VKAPI_CALL vkGetPrivateDataEXT( + VkDevice device, + VkObjectType objectType, + uint64_t objectHandle, + VkPrivateDataSlotEXT privateDataSlot, + uint64_t* pData); +#endif + + +#define VK_EXT_pipeline_creation_cache_control 1 +#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_SPEC_VERSION 3 +#define VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME "VK_EXT_pipeline_creation_cache_control" +typedef struct VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 pipelineCreationCacheControl; +} VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT; + + + +#define VK_NV_device_diagnostics_config 1 +#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_SPEC_VERSION 1 +#define VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME "VK_NV_device_diagnostics_config" + +typedef enum VkDeviceDiagnosticsConfigFlagBitsNV { + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV = 0x00000001, + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV = 0x00000002, + VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV = 0x00000004, + VK_DEVICE_DIAGNOSTICS_CONFIG_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkDeviceDiagnosticsConfigFlagBitsNV; +typedef VkFlags VkDeviceDiagnosticsConfigFlagsNV; +typedef struct VkPhysicalDeviceDiagnosticsConfigFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 diagnosticsConfig; +} VkPhysicalDeviceDiagnosticsConfigFeaturesNV; + +typedef struct VkDeviceDiagnosticsConfigCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceDiagnosticsConfigFlagsNV flags; +} VkDeviceDiagnosticsConfigCreateInfoNV; + + + +#define VK_QCOM_render_pass_store_ops 1 +#define VK_QCOM_render_pass_store_ops_SPEC_VERSION 2 +#define VK_QCOM_render_pass_store_ops_EXTENSION_NAME "VK_QCOM_render_pass_store_ops" + + +#define VK_NV_fragment_shading_rate_enums 1 +#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME "VK_NV_fragment_shading_rate_enums" + +typedef enum VkFragmentShadingRateTypeNV { + VK_FRAGMENT_SHADING_RATE_TYPE_FRAGMENT_SIZE_NV = 0, + VK_FRAGMENT_SHADING_RATE_TYPE_ENUMS_NV = 1, + VK_FRAGMENT_SHADING_RATE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkFragmentShadingRateTypeNV; + +typedef enum VkFragmentShadingRateNV { + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV = 0, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV = 1, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV = 4, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV = 5, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV = 6, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_FRAGMENT_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV = 10, + VK_FRAGMENT_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV = 11, + VK_FRAGMENT_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV = 12, + VK_FRAGMENT_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV = 13, + VK_FRAGMENT_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV = 14, + VK_FRAGMENT_SHADING_RATE_NO_INVOCATIONS_NV = 15, + VK_FRAGMENT_SHADING_RATE_MAX_ENUM_NV = 0x7FFFFFFF +} VkFragmentShadingRateNV; +typedef struct VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShadingRateEnums; + VkBool32 supersampleFragmentShadingRates; + VkBool32 noInvocationFragmentShadingRates; +} 
VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV; + +typedef struct VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV { + VkStructureType sType; + void* pNext; + VkSampleCountFlagBits maxFragmentShadingRateInvocationCount; +} VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV; + +typedef struct VkPipelineFragmentShadingRateEnumStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkFragmentShadingRateTypeNV shadingRateType; + VkFragmentShadingRateNV shadingRate; + VkFragmentShadingRateCombinerOpKHR combinerOps[2]; +} VkPipelineFragmentShadingRateEnumStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetFragmentShadingRateEnumNV)(VkCommandBuffer commandBuffer, VkFragmentShadingRateNV shadingRate, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetFragmentShadingRateEnumNV( + VkCommandBuffer commandBuffer, + VkFragmentShadingRateNV shadingRate, + const VkFragmentShadingRateCombinerOpKHR combinerOps[2]); +#endif + + +#define VK_EXT_fragment_density_map2 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_2_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME "VK_EXT_fragment_density_map2" +typedef struct VkPhysicalDeviceFragmentDensityMap2FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMapDeferred; +} VkPhysicalDeviceFragmentDensityMap2FeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMap2PropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 subsampledLoads; + VkBool32 subsampledCoarseReconstructionEarlyAccess; + uint32_t maxSubsampledArrayLayers; + uint32_t maxDescriptorSetSubsampledSamplers; +} VkPhysicalDeviceFragmentDensityMap2PropertiesEXT; + + + +#define VK_QCOM_rotated_copy_commands 1 +#define VK_QCOM_rotated_copy_commands_SPEC_VERSION 0 +#define VK_QCOM_rotated_copy_commands_EXTENSION_NAME "VK_QCOM_rotated_copy_commands" +typedef struct VkCopyCommandTransformInfoQCOM { + VkStructureType sType; + const void* pNext; + VkSurfaceTransformFlagBitsKHR transform; +} VkCopyCommandTransformInfoQCOM; + + + +#define VK_EXT_image_robustness 1 +#define VK_EXT_IMAGE_ROBUSTNESS_SPEC_VERSION 1 +#define VK_EXT_IMAGE_ROBUSTNESS_EXTENSION_NAME "VK_EXT_image_robustness" +typedef struct VkPhysicalDeviceImageRobustnessFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 robustImageAccess; +} VkPhysicalDeviceImageRobustnessFeaturesEXT; + + + +#define VK_EXT_4444_formats 1 +#define VK_EXT_4444_FORMATS_SPEC_VERSION 1 +#define VK_EXT_4444_FORMATS_EXTENSION_NAME "VK_EXT_4444_formats" +typedef struct VkPhysicalDevice4444FormatsFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 formatA4R4G4B4; + VkBool32 formatA4B4G4R4; +} VkPhysicalDevice4444FormatsFeaturesEXT; + + + +#define VK_KHR_acceleration_structure 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureKHR) +#define VK_KHR_ACCELERATION_STRUCTURE_SPEC_VERSION 11 +#define VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME "VK_KHR_acceleration_structure" + +typedef enum VkBuildAccelerationStructureModeKHR { + VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1, + VK_BUILD_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkBuildAccelerationStructureModeKHR; + +typedef enum VkAccelerationStructureBuildTypeKHR { + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, + 
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureBuildTypeKHR; + +typedef enum VkAccelerationStructureCompatibilityKHR { + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCompatibilityKHR; + +typedef enum VkAccelerationStructureCreateFlagBitsKHR { + VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x00000001, + VK_ACCELERATION_STRUCTURE_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAccelerationStructureCreateFlagBitsKHR; +typedef VkFlags VkAccelerationStructureCreateFlagsKHR; +typedef union VkDeviceOrHostAddressKHR { + VkDeviceAddress deviceAddress; + void* hostAddress; +} VkDeviceOrHostAddressKHR; + +typedef union VkDeviceOrHostAddressConstKHR { + VkDeviceAddress deviceAddress; + const void* hostAddress; +} VkDeviceOrHostAddressConstKHR; + +typedef struct VkAccelerationStructureBuildRangeInfoKHR { + uint32_t primitiveCount; + uint32_t primitiveOffset; + uint32_t firstVertex; + uint32_t transformOffset; +} VkAccelerationStructureBuildRangeInfoKHR; + +typedef struct VkAccelerationStructureGeometryTrianglesDataKHR { + VkStructureType sType; + const void* pNext; + VkFormat vertexFormat; + VkDeviceOrHostAddressConstKHR vertexData; + VkDeviceSize vertexStride; + uint32_t maxVertex; + VkIndexType indexType; + VkDeviceOrHostAddressConstKHR indexData; + VkDeviceOrHostAddressConstKHR transformData; +} VkAccelerationStructureGeometryTrianglesDataKHR; + +typedef struct VkAccelerationStructureGeometryAabbsDataKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR data; + VkDeviceSize stride; +} VkAccelerationStructureGeometryAabbsDataKHR; + +typedef struct VkAccelerationStructureGeometryInstancesDataKHR { + VkStructureType sType; + const void* pNext; + VkBool32 arrayOfPointers; + VkDeviceOrHostAddressConstKHR data; +} VkAccelerationStructureGeometryInstancesDataKHR; + +typedef union VkAccelerationStructureGeometryDataKHR { + VkAccelerationStructureGeometryTrianglesDataKHR triangles; + VkAccelerationStructureGeometryAabbsDataKHR aabbs; + VkAccelerationStructureGeometryInstancesDataKHR instances; +} VkAccelerationStructureGeometryDataKHR; + +typedef struct VkAccelerationStructureGeometryKHR { + VkStructureType sType; + const void* pNext; + VkGeometryTypeKHR geometryType; + VkAccelerationStructureGeometryDataKHR geometry; + VkGeometryFlagsKHR flags; +} VkAccelerationStructureGeometryKHR; + +typedef struct VkAccelerationStructureBuildGeometryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeKHR type; + VkBuildAccelerationStructureFlagsKHR flags; + VkBuildAccelerationStructureModeKHR mode; + VkAccelerationStructureKHR srcAccelerationStructure; + VkAccelerationStructureKHR dstAccelerationStructure; + uint32_t geometryCount; + const VkAccelerationStructureGeometryKHR* pGeometries; + const VkAccelerationStructureGeometryKHR* const* ppGeometries; + VkDeviceOrHostAddressKHR scratchData; +} VkAccelerationStructureBuildGeometryInfoKHR; + +typedef struct VkAccelerationStructureCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureCreateFlagsKHR createFlags; + VkBuffer buffer; + VkDeviceSize offset; + VkDeviceSize size; + VkAccelerationStructureTypeKHR type; + VkDeviceAddress deviceAddress; +} VkAccelerationStructureCreateInfoKHR; + +typedef struct 
VkWriteDescriptorSetAccelerationStructureKHR { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureKHR* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureKHR; + +typedef struct VkPhysicalDeviceAccelerationStructureFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 accelerationStructure; + VkBool32 accelerationStructureCaptureReplay; + VkBool32 accelerationStructureIndirectBuild; + VkBool32 accelerationStructureHostCommands; + VkBool32 descriptorBindingAccelerationStructureUpdateAfterBind; +} VkPhysicalDeviceAccelerationStructureFeaturesKHR; + +typedef struct VkPhysicalDeviceAccelerationStructurePropertiesKHR { + VkStructureType sType; + void* pNext; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxPrimitiveCount; + uint32_t maxPerStageDescriptorAccelerationStructures; + uint32_t maxPerStageDescriptorUpdateAfterBindAccelerationStructures; + uint32_t maxDescriptorSetAccelerationStructures; + uint32_t maxDescriptorSetUpdateAfterBindAccelerationStructures; + uint32_t minAccelerationStructureScratchOffsetAlignment; +} VkPhysicalDeviceAccelerationStructurePropertiesKHR; + +typedef struct VkAccelerationStructureDeviceAddressInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR accelerationStructure; +} VkAccelerationStructureDeviceAddressInfoKHR; + +typedef struct VkAccelerationStructureVersionInfoKHR { + VkStructureType sType; + const void* pNext; + const uint8_t* pVersionData; +} VkAccelerationStructureVersionInfoKHR; + +typedef struct VkCopyAccelerationStructureToMemoryInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkDeviceOrHostAddressKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureToMemoryInfoKHR; + +typedef struct VkCopyMemoryToAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceOrHostAddressConstKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyMemoryToAccelerationStructureInfoKHR; + +typedef struct VkCopyAccelerationStructureInfoKHR { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureKHR src; + VkAccelerationStructureKHR dst; + VkCopyAccelerationStructureModeKHR mode; +} VkCopyAccelerationStructureInfoKHR; + +typedef struct VkAccelerationStructureBuildSizesInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceSize accelerationStructureSize; + VkDeviceSize updateScratchSize; + VkDeviceSize buildScratchSize; +} VkAccelerationStructureBuildSizesInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureKHR)(VkDevice device, VkAccelerationStructureKHR accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructuresIndirectKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkDeviceAddress* pIndirectDeviceAddresses, const uint32_t* 
pIndirectStrides, const uint32_t* const* ppMaxPrimitiveCounts); +typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructuresKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesKHR)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef void (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionInfoKHR* pVersionInfo, VkAccelerationStructureCompatibilityKHR* pCompatibility); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureBuildSizesKHR)(VkDevice device, VkAccelerationStructureBuildTypeKHR buildType, const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, const uint32_t* pMaxPrimitiveCounts, VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR( + VkDevice device, + const VkAccelerationStructureCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureKHR* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureKHR( + VkDevice device, + VkAccelerationStructureKHR accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructuresIndirectKHR( + VkCommandBuffer commandBuffer, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkDeviceAddress* pIndirectDeviceAddresses, + const uint32_t* 
pIndirectStrides, + const uint32_t* const* ppMaxPrimitiveCounts); + +VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructuresKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + uint32_t infoCount, + const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, + const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR( + VkDevice device, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + size_t dataSize, + void* pData, + size_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR( + VkCommandBuffer commandBuffer, + const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR( + VkCommandBuffer commandBuffer, + const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR( + VkDevice device, + const VkAccelerationStructureDeviceAddressInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesKHR( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureKHR* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR( + VkDevice device, + const VkAccelerationStructureVersionInfoKHR* pVersionInfo, + VkAccelerationStructureCompatibilityKHR* pCompatibility); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureBuildSizesKHR( + VkDevice device, + VkAccelerationStructureBuildTypeKHR buildType, + const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo, + const uint32_t* pMaxPrimitiveCounts, + VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo); +#endif + + +#define VK_KHR_ray_tracing_pipeline 1 +#define VK_KHR_RAY_TRACING_PIPELINE_SPEC_VERSION 1 +#define VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME "VK_KHR_ray_tracing_pipeline" + +typedef enum VkShaderGroupShaderKHR { + VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0, + VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1, + VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2, + VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3, + VK_SHADER_GROUP_SHADER_MAX_ENUM_KHR = 0x7FFFFFFF +} VkShaderGroupShaderKHR; +typedef struct VkRayTracingShaderGroupCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeKHR type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; + const void* pShaderGroupCaptureReplayHandle; +} VkRayTracingShaderGroupCreateInfoKHR; + +typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR { + 
VkStructureType sType; + const void* pNext; + uint32_t maxPipelineRayPayloadSize; + uint32_t maxPipelineRayHitAttributeSize; +} VkRayTracingPipelineInterfaceCreateInfoKHR; + +typedef struct VkRayTracingPipelineCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoKHR* pGroups; + uint32_t maxPipelineRayRecursionDepth; + const VkPipelineLibraryCreateInfoKHR* pLibraryInfo; + const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface; + const VkPipelineDynamicStateCreateInfo* pDynamicState; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelineFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayTracingPipeline; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplay; + VkBool32 rayTracingPipelineShaderGroupHandleCaptureReplayMixed; + VkBool32 rayTracingPipelineTraceRaysIndirect; + VkBool32 rayTraversalPrimitiveCulling; +} VkPhysicalDeviceRayTracingPipelineFeaturesKHR; + +typedef struct VkPhysicalDeviceRayTracingPipelinePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRayRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint32_t shaderGroupHandleCaptureReplaySize; + uint32_t maxRayDispatchInvocationCount; + uint32_t shaderGroupHandleAlignment; + uint32_t maxRayHitAttributeSize; +} VkPhysicalDeviceRayTracingPipelinePropertiesKHR; + +typedef struct VkStridedDeviceAddressRegionKHR { + VkDeviceAddress deviceAddress; + VkDeviceSize stride; + VkDeviceSize size; +} VkStridedDeviceAddressRegionKHR; + +typedef struct VkTraceRaysIndirectCommandKHR { + uint32_t width; + uint32_t height; + uint32_t depth; +} VkTraceRaysIndirectCommandKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, VkDeviceAddress indirectDeviceAddress); +typedef VkDeviceSize (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupStackSizeKHR)(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader); +typedef void (VKAPI_PTR *PFN_vkCmdSetRayTracingPipelineStackSizeKHR)(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize); + 
+#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR( + VkDevice device, + VkDeferredOperationKHR deferredOperation, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR( + VkCommandBuffer commandBuffer, + const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, + const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, + VkDeviceAddress indirectDeviceAddress); + +VKAPI_ATTR VkDeviceSize VKAPI_CALL vkGetRayTracingShaderGroupStackSizeKHR( + VkDevice device, + VkPipeline pipeline, + uint32_t group, + VkShaderGroupShaderKHR groupShader); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetRayTracingPipelineStackSizeKHR( + VkCommandBuffer commandBuffer, + uint32_t pipelineStackSize); +#endif + + +#define VK_KHR_ray_query 1 +#define VK_KHR_RAY_QUERY_SPEC_VERSION 1 +#define VK_KHR_RAY_QUERY_EXTENSION_NAME "VK_KHR_ray_query" +typedef struct VkPhysicalDeviceRayQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 rayQuery; +} VkPhysicalDeviceRayQueryFeaturesKHR; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_directfb.h b/src/third_party/vulkan/vulkan_directfb.h new file mode 100644 index 0000000..f75bd3a --- /dev/null +++ b/src/third_party/vulkan/vulkan_directfb.h @@ -0,0 +1,54 @@ +#ifndef VULKAN_DIRECTFB_H_ +#define VULKAN_DIRECTFB_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_EXT_directfb_surface 1 +#define VK_EXT_DIRECTFB_SURFACE_SPEC_VERSION 1 +#define VK_EXT_DIRECTFB_SURFACE_EXTENSION_NAME "VK_EXT_directfb_surface" +typedef VkFlags VkDirectFBSurfaceCreateFlagsEXT; +typedef struct VkDirectFBSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDirectFBSurfaceCreateFlagsEXT flags; + IDirectFB* dfb; + IDirectFBSurface* surface; +} VkDirectFBSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDirectFBSurfaceEXT)(VkInstance instance, const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, IDirectFB* dfb); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDirectFBSurfaceEXT( + VkInstance instance, + const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceDirectFBPresentationSupportEXT( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + IDirectFB* dfb); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_fuchsia.h b/src/third_party/vulkan/vulkan_fuchsia.h new file mode 100644 index 0000000..03e27cb --- /dev/null +++ b/src/third_party/vulkan/vulkan_fuchsia.h @@ -0,0 +1,47 @@ +#ifndef VULKAN_FUCHSIA_H_ +#define VULKAN_FUCHSIA_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_FUCHSIA_imagepipe_surface 1 +#define VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION 1 +#define VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME "VK_FUCHSIA_imagepipe_surface" +typedef VkFlags VkImagePipeSurfaceCreateFlagsFUCHSIA; +typedef struct VkImagePipeSurfaceCreateInfoFUCHSIA { + VkStructureType sType; + const void* pNext; + VkImagePipeSurfaceCreateFlagsFUCHSIA flags; + zx_handle_t imagePipeHandle; +} VkImagePipeSurfaceCreateInfoFUCHSIA; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateImagePipeSurfaceFUCHSIA)(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA( + VkInstance instance, + const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_ggp.h b/src/third_party/vulkan/vulkan_ggp.h new file mode 100644 index 0000000..273c880 --- /dev/null +++ b/src/third_party/vulkan/vulkan_ggp.h @@ -0,0 +1,58 @@ +#ifndef VULKAN_GGP_H_ +#define VULKAN_GGP_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_GGP_stream_descriptor_surface 1 +#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION 1 +#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME "VK_GGP_stream_descriptor_surface" +typedef VkFlags VkStreamDescriptorSurfaceCreateFlagsGGP; +typedef struct VkStreamDescriptorSurfaceCreateInfoGGP { + VkStructureType sType; + const void* pNext; + VkStreamDescriptorSurfaceCreateFlagsGGP flags; + GgpStreamDescriptor streamDescriptor; +} VkStreamDescriptorSurfaceCreateInfoGGP; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateStreamDescriptorSurfaceGGP)(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateStreamDescriptorSurfaceGGP( + VkInstance instance, + const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_GGP_frame_token 1 +#define VK_GGP_FRAME_TOKEN_SPEC_VERSION 1 +#define VK_GGP_FRAME_TOKEN_EXTENSION_NAME "VK_GGP_frame_token" +typedef struct VkPresentFrameTokenGGP { + VkStructureType sType; + const void* pNext; + GgpFrameToken frameToken; +} VkPresentFrameTokenGGP; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_ios.h b/src/third_party/vulkan/vulkan_ios.h new file mode 100644 index 0000000..446a269 --- /dev/null +++ b/src/third_party/vulkan/vulkan_ios.h @@ -0,0 +1,47 @@ +#ifndef VULKAN_IOS_H_ +#define VULKAN_IOS_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_MVK_ios_surface 1 +#define VK_MVK_IOS_SURFACE_SPEC_VERSION 3 +#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface" +typedef VkFlags VkIOSSurfaceCreateFlagsMVK; +typedef struct VkIOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkIOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkIOSSurfaceCreateInfoMVK; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK( + VkInstance instance, + const VkIOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_macos.h b/src/third_party/vulkan/vulkan_macos.h new file mode 100644 index 0000000..35fcabe --- /dev/null +++ b/src/third_party/vulkan/vulkan_macos.h @@ -0,0 +1,47 @@ +#ifndef VULKAN_MACOS_H_ +#define VULKAN_MACOS_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_MVK_macos_surface 1 +#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 3 +#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface" +typedef VkFlags VkMacOSSurfaceCreateFlagsMVK; +typedef struct VkMacOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkMacOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkMacOSSurfaceCreateInfoMVK; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK( + VkInstance instance, + const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_metal.h b/src/third_party/vulkan/vulkan_metal.h new file mode 100644 index 0000000..99f097d --- /dev/null +++ b/src/third_party/vulkan/vulkan_metal.h @@ -0,0 +1,54 @@ +#ifndef VULKAN_METAL_H_ +#define VULKAN_METAL_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_EXT_metal_surface 1 + +#ifdef __OBJC__ +@class CAMetalLayer; +#else +typedef void CAMetalLayer; +#endif + +#define VK_EXT_METAL_SURFACE_SPEC_VERSION 1 +#define VK_EXT_METAL_SURFACE_EXTENSION_NAME "VK_EXT_metal_surface" +typedef VkFlags VkMetalSurfaceCreateFlagsEXT; +typedef struct VkMetalSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkMetalSurfaceCreateFlagsEXT flags; + const CAMetalLayer* pLayer; +} VkMetalSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT( + VkInstance instance, + const VkMetalSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_vi.h b/src/third_party/vulkan/vulkan_vi.h new file mode 100644 index 0000000..2e62d7d --- /dev/null +++ b/src/third_party/vulkan/vulkan_vi.h @@ -0,0 +1,47 @@ +#ifndef VULKAN_VI_H_ +#define VULKAN_VI_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_NN_vi_surface 1 +#define VK_NN_VI_SURFACE_SPEC_VERSION 1 +#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface" +typedef VkFlags VkViSurfaceCreateFlagsNN; +typedef struct VkViSurfaceCreateInfoNN { + VkStructureType sType; + const void* pNext; + VkViSurfaceCreateFlagsNN flags; + void* window; +} VkViSurfaceCreateInfoNN; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN( + VkInstance instance, + const VkViSurfaceCreateInfoNN* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_wayland.h b/src/third_party/vulkan/vulkan_wayland.h new file mode 100644 index 0000000..f7b307e --- /dev/null +++ b/src/third_party/vulkan/vulkan_wayland.h @@ -0,0 +1,54 @@ +#ifndef VULKAN_WAYLAND_H_ +#define VULKAN_WAYLAND_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_wayland_surface 1 +#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface" +typedef VkFlags VkWaylandSurfaceCreateFlagsKHR; +typedef struct VkWaylandSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWaylandSurfaceCreateFlagsKHR flags; + struct wl_display* display; + struct wl_surface* surface; +} VkWaylandSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR( + VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display* display); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/third_party/vulkan/vulkan_win32.h b/src/third_party/vulkan/vulkan_win32.h new file mode 100644 index 0000000..4b561ea --- /dev/null +++ b/src/third_party/vulkan/vulkan_win32.h @@ -0,0 +1,315 @@ +#ifndef VULKAN_WIN32_H_ +#define VULKAN_WIN32_H_ 1 + +/* +** Copyright (c) 2015-2020 The Khronos Group Inc. +** +** SPDX-License-Identifier: Apache-2.0 +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#define VK_KHR_win32_surface 1
+#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6
+#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
+typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
+typedef struct VkWin32SurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkWin32SurfaceCreateFlagsKHR flags;
+    HINSTANCE hinstance;
+    HWND hwnd;
+} VkWin32SurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
+    VkInstance instance,
+    const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex);
+#endif
+
+
+#define VK_KHR_external_memory_win32 1
+#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32"
+typedef struct VkImportMemoryWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportMemoryWin32HandleInfoKHR;
+
+typedef struct VkExportMemoryWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportMemoryWin32HandleInfoKHR;
+
+typedef struct VkMemoryWin32HandlePropertiesKHR {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t memoryTypeBits;
+} VkMemoryWin32HandlePropertiesKHR;
+
+typedef struct VkMemoryGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceMemory memory;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkMemoryGetWin32HandleInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR(
+    VkDevice device,
+    const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    HANDLE handle,
+    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+#endif
+
+
+#define VK_KHR_win32_keyed_mutex 1
+#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1
+#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex"
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t acquireCount;
+    const VkDeviceMemory* pAcquireSyncs;
+    const uint64_t* pAcquireKeys;
+    const uint32_t* pAcquireTimeouts;
+    uint32_t releaseCount;
+    const VkDeviceMemory* pReleaseSyncs;
+    const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoKHR;
+
+
+
+#define VK_KHR_external_semaphore_win32 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32"
+typedef struct VkImportSemaphoreWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    VkSemaphoreImportFlags flags;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportSemaphoreWin32HandleInfoKHR;
+
+typedef struct VkExportSemaphoreWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportSemaphoreWin32HandleInfoKHR;
+
+typedef struct VkD3D12FenceSubmitInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreValuesCount;
+    const uint64_t* pWaitSemaphoreValues;
+    uint32_t signalSemaphoreValuesCount;
+    const uint64_t* pSignalSemaphoreValues;
+} VkD3D12FenceSubmitInfoKHR;
+
+typedef struct VkSemaphoreGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphore semaphore;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkSemaphoreGetWin32HandleInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+#endif
+
+
+#define VK_KHR_external_fence_win32 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1
+#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32"
+typedef struct VkImportFenceWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkFenceImportFlags flags;
+    VkExternalFenceHandleTypeFlagBits handleType;
+    HANDLE handle;
+    LPCWSTR name;
+} VkImportFenceWin32HandleInfoKHR;
+
+typedef struct VkExportFenceWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+    LPCWSTR name;
+} VkExportFenceWin32HandleInfoKHR;
+
+typedef struct VkFenceGetWin32HandleInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkFence fence;
+    VkExternalFenceHandleTypeFlagBits handleType;
+} VkFenceGetWin32HandleInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(
+    VkDevice device,
+    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(
+    VkDevice device,
+    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle);
+#endif
+
+
+#define VK_NV_external_memory_win32 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
+#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
+typedef struct VkImportMemoryWin32HandleInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagsNV handleType;
+    HANDLE handle;
+} VkImportMemoryWin32HandleInfoNV;
+
+typedef struct VkExportMemoryWin32HandleInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    const SECURITY_ATTRIBUTES* pAttributes;
+    DWORD dwAccess;
+} VkExportMemoryWin32HandleInfoNV;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkExternalMemoryHandleTypeFlagsNV handleType,
+    HANDLE* pHandle);
+#endif
+
+
+#define VK_NV_win32_keyed_mutex 1
+#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2
+#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
+typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t acquireCount;
+    const VkDeviceMemory* pAcquireSyncs;
+    const uint64_t* pAcquireKeys;
+    const uint32_t* pAcquireTimeoutMilliseconds;
+    uint32_t releaseCount;
+    const VkDeviceMemory* pReleaseSyncs;
+    const uint64_t* pReleaseKeys;
+} VkWin32KeyedMutexAcquireReleaseInfoNV;
+
+
+
+#define VK_EXT_full_screen_exclusive 1
+#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4
+#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive"
+
+typedef enum VkFullScreenExclusiveEXT {
+    VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0,
+    VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1,
+    VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2,
+    VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3,
+    VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF
+} VkFullScreenExclusiveEXT;
+typedef struct VkSurfaceFullScreenExclusiveInfoEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkFullScreenExclusiveEXT fullScreenExclusive;
+} VkSurfaceFullScreenExclusiveInfoEXT;
+
+typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 fullScreenExclusiveSupported;
+} VkSurfaceCapabilitiesFullScreenExclusiveEXT;
+
+typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT {
+    VkStructureType sType;
+    const void* pNext;
+    HMONITOR hmonitor;
+} VkSurfaceFullScreenExclusiveWin32InfoEXT;
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
+typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
+typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    uint32_t* pPresentModeCount,
+    VkPresentModeKHR* pPresentModes);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT(
+    VkDevice device,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkDeviceGroupPresentModeFlagsKHR* pModes);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/third_party/vulkan/vulkan_xcb.h b/src/third_party/vulkan/vulkan_xcb.h
new file mode 100644
index 0000000..c5441b2
--- /dev/null
+++ b/src/third_party/vulkan/vulkan_xcb.h
@@ -0,0 +1,55 @@
+#ifndef VULKAN_XCB_H_
+#define VULKAN_XCB_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#define VK_KHR_xcb_surface 1
+#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
+typedef VkFlags VkXcbSurfaceCreateFlagsKHR;
+typedef struct VkXcbSurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkXcbSurfaceCreateFlagsKHR flags;
+    xcb_connection_t* connection;
+    xcb_window_t window;
+} VkXcbSurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
+    VkInstance instance,
+    const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    xcb_connection_t* connection,
+    xcb_visualid_t visual_id);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/third_party/vulkan/vulkan_xlib.h b/src/third_party/vulkan/vulkan_xlib.h
new file mode 100644
index 0000000..c54628a
--- /dev/null
+++ b/src/third_party/vulkan/vulkan_xlib.h
@@ -0,0 +1,55 @@
+#ifndef VULKAN_XLIB_H_
+#define VULKAN_XLIB_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#define VK_KHR_xlib_surface 1
+#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
+#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
+typedef VkFlags VkXlibSurfaceCreateFlagsKHR;
+typedef struct VkXlibSurfaceCreateInfoKHR {
+    VkStructureType sType;
+    const void* pNext;
+    VkXlibSurfaceCreateFlagsKHR flags;
+    Display* dpy;
+    Window window;
+} VkXlibSurfaceCreateInfoKHR;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
+typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
+    VkInstance instance,
+    const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface);
+
+VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    Display* dpy,
+    VisualID visualID);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/third_party/vulkan/vulkan_xlib_xrandr.h b/src/third_party/vulkan/vulkan_xlib_xrandr.h
new file mode 100644
index 0000000..436432f
--- /dev/null
+++ b/src/third_party/vulkan/vulkan_xlib_xrandr.h
@@ -0,0 +1,45 @@
+#ifndef VULKAN_XLIB_XRANDR_H_
+#define VULKAN_XLIB_XRANDR_H_ 1
+
+/*
+** Copyright (c) 2015-2020 The Khronos Group Inc.
+**
+** SPDX-License-Identifier: Apache-2.0
+*/
+
+/*
+** This header is generated from the Khronos Vulkan XML API Registry.
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#define VK_EXT_acquire_xlib_display 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    VkDisplayKHR display);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    RROutput rrOutput,
+    VkDisplayKHR* pDisplay);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif